Swift 2.0 equivalent for NSOpenPanel - Cocoa

I found this Swift 1.2 tutorial to open up a panel, but it doesn't work in Swift 2.0:
@IBAction func selectFile(sender: AnyObject) {
    let openPanel = NSOpenPanel()
    openPanel.title = "Select file"
    openPanel.beginWithCompletionHandler({ (result: Int) in
        if result == NSFileHandlingPanelOKButton {
            print(openPanel.URL!)
        }
    })
}
I am getting the error "unresolved identifier NSOpenPanel"; what would be the Swift 2.0 equivalent?
I also tried creating a Cocoa class under iOS and macOS without any luck.

If you haven't, try importing AppKit:
import AppKit
You can read the Apple Docs on it.

As a bonus, here is a custom accessory view with a pop-up button:
func chooseDestFolder() -> URL? {
    // it's an OPEN panel... :)
    let dialog = NSOpenPanel()
    //dialog.title = "Choose destination folder"
    dialog.message = "Choose destination folder"
    dialog.showsResizeIndicator = true
    dialog.showsHiddenFiles = false
    dialog.canChooseDirectories = true
    dialog.canChooseFiles = false
    dialog.canCreateDirectories = true
    dialog.allowsMultipleSelection = false
    dialog.allowedFileTypes = []
    let sv = NSView(frame: NSRect(x: 0, y: 0, width: 300, height: 40))
    let menu = NSPopUpButton(radioButtonWithTitle: "AAA", target: nil, action: nil)
    menu.frame = CGRect(x: 0, y: 10, width: 100, height: 36)
    menu.addItems(withTitles: ["JPG", "PDF"])
    sv.addSubview(menu)
    dialog.accessoryView = sv
    dialog.accessoryView?.wantsLayer = true
    //dialog.accessoryView?.layer?.backgroundColor = NSColor.red.cgColor
    dialog.isAccessoryViewDisclosed = true
    if dialog.runModal() == NSApplication.ModalResponse.OK {
        let destUrl = dialog.url
        return destUrl
    } else {
        // User clicked on "Cancel"
        return nil
    }
}
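Calling it is then a one-liner, e.g. (hypothetical caller):
if let folder = chooseDestFolder() {
    print("Exporting to \(folder.path)")
}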

Method collectionView(didSelectItemAt:) doesn't work in the iTunes Connect build; however, it does work in Debug

I've updated my Xcode to the latest version, currently 10.2.1 (10E1001), and migrated my project from Swift 4 to Swift 5.
That caused me some trouble, but I finally built my project and it works correctly in the debug version on my iPhone.
After that I had a few troubles archiving my project (maybe that could be a reason).
I uploaded it to the App Store and then tried my app in TestFlight.
For some reason, some of the code in my project now behaves incorrectly.
It seems like collectionView(didSelectItemAtIndexPath:) doesn't work (but it works perfectly in Xcode), and my custom collectionView layout doesn't work either (although it also works in Debug).
It looks like the layout misbehaves, but I can't understand what the difference is between the Debug and Release versions other than the provisioning profile.
I can share more videos, code, whatever you need; I really need to resolve this issue.
I haven't found anything else like this on the web.
I took the custom layout code from here: https://codereview.stackexchange.com/questions/197017/page-and-center-uicollectionview-like-app-store
class SnapPagingLayout: UICollectionViewFlowLayout {
    private var centerPosition = true
    private var peekWidth: CGFloat = 0
    private var indexOfCellBeforeDragging = 0

    convenience init(centerPosition: Bool = true, peekWidth: CGFloat = 40, spacing: CGFloat? = nil, inset: CGFloat? = nil) {
        self.init()
        self.scrollDirection = .horizontal
        self.centerPosition = centerPosition
        self.peekWidth = peekWidth
        if let spacing = spacing {
            self.minimumLineSpacing = spacing
        }
        if let inset = inset {
            self.sectionInset = UIEdgeInsets(top: 0, left: inset, bottom: 0, right: inset)
        }
    }

    override func prepare() {
        super.prepare()
        guard let collectionView = collectionView else { return }
        self.itemSize = calculateItemSize(from: collectionView.bounds.size)
    }

    override func shouldInvalidateLayout(forBoundsChange newBounds: CGRect) -> Bool {
        guard let collectionView = collectionView,
            !newBounds.size.equalTo(collectionView.bounds.size) else {
                return false
        }
        itemSize = calculateItemSize(from: collectionView.bounds.size)
        return true
    }
}

private extension SnapPagingLayout {
    func calculateItemSize(from bounds: CGSize) -> CGSize {
        return CGSize(
            width: bounds.width - peekWidth * 2,
            height: (bounds.width - peekWidth * 2) / 1.77
        )
    }

    func indexOfMajorCell() -> Int {
        guard let collectionView = collectionView else { return 0 }
        let proportionalOffset = collectionView.contentOffset.x
            / (itemSize.width + minimumLineSpacing)
        return Int(round(proportionalOffset))
    }
}

extension SnapPagingLayout {
    func willBeginDragging() {
        indexOfCellBeforeDragging = indexOfMajorCell()
    }

    func willEndDragging(withVelocity velocity: CGPoint, targetContentOffset: UnsafeMutablePointer<CGPoint>) {
        guard let collectionView = collectionView else { return }

        // Stop scrollView sliding
        targetContentOffset.pointee = collectionView.contentOffset

        // Calculate where scrollView should snap to
        let indexOfMajorCell = self.indexOfMajorCell()

        guard let dataSourceCount = collectionView.dataSource?.collectionView(collectionView, numberOfItemsInSection: 0),
            dataSourceCount > 0 else {
                return
        }

        // Calculate conditions
        let swipeVelocityThreshold: CGFloat = 0.3 // After some trial and error
        let hasEnoughVelocityToSlideToTheNextCell = indexOfCellBeforeDragging + 1 < dataSourceCount && velocity.x > swipeVelocityThreshold
        let hasEnoughVelocityToSlideToThePreviousCell = indexOfCellBeforeDragging - 1 >= 0 && velocity.x < -swipeVelocityThreshold
        let majorCellIsTheCellBeforeDragging = indexOfMajorCell == indexOfCellBeforeDragging
        let didUseSwipeToSkipCell = majorCellIsTheCellBeforeDragging
            && (hasEnoughVelocityToSlideToTheNextCell || hasEnoughVelocityToSlideToThePreviousCell)

        guard didUseSwipeToSkipCell else {
            // Better way to scroll to a cell
            collectionView.scrollToItem(
                at: IndexPath(row: indexOfMajorCell, section: 0),
                at: centerPosition ? .centeredHorizontally : .left, // TODO: Left ignores inset
                animated: true
            )
            return
        }

        let snapToIndex = indexOfCellBeforeDragging + (hasEnoughVelocityToSlideToTheNextCell ? 1 : -1)
        var toValue = CGFloat(snapToIndex) * (itemSize.width + minimumLineSpacing)
        if centerPosition {
            // Back up a bit to center
            toValue = toValue - peekWidth + sectionInset.left
        }

        // Damping equal 1 => no oscillations => decay animation
        UIView.animate(
            withDuration: 0.3,
            delay: 0,
            usingSpringWithDamping: 1,
            initialSpringVelocity: velocity.x,
            options: .allowUserInteraction,
            animations: {
                collectionView.contentOffset = CGPoint(x: toValue, y: 0)
                collectionView.layoutIfNeeded()
            },
            completion: nil
        )
    }
}
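For context, the layout's willBeginDragging/willEndDragging hooks are presumably forwarded from the collection view's scroll delegate callbacks, along these lines (the controller and property names here are hypothetical):
extension GalleryViewController: UICollectionViewDelegate {
    func scrollViewWillBeginDragging(_ scrollView: UIScrollView) {
        (collectionView.collectionViewLayout as? SnapPagingLayout)?.willBeginDragging()
    }

    func scrollViewWillEndDragging(_ scrollView: UIScrollView,
                                   withVelocity velocity: CGPoint,
                                   targetContentOffset: UnsafeMutablePointer<CGPoint>) {
        (collectionView.collectionViewLayout as? SnapPagingLayout)?
            .willEndDragging(withVelocity: velocity, targetContentOffset: targetContentOffset)
    }
}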
I want to page and center the collection view like in the App Store, and I also want to make my didSelect method work correctly.
This is a bug in the Swift 5.0 compiler; see this reference:
https://bugs.swift.org/browse/SR-10257
Update:
Further searching found a temporary answer at this link on Stack Overflow.
You can work around it by explicitly tagging the method with @objc for now.
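A minimal sketch of that workaround, assuming the usual delegate conformance (MyViewController is a hypothetical name):
class MyViewController: UIViewController, UICollectionViewDelegate {
    @objc func collectionView(_ collectionView: UICollectionView,
                              didSelectItemAt indexPath: IndexPath) {
        // explicitly exposing the method to the ObjC runtime works around SR-10257
        print("Selected item at \(indexPath)")
    }
}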

Create video with AVVideoCompositionCoreAnimationTool and AVAssetExportSession SLOW

I create an animation with layers and I want to export a video with those animations, so I use AVAssetExportSession, but it takes a long time to export.
Maybe I can use something else? I really need help!
let videoURL = NSURL.init(fileURLWithPath: "/Users/Downloads/img_2040.mp4")
let audioURL = NSURL.init(fileURLWithPath: "/Users/Downloads/music_10sm.m4a")
let videoAsset = AVURLAsset.init(url: videoURL as URL)
let audioAsset = AVURLAsset.init(url: audioURL as URL)
let mixComposition = AVMutableComposition.init()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
// let mixCompositionAudio = AVMutableComposition.init()
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
// AVAssetTracks of the original video and audio
let originalVideoAsset = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first
let originalAudioAsset = audioAsset.tracks(withMediaType: AVMediaTypeAudio).first
do {
    try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: originalVideoAsset!, at: kCMTimeZero)
    compositionVideoTrack.preferredTransform = (videoAsset.tracks(withMediaType: AVMediaTypeVideo).first?.preferredTransform)!
    try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, audioAsset.duration), of: originalAudioAsset!, at: kCMTimeZero)
    compositionAudioTrack.preferredTransform = (audioAsset.tracks(withMediaType: AVMediaTypeAudio).first?.preferredTransform)!
    let videoSize = originalVideoAsset?.naturalSize
    let parentLayer = CALayer()
    let videoLayer = CALayer()
    parentLayer.bounds = CGRect(x: 0, y: 0, width: (videoSize?.width)!, height: (videoSize?.height)!)
    parentLayer.position = CGPoint(x: (videoSize?.width)!/2, y: (videoSize?.height)!/2)
    videoLayer.bounds = CGRect(x: 0, y: 0, width: (videoSize?.width)!, height: (videoSize?.height)!)
    videoLayer.position = CGPoint(x: (videoSize?.width)!/2 + 20, y: (videoSize?.height)!/2)
    let layerTest = CALayer()
    layerTest.bounds = CGRect(x: 0, y: 0, width: 100, height: 100)
    layerTest.backgroundColor = UIColor.green.cgColor
    parentLayer.addSublayer(videoLayer)
    parentLayer.insertSublayer(layerTest, below: videoLayer)
    // My layer with animations
    // (the declaration of containerLayer was missing from the original snippet; assumed here)
    let containerLayer = CALayer()
    containerLayer.frame = parentLayer.bounds
    let cubeLayer = cubeAnimation(videoSize: containerLayer.frame.size, isVideo: true)
    containerLayer.addSublayer(cubeLayer)
    parentLayer.addSublayer(containerLayer)
    parentLayer.isGeometryFlipped = true
    let videoComposition = AVMutableVideoComposition.init()
    videoComposition.renderSize = videoSize!
    videoComposition.frameDuration = CMTimeMake(1, 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool.init(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
    // Instruction
    let instruction = AVMutableVideoCompositionInstruction.init()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration) // TEST: change this duration
    // Video
    let videoTrack = mixComposition.tracks(withMediaType: AVMediaTypeVideo).first
    let layerInstructions = AVMutableVideoCompositionLayerInstruction.init(assetTrack: videoTrack!)
    instruction.layerInstructions = [layerInstructions]
    videoComposition.instructions = [instruction]
    let assetExport = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    assetExport?.videoComposition = videoComposition
    let exportPath = "/Users/CarolinaAitcin/Downloads/Test_ScrollBest91.mp4"
    let exportUrl = URL.init(fileURLWithPath: exportPath)
    assetExport?.outputFileType = AVFileTypeQuickTimeMovie
    assetExport?.outputURL = exportUrl
    assetExport?.shouldOptimizeForNetworkUse = true
    assetExport?.exportAsynchronously {
        print("Finish video")
        print(NSDate())
    }
    // Timer.schedule(repeatInterval:handler:) presumably comes from a helper extension, not Foundation
    Timer.schedule(repeatInterval: 1, handler: { (runTime) in
        let progress = assetExport?.progress
        print(progress)
    })
} catch {
    print("we have problem")
}
When I test the export on a device the time decreases a lot: it only takes 20 seconds, while the simulator takes almost 2.5 minutes.
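As an aside, since Timer.schedule(repeatInterval:handler:) in the snippet above appears to be a helper extension rather than Foundation API, the same progress polling could be sketched with plain Foundation (iOS 10+) like this:
Timer.scheduledTimer(withTimeInterval: 1, repeats: true) { timer in
    guard let export = assetExport else {
        timer.invalidate()
        return
    }
    print("export progress: \(export.progress)")
    if export.status == .completed || export.status == .failed || export.status == .cancelled {
        timer.invalidate() // stop polling once the session finishes
    }
}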

Trouble with collision detection in Swift 2.0 / SpriteKit

I am building a game for iOS for the first time and am very close. I had collision detection working early on, but then I changed something and it stopped working, and I haven't been able to get it back. This is what I have so far, but nothing is working. I have tried different versions of collision detection that I found online, but I am sticking with this one for now. I just don't understand where I went wrong.
Thank you in advance.
import SpriteKit

enum ColliderType: UInt32 {
    case Player = 1
    case Traffic = 2
}

class GameScene: SKScene, SKPhysicsContactDelegate {
    // `player` is assumed to be an SKSpriteNode property declared elsewhere in the class
    override func didMoveToView(view: SKView) {
        createWalls()
        self.backgroundColor = SKColor.whiteColor()
        self.physicsWorld.gravity = CGVectorMake(0, 0)
        player.physicsBody = SKPhysicsBody(rectangleOfSize: player.size)
        player.physicsBody?.dynamic = true
        player.physicsBody!.categoryBitMask = ColliderType.Player.rawValue
        player.physicsBody!.contactTestBitMask = ColliderType.Traffic.rawValue
        player.zPosition = 3
        player.name = "player"
        player.position = CGPoint(x: self.frame.midX, y: (self.frame.midY)/3)
        player.setScale(1.0)
        self.addChild(player)
        _ = NSTimer.scheduledTimerWithTimeInterval(2.5, target: self, selector: #selector(GameScene.makeTraffic), userInfo: nil, repeats: true)
    }

    func makeTraffic() {
        var aNumber = Int(arc4random_uniform(2))
        let Pos1 = Int(self.frame.midX/2)+30
        let Pos2 = Int((self.frame.size.width)/2)
        let Pos3 = Int(self.frame.size.width-300)
        let array = [Pos1, Pos2, Pos3]
        let randomIndex = Int(arc4random_uniform(UInt32(array.count)))
        let randomPOS = CGPoint(x: Int(array[randomIndex]), y: Int(self.frame.height))
        if aNumber == 0 {
            aNumber = aNumber + 1
        }
        switch aNumber {
        case 1:
            let car1 = SKSpriteNode(imageNamed: "Car_Green_Front")
            car1.position = randomPOS
            car1.zPosition = 3
            car1.setScale(1.0)
            car1.physicsBody = SKPhysicsBody(rectangleOfSize: car1.size)
            car1.physicsBody?.dynamic = true
            car1.physicsBody!.categoryBitMask = ColliderType.Player.rawValue
            car1.physicsBody!.contactTestBitMask = ColliderType.Traffic.rawValue
            self.addChild(car1)
        case 2:
            let car2 = SKSpriteNode(imageNamed: "Car_Purple_Front")
            car2.position = randomPOS
            car2.zPosition = 3
            car2.setScale(1.0)
            car2.physicsBody = SKPhysicsBody(rectangleOfSize: car2.size)
            car2.physicsBody?.dynamic = true
            car2.physicsBody!.categoryBitMask = ColliderType.Traffic.rawValue
            car2.physicsBody!.contactTestBitMask = ColliderType.Player.rawValue
            self.addChild(car2)
        default:
            return
        }
    }

    func createWalls() {
        let wallSize = CGSize(width: 5, height: self.frame.size.height)
        let rightwall = SKShapeNode(rectOfSize: wallSize)
        rightwall.physicsBody = SKPhysicsBody(rectangleOfSize: wallSize)
        rightwall.physicsBody!.dynamic = false
        rightwall.position = CGPoint(x: self.frame.maxX-300, y: self.frame.size.height/2)
        rightwall.fillColor = UIColor.clearColor()
        self.addChild(rightwall)
        let leftwall = SKShapeNode(rectOfSize: wallSize)
        leftwall.physicsBody = SKPhysicsBody(rectangleOfSize: wallSize)
        leftwall.physicsBody!.dynamic = false
        leftwall.position = CGPoint(x: self.frame.minX+300, y: self.frame.size.height/2)
        leftwall.fillColor = UIColor.clearColor()
        self.addChild(leftwall)
    }

    func didBeginContact(contact: SKPhysicsContact) {
        print("Contact")
        if contact.bodyA.categoryBitMask == ColliderType.Traffic.rawValue && contact.bodyB.categoryBitMask == ColliderType.Player.rawValue {
            print("Hi")
        } else {
            print("Hello")
        }
    }
}
Your code seems alright, although there are some small changes I would make.
1) I would write my collider types like so:
struct ColliderType {
    static let player: UInt32 = 0x1 << 0
    static let traffic: UInt32 = 0x1 << 1
}
because this way you only need to increment the bit shift by 1.
With your way, if you decide to add more categories, the next one would have to be 4, then 8, then 16 etc., which is more confusing (you are dealing with 32-bit integers).
Then use it like so:
...categoryBitMask = ColliderType.player
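For instance, extending the struct above with a hypothetical third wall category only means shifting one more bit:
struct ColliderType {
    static let player:  UInt32 = 0x1 << 0 // 1
    static let traffic: UInt32 = 0x1 << 1 // 2
    static let wall:    UInt32 = 0x1 << 2 // 4
}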
2) It is recommended that you give the sprite its position before adding the physicsBody. You are doing it the other way round, which could cause unexpected issues.
3) Change your collision method to this
func didBeginContact(contact: SKPhysicsContact) {
    var firstBody: SKPhysicsBody
    var secondBody: SKPhysicsBody
    if contact.bodyA.categoryBitMask < contact.bodyB.categoryBitMask {
        firstBody = contact.bodyA
        secondBody = contact.bodyB
    } else {
        firstBody = contact.bodyB
        secondBody = contact.bodyA
    }
    if (firstBody.categoryBitMask == ColliderType.player) && (secondBody.categoryBitMask == ColliderType.traffic) {
        // player hit traffic, do something
    }
}
4) Finally, the most important part: you need to set the contact delegate, which I couldn't see in your code.
Call this in didMoveToView:
physicsWorld.contactDelegate = self
otherwise the didBeginContact method will never fire.
Also, it is a good idea to follow Apple's naming conventions: only classes, protocols, enums and structs should start with capital letters.
Hope this helps.
A cleaner way to code didBeginContact is:
func didBeginContact(contact: SKPhysicsContact) {
    let contactMask = contact.bodyA.categoryBitMask | contact.bodyB.categoryBitMask
    switch contactMask {
    case ColliderType.player | ColliderType.traffic:
        // player and traffic have contacted
        print("Collision between player and traffic")
    default:
        // Some other contact has occurred
        print("Some other contact")
    }
}
You can add as many case ColliderType.object1 | ColliderType.object2: as you like.

Building a project from a tutorial in Xcode 7 / Swift 2, encountering unresolved identifier

I am following a tutorial written in Swift 2 for Xcode 7, part 1 of which is here (you can navigate to part IV, where my issue came up): http://www.mav3r1ck.io/spritekit-with-swift/
I am using my own sprites in place of those in the tutorial. When I run my code, an error appears on the first line of the following:
let spawnRandomHead = SKAction.runBlock(spawnHead)
let waitTime = SKAction.waitForDuration(1.0)
let sequence = SKAction.sequence([spawnRandomHead,waitTime])
runAction(SKAction.repeatActionForever(sequence))
The full code is here:
import SpriteKit

class GameScene: SKScene, SKPhysicsContactDelegate {
    enum bitMask: UInt32 {
        case defender = 1
        case head = 2
        case frame = 4
    }

    let defender = SKSpriteNode(imageNamed: "Ivanovic is a boss")

    override func didMoveToView(view: SKView) {
        /* Setup your scene here */
        backgroundColor = UIColor.blueColor()
        defender.position = CGPoint(x: frame.size.width / 2, y: frame.size.height / 2)
        defender.physicsBody = SKPhysicsBody(texture: defender.texture!, size: defender.frame.size)
        defender.physicsBody?.dynamic = false
        defender.physicsBody?.affectedByGravity = false
        defender.physicsBody?.allowsRotation = false
        defender.physicsBody?.categoryBitMask = bitMask.head.rawValue
        defender.physicsBody?.contactTestBitMask = bitMask.head.rawValue
        defender.physicsBody?.collisionBitMask = 0
        addChild(defender)
        let spawnRandomHead = SKAction.runBlock(spawnHead)
        let waitTime = SKAction.waitForDuration(1.0)
        let sequence = SKAction.sequence([spawnRandomHead,waitTime])
        runAction(SKAction.repeatActionForever(sequence))
        physicsWorld.contactDelegate = self
        physicsWorld.gravity = CGVectorMake(0.0, -0.9)
        defender.physicsBody?.contactTestBitMask = bitMask.frame.rawValue
        defender.physicsBody?.collisionBitMask = bitMask.frame.rawValue
    }

    override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
        let touch = touches.first! as UITouch
        let touchLocation = touch.locationInNode(self)
        //print(touchLocation)
        let moveTo = SKAction.moveTo(touchLocation, duration: 1.0)
        defender.runAction(moveTo)

        func randomNumber(min min: CGFloat, max: CGFloat) -> CGFloat {
            let random = CGFloat(Float(arc4random()) / 0xFFFFFFFF)
            return random * (max - min) + min
        }

        func spawnHead() {
            let head = SKSpriteNode(imageNamed: "The Biter Strikes")
            head.position = CGPoint(x: frame.size.width * randomNumber(min: 0, max: 1), y: frame.size.height + head.size.height)
            head.physicsBody = SKPhysicsBody(texture: head.texture!, size: head.frame.size)
            head.physicsBody?.categoryBitMask = bitMask.head.rawValue
            head.physicsBody?.contactTestBitMask = bitMask.defender.rawValue
            addChild(head)
        }

        func didBeginContact(contact: SKPhysicsContact) {
            let contactMask = contact.bodyA.categoryBitMask | contact.bodyB.categoryBitMask
            switch(contactMask) {
            case bitMask.defender.rawValue | bitMask.head.rawValue:
                let secondNode = contact.bodyB.node
                secondNode?.physicsBody?.allowsRotation = true
                let firstNode = contact.bodyA.node
                firstNode?.physicsBody?.allowsRotation = true
                firstNode?.removeFromParent()
            default:
                return
            }
        }
    }
}
I have tried cleaning & rebuilding, restarting Xcode, and moving sections of the code around, but the error does not go away. I appreciate your support!
Hmm. I tried both. Now, with this:
let spawnRandomHead = SKAction.runBlock({ [unowned self] () -> Void in
    self.spawnHead()
})
let waitTime = SKAction.waitForDuration(1.0)
let sequence = SKAction.sequence([spawnRandomHead,waitTime])
runAction(SKAction.repeatActionForever(sequence))
a new error pops up on the second line saying "Value of type 'GameScene' has no member 'spawnHead'".
runBlock requires a closure as an argument, so replace
let spawnRandomHead = SKAction.runBlock(spawnHead)
with
let spawnRandomHead = SKAction.runBlock({ [unowned self] () -> Void in
    self.spawnHead()
})
or simply
let spawnRandomHead = SKAction.runBlock { [unowned self] in
    self.spawnHead()
}
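As for the follow-up error: "Value of type 'GameScene' has no member 'spawnHead'" appears to come from the full listing above, where spawnHead (and didBeginContact) are declared inside touchesBegan rather than at class scope; moving them up to GameScene should let the closure version compile. For reference, Swift 3 later renamed these APIs, so the same line would read:
let spawnRandomHead = SKAction.run { [unowned self] in
    self.spawnHead()
}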

How would I put together a video using AVAssetWriter in Swift?

I'm currently making a small app that time-lapses the webcam on my Mac, saves each captured frame as a PNG, and I am looking into exporting the captured frames as a single video.
I use CGImage to handle the original images and have them in an array, but I'm unsure where to go from there. I gather from my own research that I have to use AVAssetWriter and AVAssetWriterInput somehow.
I've had a look around on here, read the Apple docs and searched Google, but all the guides etc. are in Obj-C rather than Swift, which is making it really difficult to understand (as I have no experience in Obj-C).
Any help would be very much appreciated.
Many thanks,
Luke.
I solved the same problem in Swift. Starting from an array of UIImage, try this (it's a little long :-) but it works):
var choosenPhotos: [UIImage] = [] // your array of UIImages
var outputSize = CGSizeMake(1280, 720)

func build(outputSize outputSize: CGSize) {
    let fileManager = NSFileManager.defaultManager()
    let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
    guard let documentDirectory: NSURL = urls.first else {
        fatalError("documentDir Error")
    }
    let videoOutputURL = documentDirectory.URLByAppendingPathComponent("OutputVideo.mp4")
    if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
        do {
            try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
        } catch {
            fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
        }
    }
    guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4) else {
        fatalError("AVAssetWriter error")
    }
    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
        fatalError("Negative : Can't apply the Output settings...")
    }
    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    if videoWriter.canAddInput(videoWriterInput) {
        videoWriter.addInput(videoWriterInput)
    }
    if videoWriter.startWriting() {
        videoWriter.startSessionAtSourceTime(kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        let media_queue = dispatch_queue_create("mediaInputQueue", nil)
        videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
            let fps: Int32 = 1
            let frameDuration = CMTimeMake(1, fps)
            var frameCount: Int64 = 0
            var appendSucceeded = true
            while (!self.choosenPhotos.isEmpty) {
                if (videoWriterInput.readyForMoreMediaData) {
                    let nextPhoto = self.choosenPhotos.removeAtIndex(0)
                    let lastFrameTime = CMTimeMake(frameCount, fps)
                    let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                    var pixelBuffer: CVPixelBuffer? = nil
                    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                    if let pixelBuffer = pixelBuffer where status == 0 {
                        let managedPixelBuffer = pixelBuffer
                        CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
                        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                        let context = CGBitmapContextCreate(data, Int(self.outputSize.width), Int(self.outputSize.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
                        CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.outputSize.width), CGFloat(self.outputSize.height)))
                        let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
                        let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
                        //aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
                        let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                        let newSize: CGSize = CGSizeMake(nextPhoto.size.width * aspectRatio, nextPhoto.size.height * aspectRatio)
                        let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
                        let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
                        CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), nextPhoto.CGImage)
                        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
                        appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
                    } else {
                        print("Failed to allocate pixel buffer")
                        appendSucceeded = false
                    }
                }
                if !appendSucceeded {
                    break
                }
                frameCount++
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWritingWithCompletionHandler { () -> Void in
                print("FINISHED!!!!!")
            }
        })
    }
}
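Usage is then just a matter of filling the array and calling build (savedFrames here is a hypothetical stand-in for your captured images):
choosenPhotos = savedFrames   // your previously captured frames
build(outputSize: outputSize) // writes Documents/OutputVideo.mp4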
Following is code to generate a video from images, working in Xcode 11.3.1 and Swift 5.1. This code is adapted from @aleciufs's Sep 25 '15 answer above. The function assumes the images are loaded and available in var images: [UIImage].
func build(outputSize: CGSize) {
    let fileManager = FileManager.default
    let urls = fileManager.urls(for: .cachesDirectory, in: .userDomainMask)
    guard let documentDirectory = urls.first else {
        fatalError("documentDir Error")
    }
    let videoOutputURL = documentDirectory.appendingPathComponent("OutputVideo.mp4")
    if FileManager.default.fileExists(atPath: videoOutputURL.path) {
        do {
            try FileManager.default.removeItem(atPath: videoOutputURL.path)
        } catch {
            fatalError("Unable to delete file: \(error) : \(#function).")
        }
    }
    guard let videoWriter = try? AVAssetWriter(outputURL: videoOutputURL, fileType: AVFileType.mp4) else {
        fatalError("AVAssetWriter error")
    }
    let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
    guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
        fatalError("Negative : Can't apply the Output settings...")
    }
    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB),
        kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)),
        kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))
    ]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    if videoWriter.canAdd(videoWriterInput) {
        videoWriter.add(videoWriterInput)
    }
    if videoWriter.startWriting() {
        videoWriter.startSession(atSourceTime: CMTime.zero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        let media_queue = DispatchQueue(label: "mediaInputQueue") // plain label init; the deprecated __label: bridge is not needed
        videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
            let fps: Int32 = 2
            let frameDuration = CMTimeMake(value: 1, timescale: fps)
            var frameCount: Int64 = 0
            var appendSucceeded = true
            while (!self.images.isEmpty) {
                if (videoWriterInput.isReadyForMoreMediaData) {
                    let nextPhoto = self.images.remove(at: 0)
                    let lastFrameTime = CMTimeMake(value: frameCount, timescale: fps)
                    let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                    var pixelBuffer: CVPixelBuffer? = nil
                    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                    if let pixelBuffer = pixelBuffer, status == 0 {
                        let managedPixelBuffer = pixelBuffer
                        CVPixelBufferLockBaseAddress(managedPixelBuffer, [])
                        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                        let context = CGContext(data: data, width: Int(outputSize.width), height: Int(outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                        context?.clear(CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height))
                        let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
                        let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
                        let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                        let newSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
                        let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
                        let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
                        context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, [])
                        appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                    } else {
                        print("Failed to allocate pixel buffer")
                        appendSucceeded = false
                    }
                }
                if !appendSucceeded {
                    break
                }
                frameCount += 1
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWriting { () -> Void in
                print("FINISHED!!!!!")
                self.saveVideoToLibrary(videoURL: videoOutputURL)
            }
        })
    }
}
The extra function I provide is:
func saveVideoToLibrary(videoURL: URL) {
    // requires `import Photos` and photo library add permission
    PHPhotoLibrary.shared().performChanges({
        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
    }) { saved, error in
        if let error = error {
            print("Error saving video to library: \(error.localizedDescription)")
        }
        if saved {
            print("Video saved to library")
        }
    }
}
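A hypothetical call site, assuming the frames are already loaded into images (the builder also needs import AVFoundation, import UIKit and import Photos):
images = capturedFrames                             // hypothetical source of your captured frames
build(outputSize: CGSize(width: 1280, height: 720)) // writes Caches/OutputVideo.mp4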
