I am trying to capture just the face area.
Here is what I do: in didOutputMetadataObjects: I get the AVMetadataFaceObject, and then process it in didOutputSampleBuffer:.
didOutputMetadataObjects: draws the face marker correctly, where I take the yaw and roll axes into account.
What would be the best way to get only the face area while still showing the face marker?
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection
{
    for (AVMetadataObject *metaObject in metadataObjects) {
        if ([metaObject isKindOfClass:[AVMetadataFaceObject class]] && metaObject.type == AVMetadataObjectTypeFace) {
            AVMetadataFaceObject *adjustedMeta = (AVMetadataFaceObject *)[self.videoLayer transformedMetadataObjectForMetadataObject:metaObject];
            self.metaFaceObject = adjustedMeta;
            // Draw the face marker here
        }
    }
}
And here is the AVCaptureVideoDataOutputSampleBufferDelegate callback:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    if (pixelBuffer) {
        CFDictionaryRef attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate);
        CIImage *ciImage = [[CIImage alloc] initWithCVPixelBuffer:pixelBuffer options:(__bridge NSDictionary<NSString *, id> * _Nullable)(attachments)];
        if (attachments) {
            CFRelease(attachments); // we own the copied dictionary
        }
        ciImage = [ciImage imageByCroppingToRect:self.metaFaceObject.bounds];
        // Problem: this image is upside down, and it does not contain the face.
        UIImage *image = [UIImage imageWithCIImage:ciImage];
    }
}
Hello, here are some suggestions:
1. Add an AVCaptureStillImageOutput:
lazy var stillImageOutPut: AVCaptureStillImageOutput = {
    let imageOutPut = AVCaptureStillImageOutput()
    return imageOutPut
}()
2. Add it to the session:
if session.canAddOutput(stillImageOutPut) {
    session.addOutput(stillImageOutPut)
}
3. Then implement this delegate function:
// MARK: AVCaptureMetadataOutputObjectsDelegate
extension ZHFaceDetectorViewController: AVCaptureMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        printLog(Thread.current)
        let metadataObject = metadataObjects.first
        if let object = metadataObject {
            // Capture only once per detected face
            if !hasDetectorFace {
                if object.type == AVMetadataObject.ObjectType.face {
                    hasDetectorFace = true
                    DispatchQueue.global().async {
                        if let stillImageConnection = self.stillImageOutPut.connection(with: AVMediaType.video) {
                            printLog(stillImageConnection)
                            printLog(connection)
                            stillImageConnection.videoOrientation = AVCaptureVideoOrientation(rawValue: UIDevice.current.orientation.rawValue)!
                            /// Prepare settings. If this is not set, the screen flashes white when the photo is captured.
                            let settings = AVCaptureAutoExposureBracketedStillImageSettings.autoExposureSettings(exposureTargetBias: AVCaptureDevice.currentExposureTargetBias)
                            /// Begin capture
                            self.stillImageOutPut.prepareToCaptureStillImageBracket(from: stillImageConnection, withSettingsArray: [settings], completionHandler: { (complete, error) in
                                if error == nil {
                                    self.stillImageOutPut.captureStillImageAsynchronously(from: stillImageConnection, completionHandler: { (imageDataSampleBuffer, error) in
                                        printLog(imageDataSampleBuffer)
                                        printLog(error)
                                        if error == nil {
                                            if let sampleBuffer = imageDataSampleBuffer {
                                                if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer) {
                                                    if let image = UIImage(data: imageData) {
                                                        /// Operate on your image
                                                        printLog(image)
                                                    }
                                                }
                                            }
                                        } else {
                                            printLog("something went wrong")
                                        }
                                    })
                                }
                            })
                        }
                    }
                }
            }
        }
    }
}
4. Then I get my picture (the log output confirms the captured image).
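One caveat: AVCaptureStillImageOutput is deprecated as of iOS 10. If you target newer systems, the same capture-on-face-detection flow can be built on AVCapturePhotoOutput. A rough sketch follows (the helper class and its wiring are illustrative only, not part of the code above; AVCapturePhoto requires iOS 11+):
import AVFoundation
import UIKit

final class PhotoCaptureHelper: NSObject, AVCapturePhotoCaptureDelegate {
    let photoOutput = AVCapturePhotoOutput()

    func attach(to session: AVCaptureSession) {
        if session.canAddOutput(photoOutput) {
            session.addOutput(photoOutput)
        }
    }

    /// Call this from the metadata delegate once a face has been detected.
    func captureFacePhoto() {
        photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        guard error == nil,
              let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else { return }
        print(image) // operate on your image here
    }
}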
I have created a project using the ARKit and SceneKit frameworks, in which I work with .dae files that are bundled locally in my project.
I have applied several gestures to this virtual object, such as a tap gesture (when I tap the camera screen, it places the virtual object there), and likewise pinch and pan gestures. All of these gestures work perfectly fine. Now I want to add a rotation gesture, but I am stuck on how to do that and have not found any useful resources for it.
Below is my working code so far:
import UIKit
import SceneKit
import ARKit
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
private var movedObject: SCNNode?
private var hud: MBProgressHUD!
override func viewDidLoad() {
super.viewDidLoad()
self.sceneView.autoenablesDefaultLighting = true
sceneView.delegate = self
sceneView.showsStatistics = true
let scene = SCNScene()
sceneView.scene = scene
registerGestureRecognizers()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
// Run the view's session
sceneView.session.run(configuration)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
// Pause the view's session
sceneView.session.pause()
}
private func registerGestureRecognizers() {
let tapGestureRecognizer = UITapGestureRecognizer(target: self, action: #selector(tapped(recognizer:)))
tapGestureRecognizer.numberOfTapsRequired = 1
self.sceneView.addGestureRecognizer(tapGestureRecognizer)
let pinchGestureRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinched(recognizer:)))
self.sceneView.addGestureRecognizer(pinchGestureRecognizer)
let panGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(moveObject(recognizer:)))
panGestureRecognizer.maximumNumberOfTouches = 1
panGestureRecognizer.minimumNumberOfTouches = 1
self.sceneView.addGestureRecognizer(panGestureRecognizer)
let rotationGestureRecognizer = UIRotationGestureRecognizer(target: self, action: #selector(rotateObject(recognizer:)))
self.sceneView.addGestureRecognizer(rotationGestureRecognizer)
}
@objc func pinched(recognizer: UIPinchGestureRecognizer) {
if recognizer.state == .changed {
guard let sceneView = recognizer.view as? ARSCNView else {
return
}
let touch = recognizer.location(in: sceneView)
let hitTestResults = self.sceneView.hitTest(touch, options: nil)
if let hitTest = hitTestResults.first {
let chairNode = hitTest.node
let pinchScaleX = Float(recognizer.scale) * chairNode.scale.x
let pinchScaleY = Float(recognizer.scale) * chairNode.scale.y
let pinchScaleZ = Float(recognizer.scale) * chairNode.scale.z
chairNode.scale = SCNVector3(pinchScaleX,pinchScaleY,pinchScaleZ)
recognizer.scale = 1
}
}
}
@objc func moveObject(recognizer: UIPanGestureRecognizer) {
print("Move object")
if recognizer.state == .began {
print("Pan state began")
let tapPoint: CGPoint? = recognizer.location(in: sceneView)
let result = sceneView.hitTest(tapPoint ?? CGPoint.zero, options: nil)
if result.count == 0 {
return
}
let hitResult: SCNHitTestResult? = result.first
if (hitResult?.node.name == "free_car_1") {
movedObject = hitResult?.node
} else if (hitResult?.node.parent?.name == "free_car_1") {
movedObject = hitResult?.node.parent
}
if (movedObject != nil) {
print("Holding an Object")
}
}
if recognizer.state == .changed {
print("Pan State Changed")
if (movedObject != nil) {
let tapPoint: CGPoint? = recognizer.location(in: sceneView)
let hitResults = sceneView.hitTest(tapPoint ?? CGPoint.zero, types: .featurePoint)
let result: ARHitTestResult? = hitResults.last
let matrix: SCNMatrix4 = SCNMatrix4((result?.worldTransform)!)
//SCNMatrix4FromMat4((result?.worldTransform)!)
let vector: SCNVector3 = SCNVector3Make(matrix.m41, matrix.m42, matrix.m43)
movedObject?.position = vector
print("Moving object position")
}
}
if recognizer.state == .ended {
print("Done moving object homeie")
movedObject = nil
}
}
@objc func tapped(recognizer: UITapGestureRecognizer) {
guard let sceneView = recognizer.view as? ARSCNView else {
return
}
let touch = recognizer.location(in: sceneView)
let hitTestResults = sceneView.hitTest(touch)
guard let hitTest = hitTestResults.first?.node else {
let hitTestResultsWithExistingPlane = sceneView.hitTest(touch, types: .existingPlane)
let chairScene = SCNScene(named: "ShelbyWD.dae")!
guard let chairNode = chairScene.rootNode.childNode(withName: "ShelbyWD", recursively: true) else {
return
}
if let hitTestAvailable = hitTestResultsWithExistingPlane.first {
chairNode.position = SCNVector3(hitTestAvailable.worldTransform.columns.3.x,hitTestAvailable.worldTransform.columns.3.y,hitTestAvailable.worldTransform.columns.3.z)
self.sceneView.scene.rootNode.addChildNode(chairNode)
return
}
return
}
hitTest.removeFromParentNode()
}
@objc func rotateObject(recognizer: UIRotationGestureRecognizer) {
    // This is where I am stuck.
}
}
Can anyone help me apply a rotation gesture to my object?
Thank you!
In order to rotate an SCNNode, the first thing you need to do is create a variable to store the rotation angle around the y-axis (or whichever axis you wish to perform the rotation on), e.g.:
var currentAngleY: Float = 0.0
Then have some way of detecting the node you wish to rotate, which in my example I am calling currentNode, e.g.:
var currentNode: SCNNode!
In my example I will just rotate around the YAxis.
You can use a UIPanGestureRecognizer like so:
/// Rotates An Object On Its YAxis
///
/// - Parameter gesture: UIPanGestureRecognizer
@objc func rotateObject(_ gesture: UIPanGestureRecognizer) {
    guard let nodeToRotate = currentNode else { return }
    let translation = gesture.translation(in: gesture.view!)
    var newAngleY = Float(translation.x) * Float(Double.pi) / 180.0
    newAngleY += currentAngleY
    nodeToRotate.eulerAngles.y = newAngleY
    if gesture.state == .ended { currentAngleY = newAngleY }
    print(nodeToRotate.eulerAngles)
}
Or if you wish to use a UIRotationGestureRecognizer, you can do something like this:
/// Rotates An SCNNode Around Its YAxis
///
/// - Parameter gesture: UIRotationGestureRecognizer
@objc func rotateNode(_ gesture: UIRotationGestureRecognizer) {
    //1. Get The Current Rotation From The Gesture
    let rotation = Float(gesture.rotation)
    //2. If The Gesture State Has Changed Set The Node's EulerAngles.y
    if gesture.state == .changed {
        isRotating = true
        currentNode.eulerAngles.y = currentAngleY + rotation
    }
    //3. If The Gesture Has Ended Store The Last Angle Of The Node
    if gesture.state == .ended {
        currentAngleY = currentNode.eulerAngles.y
        isRotating = false
    }
}
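Both snippets assume that currentNode (and the isRotating flag) are set up elsewhere. As a minimal sketch, assuming you also attach a UITapGestureRecognizer to the ARSCNView, currentNode could be assigned from a SceneKit hit test like this:
var isRotating = false

@objc func selectNode(_ gesture: UITapGestureRecognizer) {
    guard let sceneView = gesture.view as? ARSCNView else { return }
    let location = gesture.location(in: sceneView)
    // Remember the tapped node and its current angle so the rotation
    // handlers above know which node to rotate and where it started.
    if let hit = sceneView.hitTest(location, options: nil).first {
        currentNode = hit.node
        currentAngleY = hit.node.eulerAngles.y
    }
}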
Hope it helps...
I'm developing an app that has a UICollectionView like the zhangao0086/DKImagePickerController example on GitHub. Now I need to upload the images displayed in the UICollectionViewCells to a server. Can anyone suggest the right tutorial for uploading? Thanks in advance.
The UICollectionView code is as follows:
func collectionView(collectionView: UICollectionView, cellForItemAtIndexPath indexPath: NSIndexPath) -> UICollectionViewCell {
let asset = self.assets![indexPath.row]
var cell: UICollectionViewCell?
var imageView: UIImageView?
if asset.isVideo {
cell = collectionView.dequeueReusableCellWithReuseIdentifier("CellVideo", forIndexPath: indexPath)
imageView = cell?.contentView.viewWithTag(1) as? UIImageView
} else {
cell = collectionView.dequeueReusableCellWithReuseIdentifier("CellImage", forIndexPath: indexPath)
imageView = cell?.contentView.viewWithTag(1) as? UIImageView
}
if let cell = cell, imageView = imageView {
let layout = collectionView.collectionViewLayout as! UICollectionViewFlowLayout
let tag = indexPath.row + 1
cell.tag = tag
asset.fetchImageWithSize(layout.itemSize.toPixel(), completeBlock: { image, info in
if cell.tag == tag {
imageView.image = image
}
})
}
return cell!
}
Uploading the image to the server:
func barButtonItemClicked(barButtonItem: UIBarButtonItem)
{
let myUrl = NSURL(string: "http://moneymonkey.tokiiyo.com/api/signature");
let typeItem: InsuranceType = InsuranceManager.sharedInstance.TypeArray[0]
let compItem: Companies = InsuranceManager.sharedInstance.CompArray[0]
let request = NSMutableURLRequest(URL:myUrl!);
request.HTTPMethod = "POST";
let param = [
"api_key" : "AiK58j67",
"api_secret" : "a#9rJkmbOea90-",
"phone" : "\(mobile)",
"policy_type" : "\(typeItem.name)",
"company" : "\(compItem.cname)"
]
print("Policy_type: \(typeItem.name)")
let boundary = generateBoundaryString()
request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
let imageData = UIImagePNGRepresentation(?) // here — which imageView should I pass?
if(imageData==nil) { return; }
request.HTTPBody = createBodyWithParameters(param, filePathKey: "file", imageDataKey: imageData!, boundary: boundary)
let task = NSURLSession.sharedSession().dataTaskWithRequest(request) {
data, response, error in
if error != nil {
print("error=\(error)")
return
}
// You can print out response object
print("******* response = \(response)")
// Print out response body
let responseString = NSString(data: data!, encoding: NSUTF8StringEncoding)
print("****** response data = \(responseString!)")
do{
_ = try NSJSONSerialization.JSONObjectWithData(data!, options: .MutableContainers) as? NSDictionary
dispatch_async(dispatch_get_main_queue(),{
});
}
catch
{
// report error
print("Oops!! Something went wrong\(error)")
}
}
task.resume()
}
func createBodyWithParameters(parameters: [String: String]?, filePathKey: String?, imageDataKey: NSData, boundary: String) -> NSData {
let body = NSMutableData();
if parameters != nil {
for (key, value) in parameters! {
body.appendString("--\(boundary)\r\n")
body.appendString("Content-Disposition: form-data; name=\"\(key)\"\r\n\r\n")
body.appendString("\(value)\r\n")
}
}
let filename = "image.png"
let mimetype = "image/png"
body.appendString("--\(boundary)\r\n")
body.appendString("Content-Disposition: form-data; name=\"\(filePathKey!)\"; filename=\"\(filename)\"\r\n")
body.appendString("Content-Type: \(mimetype)\r\n\r\n")
body.appendData(imageDataKey)
body.appendString("\r\n")
body.appendString("--\(boundary)--\r\n")
return body
}
func generateBoundaryString() -> String {
return "Boundary-\(NSUUID().UUIDString)"
}
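For completeness: the code above relies on an appendString helper on NSMutableData, which is not part of Foundation. A minimal version (assuming UTF-8 encoding) looks like this:
extension NSMutableData {
    /// Appends a UTF-8 encoded string to the receiver.
    func appendString(string: String) {
        if let data = string.dataUsingEncoding(NSUTF8StringEncoding, allowLossyConversion: true) {
            appendData(data)
        }
    }
}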
You can use Alamofire: https://github.com/Alamofire/Alamofire
Use it like this:
Alamofire.upload(.POST, "YourURl", file: YourFile)
    .progress { bytesWritten, totalBytesWritten, totalBytesExpectedToWrite in
        print(totalBytesWritten)
        // This closure is NOT called on the main queue for performance
        // reasons. To update your UI, dispatch to the main queue.
        dispatch_async(dispatch_get_main_queue()) {
            print("Total bytes written on main queue: \(totalBytesWritten)")
        }
    }
    .responseJSON { response in
        debugPrint(response)
    }
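Since the request in the question is multipart/form-data, here is a hedged sketch of the same upload with Alamofire 5's multipart API (the URL and field names come from the question; the helper function and its parameters are illustrative):
import Alamofire

// Hypothetical helper: `imageData` is whatever PNG/JPEG data you obtained
// for the selected asset, `parameters` are the form fields from the question.
func uploadImage(imageData: Data, parameters: [String: String]) {
    AF.upload(multipartFormData: { form in
        for (key, value) in parameters {
            form.append(Data(value.utf8), withName: key)
        }
        form.append(imageData,
                    withName: "file",
                    fileName: "image.png",
                    mimeType: "image/png")
    }, to: "http://moneymonkey.tokiiyo.com/api/signature")
    .uploadProgress { progress in
        print("Upload progress: \(progress.fractionCompleted)")
    }
    .responseData { response in
        debugPrint(response)
    }
}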
Hi, I am trying to add a UIPanGestureRecognizer to a UIImageView (in my case, it's an emoji). All the other UIGestureRecognizers, such as long press, rotation, and pinch, work well. However, I get an "unrecognized selector sent to instance" error when I add the UIPanGestureRecognizer. I've spent a day trying to figure out the reason but failed to fix it. Please help! Thanks in advance.
This is the function where I add the gesture recognizers to the sticker:
func emojiInsert(imageName: String) {
deleteButtonHides()
let stickerView: UIImageView = UIImageView(frame: CGRectMake(backgroundImage.frame.width/2 - 50, backgroundImage.frame.height/2 - 50, stickerSize, stickerSize))
stickerView.image = UIImage(named: imageName)
stickerView.userInteractionEnabled = true
stickerView.accessibilityIdentifier = "sticker"
let deleteStickerButton: UIImageView = UIImageView(frame: CGRectMake(stickerView.frame.width - 5 - stickerView.frame.width/3, 5, stickerView.frame.width/3, stickerView.frame.height/3))
deleteStickerButton.image = UIImage(named: "button_back")
deleteStickerButton.accessibilityIdentifier = "delete"
deleteStickerButton.userInteractionEnabled = true
deleteStickerButton.alpha = 0
deleteStickerButton.addGestureRecognizer(UITapGestureRecognizer(target: self, action: "deleteButtonTouches:"))
stickerView.addSubview(deleteStickerButton)
stickerView.addGestureRecognizer(UIPinchGestureRecognizer(target: self, action: "handlePinch:"))
stickerView.addGestureRecognizer(UIRotationGestureRecognizer(target: self, action: "handleRotate:"))
stickerView.addGestureRecognizer(UILongPressGestureRecognizer(target: self, action: "handleLongPress:"))
stickerView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: "handlePan"))
print("emojiInsert : \(imageName)")
backgroundImage.addSubview(stickerView)
}
Below are the callback functions I added at the end of the view controller. I used touchesBegan and touchesMoved to drag an emoji, but the emoji moved in a weird way after rotation, so now I am trying to use a UIPanGestureRecognizer to drag it.
@IBAction func handlePinch(recognizer: UIPinchGestureRecognizer) {
if(deleteMode) {
return
}
print("handlePinch \(recognizer.scale)")
if let view = recognizer.view {
view.transform = CGAffineTransformScale(view.transform,
recognizer.scale, recognizer.scale)
recognizer.scale = 1
}
}
@IBAction func handleRotate(recognizer: UIRotationGestureRecognizer) {
if(deleteMode) {
return
}
if let view = recognizer.view {
view.transform = CGAffineTransformRotate(view.transform, recognizer.rotation)
recognizer.rotation = 0
}
}
@IBAction func handlePan(recognizer: UIPanGestureRecognizer) {
if(deleteMode) {
return
}
let translation = recognizer.translationInView(self.view)
if let view = recognizer.view {
view.center = CGPoint(x:view.center.x + translation.x,
y:view.center.y + translation.y)
}
recognizer.setTranslation(CGPointZero, inView: self.view)
}
@IBAction func handleLongPress(recognizer: UILongPressGestureRecognizer) {
if(recognizer.state == UIGestureRecognizerState.Began) {
if(!deleteMode) {
print("LongPress - Delete Shows")
for (_, stickers) in self.backgroundImage.subviews.enumerate() {
for (_, deleteButtons) in stickers.subviews.enumerate() {
if let delete:UIImageView = deleteButtons as? UIImageView{
if(delete.accessibilityIdentifier == "delete") {
delete.alpha = 1
}
}
}
}
deleteMode = true
} else {
deleteButtonHides()
}
}
}
Again, please help! Thanks in advance.
The problem is that you're missing a colon. In the following line:
stickerView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: "handlePan"))
The handlePan should be handlePan:. That's because the Objective-C signature for your method is:
- (void)handlePan:(UIPanGestureRecognizer *)recognizer
The colon is part of the method name.
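So the fix is simply adding the colon. With Swift 2.2 and later you can also use #selector, which lets the compiler verify the name for you (the exact spelling below assumes the handlePan(recognizer:) signature from the question):
// The selector string must include the trailing colon, because the
// Objective-C method takes one argument:
stickerView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: "handlePan:"))

// Swift 2.2+ alternative: the compiler checks the selector for you
// (the method must be visible to Objective-C, e.g. marked @IBAction or @objc):
stickerView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(handlePan(_:))))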
Is it possible to change the order of views in an NSStackView by dragging the subviews, just like we can in an NSTableView?
Here's an implementation of an NSStackView subclass whose contents can be reordered via dragging:
//
// DraggingStackView.swift
// Analysis
//
// Created by Mark Onyschuk on 2017-02-02.
// Copyright © 2017 Mark Onyschuk. All rights reserved.
//
import Cocoa
class DraggingStackView: NSStackView {
var isEnabled = true
// MARK: -
// MARK: Update Function
var update: (NSStackView, Array<NSView>)->Void = { stack, views in
stack.views.forEach {
stack.removeView($0)
}
views.forEach {
stack.addView($0, in: .leading)
switch stack.orientation {
case .horizontal:
$0.topAnchor.constraint(equalTo: stack.topAnchor).isActive = true
$0.bottomAnchor.constraint(equalTo: stack.bottomAnchor).isActive = true
case .vertical:
$0.leadingAnchor.constraint(equalTo: stack.leadingAnchor).isActive = true
$0.trailingAnchor.constraint(equalTo: stack.trailingAnchor).isActive = true
}
}
}
// MARK: -
// MARK: Event Handling
override func mouseDragged(with event: NSEvent) {
if isEnabled {
let location = convert(event.locationInWindow, from: nil)
if let dragged = views.first(where: { $0.hitTest(location) != nil }) {
reorder(view: dragged, event: event)
}
} else {
super.mouseDragged(with: event)
}
}
private func reorder(view: NSView, event: NSEvent) {
guard let layer = self.layer else { return }
guard let cached = try? self.cacheViews() else { return }
let container = CALayer()
container.frame = layer.bounds
container.zPosition = 1
container.backgroundColor = NSColor.underPageBackgroundColor.cgColor
cached
.filter { $0.view !== view }
.forEach { container.addSublayer($0) }
layer.addSublayer(container)
defer { container.removeFromSuperlayer() }
let dragged = cached.first(where: { $0.view === view })!
dragged.zPosition = 2
layer.addSublayer(dragged)
defer { dragged.removeFromSuperlayer() }
let d0 = view.frame.origin
let p0 = convert(event.locationInWindow, from: nil)
window!.trackEvents(matching: [.leftMouseDragged, .leftMouseUp], timeout: 1e6, mode: .eventTrackingRunLoopMode) { event, stop in
if event.type == .leftMouseDragged {
let p1 = self.convert(event.locationInWindow, from: nil)
let dx = (self.orientation == .horizontal) ? p1.x - p0.x : 0
let dy = (self.orientation == .vertical) ? p1.y - p0.y : 0
CATransaction.begin()
CATransaction.setDisableActions(true)
dragged.frame.origin.x = d0.x + dx
dragged.frame.origin.y = d0.y + dy
CATransaction.commit()
let reordered = self.views.map {
(view: $0,
position: $0 !== view
? NSPoint(x: $0.frame.midX, y: $0.frame.midY)
: NSPoint(x: dragged.frame.midX, y: dragged.frame.midY))
}
.sorted {
switch self.orientation {
case .vertical: return $0.position.y < $1.position.y
case .horizontal: return $0.position.x < $1.position.x
}
}
.map { $0.view }
let nextIndex = reordered.index(of: view)!
let prevIndex = self.views.index(of: view)!
if nextIndex != prevIndex {
self.update(self, reordered)
self.layoutSubtreeIfNeeded()
CATransaction.begin()
CATransaction.setAnimationDuration(0.15)
CATransaction.setAnimationTimingFunction(CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut))
for layer in cached {
layer.position = NSPoint(x: layer.view.frame.midX, y: layer.view.frame.midY)
}
CATransaction.commit()
}
} else {
view.mouseUp(with: event)
stop.pointee = true
}
}
}
// MARK: -
// MARK: View Caching
private class CachedViewLayer: CALayer {
let view: NSView!
enum CacheError: Error {
case bitmapCreationFailed
}
override init(layer: Any) {
self.view = (layer as! CachedViewLayer).view
super.init(layer: layer)
}
init(view: NSView) throws {
self.view = view
super.init()
guard let bitmap = view.bitmapImageRepForCachingDisplay(in: view.bounds) else { throw CacheError.bitmapCreationFailed }
view.cacheDisplay(in: view.bounds, to: bitmap)
frame = view.frame
contents = bitmap.cgImage
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
private func cacheViews() throws -> [CachedViewLayer] {
return try views.map { try cacheView(view: $0) }
}
private func cacheView(view: NSView) throws -> CachedViewLayer {
return try CachedViewLayer(view: view)
}
}
The code requires your stack to be layer backed, and uses sublayers to simulate and animate its content views during drag handling. Dragging is detected by an override of mouseDragged(with:), so it will not be initiated if the stack's contents consume that event.
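A minimal usage sketch (assuming the stack view is created in code; the labels are just placeholders):
let stack = DraggingStackView()
stack.wantsLayer = true          // the reordering code needs a backing layer
stack.orientation = .horizontal
["One", "Two", "Three"]
    .map { NSTextField(labelWithString: $0) }
    .forEach { stack.addView($0, in: .leading) }

// Optionally observe reorders by wrapping the default update closure:
let defaultUpdate = stack.update
stack.update = { stackView, orderedViews in
    defaultUpdate(stackView, orderedViews)
    print("New order:", orderedViews)
}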
There's no built-in support for re-ordering NSStackView subviews.
Previously, I asked how to attach a video and send it via email; it is working now, thanks to advice from people on this website.
I have found a new problem: the video is very large, larger than when the same video file is sent with the default iOS Mail app.
Please advise me how to compress the video file before attaching it to an email.
Thank you everyone.
func imagePickerController(picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [NSObject : AnyObject]) {
    if let myImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        image = myImage
        self.dismissViewControllerAnimated(false, completion: nil)
        sendmail()
    } else {
        //picker.videoQuality = UIImagePickerControllerQualityTypeLow
        videoURL = info[UIImagePickerControllerMediaURL] as! NSURL
        self.dismissViewControllerAnimated(true, completion: nil)
        sendmailVDO()
    }
}
Below is the code to compress a video to half of its actual size:
var assetWriter:AVAssetWriter?
var assetReader:AVAssetReader?
let bitrate:NSNumber = NSNumber(value:250000)
func compressFile(urlToCompress: URL, outputURL: URL, completion: @escaping (URL) -> Void) {
//video file to make the asset
var audioFinished = false
var videoFinished = false
let asset = AVAsset(url: urlToCompress);
let duration = asset.duration
let durationTime = CMTimeGetSeconds(duration)
print("Video Actual Duration -- \(durationTime)")
//create asset reader
do{
assetReader = try AVAssetReader(asset: asset)
} catch{
assetReader = nil
}
guard let reader = assetReader else{
fatalError("Could not initalize asset reader probably failed its try catch")
}
let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first!
let audioTrack = asset.tracks(withMediaType: AVMediaType.audio).first!
let videoReaderSettings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB]
// ADJUST BIT RATE OF VIDEO HERE
let videoSettings:[String:Any] = [
AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey:self.bitrate],
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoHeightKey: videoTrack.naturalSize.height,
AVVideoWidthKey: videoTrack.naturalSize.width
]
let assetReaderVideoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
let assetReaderAudioOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: nil)
if reader.canAdd(assetReaderVideoOutput){
reader.add(assetReaderVideoOutput)
}else{
fatalError("Couldn't add video output reader")
}
if reader.canAdd(assetReaderAudioOutput){
reader.add(assetReaderAudioOutput)
}else{
fatalError("Couldn't add audio output reader")
}
let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: nil)
let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
videoInput.transform = videoTrack.preferredTransform
//we need to add samples to the video input
let videoInputQueue = DispatchQueue(label: "videoQueue")
let audioInputQueue = DispatchQueue(label: "audioQueue")
do{
assetWriter = try AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mov)
}catch{
assetWriter = nil
}
guard let writer = assetWriter else{
fatalError("assetWriter was nil")
}
writer.shouldOptimizeForNetworkUse = true
writer.add(videoInput)
writer.add(audioInput)
writer.startWriting()
reader.startReading()
writer.startSession(atSourceTime: kCMTimeZero)
let closeWriter:()->Void = {
if (audioFinished && videoFinished){
self.assetWriter?.finishWriting(completionHandler: {
print("------ Finish Video Compressing")
self.checkFileSize(sizeUrl: (self.assetWriter?.outputURL)!, message: "The file size of the compressed file is: ")
completion((self.assetWriter?.outputURL)!)
})
self.assetReader?.cancelReading()
}
}
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
while(audioInput.isReadyForMoreMediaData){
let sample = assetReaderAudioOutput.copyNextSampleBuffer()
if (sample != nil){
audioInput.append(sample!)
}else{
audioInput.markAsFinished()
DispatchQueue.main.async {
audioFinished = true
closeWriter()
}
break;
}
}
}
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
//request data here
while(videoInput.isReadyForMoreMediaData){
let sample = assetReaderVideoOutput.copyNextSampleBuffer()
if (sample != nil){
let timeStamp = CMSampleBufferGetPresentationTimeStamp(sample!)
let timeSecond = CMTimeGetSeconds(timeStamp)
let per = timeSecond / durationTime
print("Duration --- \(per)")
DispatchQueue.main.async {
self.progress.progress = Float(per)
}
videoInput.append(sample!)
}else{
videoInput.markAsFinished()
DispatchQueue.main.async {
videoFinished = true
self.progress.progress = 1.0
closeWriter()
}
break;
}
}
}
}
You can also display the progress of video compression.
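A usage sketch for the function above, assuming videoURL is the URL picked in the question (bridged to URL) and self.progress is the UIProgressView updated inside compressFile; the output path is just an example:
// Hypothetical call site: compress the picked video, then attach the result.
let outputURL = FileManager.default.temporaryDirectory
    .appendingPathComponent("compressed.mov")
try? FileManager.default.removeItem(at: outputURL) // the writer needs a fresh file

compressFile(urlToCompress: videoURL as URL, outputURL: outputURL) { compressedURL in
    DispatchQueue.main.async {
        // Attach compressedURL to the mail composer instead of the original video.
        print("Compressed file at \(compressedURL)")
    }
}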