I have three files: one with only a red channel, one with only a green channel, and one with only a blue channel. Now I want to combine those three images into one, where each image becomes one color channel of the finished image.
How can I do this with Cocoa? I have a solution that works, but it is too slow:
NSBitmapImageRep *rRep = [[rImage representations] objectAtIndex: 0];
NSBitmapImageRep *gRep = [[gImage representations] objectAtIndex: 0];
NSBitmapImageRep *bRep = [[bImage representations] objectAtIndex: 0];
NSBitmapImageRep *finalRep = [rRep copy];
for (NSUInteger i = 0; i < [rRep pixelsWide]; i++) {
    for (NSUInteger j = 0; j < [rRep pixelsHigh]; j++) {
        CGFloat r = [[rRep colorAtX:i y:j] redComponent];
        CGFloat g = [[gRep colorAtX:i y:j] greenComponent];
        CGFloat b = [[bRep colorAtX:i y:j] blueComponent];
        [finalRep setColor:[NSColor colorWithCalibratedRed:r green:g blue:b alpha:1.0] atX:i y:j];
    }
}
NSData *data = [finalRep representationUsingType:NSJPEGFileType properties:[NSDictionary dictionaryWithObject:[NSNumber numberWithDouble:0.7] forKey:NSImageCompressionFactor]];
[data writeToURL:[panel URL] atomically:YES];
The Accelerate.framework provides a function to combine 3 planar images into one destination:
vImageConvert_Planar8toRGB888.
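Its declaration in the vImage headers (Accelerate's vImage/Conversion.h) looks roughly like this:
vImage_Error vImageConvert_Planar8toRGB888(const vImage_Buffer *srcRed, const vImage_Buffer *srcGreen, const vImage_Buffer *srcBlue, const vImage_Buffer *dest, vImage_Flags flags);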
I haven't tried your approach, but the vImage-based method below is quite fast.
I was able to combine three (R, G, B) planes of a 1680x1050 image in ~0.1s on my Mac. The actual conversion takes ~1/3 of that time; the rest is setup & file I/O.
- (void)applicationDidFinishLaunching:(NSNotification *)aNotification
{
    NSDate* start = [NSDate date];
    NSURL* redImageURL = [[NSBundle mainBundle] URLForImageResource:@"red"];
    NSURL* greenImageURL = [[NSBundle mainBundle] URLForImageResource:@"green"];
    NSURL* blueImageURL = [[NSBundle mainBundle] URLForImageResource:@"blue"];
    NSData* redImageData = [self newChannelDataFromImageAtURL:redImageURL];
    NSData* greenImageData = [self newChannelDataFromImageAtURL:greenImageURL];
    NSData* blueImageData = [self newChannelDataFromImageAtURL:blueImageURL];
    //We use our "red" image to measure the dimensions. We assume that all images & the destination have the same size
    CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)redImageURL, NULL);
    NSDictionary* properties = CFBridgingRelease(CGImageSourceCopyPropertiesAtIndex(imageSource, 0, NULL));
    CFRelease(imageSource);
    CGFloat width = [properties[(id)kCGImagePropertyPixelWidth] doubleValue];
    CGFloat height = [properties[(id)kCGImagePropertyPixelHeight] doubleValue];
    self.image = [self newImageWithSize:CGSizeMake(width, height) fromRedChannel:redImageData greenChannel:greenImageData blueChannel:blueImageData];
    NSLog(@"Combining 3 (R, G, B) planes of size %@ took:%fs", NSStringFromSize(CGSizeMake(width, height)), [[NSDate date] timeIntervalSinceDate:start]);
}
- (NSImage*)newImageWithSize:(CGSize)size fromRedChannel:(NSData*)redImageData greenChannel:(NSData*)greenImageData blueChannel:(NSData*)blueImageData
{
    vImage_Buffer redBuffer;
    redBuffer.data = (void*)redImageData.bytes;
    redBuffer.width = size.width;
    redBuffer.height = size.height;
    redBuffer.rowBytes = [redImageData length]/size.height; //rowBytes inferred from the plane length; assumes no per-row padding
    vImage_Buffer greenBuffer;
    greenBuffer.data = (void*)greenImageData.bytes;
    greenBuffer.width = size.width;
    greenBuffer.height = size.height;
    greenBuffer.rowBytes = [greenImageData length]/size.height;
    vImage_Buffer blueBuffer;
    blueBuffer.data = (void*)blueImageData.bytes;
    blueBuffer.width = size.width;
    blueBuffer.height = size.height;
    blueBuffer.rowBytes = [blueImageData length]/size.height;
    //Mutable storage for the interleaved RGB destination (3 bytes per pixel)
    size_t destinationImageBytesLength = size.width*size.height*3;
    NSMutableData* destinationImageData = [[NSMutableData alloc] initWithLength:destinationImageBytesLength];
    vImage_Buffer destinationBuffer;
    destinationBuffer.data = destinationImageData.mutableBytes;
    destinationBuffer.width = size.width;
    destinationBuffer.height = size.height;
    destinationBuffer.rowBytes = [destinationImageData length]/size.height;
    vImage_Error result = vImageConvert_Planar8toRGB888(&redBuffer, &greenBuffer, &blueBuffer, &destinationBuffer, 0);
    NSImage* image = nil;
    if(result == kvImageNoError)
    {
        //TODO: If you need color matching, use an appropriate colorspace here
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGDataProviderRef dataProvider = CGDataProviderCreateWithCFData((__bridge CFDataRef)(destinationImageData));
        CGImageRef finalImageRef = CGImageCreate(size.width, size.height, 8, 24, destinationBuffer.rowBytes, colorSpace, kCGBitmapByteOrderDefault|kCGImageAlphaNone, dataProvider, NULL, NO, kCGRenderingIntentDefault);
        CGColorSpaceRelease(colorSpace);
        CGDataProviderRelease(dataProvider);
        image = [[NSImage alloc] initWithCGImage:finalImageRef size:NSMakeSize(size.width, size.height)];
        CGImageRelease(finalImageRef);
    }
    return image;
}
- (NSData*)newChannelDataFromImageAtURL:(NSURL*)imageURL
{
    CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)imageURL, NULL);
    if(imageSource == NULL){return NULL;}
    CGImageRef image = CGImageSourceCreateImageAtIndex(imageSource, 0, NULL);
    CFRelease(imageSource);
    if(image == NULL){return NULL;}
    //Render in the source's own color model. For the planar conversion above to work,
    //the sources are assumed to decode to 8 bits per pixel (e.g. grayscale PNGs)
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image);
    CGFloat width = CGImageGetWidth(image);
    CGFloat height = CGImageGetHeight(image);
    size_t bytesPerRow = CGImageGetBytesPerRow(image);
    CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(image);
    CGContextRef bitmapContext = CGBitmapContextCreate(NULL, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
    NSData* data = NULL;
    if(NULL != bitmapContext)
    {
        CGContextDrawImage(bitmapContext, CGRectMake(0.0, 0.0, width, height), image);
        CGImageRef imageRef = CGBitmapContextCreateImage(bitmapContext);
        if(NULL != imageRef)
        {
            data = (NSData*)CFBridgingRelease(CGDataProviderCopyData(CGImageGetDataProvider(imageRef)));
        }
        CGImageRelease(imageRef);
        CGContextRelease(bitmapContext);
    }
    CGImageRelease(image);
    return data;
}
Your program creates a huge number of transient color objects - four per pixel (three reads plus one write) - and that is why it is so slow.
Although your program could simply access the image reps' bitmapData directly, that would require your program to know a lot about bitmap representations.
Before taking that approach, you should prefer to let Quartz do the heavy lifting: render each image to a CGBitmapContext (e.g. using CGContextDrawImage(gtx, rect, img.CGImage)), then extract/copy the rendered component values from the rendered result into a destination RGB bitmap.
If your inputs are not multicomponent color models (e.g. grayscale), then you should render to the source color model to save a bunch of CPU time and memory.
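For illustration, a minimal sketch of that idea for single-channel inputs - render each source into an 8-bit grayscale context and keep the raw plane. The helper name channelDataForImage is mine, not part of any API, and it assumes the sources really are one channel:
// Hypothetical helper: render an NSImage into an 8-bit grayscale bitmap
// context and return the raw plane bytes.
static NSData *channelDataForImage(NSImage *image)
{
    CGImageRef cgImage = [image CGImageForProposedRect:NULL context:nil hints:nil];
    size_t width = CGImageGetWidth(cgImage);
    size_t height = CGImageGetHeight(cgImage);
    CGColorSpaceRef gray = CGColorSpaceCreateDeviceGray();
    CGContextRef ctx = CGBitmapContextCreate(NULL, width, height, 8, width, gray, (CGBitmapInfo)kCGImageAlphaNone);
    CGColorSpaceRelease(gray);
    if (ctx == NULL) { return nil; }
    CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), cgImage);
    //bytesPerRow was requested as `width`, so the plane is tightly packed
    NSData *plane = [NSData dataWithBytes:CGBitmapContextGetData(ctx) length:CGBitmapContextGetBytesPerRow(ctx) * height];
    CGContextRelease(ctx);
    return plane;
}
From there, the three planes can be fed straight into the vImage conversion shown in the other answer.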
Related
I am trying to write an image to a specific path. The code I wrote for that is:
- (void)thumbnailWithDataProvider:(CGDataProviderRef)dataProvider url:(NSURL *)url guid:(NSString *)guid {
    // The caller of this method typically releases this straight after calling.
    // We therefore retain it and release it at the end of the block.
    CGDataProviderRetain(dataProvider);
    // Dispatch the generation in a block on a queue suitable for this guid
    dispatch_async([self queueForGuid:guid], ^{
        NRLog(@"PDFORDER: Generate start %@ %@", guid, url);
        CGPDFDocumentRef documentRef = CGPDFDocumentCreateWithProvider(dataProvider);
        size_t numPages = CGPDFDocumentGetNumberOfPages(documentRef);
        if ( numPages ) {
            CGPDFPageRef pageRef = CGPDFDocumentGetPage(documentRef, 1);
            CGRect cropBox = CGPDFPageGetBoxRect(pageRef, kCGPDFCropBox);
            for ( size_t i = 0; i < NRMThumbnailCount; i++ ) {
                size_t scale = NRMThumbnailSizes[i];
                NSString *path = [url path];
                path = [NSString stringWithFormat:@"%@%lu.png", path, scale];
                NSURL *outurl = [NSURL fileURLWithPath:path];
                CGImageRef imageRef;
                CGFloat scaleX = scale/cropBox.size.width;
                CGFloat scaleY = scale/cropBox.size.height;
                CGFloat pdfScale = ( scaleX < scaleY ? scaleX : scaleY );
                CGFloat width = (CGFloat)ceil((double) pdfScale*cropBox.size.width);
                CGFloat height = (CGFloat)ceil((double) pdfScale*cropBox.size.height);
                CGColorSpaceRef colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
                CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, width*4, colorSpace, kCGImageAlphaPremultipliedLast);
                CGColorSpaceRelease(colorSpace);
                CGContextScaleCTM(context, pdfScale, pdfScale);
                CGContextSetFillColor(context, NRMPDFBackgroundColorComponents);
                CGContextFillRect(context, cropBox);
                CGContextDrawPDFPage( context, pageRef );
                imageRef = CGBitmapContextCreateImage(context);
                CGContextRelease(context);
                CGImageDestinationRef imageDest = CGImageDestinationCreateWithURL((CFURLRef)outurl, THUMBNAIL_TYPE, 1, NULL); //Getting the error "<Error>: ImageIO: CGImageDestinationSetProperties image destination parameter is nil" at this line, and the app crashes.
                if(!imageDest) {
                    NSLog(@"***Could not create image destination ***");
                }
                CFStringRef keys[1];
                keys[0] = kCGImageDestinationLossyCompressionQuality;
                CFNumberRef values[1];
                CGFloat compression = (CGFloat)THUMBNAIL_COMPRESSION;
                CFNumberRef compressionNumber = CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &compression);
                values[0] = compressionNumber;
                CFDictionaryRef properties = CFDictionaryCreate(kCFAllocatorDefault, (const void **)keys, (const void **)values, 1, &kCFCopyStringDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
                CGImageDestinationSetProperties(imageDest, properties);
                CGImageDestinationAddImage(imageDest, imageRef, NULL);
                CGImageDestinationFinalize(imageDest);
                CGImageRelease(imageRef);
                CFSafeRelease(imageDest);
                CFSafeRelease(compressionNumber);
                CFSafeRelease(properties);
            }
            [[NSNotificationCenter defaultCenter] postNotificationOnMainThreadWithName:kNRMPDFThumbnailImageChangeNotification object:guid];
        }
        CGPDFDocumentRelease(documentRef);
        CGDataProviderRelease(dataProvider);
        NRLog(@"PDFORDER: Generate end %@ %@", guid, url);
    });
}
All the parameters I pass into the method have values, yet the image destination comes back nil.
This happens only on Mac OS X 10.11.
Can anyone suggest what is wrong?
I strongly suspect this code:
NSString *path = [url path];
path = [NSString stringWithFormat:@"%@%lu.png", path, scale];
NSURL *outurl = [NSURL fileURLWithPath:path];
Building the target path by string concatenation is fragile, and if the resulting URL is not one a destination can be created at, CGImageDestinationCreateWithURL returns NULL. Better is:
NSString *filename = [NSString stringWithFormat:@"%lu.png", scale];
NSURL *outurl = [url URLByAppendingPathComponent:filename];
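Independent of how the URL is built, it is also worth bailing out of the failing iteration so that a NULL destination is never passed to CGImageDestinationSetProperties (which is what crashes). A minimal sketch, reusing your own variables (outurl, THUMBNAIL_TYPE, imageRef):
CGImageDestinationRef imageDest = CGImageDestinationCreateWithURL((CFURLRef)outurl, THUMBNAIL_TYPE, 1, NULL);
if (imageDest == NULL) {
    NSLog(@"*** Could not create image destination for %@ ***", outurl);
    CGImageRelease(imageRef); // don't leak the thumbnail we just rendered
    continue;                 // skip this size instead of crashing below
}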
I've updated my project to iOS 7 and now I am getting this error when resizing an image once it has been added/taken within the app. Here is my code:
-(UIImage *)resizeImage:(UIImage *)anImage width:(int)width height:(int)height
{
    CGImageRef imageRef = [anImage CGImage];
    CGImageAlphaInfo alphaInfo = CGImageGetAlphaInfo(imageRef);
    if (alphaInfo == kCGImageAlphaNone)
        alphaInfo = kCGImageAlphaNoneSkipLast;
    CGContextRef bitmap = CGBitmapContextCreate(NULL, width, height, CGImageGetBitsPerComponent(imageRef), 4 * width, CGImageGetColorSpace(imageRef), alphaInfo);
    CGContextDrawImage(bitmap, CGRectMake(0, 0, width, height), imageRef);
    CGImageRef ref = CGBitmapContextCreateImage(bitmap);
    UIImage *result = [UIImage imageWithCGImage:ref];
    CGContextRelease(bitmap);
    CGImageRelease(ref);
    return result;
}
The error I'm getting is this:
Implicit conversion from enumeration type 'CGImageAlphaInfo' (aka 'enum CGImageAlphaInfo') to different enumeration type 'CGBitmapInfo' (aka 'enum CGBitmapInfo')
I have inserted a (CGBitmapInfo) cast before your alphaInfo variable.
Hope it solves your problem:
-(UIImage *)resizeImage:(UIImage *)anImage width:(int)width height:(int)height
{
    CGImageRef imageRef = [anImage CGImage];
    CGImageAlphaInfo alphaInfo = CGImageGetAlphaInfo(imageRef);
    if (alphaInfo == kCGImageAlphaNone)
        alphaInfo = kCGImageAlphaNoneSkipLast;
    CGContextRef bitmap = CGBitmapContextCreate(NULL, width, height, CGImageGetBitsPerComponent(imageRef), 4 * width, CGImageGetColorSpace(imageRef), (CGBitmapInfo)alphaInfo);
    CGContextDrawImage(bitmap, CGRectMake(0, 0, width, height), imageRef);
    CGImageRef ref = CGBitmapContextCreateImage(bitmap);
    UIImage *result = [UIImage imageWithCGImage:ref];
    CGContextRelease(bitmap);
    CGImageRelease(ref);
    return result;
}
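For what it's worth, the cast is safe by design: the low bits of CGBitmapInfo (masked by kCGBitmapAlphaInfoMask) are defined to carry a CGImageAlphaInfo value, so the explicit cast merely silences the compiler's enum-mixing warning without changing behavior.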
While working with OpenCV, I need to convert an NSImage to an OpenCV multi-channel 2D matrix (cv::Mat) and vice versa.
What's the best way to do it?
Greets,
Dom
Here's my outcome, which works pretty well.
NSImage+OpenCV.h:
//
// NSImage+OpenCV.h
//
#import <AppKit/AppKit.h>
@interface NSImage (NSImage_OpenCV)
+(NSImage*)imageWithCVMat:(const cv::Mat&)cvMat;
-(id)initWithCVMat:(const cv::Mat&)cvMat;
@property(nonatomic, readonly) cv::Mat CVMat;
@property(nonatomic, readonly) cv::Mat CVGrayscaleMat;
@end
NSImage+OpenCV.mm:
//
// NSImage+OpenCV.mm
//
#import "NSImage+OpenCV.h"
static void ProviderReleaseDataNOP(void *info, const void *data, size_t size)
{
return;
}
@implementation NSImage (NSImage_OpenCV)
-(CGImageRef)CGImage
{
CGContextRef bitmapCtx = CGBitmapContextCreate(NULL/*data - pass NULL to let CG allocate the memory*/,
[self size].width,
[self size].height,
8 /*bitsPerComponent*/,
0 /*bytesPerRow - CG will calculate it for you if it's allocating the data. This might get padded out a bit for better alignment*/,
[[NSColorSpace genericRGBColorSpace] CGColorSpace],
kCGBitmapByteOrder32Host|kCGImageAlphaPremultipliedFirst);
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:[NSGraphicsContext graphicsContextWithGraphicsPort:bitmapCtx flipped:NO]];
[self drawInRect:NSMakeRect(0,0, [self size].width, [self size].height) fromRect:NSZeroRect operation:NSCompositeCopy fraction:1.0];
[NSGraphicsContext restoreGraphicsState];
CGImageRef cgImage = CGBitmapContextCreateImage(bitmapCtx);
CGContextRelease(bitmapCtx);
return cgImage;
}
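// Note: although -CGImage above is named like a getter, it returns a +1
// reference (created by CGBitmapContextCreateImage), which is why -CVMat and
// -CVGrayscaleMat below balance it with CGImageRelease.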
-(cv::Mat)CVMat
{
CGImageRef imageRef = [self CGImage];
CGColorSpaceRef colorSpace = CGImageGetColorSpace(imageRef);
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), imageRef);
CGContextRelease(contextRef);
CGImageRelease(imageRef);
return cvMat;
}
-(cv::Mat)CVGrayscaleMat
{
CGImageRef imageRef = [self CGImage];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), imageRef);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
CGImageRelease(imageRef);
return cvMat;
}
+ (NSImage *)imageWithCVMat:(const cv::Mat&)cvMat
{
return [[[NSImage alloc] initWithCVMat:cvMat] autorelease];
}
- (id)initWithCVMat:(const cv::Mat&)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1)
{
colorSpace = CGColorSpaceCreateDeviceGray();
}
else
{
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGImageRef imageRef = CGImageCreate(cvMat.cols, // Width
cvMat.rows, // Height
8, // Bits per component
8 * cvMat.elemSize(), // Bits per pixel
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
(cvMat.elemSize() == 4 ? kCGImageAlphaNoneSkipLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault, // Bitmap info flags (32bpp data needs an alpha placement hint; plain kCGImageAlphaNone is only valid up to 24bpp)
provider, // CGDataProviderRef
NULL, // Decode
false, // Should interpolate
kCGRenderingIntentDefault); // Intent
NSBitmapImageRep *bitmapRep = [[NSBitmapImageRep alloc] initWithCGImage:imageRef];
NSImage *image = [[NSImage alloc] init];
[image addRepresentation:bitmapRep];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return image;
}
@end
Example usage:
Just import it like this:
#import "NSImage+OpenCV.h"
And use it like this:
cv::Mat cvMat_test;
NSImage *image = [NSImage imageNamed:@"test.jpg"];
cvMat_test = [image CVMat];
[myImageView setImage:[NSImage imageWithCVMat:cvMat_test]];
In -(id)initWithCVMat:(const cv::Mat&)cvMat, shouldn't you be adding the representation to self, rather than a new NSImage?
-(id)initWithCVMat:(const cv::Mat *)iMat
{
if(self = [super init]) {
NSData *tData = [NSData dataWithBytes:iMat->data length:iMat->elemSize() * iMat->total()];
CGColorSpaceRef tColorSpace;
if(iMat->elemSize() == 1) {
tColorSpace = CGColorSpaceCreateDeviceGray();
} else {
tColorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef tProvider = CGDataProviderCreateWithCFData((CFDataRef) tData);
CGImageRef tImage = CGImageCreate(
iMat->cols,
iMat->rows,
8,
8 * iMat->elemSize(),
iMat->step[0],
tColorSpace,
kCGImageAlphaNone | kCGBitmapByteOrderDefault,
tProvider,
NULL,
false,
kCGRenderingIntentDefault);
NSBitmapImageRep *tBitmap = [[NSBitmapImageRep alloc] initWithCGImage:tImage];
[self addRepresentation:tBitmap];
[tBitmap release];
CGImageRelease(tImage);
CGDataProviderRelease(tProvider);
CGColorSpaceRelease(tColorSpace);
}
return self;
}
I have a CALayer (containerLayer) that I'm looking to convert to an NSBitmapImageRep before saving the data out as a flat file. containerLayer has its geometryFlipped property set to YES, and this seems to be causing issues. The PNG file that is ultimately generated renders the content correctly, but doesn't seem to take the flipped geometry into account. I'm obviously looking for test.png to accurately represent the content shown to the left.
Attached below is a screenshot of the problem and the code I'm working with.
- (NSBitmapImageRep *)exportToImageRep
{
CGContextRef context = NULL;
CGColorSpaceRef colorSpace;
int bitmapByteCount;
int bitmapBytesPerRow;
int pixelsHigh = (int)[[self containerLayer] bounds].size.height;
int pixelsWide = (int)[[self containerLayer] bounds].size.width;
bitmapBytesPerRow = (pixelsWide * 4);
bitmapByteCount = (bitmapBytesPerRow * pixelsHigh);
colorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
context = CGBitmapContextCreate (NULL,
pixelsWide,
pixelsHigh,
8,
bitmapBytesPerRow,
colorSpace,
kCGImageAlphaPremultipliedLast);
if (context == NULL)
{
NSLog(@"Failed to create context.");
return nil;
}
CGColorSpaceRelease(colorSpace);
[[[self containerLayer] presentationLayer] renderInContext:context];
CGImageRef img = CGBitmapContextCreateImage(context);
CGContextRelease(context); // the context is no longer needed once the CGImage exists
NSBitmapImageRep *bitmap = [[NSBitmapImageRep alloc] initWithCGImage:img];
CFRelease(img);
return bitmap;
}
For reference, here's the code that actually saves out the generated NSBitmapImageRep:
NSData *imageData = [imageRep representationUsingType:NSPNGFileType properties:nil];
[imageData writeToFile:@"test.png" atomically:NO];
You need to flip the destination context BEFORE you render into it: renderInContext: draws in the layer's own coordinate space and does not honor geometryFlipped, so you compensate by concatenating a vertical flip onto the context's CTM.
Update your code with this; I have just solved the same problem:
CGAffineTransform flipVertical = CGAffineTransformMake(1, 0, 0, -1, 0, pixelsHigh);
CGContextConcatCTM(context, flipVertical);
[[[self containerLayer] presentationLayer] renderInContext:context];
I always get: CGImageCreate: invalid image size: 0 x 0.
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
// Enumerate just the photos and videos group by using ALAssetsGroupSavedPhotos.
[library enumerateGroupsWithTypes:ALAssetsGroupSavedPhotos
usingBlock:^(ALAssetsGroup *group, BOOL *stop) {
// Within the group enumeration block, filter to enumerate just videos.
[group setAssetsFilter:[ALAssetsFilter allVideos]];
// For this example, we're only interested in the first item.
[group enumerateAssetsAtIndexes:[NSIndexSet indexSetWithIndex:0]
options:0
usingBlock:^(ALAsset *alAsset, NSUInteger index, BOOL *innerStop) {
// The end of the enumeration is signaled by asset == nil.
if (alAsset) {
ALAssetRepresentation *representation = [[alAsset defaultRepresentation] retain];
NSURL *url = [representation url];
AVURLAsset *avAsset = [[AVURLAsset URLAssetWithURL:url options:nil] retain];
AVAssetReader *assetReader = [[AVAssetReader assetReaderWithAsset:avAsset error:nil] retain];
NSArray *tracks = [avAsset tracksWithMediaType:AVMediaTypeVideo];
AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
AVAssetReaderTrackOutput *assetReaderOutput = [[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack outputSettings:nil] retain];
if (![assetReader canAddOutput:assetReaderOutput]) {printf("could not read reader output\n");}
[assetReader addOutput:assetReaderOutput];
[assetReader startReading];
CMSampleBufferRef nextBuffer = [assetReaderOutput copyNextSampleBuffer];
UIImage* image = imageFromSampleBuffer(nextBuffer);
}
}];
}
failureBlock: ^(NSError *error) {NSLog(@"No groups");}];
The imageFromSampleBuffer function comes directly from Apple:
UIImage* imageFromSampleBuffer(CMSampleBufferRef nextBuffer) {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(nextBuffer);
printf("total size:%u\n",CMSampleBufferGetTotalSampleSize(nextBuffer));
// Lock the base address of the pixel buffer.
//CVPixelBufferLockBaseAddress(imageBuffer,0);
// Get the number of bytes per row for the pixel buffer.
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
// Get the pixel buffer width and height.
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
printf("b:%d w:%d h:%d\n",bytesPerRow,width,height);
// Create a device-dependent RGB color space.
static CGColorSpaceRef colorSpace = NULL;
if (colorSpace == NULL) {
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL) {
// Handle the error appropriately.
return nil;
}
}
// Get the base address of the pixel buffer.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
// Get the data size for contiguous planes of the pixel buffer.
size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
// Create a Quartz direct-access data provider that uses data we supply.
CGDataProviderRef dataProvider =
CGDataProviderCreateWithData(NULL, baseAddress, bufferSize, NULL);
// Create a bitmap image from data supplied by the data provider.
CGImageRef cgImage =
CGImageCreate(width, height, 8, 32, bytesPerRow,
colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little,
dataProvider, NULL, true, kCGRenderingIntentDefault);
CGDataProviderRelease(dataProvider);
// Create and return an image object to represent the Quartz image.
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
return image;
}
I try to get the width and height, and it does print out the size of the sample buffer, so the buffer itself is not nonexistent, but I get no UIImage.
For AVAssetReaderTrackOutput *assetReaderOutput, pass output settings that request decoded BGRA pixel buffers instead of nil:
NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
[outputSettings setObject: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(NSString*)kCVPixelBufferPixelFormatTypeKey];
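The reason: with outputSettings:nil, the track output vends samples in the format in which they are stored in the track, so CMSampleBufferGetImageBuffer returns NULL for those compressed samples and the width/height you read from it come out as 0 - which is exactly the "invalid image size: 0 x 0" you are seeing. Wired into your code, the output creation becomes:
AVAssetReaderTrackOutput *assetReaderOutput = [[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack outputSettings:outputSettings] retain];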
I understand you want to read the first frame from each of your local videos?
You can use a simpler way to do all of this with AVAssetImageGenerator:
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
// Enumerate just the photos and videos group by using ALAssetsGroupSavedPhotos.
[library enumerateGroupsWithTypes:ALAssetsGroupSavedPhotos
usingBlock:^(ALAssetsGroup *group, BOOL *stop) {
// Within the group enumeration block, filter to enumerate just videos.
[group setAssetsFilter:[ALAssetsFilter allVideos]];
// For this example, we're only interested in the first item.
[group enumerateAssetsAtIndexes:[NSIndexSet indexSetWithIndex:0]
options:0
usingBlock:^(ALAsset *alAsset, NSUInteger index, BOOL *innerStop) {
// The end of the enumeration is signaled by asset == nil.
if (alAsset) {
ALAssetRepresentation *representation = [[alAsset defaultRepresentation] retain];
NSURL *url = [representation url];
AVURLAsset *avAsset = [[AVURLAsset URLAssetWithURL:url options:nil] retain];
AVAssetImageGenerator *imageGenerator = [[AVAssetImageGenerator alloc] initWithAsset:avAsset];
CMTime thumbTime = CMTimeMakeWithSeconds(1, 30);
NSError *error;
CMTime actualTime;
[imageGenerator setMaximumSize:MAXSIZE];
CGImageRef imageRef = [imageGenerator copyCGImageAtTime:thumbTime actualTime:&actualTime error:&error];
}
}];
}
failureBlock: ^(NSError *error) {NSLog(@"No groups");}];
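copyCGImageAtTime:actualTime:error: follows the Create rule, so you own the returned CGImageRef. A sketch of the finishing step (the thumbnail variable name is mine):
UIImage *thumbnail = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
[imageGenerator release];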