I always get: CGImageCreate: invalid image size: 0 x 0.
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
// Enumerate just the photos and videos group by using ALAssetsGroupSavedPhotos.
[library enumerateGroupsWithTypes:ALAssetsGroupSavedPhotos
usingBlock:^(ALAssetsGroup *group, BOOL *stop) {
// Within the group enumeration block, filter to enumerate just videos.
[group setAssetsFilter:[ALAssetsFilter allVideos]];
// For this example, we're only interested in the first item.
[group enumerateAssetsAtIndexes:[NSIndexSet indexSetWithIndex:0]
options:0
usingBlock:^(ALAsset *alAsset, NSUInteger index, BOOL *innerStop) {
// The end of the enumeration is signaled by asset == nil.
if (alAsset) {
ALAssetRepresentation *representation = [[alAsset defaultRepresentation] retain];
NSURL *url = [representation url];
AVURLAsset *avAsset = [[AVURLAsset URLAssetWithURL:url options:nil] retain];
AVAssetReader *assetReader = [[AVAssetReader assetReaderWithAsset:avAsset error:nil] retain];
NSArray *tracks = [avAsset tracksWithMediaType:AVMediaTypeVideo];
AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
AVAssetReaderTrackOutput *assetReaderOutput = [[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack outputSettings:nil] retain];
if (![assetReader canAddOutput:assetReaderOutput]) {printf("could not read reader output\n");}
[assetReader addOutput:assetReaderOutput];
[assetReader startReading];
CMSampleBufferRef nextBuffer = [assetReaderOutput copyNextSampleBuffer];
UIImage* image = imageFromSampleBuffer(nextBuffer);
}
}];
}
failureBlock: ^(NSError *error) {NSLog(@"No groups");}];
The imageFromSampleBuffer function comes directly from Apple:
UIImage* imageFromSampleBuffer(CMSampleBufferRef nextBuffer) {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(nextBuffer);
printf("total size:%u\n",CMSampleBufferGetTotalSampleSize(nextBuffer));
// Lock the base address of the pixel buffer.
//CVPixelBufferLockBaseAddress(imageBuffer,0);
// Get the number of bytes per row for the pixel buffer.
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
// Get the pixel buffer width and height.
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
printf("b:%d w:%d h:%d\n",bytesPerRow,width,height);
// Create a device-dependent RGB color space.
static CGColorSpaceRef colorSpace = NULL;
if (colorSpace == NULL) {
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL) {
// Handle the error appropriately.
return nil;
}
}
// Get the base address of the pixel buffer.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
// Get the data size for contiguous planes of the pixel buffer.
size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
// Create a Quartz direct-access data provider that uses data we supply.
CGDataProviderRef dataProvider =
CGDataProviderCreateWithData(NULL, baseAddress, bufferSize, NULL);
// Create a bitmap image from data supplied by the data provider.
CGImageRef cgImage =
CGImageCreate(width, height, 8, 32, bytesPerRow,
colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little,
dataProvider, NULL, true, kCGRenderingIntentDefault);
CGDataProviderRelease(dataProvider);
// Create and return an image object to represent the Quartz image.
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
return image;
}
I try to get the width and height; the code prints the size of the sample buffer, so I know the buffer itself does exist, but I get no UIImage.
For AVAssetReaderTrackOutput *assetReaderOutput..., pass explicit output settings instead of nil:
NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
[outputSettings setObject: [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(NSString*)kCVPixelBufferPixelFormatTypeKey];
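Passing nil for the output settings makes the track output vend samples in the format they are stored in (typically compressed H.264), so CMSampleBufferGetImageBuffer returns NULL and the width/height read back as 0, which is exactly the CGImageCreate error above. A minimal sketch of wiring these settings into the question's track output (videoTrack as in the original code):
// Pass the settings dictionary built above instead of nil; each decoded sample
// buffer then carries a CVPixelBuffer that imageFromSampleBuffer() can use.
AVAssetReaderTrackOutput *assetReaderOutput =
    [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTrack
                                               outputSettings:outputSettings];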
I understand you want to read the first image from all your local videos?
You can use a simpler way to do all of this.
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
// Enumerate just the photos and videos group by using ALAssetsGroupSavedPhotos.
[library enumerateGroupsWithTypes:ALAssetsGroupSavedPhotos
usingBlock:^(ALAssetsGroup *group, BOOL *stop) {
// Within the group enumeration block, filter to enumerate just videos.
[group setAssetsFilter:[ALAssetsFilter allVideos]];
// For this example, we're only interested in the first item.
[group enumerateAssetsAtIndexes:[NSIndexSet indexSetWithIndex:0]
options:0
usingBlock:^(ALAsset *alAsset, NSUInteger index, BOOL *innerStop) {
// The end of the enumeration is signaled by asset == nil.
if (alAsset) {
ALAssetRepresentation *representation = [[alAsset defaultRepresentation] retain];
NSURL *url = [representation url];
AVURLAsset *avAsset = [[AVURLAsset URLAssetWithURL:url options:nil] retain];
AVAssetImageGenerator *imageGenerator = [[AVAssetImageGenerator alloc] initWithAsset:avAsset];
CMTime thumbTime = CMTimeMakeWithSeconds(1, 30);
NSError *error;
CMTime actualTime;
[imageGenerator setMaximumSize:MAXSIZE];
CGImageRef imageRef = [imageGenerator copyCGImageAtTime:thumbTime actualTime:&actualTime error:&error];
}
}];
}
failureBlock: ^(NSError *error) {NSLog(@"No groups");}];
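For reference, setMaximumSize: takes a CGSize, so MAXSIZE would be something like CGSizeMake(320, 180). copyCGImageAtTime:actualTime:error: follows the Create/Copy rule, so the returned CGImageRef still needs to be wrapped and released; a small, illustrative continuation (not part of the original answer):
if (imageRef) {
    UIImage *thumbnail = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);   // imageRef was returned retained
    // ... use thumbnail, e.g. assign it to an image view ...
}
[imageGenerator release];       // the surrounding code uses manual retain/release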
I am capturing the Mac screen in JPEG format and then trying to get the pixelBuffer and imageBuffer of the captured JPEG sample buffer.
However, the pixelBuffer is always nil, while converting the JPEG buffer to an NSImage works and the image is displayed successfully.
-(void)createSession
{
if(self.session == nil)
{
self.session = [[AVCaptureSession alloc] init];
self.session.sessionPreset = AVCaptureSessionPresetPhoto;
CGDirectDisplayID displayId = [self getDisplayID];
self.input = [[AVCaptureScreenInput alloc] initWithDisplayID:displayId];
[self.session addInput:self.input];
self.imageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = @{AVVideoCodecKey : AVVideoCodecJPEG};
[self.imageOutput setOutputSettings:outputSettings];
[self.session addOutput:self.imageOutput];
[self.session startRunning];
}
}
-(void)processSampleBuffer
{
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in self.imageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([[port mediaType] isEqual:AVMediaTypeVideo] ) {
videoConnection = connection;
break;
}
}
if (videoConnection) { break; }
}
[self.imageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError * error) {
if(imageDataSampleBuffer != nil)
{
NSData * imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
self.image = [[NSImage alloc] initWithData:imageData];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(imageDataSampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
NSLog(#"width %zu height %zu", width,height);
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(imageDataSampleBuffer);
float width1 = CVPixelBufferGetWidth(pixelBuffer);
float height1 = CVPixelBufferGetHeight(pixelBuffer);
NSLog(#"Pixelbuffer width %f height %f", width1,height1);
}
else
{
NSLog(#"error");
}
}];
}
In processSampleBuffer,
self.image gets an NSImage and is displayed in an NSImageView successfully,
but the imageBuffer and pixelBuffer are both nil.
This confuses me a lot; could someone take a look?
OK, finally, I found the answer here, hope it can help others.
https://developer.apple.com/documentation/avfoundation/avcapturephoto/2873914-pixelbuffer?language=objc
Discussion
If you requested photo capture in a RAW format, or in a processed format without compression such as TIFF, you can use this property to access the underlying sample buffer.
If you requested capture in a compressed format such as JPEG or HEVC/HEIF, this property's value is nil. Use the fileDataRepresentation or CGImageRepresentation method to obtain compressed image data.
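So if you need the CVPixelBuffer itself, request the capture in an uncompressed format. With the AVCaptureStillImageOutput used in the code above, that means asking for a pixel format instead of JPEG; a hedged sketch (check availableImageDataCVPixelFormatTypes for what your device actually supports):
// Request uncompressed BGRA frames instead of JPEG so that
// CMSampleBufferGetImageBuffer() returns a non-nil CVPixelBufferRef.
NSDictionary *uncompressedSettings = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)};
[self.imageOutput setOutputSettings:uncompressedSettings];
The trade-off is that jpegStillImageNSDataRepresentation: no longer applies; you would build the NSImage from the pixel buffer instead (for example via CIImage).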
On OS X I use AVFoundation to capture an image from a USB camera. Everything works fine, but the image I get is darker compared to the live video.
Device capture configuration
-(BOOL)prepareCapture{
captureSession = [[AVCaptureSession alloc] init];
NSError *error;
imageOutput=[[AVCaptureStillImageOutput alloc] init];
NSNumber * pixelFormat = [NSNumber numberWithInt:k32BGRAPixelFormat];
[imageOutput setOutputSettings:[NSDictionary dictionaryWithObject:pixelFormat forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
videoOutput=[[AVCaptureMovieFileOutput alloc] init];
AVCaptureDeviceInput *videoInput = [AVCaptureDeviceInput deviceInputWithDevice:MyVideoDevice error:&error];
if (videoInput) {
[captureSession beginConfiguration];
[captureSession addInput:videoInput];
[captureSession setSessionPreset:AVCaptureSessionPresetHigh];
//[captureSession setSessionPreset:AVCaptureSessionPresetPhoto];
[captureSession addOutput:imageOutput];
[captureSession addOutput:videoOutput];
[captureSession commitConfiguration];
}
else {
// Handle the failure.
return NO;
}
return YES;
}
Add view for live preview
-(void)settingPreview:(NSView*)View{
// Attach preview to session
previewView = View;
CALayer *previewViewLayer = [previewView layer];
[previewViewLayer setBackgroundColor:CGColorGetConstantColor(kCGColorBlack)];
AVCaptureVideoPreviewLayer *newPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:captureSession];
[newPreviewLayer setFrame:[previewViewLayer bounds]];
[newPreviewLayer setAutoresizingMask:kCALayerWidthSizable | kCALayerHeightSizable];
[previewViewLayer addSublayer:newPreviewLayer];
//[self setPreviewLayer:newPreviewLayer];
[captureSession startRunning];
}
Code to capture the image
-(void)captureImage{
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in imageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([[port mediaType] isEqual:AVMediaTypeVideo] ) {
videoConnection = connection;
break;
}
}
if (videoConnection) { break; }
}
[imageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler:
^(CMSampleBufferRef imageSampleBuffer, NSError *error) {
CFDictionaryRef exifAttachments =
CMGetAttachment(imageSampleBuffer, kCGImagePropertyExifDictionary, NULL);
if (exifAttachments) {
// Do something with the attachments.
}
// Continue as appropriate.
//IMG is a global NSImage
IMG = [self imageFromSampleBuffer:imageSampleBuffer];
[[self delegate] imageReady:IMG];
}];
}
Create an NSImage from the sample buffer data; I think the problem is here:
- (NSImage *) imageFromSampleBuffer:(CMSampleBufferRef) sampleBuffer
{
// Get a CMSampleBuffer's Core Video image buffer for the media data
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// Lock the base address of the pixel buffer
CVPixelBufferLockBaseAddress(imageBuffer, 0);
// Get the number of bytes per row for the pixel buffer
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
// Get the number of bytes per row for the pixel buffer
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
// Get the pixel buffer width and height
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
// Create a device-dependent RGB color space
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// Create a bitmap graphics context with the sample buffer data
CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
// Create a Quartz image from the pixel data in the bitmap graphics context
CGImageRef quartzImage = CGBitmapContextCreateImage(context);
// Unlock the pixel buffer
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
// Free up the context and color space
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
// Create an image object from the Quartz image
//UIImage *image = [UIImage imageWithCGImage:quartzImage];
NSImage * image = [[NSImage alloc] initWithCGImage:quartzImage size:NSZeroSize];
// Release the Quartz image
CGImageRelease(quartzImage);
return (image);
}
Solution found
The problem was in imageFromSampleBuffer
I used this code and the picture is perfect
// Continue as appropriate.
//IMG = [self imageFromSampleBuffer:imageSampleBuffer];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(imageSampleBuffer);
if (imageBuffer) {
CVBufferRetain(imageBuffer);
NSCIImageRep* imageRep = [NSCIImageRep imageRepWithCIImage: [CIImage imageWithCVImageBuffer: imageBuffer]];
IMG = [[NSImage alloc] initWithSize: [imageRep size]];
[IMG addRepresentation: imageRep];
CVBufferRelease(imageBuffer);
}
Code found in this answer
In my case, you still need to call captureStillImageAsynchronouslyFromConnection: multiple times to force the built-in camera to expose properly.
int primeCount = 8; //YMMV
for (int i = 0; i < primeCount; i++) {
[imageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error) {}];
}
[imageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error) {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(imageSampleBuffer);
if (imageBuffer) {
CVBufferRetain(imageBuffer);
NSCIImageRep* imageRep = [NSCIImageRep imageRepWithCIImage: [CIImage imageWithCVImageBuffer: imageBuffer]];
IMG = [[NSImage alloc] initWithSize: [imageRep size]];
[IMG addRepresentation: imageRep];
}
}];
I know there is a way to save PDF pages to an NSImage and then output them as JPG like this:
NSData *pdfData = [NSData dataWithContentsOfFile:pathToUrPDF];
NSPDFImageRep *pdfImg = [NSPDFImageRep imageRepWithData:pdfData];
NSFileManager *fileManager = [NSFileManager defaultManager];
NSInteger pageCount = [pdfImg pageCount];
for(int i = 0 ; i < pageCount ; i++) {
[pdfImg setCurrentPage:i];
NSImage *temp = [[NSImage alloc] init];
[temp addRepresentation:pdfImg];
NSBitmapImageRep *rep = [NSBitmapImageRep imageRepWithData:[temp TIFFRepresentation]];
NSData *finalData = [rep representationUsingType:NSJPEGFileType properties:nil];
NSString *pageName = [NSString stringWithFormat:@"Page_%ld.jpg", (long)[pdfImg currentPage]];
[fileManager createFileAtPath:[NSString stringWithFormat:@"%@/%@", @"pathWrUWantToSave", pageName] contents:finalData attributes:nil];
}
However the "TIFFRepresetation" can only output as 72 DPI highest. So I think the best way to get the high DPI image from a PDF is using CGImage on Mac OS X. How to do this? My destination application is for Mac OS X, not iOS...
Thanks alot
Finally I got the solution:
Since OS X 10.8, NSImage has a block-based initialiser to draw vector-based content into a bitmap.
The idea is to provide a drawing handler that is called whenever a representation of the image is requested.
The relation between points and pixels is expressed by passing an NSSize (in points) to the initialiser and by explicitly setting the pixel dimensions on the representation:
NSString* localDocuments = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,NSUserDomainMask, YES) objectAtIndex:0];
NSString* pdfPath = [localDocuments stringByAppendingPathComponent:@"1.pdf"];
NSData* pdfData = [NSData dataWithContentsOfFile:pdfPath];
NSPDFImageRep* pdfImageRep = [NSPDFImageRep imageRepWithData:pdfData];
CGFloat factor = 300.0/72.0; // avoid integer division (300/72 would truncate to 4)
NSInteger pageCount = [pdfImageRep pageCount];
for(int i = 0 ; i < pageCount ; i++) {
[pdfImageRep setCurrentPage:i];
NSImage* scaledImage = [NSImage imageWithSize:pdfImageRep.size flipped:NO drawingHandler:^BOOL(NSRect dstRect) {
[pdfImageRep drawInRect:dstRect];
return YES;
}];
NSImageRep* scaledImageRep = [[scaledImage representations] firstObject];
/*
 * The sizes of the PDF image rep and the [NSImage imageWithSize:flipped:drawingHandler:]
 * context are defined in terms of points.
 * By explicitly setting the size of the scaled representation in pixels, you
 * define the relation between points and pixels.
 */
scaledImageRep.pixelsWide = pdfImageRep.size.width * factor;
scaledImageRep.pixelsHigh = pdfImageRep.size.height * factor;
NSBitmapImageRep* pngImageRep = [NSBitmapImageRep imageRepWithData:[scaledImage TIFFRepresentation]];
NSData* finalData = [pngImageRep representationUsingType:NSJPEGFileType properties:nil];
NSString* pageName = [NSString stringWithFormat:@"Page_%ld.jpg", (long)[pdfImageRep currentPage]];
[[NSFileManager defaultManager] createFileAtPath:[NSString stringWithFormat:@"%@%@", pdfPath, pageName] contents:finalData attributes:nil];
}
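For scale: PDF page sizes are in points at 72 per inch, so with factor = 300/72 ≈ 4.17 a US-Letter page of 612 x 792 points comes out at roughly 2550 x 3300 pixels (612 * 300/72 = 2550, 792 * 300/72 = 3300).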
I have 3 files: one with only a red channel, one with only a green channel, and one with only a blue channel. Now I want to combine those 3 images into one, where each image becomes one color channel of the finished image.
How can I do this with Cocoa? I have a solution that works but is too slow:
NSBitmapImageRep *rRep = [[rImage representations] objectAtIndex: 0];
NSBitmapImageRep *gRep = [[gImage representations] objectAtIndex: 0];
NSBitmapImageRep *bRep = [[bImage representations] objectAtIndex: 0];
NSBitmapImageRep *finalRep = [rRep copy];
for (NSUInteger i = 0; i < [rRep pixelsWide]; i++) {
for (NSUInteger j = 0; j < [rRep pixelsHigh]; j++) {
CGFloat r = [[rRep colorAtX:i y:j] redComponent];
CGFloat g = [[gRep colorAtX:i y:j] greenComponent];
CGFloat b = [[bRep colorAtX:i y:j] blueComponent];
[finalRep setColor:[NSColor colorWithCalibratedRed:r green:g blue:b alpha:1.0] atX:i y:j];
}
}
NSData *data = [finalRep representationUsingType:NSJPEGFileType properties:[NSDictionary dictionaryWithObject:[NSNumber numberWithDouble:0.7] forKey:NSImageCompressionFactor]];
[data writeToURL:[panel URL] atomically:YES];
The Accelerate.framework provides a function to combine 3 planar images into one destination:
vImageConvert_Planar8toRGB888.
I haven't tried your approach, but the vImage-based method below is quite fast.
I was able to combine three (R, G, B) planes of a 1680x1050 image in ~0.1 s on my Mac. The actual conversion takes ~1/3 of that time; the rest is setup and file I/O.
- (void)applicationDidFinishLaunching:(NSNotification *)aNotification
{
NSDate* start = [NSDate date];
NSURL* redImageURL = [[NSBundle mainBundle] URLForImageResource:@"red"];
NSURL* greenImageURL = [[NSBundle mainBundle] URLForImageResource:@"green"];
NSURL* blueImageURL = [[NSBundle mainBundle] URLForImageResource:@"blue"];
NSData* redImageData = [self newChannelDataFromImageAtURL:redImageURL];
NSData* greenImageData = [self newChannelDataFromImageAtURL:greenImageURL];
NSData* blueImageData = [self newChannelDataFromImageAtURL:blueImageURL];
//We use our "Red" image to measure the dimensions. We assume that all images & the destination have the same size
CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)redImageURL, NULL);
NSDictionary* properties = (__bridge NSDictionary*)CGImageSourceCopyPropertiesAtIndex(imageSource, 0, NULL);
CGFloat width = [properties[(id)kCGImagePropertyPixelWidth] doubleValue];
CGFloat height = [properties[(id)kCGImagePropertyPixelHeight] doubleValue];
self.image = [self newImageWithSize:CGSizeMake(width, height) fromRedChannel:redImageData greenChannel:greenImageData blueChannel:blueImageData];
NSLog(#"Combining 3 (R, G, B) planes of size %# took:%fs", NSStringFromSize(CGSizeMake(width, height)), [[NSDate date] timeIntervalSinceDate:start]);
}
- (NSImage*)newImageWithSize:(CGSize)size fromRedChannel:(NSData*)redImageData greenChannel:(NSData*)greenImageData blueChannel:(NSData*)blueImageData
{
vImage_Buffer redBuffer;
redBuffer.data = (void*)redImageData.bytes;
redBuffer.width = size.width;
redBuffer.height = size.height;
redBuffer.rowBytes = [redImageData length]/size.height;
vImage_Buffer greenBuffer;
greenBuffer.data = (void*)greenImageData.bytes;
greenBuffer.width = size.width;
greenBuffer.height = size.height;
greenBuffer.rowBytes = [greenImageData length]/size.height;
vImage_Buffer blueBuffer;
blueBuffer.data = (void*)blueImageData.bytes;
blueBuffer.width = size.width;
blueBuffer.height = size.height;
blueBuffer.rowBytes = [blueImageData length]/size.height;
size_t destinationImageBytesLength = size.width*size.height*3;
const void* destinationImageBytes = valloc(destinationImageBytesLength);
NSData* destinationImageData = [[NSData alloc] initWithBytes:destinationImageBytes length:destinationImageBytesLength];
vImage_Buffer destinationBuffer;
destinationBuffer.data = (void*)destinationImageData.bytes;
destinationBuffer.width = size.width;
destinationBuffer.height = size.height;
destinationBuffer.rowBytes = [destinationImageData length]/size.height;
vImage_Error result = vImageConvert_Planar8toRGB888(&redBuffer, &greenBuffer, &blueBuffer, &destinationBuffer, 0);
NSImage* image = nil;
if(result == kvImageNoError)
{
//TODO: If you need color matching, use an appropriate colorspace here
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGDataProviderRef dataProvider = CGDataProviderCreateWithCFData((__bridge CFDataRef)(destinationImageData));
CGImageRef finalImageRef = CGImageCreate(size.width, size.height, 8, 24, destinationBuffer.rowBytes, colorSpace, kCGBitmapByteOrder32Big|kCGImageAlphaNone, dataProvider, NULL, NO, kCGRenderingIntentDefault);
CGColorSpaceRelease(colorSpace);
CGDataProviderRelease(dataProvider);
image = [[NSImage alloc] initWithCGImage:finalImageRef size:NSMakeSize(size.width, size.height)];
CGImageRelease(finalImageRef);
}
free((void*)destinationImageBytes);
return image;
}
- (NSData*)newChannelDataFromImageAtURL:(NSURL*)imageURL
{
CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)imageURL, NULL);
if(imageSource == NULL){return NULL;}
CGImageRef image = CGImageSourceCreateImageAtIndex(imageSource, 0, NULL);
CFRelease(imageSource);
if(image == NULL){return NULL;}
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image);
CGFloat width = CGImageGetWidth(image);
CGFloat height = CGImageGetHeight(image);
size_t bytesPerRow = CGImageGetBytesPerRow(image);
CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(image);
CGContextRef bitmapContext = CGBitmapContextCreate(NULL, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
NSData* data = NULL;
if(NULL != bitmapContext)
{
CGContextDrawImage(bitmapContext, CGRectMake(0.0, 0.0, width, height), image);
CGImageRef imageRef = CGBitmapContextCreateImage(bitmapContext);
if(NULL != imageRef)
{
data = (NSData*)CFBridgingRelease(CGDataProviderCopyData(CGImageGetDataProvider(imageRef)));
}
CGImageRelease(imageRef);
CGContextRelease(bitmapContext);
}
CGImageRelease(image);
return data;
}
Your program creates many, many color objects.
Although your program could simply access the image reps' bitmapData, it would require your program to know a lot about bitmap representations.
Before taking that approach, you should prefer to let Quartz do the heavy lifting by rendering each image to a CGBitmapContext (e.g. using CGContextDrawImage(gtx, rect, img.CGImage)) and then extracting/copying the rendered component values from the rendered result over to a destination RGB bitmap.
If your inputs are not multicomponent color models (e.g. grayscale), then you should render to the source color model to save a bunch of CPU time and memory.
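Here is a minimal sketch of that render-then-copy approach, assuming CGImageRef inputs with identical pixel dimensions (CreateMergedRGBImage is an illustrative helper, not an existing API):
#import <AppKit/AppKit.h>

static CGImageRef CreateMergedRGBImage(CGImageRef rImg, CGImageRef gImg, CGImageRef bImg)
{
    size_t w = CGImageGetWidth(rImg);
    size_t h = CGImageGetHeight(rImg);
    CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx[4]; // red, green, blue sources plus the destination, all RGBX
    for (int i = 0; i < 4; i++) {
        ctx[i] = CGBitmapContextCreate(NULL, w, h, 8, 0, cs, kCGImageAlphaNoneSkipLast);
    }
    CGColorSpaceRelease(cs);
    // Let Quartz decode each source into a known 8-bit-per-component RGBX layout.
    CGRect rect = CGRectMake(0, 0, w, h);
    CGContextDrawImage(ctx[0], rect, rImg);
    CGContextDrawImage(ctx[1], rect, gImg);
    CGContextDrawImage(ctx[2], rect, bImg);
    uint8_t *rp = (uint8_t *)CGBitmapContextGetData(ctx[0]);
    uint8_t *gp = (uint8_t *)CGBitmapContextGetData(ctx[1]);
    uint8_t *bp = (uint8_t *)CGBitmapContextGetData(ctx[2]);
    uint8_t *dp = (uint8_t *)CGBitmapContextGetData(ctx[3]);
    size_t rowBytes = CGBitmapContextGetBytesPerRow(ctx[3]); // identical for all four contexts
    // Copy one component per source into the destination.
    for (size_t y = 0; y < h; y++) {
        for (size_t x = 0; x < w; x++) {
            size_t i = y * rowBytes + x * 4;
            dp[i + 0] = rp[i + 0]; // red from the red-only image
            dp[i + 1] = gp[i + 1]; // green from the green-only image
            dp[i + 2] = bp[i + 2]; // blue from the blue-only image
        }
    }
    CGImageRef merged = CGBitmapContextCreateImage(ctx[3]);
    for (int i = 0; i < 4; i++) {
        CGContextRelease(ctx[i]);
    }
    return merged; // follows the Create rule: the caller releases it
}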
While working with OpenCV, I need to convert an NSImage to an OpenCV multi-channel 2D matrix (cv::Mat) and vice versa.
What's the best way to do it?
Greets,
Dom
Here's my outcome, which works pretty well.
NSImage+OpenCV.h:
//
// NSImage+OpenCV.h
//
#import <AppKit/AppKit.h>
@interface NSImage (NSImage_OpenCV) {
}
+(NSImage*)imageWithCVMat:(const cv::Mat&)cvMat;
-(id)initWithCVMat:(const cv::Mat&)cvMat;
@property(nonatomic, readonly) cv::Mat CVMat;
@property(nonatomic, readonly) cv::Mat CVGrayscaleMat;
@end
NSImage+OpenCV.mm:
//
// NSImage+OpenCV.mm
//
#import "NSImage+OpenCV.h"
static void ProviderReleaseDataNOP(void *info, const void *data, size_t size)
{
return;
}
@implementation NSImage (NSImage_OpenCV)
-(CGImageRef)CGImage
{
CGContextRef bitmapCtx = CGBitmapContextCreate(NULL/*data - pass NULL to let CG allocate the memory*/,
[self size].width,
[self size].height,
8 /*bitsPerComponent*/,
0 /*bytesPerRow - CG will calculate it for you if it's allocating the data. This might get padded out a bit for better alignment*/,
[[NSColorSpace genericRGBColorSpace] CGColorSpace],
kCGBitmapByteOrder32Host|kCGImageAlphaPremultipliedFirst);
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:[NSGraphicsContext graphicsContextWithGraphicsPort:bitmapCtx flipped:NO]];
[self drawInRect:NSMakeRect(0,0, [self size].width, [self size].height) fromRect:NSZeroRect operation:NSCompositeCopy fraction:1.0];
[NSGraphicsContext restoreGraphicsState];
CGImageRef cgImage = CGBitmapContextCreateImage(bitmapCtx);
CGContextRelease(bitmapCtx);
return cgImage;
}
-(cv::Mat)CVMat
{
CGImageRef imageRef = [self CGImage];
CGColorSpaceRef colorSpace = CGImageGetColorSpace(imageRef);
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), imageRef);
CGContextRelease(contextRef);
CGImageRelease(imageRef);
return cvMat;
}
-(cv::Mat)CVGrayscaleMat
{
CGImageRef imageRef = [self CGImage];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat = cv::Mat(rows, cols, CV_8UC1); // 8 bits per component, 1 channel
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), imageRef);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
CGImageRelease(imageRef);
return cvMat;
}
+ (NSImage *)imageWithCVMat:(const cv::Mat&)cvMat
{
return [[[NSImage alloc] initWithCVMat:cvMat] autorelease];
}
- (id)initWithCVMat:(const cv::Mat&)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1)
{
colorSpace = CGColorSpaceCreateDeviceGray();
}
else
{
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGImageRef imageRef = CGImageCreate(cvMat.cols, // Width
cvMat.rows, // Height
8, // Bits per component
8 * cvMat.elemSize(), // Bits per pixel
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone | kCGBitmapByteOrderDefault, // Bitmap info flags
provider, // CGDataProviderRef
NULL, // Decode
false, // Should interpolate
kCGRenderingIntentDefault); // Intent
NSBitmapImageRep *bitmapRep = [[NSBitmapImageRep alloc] initWithCGImage:imageRef];
NSImage *image = [[NSImage alloc] init];
[image addRepresentation:bitmapRep];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return image;
}
@end
Example usage:
Just import it like this:
#import "NSImage+OpenCV.h"
And use it like this:
cv::Mat cvMat_test;
NSImage *image = [NSImage imageNamed:@"test.jpg"];
cvMat_test = [image CVMat];
[myImageView setImage:[NSImage imageWithCVMat:cvMat_test]];
In -(id)initWithCVMat:(const cv::Mat&)cvMat, shouldn't you be adding the representation to self, rather than a new NSImage?
-(id)initWithCVMat:(const cv::Mat *)iMat
{
if(self = [super init]) {
NSData *tData = [NSData dataWithBytes:iMat->data length:iMat->elemSize() * iMat->total()];
CGColorSpaceRef tColorSpace;
if(iMat->elemSize() == 1) {
tColorSpace = CGColorSpaceCreateDeviceGray();
} else {
tColorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef tProvider = CGDataProviderCreateWithCFData((CFDataRef) tData);
CGImageRef tImage = CGImageCreate(
iMat->cols,
iMat->rows,
8,
8 * iMat->elemSize(),
iMat->step[0],
tColorSpace,
kCGImageAlphaNone | kCGBitmapByteOrderDefault,
tProvider,
NULL,
false,
kCGRenderingIntentDefault);
NSBitmapImageRep *tBitmap = [[NSBitmapImageRep alloc] initWithCGImage:tImage];
[self addRepresentation:tBitmap];
[tBitmap release];
CGImageRelease(tImage);
CGDataProviderRelease(tProvider);
CGColorSpaceRelease(tColorSpace);
}
return self;
}
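Usage would then look like the category example above, except that this variant of the initialiser takes a pointer (illustrative, manual retain/release as in the rest of the answer):
cv::Mat mat = [image CVMat];   // image: any NSImage, as in the earlier example
NSImage *roundTripped = [[[NSImage alloc] initWithCVMat:&mat] autorelease];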