// Draws the apple image to fill the view's bounds, compositing over
// whatever is already there.
-(void)drawRect : (NSRect)rect
{
    // Fixes vs. the broken draft: "orgin" was a typo for "origin", and the
    // draw message was sent to a rect (appleRect) instead of the image.
    imgRect.origin = NSZeroPoint;
    imgRect.size = [appleImage size];
    drawRect = [self bounds];
    [appleImage drawInRect:drawRect
                  fromRect:imgRect
                 operation:NSCompositeSourceOver
                  fraction:1.0];
}
I'm trying this but it's not easy and driving me crazy.
How to change NSRect type code to CGRect type code ?
plz help!
ps. i already know.. NSRect -> CGRect , NSZeroPoint -> CGRectZero
Just use these two Cocoa (macOS-only, not available on iOS) functions:
CGRect NSRectToCGRect(NSRect nsrect);
NSRect NSRectFromCGRect(CGRect cgrect);
Have a look here to deepen the topic: http://cocoadev.com/CGRect
If you encounter this in iOS, such as with UIKeyboardFrameEndUserInfoKey, it's important to know that the rect arrives boxed in an NSValue instance (NSRect itself is a struct, not a subclass of NSValue), so you must unwrap it:
// The keyboard frame crosses the notification boundary boxed in an NSValue;
// unbox it to recover the CGRect.
if let frameValue = notification.userInfo?[UIKeyboardFrameEndUserInfoKey] as? NSValue {
    let keyboardRect = frameValue.CGRectValue()
    // Use your CGRect
}
Assuming you're using Apple's Foundation and not GNUStep or something else odd...
Swift
In Swift, NSRect is just a typealias of CGRect (NSGeometry.swift line 449-ish), so you can simply use either in place of the other anywhere.
// Because NSRect is a typealias of CGRect, values built under either name
// compare equal and flow freely into APIs declared with the other name.
let nsTest1 = NSRect(x: 1, y: 2, width: 3, height: 4)
let cgTest1 = CGRect(x: 1, y: 2, width: 3, height: 4)
let cgTest2 = CGRect(x: 100, y: 200, width: 300, height: 400)
print(nsTest1 == cgTest1) // true

// Declared with CGRect — an NSRect is accepted unchanged.
func takeCgRect(_ rect: CGRect) {
    print("A CoreGraphics rectangle at \(rect.origin) with size \(rect.size)")
}

// Declared with NSRect — a CGRect is accepted unchanged.
func takeNsRect(_ rect: NSRect) {
    print("A NeXTStep rectangle at \(rect.origin) with size \(rect.size)")
}

takeCgRect(nsTest1) // A core graphics rectangle at (1.0, 2.0) with size (3.0, 4.0)
takeNsRect(cgTest2) // A NeXTStep rectangle at (100.0, 200.0) with size (300.0, 400.0)
Objective-C
In Objective-C, NSRect is just a typedef of CGRect (NSGeometry.h line 26-ish), so you can simply use either in place of the other anywhere.
// Objective-C demo: NSRect is a typedef of CGRect, so either name works with
// both APIs. (The "#" characters in the original paste were mangled "@"
// tokens; semicolons were also lost.)
void takeCgRect(CGRect rect) {
    NSLog(@"A CoreGraphics rectangle at %@ with size %@", NSStringFromCGPoint(rect.origin), NSStringFromCGSize(rect.size));
}

void takeNsRect(NSRect rect) {
    NSLog(@"A NeXTStep rectangle at %@ with size %@", NSStringFromPoint(rect.origin), NSStringFromSize(rect.size));
}

// ELSEWHERE:
NSRect nsTest1 = NSMakeRect(1, 2, 3, 4);
CGRect cgTest1 = CGRectMake(1, 2, 3, 4);
CGRect cgTest2 = CGRectMake(100, 200, 300, 400);
// C structs cannot be compared with ==; use the geometry helper instead.
NSLog(@"%@", @(NSEqualRects(nsTest1, cgTest1))); // 1
takeCgRect(nsTest1); // A core graphics rectangle at {1.0, 2.0} with size {3.0, 4.0}
takeNsRect(cgTest2); // A NeXTStep rectangle at {100.0, 200.0} with size {300.0, 400.0}
SWIFT 5
It works for me. Use 'if' before let.
// Unbox the keyboard's end frame (an NSValue-wrapped CGRect) from userInfo.
let keyboardFrame = (userInfo[UIResponder.keyboardFrameEndUserInfoKey] as? NSValue)?.cgRectValue
// keyboardFrame is CGRect?, so height is accessed via optional chaining.
print("\(String(describing: keyboardFrame?.height))")
Related
There does not seem to be any standard AppKit control for creating a tab bar similar to the ones found in Xcode.
Any idea on whether this is possible or would I need to use a custom control of some sort, and if so any suggestions on whether any are available.
OK, I figured it out by subclassing NSSegmentedControl to get the desired behaviours.
import Cocoa

/// Segmented cell that suppresses the standard border and renders each
/// segment as a centred square image, tinting the selected segment blue.
class OSSegmentedCell: NSSegmentedCell {
    override func draw(withFrame cellFrame: NSRect, in controlView: NSView) {
        // Deliberately skip super so the default border is never drawn.
        //super.draw(withFrame: cellFrame, in: controlView)
        // Drawing the interior still invokes drawSegment() once per segment.
        super.drawInterior(withFrame: cellFrame, in: controlView)
    }

    override func drawSegment(_ segment: Int, inFrame frame: NSRect, with controlView: NSView) {
        // Build a square target rect, horizontally centred in the segment,
        // shrunk by `inset` on each side (currently zero).
        let inset: CGFloat = 0.0
        let side = frame.height - 2.0 * inset
        let xOffset = (frame.width - side) / 2.0
        let squareRect = NSRect(x: frame.minX + xOffset, y: frame.minY, width: side, height: side)

        guard let image = self.image(forSegment: segment) else { return }
        if self.selectedSegment == segment {
            // Highlight the active segment with a blue-tinted copy.
            let highlighted = image.tintedImageWithColor(color: NSColor.blue)
            highlighted.draw(in: squareRect)
        } else {
            image.draw(in: squareRect)
        }
    }
}
extension NSImage {
    /// Returns a tinted copy of the receiver: `color` is composited onto a
    /// copy using the .sourceIn operation.
    func tintedImageWithColor(color: NSColor) -> NSImage {
        let bounds = NSMakeRect(0, 0, self.size.width, self.size.height)
        let tinted = self.copy() as! NSImage
        tinted.lockFocus()
        defer { tinted.unlockFocus() }
        color.set()
        // Underscored Swift bridge of NSRectFillUsingOperation.
        __NSRectFillUsingOperation(bounds, NSCompositingOperation.sourceIn)
        return tinted
    }
}
I have an NSImage object, I have an CIDetector object that detects QR codes on that image. After it detects, I wants to trim that image so it only has the QR code in it. This is how I've got the bounds of the QR code:
// CIDetector's API is -featuresInImage: (plural); -featureInImage: does not exist.
NSArray *features = [myQRDetector featuresInImage:myCIImage];
CIQRCodeFeature *qrFeature = features[0];
CGRect qrBounds = qrFeature.bounds;
Now how can I trim the image so it only contains the area described by qrBounds variable.
In Swift 5
/// Returns a new image containing just the portion of `image` inside `rect`.
func trim(image: NSImage, rect: CGRect) -> NSImage {
    let trimmed = NSImage(size: rect.size)
    trimmed.lockFocus()
    defer { trimmed.unlockFocus() }
    // Copy the source sub-rect into the full area of the new image.
    let destination = CGRect(origin: .zero, size: trimmed.size)
    image.draw(in: destination, from: rect, operation: .copy, fraction: 1.0)
    return trimmed
}
The answer from onmyway133 is great, but it doesn't preserve the datatype of the source image. For instance, if your source is an .hdr image, each color channel will be floats, but the cropped image will be an 8-bit integer RGBA image.
For preserving the format of the source, it seems you have to go down to the associated CGImage. I do this:
extension NSImage {
    /// Crops via the underlying CGImage so the source's pixel format is
    /// preserved. Falls back to an empty image of the requested size when no
    /// CGImage can be produced or the crop rect is invalid.
    func cropping(to rect: CGRect) -> NSImage {
        var proposedRect = CGRect(origin: .zero, size: size)
        guard
            let cgImage = self.cgImage(forProposedRect: &proposedRect, context: nil, hints: nil),
            let cropped = cgImage.cropping(to: rect)
        else {
            return NSImage(size: rect.size)
        }
        // NSZeroSize lets NSImage adopt the cropped CGImage's own size.
        return NSImage(cgImage: cropped, size: NSZeroSize)
    }
}
You need to create a new NSImage and draw the part of the original image you want to it.
// Create a blank image the size of the QR bounds, then draw just that
// region of the original into it.
NSImage* newImage = [[NSImage alloc] initWithSize:NSSizeFromCGSize(qrBounds.size)];
[newImage lockFocus];
// Destination covers the whole new image; the source is the QR sub-rect.
NSRect dest = { NSZeroPoint, newImage.size };
[origImage drawInRect:dest fromRect:NSRectFromCGRect(qrBounds) operation:NSCompositeCopy fraction:1];
[newImage unlockFocus];
Trying to override drawPlaceholderInRect in Swift2.1 so that I change the text color and I am having a hard time converting the following Obj-C to Swift:
// Draws the placeholder text vertically centred, in white.
// (The original paste mangled Objective-C "@" tokens into "#", dropped the
// leading "-", and lost the method's closing brace.)
- (void)drawPlaceholderInRect:(CGRect)rect
{
    UIColor *colour = [UIColor whiteColor];
    if ([self.placeholder respondsToSelector:@selector(drawInRect:withAttributes:)]) {
        NSDictionary *attributes = @{NSForegroundColorAttributeName: colour, NSFontAttributeName: self.font};
        CGRect boundingRect = [self.placeholder boundingRectWithSize:rect.size options:0 attributes:attributes context:nil];
        [self.placeholder drawAtPoint:CGPointMake(0, (rect.size.height/2)-boundingRect.size.height/2) withAttributes:attributes];
    }
}
I got this:
// The asker's partial attempt — the "..." lines below are placeholders the
// asker left, not compilable code.
public override func drawPlaceholderInRect(rect: CGRect) {
    let textDict: NSDictionary = [NSForegroundColorAttributeName: UIColor.whiteColor()]
    ...
    ..
    .
}
Can someone help out please?
/// Swift translation of the Obj-C override: draws the placeholder text
/// vertically centred in `rect`, in white, using the field's font.
func drawPlaceholder(in rect: CGRect) {
    // NSString always implements draw(at:withAttributes:), so the Obj-C
    // respondsToSelector check (an old availability guard) is unnecessary.
    guard let placeholder = placeholder as NSString? else { return }
    // Values must be non-optional: optional values would make this
    // [String: Any?], which the text-drawing APIs reject.
    var attributes: [String: Any] = [NSForegroundColorAttributeName: UIColor.white]
    if let font = font {
        attributes[NSFontAttributeName] = font
    }
    let boundingRect = placeholder.boundingRect(with: rect.size, options: [], attributes: attributes, context: nil)
    placeholder.draw(at: CGPoint(x: 0, y: (rect.size.height / 2) - boundingRect.size.height / 2),
                     withAttributes: attributes)
}
See if this is helpful:
https://developer.apple.com/library/ios/documentation/Swift/Conceptual/BuildingCocoaApps/index.html#//apple_ref/doc/uid/TP40014216-CH2-ID0
Also, if you have trouble translating Objective-C to Swift, which can be tricky sometimes, consider overriding the method in a derived Objective-C class and then using that class in Swift.
I have two instances of NSImage. I want image2 to be placed on top of image1, with a certain level of opacity. They are of matching dimensions. Neither of the images need to be visible in the UI.
How can I correctly set up a graphics context and draw images to it, one with full opacity and another semi-transparent?
I have been reading several answers here but I find it complicated, especially since most seem to be for either Objective-C or only apply to iOS. Any pointers are appreciated. If this can be accomplished without needing a CGContext at all, that would be even better.
// NOTE(review): this draft does not compile or blend as written —
//  - the parameters are image1/image2/alpha, but the body reads an undefined
//    `inputImage` and hard-codes 0.5 instead of using `alpha`;
//  - CGBitmapContextCreate takes nil (not 0) for the data pointer, a
//    CGColorSpace (not NSColorSpace), and a CGImageAlphaInfo raw value;
//  - neither image is ever actually drawn into the context.
// The NSGraphicsContext-based answer below is the working approach.
func blendImages(image1: NSImage, image2: NSImage, alpha: CGFloat) -> CGImage {
    // Create context
    var ctx: CGContextRef = CGBitmapContextCreate(0, inputImage.size.width, inputImage.size.height, 8, inputImage.size.width*4, NSColorSpace.genericRGBColorSpace(), PremultipliedLast)
    let area = CGRectMake(0, 0, inputImage.size.width, inputImage.size.height)
    CGContextScaleCTM(ctx, 1, -1)
    // Draw image1 in context
    // Draw image2 with alpha opacity
    CGContextSetAlpha(ctx, CGFloat(0.5))
    // Create CGImage from context
    let outputImage = CGBitmapContextCreateImage(ctx)
    return outputImage
}
Elsewhere I have this extension to get CGImages from my NSImages:
extension NSImage {
    // Re-encodes the image as TIFF data and extracts a CGImage from it.
    // NOTE(review): Swift 1-era code — CGImageSourceCreateWithData and
    // CGImageSourceCreateImageAtIndex return optionals, so under modern Swift
    // this property would need guards (or a CGImage? return type).
    var CGImage: CGImageRef {
        get {
            let imageData = self.TIFFRepresentation
            let source = CGImageSourceCreateWithData(imageData as! CFDataRef, nil)
            let maskRef = CGImageSourceCreateImageAtIndex(source, 0, nil)
            return maskRef
        }
    }
}
I managed to solve this with NSGraphicsContext.
/// Loads both images, then composites image B over image A's first bitmap
/// representation with `fraction` opacity, returning the combined CGImage.
func mergeImagesAB(pathA: String, pathB: String, fraction: CGFloat) -> CGImage {
    guard let imgA = NSImage(byReferencingFile: pathA),
          let imgB = NSImage(byReferencingFile: pathB),
          let bitmap = imgA.representations[0] as? NSBitmapImageRep,
          let ctx = NSGraphicsContext(bitmapImageRep: bitmap)
    else { fatalError("Failed to load images.") }

    // A's pixels are already in the bitmap rep; drawing B into a context
    // backed by that rep blends the two.
    let target = NSRect(origin: CGPoint(x: 0, y: 0), size: bitmap.size)
    NSGraphicsContext.saveGraphicsState()
    NSGraphicsContext.setCurrentContext(ctx)
    imgB.drawInRect(target, fromRect: target, operation: .CompositeSourceOver, fraction: fraction)
    NSGraphicsContext.restoreGraphicsState()

    guard let merged = bitmap.CGImage
    else { fatalError("Failed to create image.") }
    return merged
}
Thanks Henrik, I needed this for Obj-C. Here's that version in case someone also needs:
// Obj-C port of the Swift merge: composites image b over a's first bitmap rep.
// Fix: the original pushed the graphics state twice (duplicated
// saveGraphicsState/setCurrentContext lines) but popped only once, leaving
// the graphics-state stack unbalanced after the call.
-(CGImageRef) mergeImage:(NSImage*)a andB:(NSImage*)b fraction:(float)fraction{
    NSBitmapImageRep *bitmap = (NSBitmapImageRep*)[[a representations] objectAtIndex:0];
    NSGraphicsContext *ctx = [NSGraphicsContext graphicsContextWithBitmapImageRep:bitmap];
    CGRect rect = CGRectMake(0, 0, bitmap.size.width, bitmap.size.height);
    [NSGraphicsContext saveGraphicsState];
    [NSGraphicsContext setCurrentContext:ctx];
    [b drawInRect:rect fromRect:rect operation:NSCompositeSourceOver fraction:fraction];
    [NSGraphicsContext restoreGraphicsState];
    return [bitmap CGImage];
}
How can I scale down the images used in a UISegmentedControl? I am creating the segmented control programmatically:
// The "#" before string literals is a paste artifact; Objective-C string
// literals use "@".
// NOTE(review): UISegmentedControl has no +segmentedControlWithItems: class
// method in UIKit; the standard API is -[UISegmentedControl initWithItems:].
UISegmentedControl * segmentButton;
segmentButton = [UISegmentedControl segmentedControlWithItems:
    [NSArray arrayWithObjects:
     [UIImage imageNamed:@"option_one.png"],
     [UIImage imageNamed:@"option_two.png"],
     nil]];
segmentButton.contentMode = UIViewContentModeScaleToFill;
segmentButton.frame = CGRectMake(10, 10, 200, 32);
[view addSubview:segmentButton];
The result is not what I expect. The original .png images are about 100 pixels high, and they are not scaled down to fit the 32-pixel height of the segmented control. This results in a segmented control being drawn with enormous images overlapping it:
How can I tell the control to scale down those images?
You should never use a "big" image to display only a small picto. The full image will be loaded in memory, and only 10% of its pixels will be displayed, so you will use a lot of memory for nothing.
What you can do if you really want to use this resource is create a thumbnail with code before, and use this new generated thumbnail.
The following method returns a new image you can use in your UISegmentedControl, and you can release the big one.
// Returns `image` redrawn at `newSize`.
// Improvement: UIGraphicsBeginImageContextWithOptions with scale 0.0 renders
// at the device's screen scale; plain UIGraphicsBeginImageContext always
// renders at 1x and produces blurry thumbnails on Retina displays.
- (UIImage *)imageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize {
    UIGraphicsBeginImageContextWithOptions(newSize, NO, 0.0);
    [image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}
With your code:
// As above, with each image scaled to 32x32 before insertion. The "#" before
// string literals is a paste artifact; Objective-C string literals use "@".
UISegmentedControl * segmentButton;
segmentButton = [UISegmentedControl segmentedControlWithItems: [NSArray arrayWithObjects:
    [self imageWithImage:[UIImage imageNamed:@"option_one.png"] scaledToSize:CGSizeMake(32, 32)],
    [self imageWithImage:[UIImage imageNamed:@"option_two.png"] scaledToSize:CGSizeMake(32, 32)],
    nil]];
segmentButton.contentMode = UIViewContentModeScaleToFill;
segmentButton.frame = CGRectMake(10, 10, 200, 32);
[view addSubview:segmentButton];
In swift3,
extension UIImage {
    /// Redraws the receiver into a `scaleToSize` bitmap and returns the result.
    func scaleImage(scaleToSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContext(scaleToSize)
        defer { UIGraphicsEndImageContext() }
        self.draw(in: CGRect(origin: .zero, size: scaleToSize))
        // The unwrap assumes the bitmap context we just began is current,
        // matching the original's force-unwrap behaviour.
        return UIGraphicsGetImageFromCurrentImageContext()!
    }
}
For iOS 14 and later
UIGraphicsGetImageFromCurrentImageContext make some image not that clear, so I change to UIGraphicsImageRenderer, it works fine to me.
extension UIImage {
    /// Renders the receiver into a `targetSize` bitmap using
    /// UIGraphicsImageRenderer and returns the result.
    func resizedImage(to targetSize: CGSize) -> UIImage? {
        let renderer = UIGraphicsImageRenderer(size: targetSize)
        let resized = renderer.image { _ in
            self.draw(in: CGRect(origin: .zero, size: targetSize))
        }
        return resized
    }
}
class SomeView: UIView {
    // Builds the segmented control from pre-scaled 15x15 images.
    // NOTE(review): `exampleSegmenedControl`, `leftImage` and `rightImage`
    // are declared elsewhere; the force-unwraps assume resizing never fails.
    func initSegementedControl() {
        exampleSegmenedControl = UISegmentedControl(items:[
            leftImage.resizedImage(to: .init(width: 15, height: 15))!,
            rightImage.resizedImage(to: .init(width: 15, height: 15))!
        ])
    }
}