Applying a gradient effect on an SKSpriteNode in Swift 5.0 - filter

iOS 14, Swift 5.x
Trying to add a gradient to an SKSpriteNode using an SKEffectNode. The code compiles but then crashes; am I attempting the impossible here?
let image2U = UIImage(named: "2140983")?.ciImage
let effectsNode = SKEffectNode()
let filter = CIFilter(name: "CILinearGradient")
let startColor = UIColor.red
let endColor = UIColor.yellow
let startVector = CIVector(cgPoint: CGPoint(x: 0, y: 0))
let endVector = CIVector(cgPoint: CGPoint(x: box.size.width, y: box.size.height))
filter?.setDefaults()
filter?.setValue(startVector, forKey: "inputPoint0")
filter?.setValue(endVector, forKey: "inputPoint1")
filter?.setValue(startColor, forKey: "inputColor0")
filter?.setValue(endColor, forKey: "inputColor1")
filter?.setValue(image2U, forKey: "inputImage")
effectsNode.filter = filter
self.addChild(effectsNode)
effectsNode.addChild(box)
Compiles, but then crashes with this message ...
2021-07-09 21:08:47.584142+0200 GameIV[19791:1140737] *** Terminating app due to uncaught exception 'NSUnknownKeyException', reason: '[<CILinearGradient 0x600002070d20> setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key InputImage.'
And as you can see, I did set an inputImage. I tried a UIImage instead, same error... and tried no image at all, same error.

You probably have a typo in the key name: it should be "inputImage", not "InputImage". Usually you'd use one of the key constants to avoid exactly this. There's a partial list at https://developer.apple.com/documentation/coreimage/cifilter/filter_parameter_keys
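For what it's worth, a quick illustration of those constants: kCIInputImageKey covers the image parameter on filters that actually take one (CIGaussianBlur here, purely as an example), while the gradient's point and colour parameters have no public constants as far as I know. Note that, as the answer below explains, CILinearGradient takes no input image at all, so the constant only helps with filters that do:
// Hypothetical example with a filter that does accept an input image.
// someCIImage is a placeholder CIImage, not from the original question.
let blur = CIFilter(name: "CIGaussianBlur")
blur?.setValue(someCIImage, forKey: kCIInputImageKey)   // constant instead of the string "inputImage"
blur?.setValue(4.0, forKey: kCIInputRadiusKey)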

The answer to my question is that SKEffectNode needs a filter that takes an input image; that is what the exception is trying to tell me here. The gradient filter neither needs nor works with an input image, it simply generates a new one, so it can't be used this way. This is CIFilter code that generates an image in exactly that fashion.
import UIKit
import CoreImage.CIFilterBuiltins

extension UIImage {
    func returnCheckerboard() -> UIImage {
        let context = CIContext(options: nil)

        // Generator filter: it produces an image without needing an input image.
        let checkerFilter = CIFilter.checkerboardGenerator()
        checkerFilter.color0 = .white
        checkerFilter.color1 = .black
        checkerFilter.center = CGPoint(x: 0, y: 0)
        checkerFilter.sharpness = 1
        checkerFilter.width = 8

        guard let outputImage = checkerFilter.outputImage else { return UIImage() }

        // The generator's output has infinite extent, so render only the region we want.
        if let cgimg = context.createCGImage(outputImage, from: CGRect(x: 0, y: 0, width: 128, height: 128)) {
            return UIImage(cgImage: cgimg)
        }
        return UIImage()
    }
}
This doesn't create a gradient, but the principle is the same: it creates a checkerboard, requires no input image, and for that reason isn't a CIFilter you can use with an SKEffectNode.
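Following the same principle for the actual gradient, here is a rough sketch (the function name, size and colours are mine, purely illustrative): generate the gradient with CILinearGradient via CIFilterBuiltins, render it to a UIImage, and hand that to an SKSpriteNode as a texture instead of going through SKEffectNode.
import SpriteKit
import CoreImage.CIFilterBuiltins

// Hypothetical helper: render a red-to-yellow linear gradient into a UIImage
// and wrap it in an SKTexture for a sprite.
func gradientSprite(size: CGSize) -> SKSpriteNode {
    let filter = CIFilter.linearGradient()
    filter.point0 = CGPoint(x: 0, y: 0)
    filter.point1 = CGPoint(x: size.width, y: size.height)
    filter.color0 = CIColor(color: .red)
    filter.color1 = CIColor(color: .yellow)

    let context = CIContext(options: nil)
    guard let output = filter.outputImage,
          let cgImage = context.createCGImage(output, from: CGRect(origin: .zero, size: size)) else {
        return SKSpriteNode(color: .red, size: size)   // fallback if rendering fails
    }
    return SKSpriteNode(texture: SKTexture(image: UIImage(cgImage: cgImage)), size: size)
}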

Related

Cropping NSImage with onscreen coordinates incorrect on different screen sizes

I'm trying to replicate macOS's screenshot functionality: dragging a selection onscreen to provide coordinates for cropping an image. It works fine on my desktop Mac (2560x1600), but when I test on my laptop (2016 rMBP 15", 2880x1800), the cropped image is completely wrong. I don't understand why I get the right results on my desktop but not on my laptop. I think it has something to do with Quartz coordinates being different from Cocoa coordinates, since on the laptop the resulting image looks as if the coordinates are flipped on the Y-axis.
Here is the code I am using to generate the cropping CGRect:
// Segment used to draw the CAShapeLayer:
private func handleDragging(_ event: NSEvent) {
    let mouseLoc = event.locationInWindow
    if let point = self.startPoint,
       let layer = self.shapeLayer {
        let path = CGMutablePath()
        path.move(to: point)
        path.addLine(to: NSPoint(x: self.startPoint.x, y: mouseLoc.y))
        path.addLine(to: mouseLoc)
        path.addLine(to: NSPoint(x: mouseLoc.x, y: self.startPoint.y))
        path.closeSubpath()
        layer.path = path
        self.selectionRect = path.boundingBox
    }
}
private func startDragging(_ event: NSEvent) {
    if let window = self.window,
       let contentView = window.contentView,
       let layer = contentView.layer,
       !self.isDragging {
        self.isDragging = true
        self.startPoint = window.mouseLocationOutsideOfEventStream
        shapeLayer = CAShapeLayer()
        shapeLayer.lineWidth = 1.0
        shapeLayer.fillColor = NSColor.white.withAlphaComponent(0.5).cgColor
        shapeLayer.strokeColor = NSColor.systemGray.cgColor
        layer.addSublayer(shapeLayer)
    }
}
Then this is the code where I actually generate the screenshot and crop using the CGRect:
public func processResults(_ rect: CGRect) {
    if let windowID = self.globalWindow?.windowNumber,
       let screen = self.getScreenWithMouse(), rect.width > 5 && rect.height > 5 {
        self.delegate?.processingResults()
        let cgScreenshot = CGWindowListCreateImage(screen.frame, .optionOnScreenBelowWindow, CGWindowID(windowID), .bestResolution)
        var rect2 = rect
        rect2.origin.y = NSMaxY(self.getScreenWithMouse()!.frame) - NSMaxY(rect)
        if let croppedCGScreenshot = cgScreenshot?.cropping(to: rect2) {
            let rep = NSBitmapImageRep(cgImage: croppedCGScreenshot)
            let image = NSImage()
            image.addRepresentation(rep)
            self.showPreviewWindow(image: image)
            let requests = [self.getTextRecognitionRequest()]
            let imageRequestHandler = VNImageRequestHandler(cgImage: croppedCGScreenshot, orientation: .up, options: [:])
            DispatchQueue.global(qos: .userInitiated).async {
                do {
                    try imageRequestHandler.perform(requests)
                } catch let error {
                    print("Error: \(error)")
                }
            }
            DispatchQueue.main.asyncAfter(deadline: .now() + 5.0) {
                self.hidePreviewWindow()
            }
        }
    }
    self.globalWindow = nil
}
Not 15 minutes after I asked this question, I tried one more thing and it works!
Relevant snippet:
var correctedRect = rect

// Set the Y origin properly (counteracting the flipped Y-axis)
correctedRect.origin.y = screen.frame.height - rect.origin.y - rect.height

// Check whether we're on another screen
if screen.frame.origin.y < 0 {
    correctedRect.origin.y = correctedRect.origin.y - screen.frame.origin.y
}

// Finally, correct the x origin (if we're on another screen, the origin will be larger than zero)
correctedRect.origin.x = correctedRect.origin.x + screen.frame.origin.x

// Generate the screenshot inside the requested rect
let cgScreenshot = CGWindowListCreateImage(correctedRect, .optionOnScreenBelowWindow, CGWindowID(windowID), .bestResolution)
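For reuse, the same correction can be wrapped in a small helper (the function name is mine, purely illustrative): it converts a selection rect expressed in Cocoa's bottom-left-origin coordinates on a given NSScreen into the top-left-origin global rect that CGWindowListCreateImage expects.
import Cocoa

// Hypothetical helper: convert a bottom-left-origin (Cocoa) rect on `screen`
// into the top-left-origin (Quartz) global rect used by CGWindowListCreateImage.
func quartzRect(for cocoaRect: CGRect, on screen: NSScreen) -> CGRect {
    var converted = cocoaRect
    converted.origin.y = screen.frame.height - cocoaRect.origin.y - cocoaRect.height
    if screen.frame.origin.y < 0 {
        converted.origin.y -= screen.frame.origin.y   // screen positioned below the main screen
    }
    converted.origin.x += screen.frame.origin.x       // horizontal offset of a secondary screen
    return converted
}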

NSImageView (added programmatically) doesn't show image, but shows color

Swift 4. Very simple project: all I did was add an NSImageView programmatically, set a backgroundColor, and load an NSImage from a .jpg file. I can see the nice pink colour, but I can't see the image at all! I tried many different approaches and some worked (the image showed up fine in a collection view, and when the NSImageView was added manually in the storyboard), but I need this simple programmatic approach. Here is all of my code:
class ViewController: NSViewController {

    var image: NSImage = NSImage()
    var ivTest = NSImageView()

    override func viewDidLoad() {
        super.viewDidLoad()
        self.view.addSubview(self.ivTest)
        self.ivTest.wantsLayer = true
        self.ivTest.layer?.backgroundColor = NSColor.systemPink.cgColor
        self.ivTest.layer?.frame = NSRect(x: 0, y: 0, width: 100, height: 100)
        let manager = FileManager.default
        var url = manager.urls(for: .documentDirectory, in: .userDomainMask).first
        url = url?.appendingPathComponent("night.jpg")
        image = NSImage(byReferencing: url!)
        if image.isValid == true {
            print("valid")
            print("image size \(image.size.width):\(image.size.height)")
            self.ivTest.image = image
        } else {
            print("not valid")
        }
    }

    override var representedObject: Any? {
        didSet {
            // Update the view, if already loaded.
        }
    }
}
Thanks so much...
--- edited ---
Yes, thank you! I just added this and the image showed up:
self.ivTest.frame = NSRect(x: 0, y: 0, width: 100, height: 100)
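The likely reason: the original code only sized the backing layer (ivTest.layer?.frame), so the NSImageView itself still had a zero-sized frame; the pink layer was visible, but the view drew its image into nothing. A minimal sketch of the fix, where the scaling line is my own optional addition, not part of the original answer:
// Size the view itself; NSImageView draws its image inside the view's bounds.
self.ivTest.frame = NSRect(x: 0, y: 0, width: 100, height: 100)
self.ivTest.imageScaling = .scaleProportionallyUpOrDown   // optional: fit the image to the 100x100 box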

macOS Swift CALayer.contents does not load image

The code below creates a red rectangle that is animated to move across the view from left to right. I would like to have an arbitrary shape loaded from an image to either superimpose or replace the rectangle. However, the circleLayer.contents = NSImage statement in the initializeCircleLayer function doesn't produce any effect. The diagnostic print statement seems to verify that the image exists and has been found, but no image appears in the view. How do I get an image into the layer to replace the animated red rectangle? Thanks!
CODE BELOW:
import Cocoa

class ViewController: NSViewController {

    var circleLayer = CALayer()

    override func viewDidLoad() {
        super.viewDidLoad()
        self.view.wantsLayer = true
        initializeCircleLayer()
        simpleCAAnimationDemo()
    }

    func initializeCircleLayer() {
        circleLayer.bounds = CGRect(x: 0, y: 0, width: 150, height: 150)
        circleLayer.position = CGPoint(x: 50, y: 150)
        circleLayer.backgroundColor = NSColor.red.cgColor
        circleLayer.cornerRadius = 10.0
        let testIm = NSImage(named: NSImage.Name(rawValue: "testImage"))
        print("testIm = \(String(describing: testIm))")
        circleLayer.contents = NSImage(named: NSImage.Name(rawValue: "testImage"))?.cgImage
        circleLayer.contentsGravity = kCAGravityCenter
        self.view.layer?.addSublayer(circleLayer)
    }

    func simpleCAAnimationDemo() {
        circleLayer.removeAllAnimations()
        let animation = CABasicAnimation(keyPath: "position")
        let startingPoint = NSValue(point: NSPoint(x: 50, y: 150))
        let endingPoint = NSValue(point: NSPoint(x: 600, y: 150))
        animation.fromValue = startingPoint
        animation.toValue = endingPoint
        animation.repeatCount = Float.greatestFiniteMagnitude
        animation.duration = 10.0
        circleLayer.add(animation, forKey: "linearMovement")
    }
}
Why it doesn't work
The reason why
circleLayer.contents = NSImage(named: NSImage.Name(rawValue: "testImage"))?.cgImage
doesn't work is because it's a reference to the cgImage(forProposedRect:context:hints:) method, meaning that its type is
((UnsafeMutablePointer<NSRect>?, NSGraphicsContext?, [NSImageRep.HintKey : Any]?) -> CGImage?)?
You can see this by assigning NSImage(named: NSImage.Name(rawValue: "testImage"))?.cgImage to a local variable and ⌥-clicking it to see its type.
The compiler allows this assignment because circleLayer.contents is an Any? property, so literally anything can be assigned to it.
How to fix it
As of macOS 10.6, you can assign NSImage objects to a layer's contents directly:
circleLayer.contents = NSImage(named: NSImage.Name(rawValue: "testImage"))
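Alternatively, if you specifically want a CGImage, call the method instead of just referencing it; passing nil for all three parameters uses the defaults:
// Actually invoking cgImage(forProposedRect:context:hints:) rather than
// assigning the method reference itself to the layer's contents.
circleLayer.contents = NSImage(named: NSImage.Name(rawValue: "testImage"))?
    .cgImage(forProposedRect: nil, context: nil, hints: nil)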

Capturing MTKView texture to UIImageView efficiently

I want to capture the contents of the portion of an MTKView that has most recently updated into a UIImageView. I use the following piece of code to accomplish this task:
let cicontext = CIContext(mtlDevice: self.device!) // set up once along with rest of renderPipeline
...
let lastSubStrokeCIImage = CIImage(mtlTexture: lastDrawableDisplayed.texture, options: nil)!.oriented(CGImagePropertyOrientation.downMirrored)
let bboxChunkSubCurvesScaledAndYFlipped = CGRect(...) // get bounding box of region just drawn
let imageCropCG = cicontext.createCGImage(lastSubStrokeCIImage, from: bboxStrokeAccumulatingScaledAndYFlipped)
// Now that we have a CGImage of just the right size, we have to do the following expensive operations before assigning it to a UIView
UIGraphicsBeginImageContextWithOptions(bboxStrokeAccumulating.size, false, 0) // open a bboxKeyframe-sized context
UIGraphicsGetCurrentContext()?.translateBy(x: 0, y: bboxStrokeAccumulating.height)
UIGraphicsGetCurrentContext()?.scaleBy(x: 1.0, y: -1.0)
UIGraphicsGetCurrentContext()?.draw(imageCropCG!, in: CGRect(x: 0, y: 0 , width: bboxStrokeAccumulating.width, height: bboxStrokeAccumulating.height))
// Okay, finally we create a CALayer to be a container for what we've just drawn
let layerStroke = CALayer()
layerStroke.frame = bboxStrokeAccumulating
layerStroke.contents = UIGraphicsGetImageFromCurrentImageContext()?.cgImage
UIGraphicsEndImageContext()
strokeView.layer.sublayers = nil // empty out strokeView
strokeView.layer.addSublayer(layerStroke) // add our hard-earned drawing in its latest state
So, this code works, but is not very efficient and makes the app lag when bboxStrokeAccumulating gets very large. Can anyone suggest more efficient alternatives?
For Swift 4.0:
let lastDrawableDisplayed = metalView?.currentDrawable?.texture

if let imageRef = lastDrawableDisplayed?.toImage() {
    let uiImage: UIImage = UIImage(cgImage: imageRef)
}

extension MTLTexture {

    func bytes() -> UnsafeMutableRawPointer {
        let width = self.width
        let height = self.height
        let rowBytes = self.width * 4
        let p = malloc(width * height * 4)
        self.getBytes(p!, bytesPerRow: rowBytes, from: MTLRegionMake2D(0, 0, width, height), mipmapLevel: 0)
        return p!
    }

    func toImage() -> CGImage? {
        let p = bytes()
        let pColorSpace = CGColorSpaceCreateDeviceRGB()
        let rawBitmapInfo = CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
        let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: rawBitmapInfo)
        let selftureSize = self.width * self.height * 4
        let rowBytes = self.width * 4
        let releaseMaskImagePixelData: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
            return
        }
        let provider = CGDataProvider(dataInfo: nil, data: p, size: selftureSize, releaseData: releaseMaskImagePixelData)
        let cgImageRef = CGImage(width: self.width, height: self.height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: rowBytes, space: pColorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)!
        return cgImageRef
    }
}
Then you can set the uiImage on your UIImageView.
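A hedged usage sketch, assuming a UIImageView called imageView and an MTKView called metalView (names are mine): note that reading the drawable's texture back with getBytes generally requires the view's textures to stay readable, hence the framebufferOnly line.
// Keep the drawable textures readable so getBytes can copy them back.
metalView.framebufferOnly = false

// After a frame has been drawn:
if let texture = metalView.currentDrawable?.texture,
   let cgImage = texture.toImage() {
    DispatchQueue.main.async {
        imageView.image = UIImage(cgImage: cgImage)   // UI updates on the main thread
    }
}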

Swift 3 Colour Space macOS not IOS

How can I convert an RGB image into its grayscale colour space? I can find a lot of code for iOS but none for macOS, and Apple's documentation is all in Objective-C.
let width = image.size.width
let height = image.size.height
let imageRect = NSMakeRect(0, 0, width, height);
let colorSpace = CGColorSpaceCreateDeviceGray();
let bits = image.representations.first as! NSBitmapImageRep;
bitmap!.representationUsingType(NSBitmapImageFileType.NSPNGFileType, properties: nil)
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue);
let context = CGContext(data: nil, width: Int(width), height: Int(height), bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo.rawValue);
context.draw(image.cgImage!, in : imageRect);// and this line is wrong obviously..
This is what I have got so far, just copied and pasted from the internet, but I have no idea how to go further...
I have found an interesting way to do this. My code is simply assembled from the three sources below.
how to create grayscale image from nsimage in swift?
Greyscale Image using COCOA and NSImage
Changing the Color Space of NSImage: The second reply
My Code:
func saveImage(image: NSImage, destination: URL) throws {
    let rep = greyScale(image: image)
    let data = rep.representation(using: NSJPEGFileType, properties: [:])
    try data?.write(to: destination)
}

// rgb2gray
func greyScale(image: NSImage) -> NSBitmapImageRep {
    let w = image.size.width
    let h = image.size.height
    let imageRect: NSRect = NSMakeRect(0, 0, w, h)
    let colorSpace: CGColorSpace = CGColorSpaceCreateDeviceGray()
    let context: CGContext! = CGContext(data: nil, width: Int(w),
                                        height: Int(h), bitsPerComponent: 8,
                                        bytesPerRow: 0, space: colorSpace,
                                        bitmapInfo: CGImageAlphaInfo.none.rawValue)
    context.draw(nsImageToCGImage(image: image)!, in: imageRect)
    let greyImage: CGImage! = context.makeImage()
    return NSBitmapImageRep(cgImage: greyImage)
}

func nsImageToCGImage(image: NSImage) -> CGImage? {
    if let imageData = image.tiffRepresentation,
       let imageSource = CGImageSourceCreateWithData(imageData as CFData, nil) {
        return CGImageSourceCreateImageAtIndex(imageSource, 0, nil)
    }
    return nil
}
I am still trying to understand the principle behind it.
You can try CIFilter. The annoyance is that you have to convert back and forth between NSImage and CIImage:
import Cocoa
import CoreImage

let url = Bundle.main.url(forResource: "image", withExtension: "jpg")!
let image = CIImage(contentsOf: url)!

let bwFilter = CIFilter(name: "CIColorControls", withInputParameters: ["inputImage": image, "inputSaturation": 0.0])!

if let ciImage = bwFilter.outputImage {
    let rep = NSCIImageRep(ciImage: ciImage)
    let nsImage = NSImage(size: rep.size)
    nsImage.addRepresentation(rep)
    // nsImage is now your black-and-white image
}
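If you also want to write the result to disk, here is a minimal sketch building on the same bwFilter (the output path is a placeholder, and the .png spelling is the current NSBitmapImageRep.FileType naming rather than the older Swift 3 constants):
// Render the filtered CIImage into a bitmap rep and encode it as PNG data.
if let ciImage = bwFilter.outputImage {
    let rep = NSBitmapImageRep(ciImage: ciImage)
    if let pngData = rep.representation(using: .png, properties: [:]) {
        try? pngData.write(to: URL(fileURLWithPath: "/tmp/grayscale.png"))
    }
}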
