Convert SwiftUI View to UIImage on iOS 14+

I can convert any SwiftUI View to a high resolution UIImage, using the code below. It works great... until... I try to use an image size larger than CGSize(width: 2730, height: 2730).
If I increase the image size to CGSize(width: 2731, height: 2731) or larger, the line:
self.drawHierarchy(in: self.layer.bounds, afterScreenUpdates: true)
in "extension UIView", can no longer draw the UIImage.
Any idea on why there is a size limitation?
One note: I can overcome the size limitation by uncommenting the two lines in the View extension and replacing:
self.drawHierarchy(in: self.layer.bounds, afterScreenUpdates: true)
With:
layer.render(in: context.cgContext)
in the "extension UIView"... But THEN "layer.render" will not render image effects such as "blur", SceneKit subviews, or metal. So using "self.drawHierarchy" is a must.
// 1: Set a breakpoint on the line print("done") to inspect the high-res image
// 2: Run, then tap the image on screen to inspect highresImage
// 3: Repeat after changing the size to CGSize(width: 2731, height: 2731)
import SwiftUI

struct ContentView: View {
    @State var blurRadius: CGFloat = 4.0
    let imageSize: CGSize = CGSize(width: 2730, height: 2730)

    var body: some View {
        testView
            .frame(width: 300, height: 300)
            .onTapGesture {
                // Adjust blur radius based on the high-res image scale
                blurRadius *= imageSize.width * 0.5 / 300
                // Capture a high-res image of the SwiftUI view
                let highresImage = testView.asImage(size: imageSize)
                // Set a breakpoint here to inspect the high-res image size, quality, etc.
                print("done")
                // Reset blur radius back to 4
                blurRadius = 4
            }
    }

    var testView: some View {
        ZStack {
            Color.blue
            Circle()
                .fill(Color.red)
        }
        .blur(radius: blurRadius)
    }
}
extension UIView {
    func asImage() -> UIImage {
        let format = UIGraphicsImageRendererFormat()
        format.scale = 1
        return UIGraphicsImageRenderer(size: self.layer.frame.size, format: format).image { context in
            self.drawHierarchy(in: self.layer.bounds, afterScreenUpdates: true)
            //layer.render(in: context.cgContext)
        }
    }
}
extension View {
    func asImage(size: CGSize) -> UIImage {
        let controller = UIHostingController(rootView: self)
        controller.view.bounds = CGRect(origin: .zero, size: size)
        //UIApplication.shared.windows.first!.rootViewController?.view.addSubview(controller.view)
        let image = controller.view.asImage()
        //controller.view.removeFromSuperview()
        return image
    }
}

I can't figure out why, but it works for me after changing the y-coordinate of the view's origin to anything non-zero. This may be a bug in UIHostingController.
If you use a very small value, you can't see the difference, e.g.:
controller.view.bounds = CGRect(origin: CGPoint(x: 0, y: 0.0001), size: size)
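For reference, here is that workaround folded into the question's View extension — a minimal sketch, identical to the original except for the tiny non-zero y origin:

extension View {
    func asImage(size: CGSize) -> UIImage {
        let controller = UIHostingController(rootView: self)
        // A tiny non-zero y origin lets drawHierarchy render sizes above
        // 2730x2730; the root cause is unknown (possibly a UIHostingController bug).
        controller.view.bounds = CGRect(origin: CGPoint(x: 0, y: 0.0001), size: size)
        return controller.view.asImage()
    }
}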

Related

SwiftUI exporting the content of Canvas

Does anyone know how to export the content of a Canvas into an Image?
With SwiftUI, it is possible to generate an Image from a View with an extension:
func snapshot() -> UIImage {
    let controller = UIHostingController(rootView: self)
    let view = controller.view
    let targetSize = controller.view.intrinsicContentSize
    view?.bounds = CGRect(origin: .zero, size: targetSize)
    view?.backgroundColor = .clear
    let renderer = UIGraphicsImageRenderer(size: targetSize)
    return renderer.image { _ in
        view?.drawHierarchy(in: controller.view.bounds, afterScreenUpdates: true)
    }
}
This works great for simple views like Button, but for Canvas it always generates an empty image.
For example, with the following code, the image generated by the button is fine, but the one of the Canvas is always empty.
import SwiftUI

extension View {
    func snapshot() -> UIImage {
        let controller = UIHostingController(rootView: self)
        let view = controller.view
        let targetSize = controller.view.intrinsicContentSize
        view?.bounds = CGRect(origin: .zero, size: targetSize)
        view?.backgroundColor = .clear
        let renderer = UIGraphicsImageRenderer(size: targetSize)
        return renderer.image { _ in
            view?.drawHierarchy(in: controller.view.bounds, afterScreenUpdates: true)
        }
    }
}

struct ContentView: View {
    var textView: some View {
        Text("Hello, SwiftUI")
            .padding()
            .background(Color.green)
            .foregroundColor(.white)
            .clipShape(Capsule())
    }

    var canvas: some View {
        Canvas { context, size in
            var path = Path()
            path.move(to: CGPoint(x: 0, y: 0))
            path.addLine(to: CGPoint(x: size.width/2, y: 0))
            path.addLine(to: CGPoint(x: size.width/2, y: size.height/2))
            path.addLine(to: CGPoint(x: 0, y: size.height/2))
            path.closeSubpath()
            context.fill(path, with: .color(.blue))
        }
    }

    var body: some View {
        VStack {
            textView
            canvas
            Button("Save to image: Canvas") {
                if let view = canvas as? Canvas<EmptyView> {
                    let image = view.snapshot()
                    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
                }
            }
            Button("Save to image: Text") {
                if let view = textView as? Text {
                    let image = view.snapshot()
                    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
                }
            }
        }
    }
}
Apply a frame to the canvas and it should work, e.g.:
canvas.frame(width: 300, height: 300)
The answer is here: SwiftUI exporting the content of Canvas. Apply the frame on the line where you take the snapshot, i.e.:
let newImage = canvas.frame(width: 300, height: 300).snapshot()
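Putting that together with the question's code, the Canvas button action would look something like this (a sketch; the 300x300 frame is an arbitrary example size):

Button("Save to image: Canvas") {
    // Canvas is fully flexible and reports no useful intrinsicContentSize,
    // which is likely why the unframed snapshot comes out empty.
    let image = canvas.frame(width: 300, height: 300).snapshot()
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}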

How to merge 3 images (100x100px) into one new large image (300x100px) in Swift

I am very new to SwiftUI and haven't found a way to do this.
I want to create one image (300x100) dynamically by merging three single 100x100 images horizontally:
ImageA (100x100) + ImageB (100x100) + ImageC (100x100) = ImageD (300x100)
I found a way to show them in an HStack, but how can I get one new image so I can pass its data to another function?
Regards
Alex
Thanks a lot. I tried to use your function but got this error: "Cannot convert value of type 'UIImage?' to expected element type 'UIImage'".
The code should draw the zero three times; number-0.png contains just one large zero.
//
// ContentView.swift
// imagecombine
//
// Created by Alex on 02.08.20.
// Copyright © 2020 Alex. All rights reserved.
//
import SwiftUI

func combineHorizontally(_ images: [UIImage]) -> UIImage? {
    guard !images.isEmpty else { return nil }
    var size = CGSize.zero
    var scale = CGFloat.zero
    for image in images {
        scale = max(scale, image.scale)
        size = CGSize(width: size.width + image.size.width, height: max(size.height, image.size.height))
    }
    var position = CGPoint.zero
    let format = UIGraphicsImageRendererFormat()
    format.scale = scale
    return UIGraphicsImageRenderer(size: size, format: format).image { context in
        for image in images {
            image.draw(at: position)
            position.x += image.size.width
        }
    }
}

let redImage = UIImage(named: "number-0.png")
let greenImage = UIImage(named: "number-0.png")
let blueImage = UIImage(named: "number-0.png")
let image = combineHorizontally([redImage, greenImage, blueImage])

struct ContentView: View {
    var body: some View {
        Image(image)
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}
You can use UIGraphicsImageRenderer and draw the images one after another:
func combineHorizontally(_ images: [UIImage]) -> UIImage? {
    guard !images.isEmpty else { return nil }
    var size = CGSize.zero
    var scale = CGFloat.zero
    for image in images {
        scale = max(scale, image.scale)
        size = CGSize(width: size.width + image.size.width, height: max(size.height, image.size.height))
    }
    var position = CGPoint.zero
    let format = UIGraphicsImageRendererFormat()
    format.scale = scale
    return UIGraphicsImageRenderer(size: size, format: format).image { _ in
        for image in images {
            image.draw(at: position)
            position.x += image.size.width
        }
    }
}
You said that there were only three images and that they were 100 × 100, but the above should work regardless of the number and size (memory permitting, of course).
Anyway, this:
let image = combineHorizontally([redImage, greenImage, blueImage])
results in the three "0" images drawn side by side in a single wide image.
To use that in a context where you don't want an optional, you can use the ! forced-unwrapping operator, the ?? nil-coalescing operator, or some other unwrapping pattern, e.g. guard let, if let, etc.
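For example, the compile error in the question comes from passing UIImage? values (which is what UIImage(named:) returns) into the [UIImage] parameter; unwrapping both the inputs and the result clears it. A sketch, assuming the same number-0.png asset:

if let redImage = UIImage(named: "number-0.png"),
   let greenImage = UIImage(named: "number-0.png"),
   let blueImage = UIImage(named: "number-0.png"),
   let combined = combineHorizontally([redImage, greenImage, blueImage]) {
    // Use `combined` here, e.g. hand it to Image(uiImage: combined)
}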
Alternatively, if you don’t want to deal with optionals at all, you can write a rendition that doesn’t return an optional at all (but also doesn’t detect the error scenario where an empty array was provided):
func combineHorizontally(_ images: [UIImage]) -> UIImage {
    var size = CGSize.zero
    var scale: CGFloat = 1
    for image in images {
        scale = max(scale, image.scale)
        size = CGSize(width: size.width + image.size.width, height: max(size.height, image.size.height))
    }
    var position = CGPoint.zero
    let format = UIGraphicsImageRendererFormat()
    format.scale = scale
    return UIGraphicsImageRenderer(size: size, format: format).image { _ in
        for image in images {
            image.draw(at: position)
            position.x += image.size.width
        }
    }
}
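Usage is then straightforward. A small sketch, again assuming number-0.png exists in the asset catalog; compactMap drops any images that fail to load before they reach the non-optional parameter:

let images = ["number-0.png", "number-0.png", "number-0.png"]
    .compactMap { UIImage(named: $0) }
let combined = combineHorizontally(images) // non-optional UIImage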

Reset offset, onTapGesture works, but onRotated does not work, why?

I want a view containing 2 rectangles to float at the bottom of the screen, whether the orientation is portrait or landscape.
The code below is a test. When orientationDidChangeNotification fires, I found that UIDevice.current.orientation.isPortrait and UIScreen.main.bounds.height often have the wrong value. Why?
Anyway, the test code just resets offset = 0 in onRotated(), but that doesn't work; onTapGesture, by contrast, works fine.
Q1: Is this the wrong way to do it in SwiftUI: SceneDelegate.orientationDidChangeNotification -> contentView.onRotated()?
Q2: Why do UIDevice.current.orientation.isPortrait and UIScreen.main.bounds.height often have the wrong value?
Q3: How can I make a view float at the bottom of the screen in both portrait and landscape?
let height: CGFloat = 100

struct TestView: View {
    @State var offset = (UIScreen.main.bounds.height - height) / 2

    var body: some View {
        ZStack {
            Text("+")
            VStack(spacing: 0) {
                Rectangle().fill(Color.blue)
                Rectangle().fill(Color.red)
            }
            .frame(width: 100, height: height)
            .offset(y: offset)
            .onTapGesture {
                self.offset = 0
            }
        }
    }

    func onRotated() {
        // let isPortrait = UIDevice.current.orientation.isPortrait
        offset = 0 //(UIScreen.main.bounds.height - height) / 2
        // print("\(isPortrait), screen height = \(UIScreen.main.bounds.height)")
    }
}
class SceneDelegate: UIResponder, UIWindowSceneDelegate {
    var window: UIWindow?

    func scene(_ scene: UIScene, willConnectTo session: UISceneSession, options connectionOptions: UIScene.ConnectionOptions) {
        let contentView = TestView()
        if let windowScene = scene as? UIWindowScene {
            NotificationCenter.default.addObserver(forName: UIDevice.orientationDidChangeNotification, object: nil, queue: nil) { notification in
                contentView.onRotated()
            }
            let window = UIWindow(windowScene: windowScene)
            window.rootViewController = UIHostingController(rootView: contentView)
            self.window = window
            window.makeKeyAndVisible()
        }
    }
    ...
}
contentView is not a reference, it is a value, so you are calling .onRotated() on your own copy of the contentView value, a copy that lives only within the callback:
let contentView = TestView()
if let windowScene = scene as? UIWindowScene {
    NotificationCenter.default.addObserver(forName: UIDevice.orientationDidChangeNotification, object: nil, queue: nil) { notification in
        contentView.onRotated() // local copy!!!
    }
Instead, create a listener for the notification publisher inside TestView, so it can change itself internally.
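A minimal sketch of that approach — onReceive and NotificationCenter's Combine publisher are standard SwiftUI/Foundation API, and the rest mirrors the question's view:

struct TestView: View {
    @State private var offset = (UIScreen.main.bounds.height - height) / 2

    var body: some View {
        ZStack {
            Text("+")
            VStack(spacing: 0) {
                Rectangle().fill(Color.blue)
                Rectangle().fill(Color.red)
            }
            .frame(width: 100, height: height)
            .offset(y: offset)
        }
        // Listen for rotation inside the view itself, so the mutation
        // happens on the live view state, not on a stale local copy.
        .onReceive(NotificationCenter.default.publisher(
            for: UIDevice.orientationDidChangeNotification)) { _ in
            offset = 0
        }
    }
}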
Moreover, the intention is not entirely clear, but SwiftUI also makes it possible to track size classes via EnvironmentValues.horizontalSizeClass and EnvironmentValues.verticalSizeClass, which change automatically on device rotation, so you can make your view's layout depend on those environment values even without a notification.
See here for a good example of how to use size classes.
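For completeness, reading a size class inside a view looks like this (a sketch; the layout choice is just illustrative):

struct AdaptiveView: View {
    @Environment(\.verticalSizeClass) private var verticalSizeClass

    var body: some View {
        // A .compact vertical size class generally means landscape on iPhone.
        if verticalSizeClass == .compact {
            Text("Landscape layout")
        } else {
            Text("Portrait layout")
        }
    }
}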

How do I render a SwiftUI View that is not at the root hierarchy as a UIImage?

Suppose I have a simple SwiftUI View that is not the ContentView such as this:
struct Test: View {
    var body: some View {
        VStack {
            Text("Test 1")
            Text("Test 2")
        }
    }
}
How can I render this view as a UIImage?
I've looked into solutions such as:
extension UIView {
    func asImage() -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: bounds)
        return renderer.image { rendererContext in
            layer.render(in: rendererContext.cgContext)
        }
    }
}
But it seems that solutions like that only work on UIView, not a SwiftUI View.
Here is the approach that works for me, as I needed the image sized exactly as it is when placed alongside other views. Hope it is helpful for someone else.
Demo: above the divider the SwiftUI view is rendered live; below it is the captured image (with a border to show its size).
Update: re-tested with Xcode 13.4 / iOS 15.5
Test module in project is here
extension View {
    func asImage() -> UIImage {
        let controller = UIHostingController(rootView: self)
        // Locate the view far off screen
        controller.view.frame = CGRect(x: 0, y: CGFloat(Int.max), width: 1, height: 1)
        let size = controller.sizeThatFits(in: UIScreen.main.bounds.size)
        controller.view.bounds = CGRect(origin: .zero, size: size)
        controller.view.sizeToFit()
        UIApplication.shared.windows.first?.rootViewController?.view.addSubview(controller.view)
        let image = controller.view.asImage()
        controller.view.removeFromSuperview()
        return image
    }
}

extension UIView {
    func asImage() -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: bounds)
        return renderer.image { rendererContext in
            // [!!] Uncomment to clip the resulting image:
            // rendererContext.cgContext.addPath(
            //     UIBezierPath(roundedRect: bounds, cornerRadius: 20).cgPath)
            // rendererContext.cgContext.clip()

            // As @MaxIsom commented below, in some cases this may need to
            // run asynchronously, so uncomment the DispatchQueue below if
            // you hit the same crash.
            // DispatchQueue.main.async {
            layer.render(in: rendererContext.cgContext)
            // }
        }
    }
}
// TESTING
struct TestableView: View {
    var body: some View {
        VStack {
            Text("Test 1")
            Text("Test 2")
        }
    }
}

struct TestBackgroundRendering: View {
    var body: some View {
        VStack {
            TestableView()
            Divider()
            Image(uiImage: render())
                .border(Color.black)
        }
    }

    private func render() -> UIImage {
        TestableView().asImage()
    }
}
Asperi's solution works, but if you need the image without a white background you have to add this line:
controller.view.backgroundColor = .clear
And your View extension will be:
extension View {
    func asImage() -> UIImage {
        let controller = UIHostingController(rootView: self)
        // Locate the view far off screen
        controller.view.frame = CGRect(x: 0, y: CGFloat(Int.max), width: 1, height: 1)
        UIApplication.shared.windows.first!.rootViewController?.view.addSubview(controller.view)
        let size = controller.sizeThatFits(in: UIScreen.main.bounds.size)
        controller.view.bounds = CGRect(origin: .zero, size: size)
        controller.view.sizeToFit()
        controller.view.backgroundColor = .clear
        let image = controller.view.asImage()
        controller.view.removeFromSuperview()
        return image
    }
}

iOS Swift how to incrementally draw to a UIImage?

How can I use iOS Core Graphics to incrementally draw a large data set into a single image?
I have code which is ready to process the entire dataset at once (over 100,000 rectangles) and produce a single image. This is a very long-running operation, and I want the dataset drawn incrementally, 1,000 rectangles at a time, displaying the small image updates as they come (like images downloading from the internet in the 90s).
My questions are: should I keep a reference to the same context throughout the operation and simply add elements to it? Or should I capture the current image using UIGraphicsGetImageFromCurrentImageContext(), draw it into a new context, and draw additional rectangles on top of it?
Bonus question: is this the right approach if I want to use multiple threads to append to the same image?
let context = UIGraphicsGetCurrentContext()!
context.setStrokeColor(borderColor)
context.setLineWidth(CGFloat(borderWidth))
for elementIndex in 0 ..< data.count {
    context.setFillColor(color.cgColor)
    let marker = CGRect(x: toX(elementIndex),
                        y: toY(elementIndex),
                        width: rectWidth,
                        height: rectHeight)
    context.addRect(marker)
    context.drawPath(using: .fillStroke)
}
// Save the context as a new UIImage
let myImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
if let cgImage = myImage?.cgImage,
   let orientation = myImage?.imageOrientation {
    return UIImage(cgImage: cgImage, scale: 2, orientation: orientation)
}
You should:
Dispatch the whole thing to some background queue;
periodically call UIGraphicsGetImageFromCurrentImageContext and dispatch the image view update to the main queue
E.g., this will update the image view every ¼ second:
DispatchQueue.global().async {
    var lastDrawn = CACurrentMediaTime()
    UIGraphicsBeginImageContextWithOptions(size, false, 0)
    for _ in 0 ..< 100_000 {
        // draw whatever you want
        let now = CACurrentMediaTime()
        if now - lastDrawn > 0.25 {
            self.updateImageView()
            lastDrawn = now
        }
    }
    self.updateImageView()
    UIGraphicsEndImageContext()
}
Where:
func updateImageView() {
    guard let image = UIGraphicsGetImageFromCurrentImageContext() else { return }
    DispatchQueue.main.async {
        self.imageView.image = image
    }
}
Thus:
func buildImage(of size: CGSize) {
    DispatchQueue.global().async {
        var lastDrawn = CACurrentMediaTime()
        UIGraphicsBeginImageContextWithOptions(size, false, 0)
        for _ in 0 ..< 100_000 {
            self.someColor().setFill()
            UIBezierPath(rect: self.someRectangle(in: size)).fill()
            let now = CACurrentMediaTime()
            if now - lastDrawn > 0.25 {
                self.updateImageView()
                lastDrawn = now
            }
        }
        self.updateImageView()
        UIGraphicsEndImageContext()
    }
}

func updateImageView() {
    let image = UIGraphicsGetImageFromCurrentImageContext()
    DispatchQueue.main.async {
        self.imageView.image = image
    }
}

func someRectangle(in size: CGSize) -> CGRect {
    let x = CGFloat.random(in: 0...size.width)
    let y = CGFloat.random(in: 0...size.height)
    let width = CGFloat.random(in: 0...(size.width - x))
    let height = CGFloat.random(in: 0...(size.height - y))
    return CGRect(x: x, y: y, width: width, height: height)
}

func someColor() -> UIColor {
    return UIColor(red: .random(in: 0...1),
                   green: .random(in: 0...1),
                   blue: .random(in: 0...1),
                   alpha: 1)
}
This yields an image view that progressively fills with random colored rectangles while the drawing runs.
Now, I’m not calling Core Graphics directly, but you can and it will work the same.
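For instance, the body of the loop could make the equivalent Core Graphics calls instead of going through UIBezierPath — a sketch under the same setup, where the current context is the one created by UIGraphicsBeginImageContextWithOptions above:

if let context = UIGraphicsGetCurrentContext() {
    context.setFillColor(self.someColor().cgColor)
    context.fill(self.someRectangle(in: size))
}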
