I am developing an application for macOS, and I am unable to find any method for compressing an NSImage and converting it to a Base64-encoded string in Swift 4.
I could only find this Swift 3 code:
/// Swift 3 snippet (quoted from the question): tries to re-encode the image
/// with a decreasing compression factor until it fits under `megabytes` MB.
/// NOTE(review): this does not compile in Swift 4 — `Data` has `count`, not
/// `length` (see the rename note below), and comparing an optional with `>`
/// is no longer allowed.
func compressUnderMegaBytes(megabytes: CGFloat) -> NSImage? {
var compressionRatio = 1.0
// Force-unwraps tiffRepresentation — crashes if the image has no bitmap data.
var imageRep = NSBitmapImageRep(data: self.tiffRepresentation!)
var options = NSDictionary(object: NSNumber(floatLiteral: compressionRatio), forKey: NSBitmapImageRep.PropertyKey.compressionFactor as NSCopying)
var compressedData: Data? = imageRep?.representation(using: .jpeg, properties: options as! [NSBitmapImageRep.PropertyKey : Any])
// `length` is the Swift 3 name; Swift 4 Data uses `count`.
while compressedData?.length > (megabytes * 1024 * 1024) {
compressionRatio = compressionRatio * 0.9
options = NSDictionary(object: NSNumber(floatLiteral:compressionRatio), forKey: NSBitmapImageRep.PropertyKey.compressionFactor as NSCopying)
// NOTE(review): re-encodes as .png here although the first pass used .jpeg —
// presumably unintentional (the answerer questions this too); confirm.
compressedData = imageRep?.representation(using:.png, properties: options as! [NSBitmapImageRep.PropertyKey : Any])
// Quality floor: stop once the factor has dropped to 0.4 or below.
if compressionRatio <= 0.4 {
break
}
}
// Force-unwrap: crashes if encoding failed and compressedData is nil.
return NSImage(data: compressedData!)
}
but after converting it to Swift 4 I get the following error:
The translated code is quite cumbersome. The NSDictionary detour is not necessary in Swift 4.
length has been renamed to count
extension NSImage {
    /// Repeatedly re-encodes the image as JPEG with a decreasing compression
    /// factor until the encoded data fits under `megabytes` megabytes, or the
    /// factor reaches the 0.4 quality floor.
    /// - Parameter megabytes: Size budget for the encoded image, in MB.
    /// - Returns: The re-encoded image, or nil if no bitmap representation
    ///   could be produced from this image.
    func compressUnderMegaBytes(megabytes: CGFloat) -> NSImage? {
        var compressionRatio = 1.0
        guard let tiff = self.tiffRepresentation, let imageRep = NSBitmapImageRep(data: tiff) else { return nil }
        guard var compressedData = imageRep.representation(using: .jpeg, properties: [.compressionFactor: compressionRatio]) else { return nil }
        let byteLimit = megabytes * 1024 * 1024
        while CGFloat(compressedData.count) > byteLimit {
            compressionRatio *= 0.9
            // Stay with JPEG: .compressionFactor only affects JPEG encoding,
            // so re-encoding as PNG (as the original did) could never shrink
            // the data and the loop would always run down to the 0.4 floor.
            guard let smaller = imageRep.representation(using: .jpeg, properties: [.compressionFactor: compressionRatio]) else { break }
            compressedData = smaller
            if compressionRatio <= 0.4 { break }
        }
        return NSImage(data: compressedData)
    }
}
Or, without unsafely force-unwrapped optionals:
extension NSImage {
    /// Variant with no force unwraps: re-encodes as JPEG with a decreasing
    /// compression factor until the data fits under `megabytes` MB or the
    /// factor drops to the 0.4 quality floor.
    /// - Parameter megabytes: Size budget for the encoded image, in MB.
    /// - Returns: The re-encoded image; `self` unchanged if the first encode
    ///   fails; nil if no bitmap representation could be produced.
    func compressUnderMegaBytes(megabytes: CGFloat) -> NSImage? {
        var compressionRatio = 1.0
        guard let tiff = self.tiffRepresentation, let imageRep = NSBitmapImageRep(data: tiff) else { return nil }
        guard var compressedData = imageRep.representation(using: .jpeg, properties: [.compressionFactor: compressionRatio]) else {
            // Could not encode at all — return the unmodified image, matching
            // the original's fallback behavior.
            return self
        }
        while CGFloat(compressedData.count) > megabytes * 1024 * 1024 {
            compressionRatio *= 0.9
            // JPEG, not PNG: PNG ignores .compressionFactor entirely, so the
            // original's PNG re-encode could never reduce the size.
            // As in the original, an encode produced at or below the 0.4 floor
            // is discarded (the loop breaks before assigning it).
            guard compressionRatio > 0.4,
                  let newCompressedData = imageRep.representation(using: .jpeg, properties: [.compressionFactor: compressionRatio])
            else { break }
            compressedData = newCompressedData
        }
        return NSImage(data: compressedData)
    }
}
Note: Is it intended to compress the first time as jpg and then as png?
Related
I'm new to SpriteKit and I'm trying to simulate a population of 1000 moving individuals each being represented by a simple circle. I have an awful frame rate (around 6/7 fps) so I reduced to 100 and reached 35 fps...
My nodes are the simplest in the world, I'm using the entity component system from GameplayKit, but nothing fancy.
So what can I do to improve and reach 60 fps?
First my shape entity, later in the process the color and filling will change for some individuals, but here there's only a simple circle node:
/// Component that owns the visual for an individual: a 10×10 circle node.
/// The node's `entity` back-reference is kept in sync as the component is
/// attached to / detached from its entity.
class ShapeComponent : GKComponent {
// The sprite added to the scene; color/fill may change later per the author.
let node = SKShapeNode(ellipseOf: CGSize(width: 10, height: 10))
override func didAddToEntity() {
node.entity = entity
}
override func willRemoveFromEntity() {
node.entity = nil
}
}
Then my movement component, having just a speed and an angle. I have even stored sine and cosine to limit computation:
/// Moves its entity's shape node along a fixed heading at constant speed.
/// The heading's sine and cosine are cached so the per-frame update does no
/// trigonometry.
class MovementComponent : GKComponent {
    /// Travel speed, in points per second.
    var speed : CGFloat = 25
    /// Heading in radians, initially random; the cached sine/cosine are
    /// refreshed whenever it changes.
    var direction : CGFloat = .random(in: 0...(2 * .pi)) {
        didSet { updateSinCos() }
    }
    /// Cached sin(direction).
    var sinDir : CGFloat = 0
    /// Cached cos(direction).
    var cosDir : CGFloat = 0

    override init() {
        super.init()
        updateSinCos()
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /// The sibling component owning the node this component moves.
    var shapeComponent: ShapeComponent {
        guard let shape = entity?.component(ofType: ShapeComponent.self) else {
            fatalError("A MovementComponent's entity must have a ShapeComponent")
        }
        return shape
    }

    /// Refreshes the cached sine/cosine of the current heading.
    func updateSinCos() {
        sinDir = sin(direction)
        cosDir = cos(direction)
    }

    /// Advances the node by `speed * deltaTime` along the cached heading.
    override func update(deltaTime: TimeInterval) {
        super.update(deltaTime: deltaTime)
        let distance = speed * CGFloat(deltaTime)
        let node = shapeComponent.node
        node.position = CGPoint(x: node.position.x + distance * sinDir,
                                y: node.position.y + distance * cosDir)
    }
}
And at last my entity, with movement and shape components:
/// An individual in the simulation: a shape component (rendering) plus a
/// movement component (wandering behavior).
class IndividualEntity : GKEntity {
    /// Convenience accessor for the shape component; traps if absent.
    var shapeComponent: ShapeComponent {
        guard let found = component(ofType: ShapeComponent.self) else {
            fatalError("A IndividualEntity must have a ShapeComponent")
        }
        return found
    }

    /// Convenience accessor for the movement component; traps if absent.
    var movementComponent: MovementComponent {
        guard let found = component(ofType: MovementComponent.self) else {
            fatalError("A IndividualEntity must have a MovementComponent")
        }
        return found
    }

    override init() {
        super.init()
        addComponent(ShapeComponent())
        addComponent(MovementComponent())
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}
In my scene:
/// Populates the scene with 100 individuals at uniformly random positions.
/// The half-size offsets recenter the random coordinates around the origin.
func setUpScene() {
    for _ in 0..<100 {
        let x = CGFloat.random(in: 0..<size.width) - size.width / 2
        let y = CGFloat.random(in: 0..<size.height) - size.height / 2
        addIndividual(at: CGPoint(x: x, y: y))
    }
}
// Drives MovementComponent.update(deltaTime:) for every registered individual.
var componentSystem = GKComponentSystem(componentClass: MovementComponent.self)
// Strong references that keep the entities alive for the scene's lifetime.
var individuals = [IndividualEntity]()
/// Creates one individual, places its node in the scene at `pos`, and
/// registers it with the movement component system and the retain list.
func addIndividual(at pos: CGPoint) {
    let newcomer = IndividualEntity()
    let node = newcomer.shapeComponent.node
    node.position = pos
    addChild(node)
    componentSystem.addComponent(foundIn: newcomer)
    individuals.append(newcomer)
}
// Timestamp of the previous update pass, used to derive per-frame deltas.
var lastUpdateTimeInterval: TimeInterval = 0
// Cap on a single frame's delta (1/60 s) so a stall doesn't teleport entities.
let maximumUpdateDeltaTime: TimeInterval = 1.0 / 60.0
/// Per-frame tick: clamps the elapsed time and advances every movement
/// component through the component system.
override func update(_ currentTime: TimeInterval) {
    guard view != nil else { return }
    // Clamp the delta with min(_:_:) — idiomatic replacement for the mutable
    // ternary reassignment; a long pause can then advance at most one frame.
    let deltaTime = min(currentTime - lastUpdateTimeInterval, maximumUpdateDeltaTime)
    lastUpdateTimeInterval = currentTime
    componentSystem.update(deltaTime: deltaTime)
}
So if someone can help me with this problem... I just want those circles to wander, and later respond to collision by changing direction and color.
Thanks in advance.
/// Converts a UTF-16-based NSRange into a Range<String.Index> for this string.
/// - Parameter nsRange: The range to convert, in UTF-16 code units.
/// - Returns: The equivalent Range, or nil when `nsRange` does not fall on
///   valid character boundaries of this string.
internal func rangeFromNSRange(_ nsRange: NSRange) -> Range<String.Index>? {
    // Swift 4's Range(_:in:) performs the UTF-16 offset translation directly,
    // replacing the unavailable String.Index.advanced(by:) calls.
    return Range(nsRange, in: self)
}
I have this file in Swift 3 and I'm trying to convert it to Swift 4, but I get the errors shown below.
/// Computes the height needed to draw this string in the given font when
/// wrapped to `width`.
/// - Parameters:
///   - width: The available line width, in points.
///   - font: The font used for measurement.
///   - lineBreakMode: Optional line-break mode applied via a paragraph style.
/// - Returns: The bounding height, rounded up to a whole point.
public func height(_ width: CGFloat, font: UIFont, lineBreakMode: NSLineBreakMode?) -> CGFloat {
    // Swift 4: attribute dictionaries are keyed by NSAttributedStringKey, not
    // String — declaring [String: AnyObject] is what caused the compile error.
    var attrib: [NSAttributedStringKey: Any] = [.font: font]
    if let lineBreakMode = lineBreakMode {
        let paragraphStyle = NSMutableParagraphStyle()
        paragraphStyle.lineBreakMode = lineBreakMode
        attrib[.paragraphStyle] = paragraphStyle
    }
    let size = CGSize(width: width, height: CGFloat(Double.greatestFiniteMagnitude))
    return ceil((self as NSString).boundingRect(with: size, options: .usesLineFragmentOrigin, attributes: attrib, context: nil).height)
}
// Cannot convert value of type '[String : AnyObject]' to expected argument type '[NSAttributedStringKey : Any]?'
In Swift 4 there is an API to make Range<String.Index> from NSRange and vice versa. The String extension can be reduced to
/// Converts a UTF-16-based NSRange into a Range<String.Index>, returning nil
/// when the range does not fall on character boundaries of this string.
internal func range(from nsRange: NSRange) -> Range<String.Index>? {
return Range(nsRange, in: self)
}
In Swift 4 the type of string attributes has been changed from String to NSAttributedStringKey. For example NSForegroundColorAttributeName is replaced with NSAttributedStringKey.foregroundColor. You need to change the declaration of attrib to
var attrib: [NSAttributedStringKey: Any] = [.font: font]
and to change the line to add an attribute accordingly.
I am following a tutorial written in Swift 2 for Xcode 7, part 1 of which (you can navigate to part IV, where my issue has come up) is here: http://www.mav3r1ck.io/spritekit-with-swift/
I am using my own sprites in place of those in the tutorial. When I run my code, an error appears on the first line of the following
let spawnRandomHead = SKAction.runBlock(spawnHead)
let waitTime = SKAction.waitForDuration(1.0)
let sequence = SKAction.sequence([spawnRandomHead,waitTime])
runAction(SKAction.repeatActionForever(sequence))
The full code is here:
import SpriteKit
class GameScene: SKScene, SKPhysicsContactDelegate {

    /// Physics category bits used for contact/collision masks.
    enum bitMask: UInt32 {
        case defender = 1
        case head = 2
        case frame = 4
    }

    /// The player-controlled sprite.
    let defender = SKSpriteNode(imageNamed: "Ivanovic is a boss")

    override func didMoveToView(view: SKView) {
        /* Setup your scene here */
        backgroundColor = UIColor.blueColor()

        defender.position = CGPoint(x: frame.size.width / 2, y:frame.size.height / 2)
        defender.physicsBody = SKPhysicsBody(texture: defender.texture!, size: defender.frame.size)
        defender.physicsBody?.dynamic = false
        defender.physicsBody?.affectedByGravity = false
        defender.physicsBody?.allowsRotation = false
        // NOTE(review): the defender's category is set to bitMask.head — with
        // both bodies categorized as "head", the defender|head contact case in
        // didBeginContact can never match; presumably this should be
        // bitMask.defender. Kept as-is; confirm intent.
        defender.physicsBody?.categoryBitMask = bitMask.head.rawValue
        defender.physicsBody?.contactTestBitMask = bitMask.head.rawValue
        defender.physicsBody?.collisionBitMask = 0
        addChild(defender)

        // runBlock expects a closure, not a bare method reference.
        let spawnRandomHead = SKAction.runBlock { [unowned self] in
            self.spawnHead()
        }
        let waitTime = SKAction.waitForDuration(1.0)
        let sequence = SKAction.sequence([spawnRandomHead, waitTime])
        runAction(SKAction.repeatActionForever(sequence))

        physicsWorld.contactDelegate = self
        physicsWorld.gravity = CGVectorMake(0.0, -0.9)
        defender.physicsBody?.contactTestBitMask = bitMask.frame.rawValue
        defender.physicsBody?.collisionBitMask = bitMask.frame.rawValue
    }

    override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
        let touch = touches.first! as UITouch
        let touchLocation = touch.locationInNode(self)
        //print(touchLocation)
        let moveTo = SKAction.moveTo(touchLocation, duration: 1.0)
        defender.runAction(moveTo)
    }
    // The closing brace above was missing in the original, which nested the
    // three methods below inside touchesBegan — that is why the compiler said
    // GameScene "has no member 'spawnHead'".

    /// Uniform random value in [min, max].
    func randomNumber(min min: CGFloat, max: CGFloat) -> CGFloat {
        let random = CGFloat(Float(arc4random()) / 0xFFFFFFFF)
        return random * (max - min) + min
    }

    /// Spawns one falling head just above the top edge at a random x.
    func spawnHead() {
        let head = SKSpriteNode(imageNamed: "The Biter Strikes")
        head.position = CGPoint(x: frame.size.width * randomNumber(min: 0, max: 1), y: frame.size.height + head.size.height)
        head.physicsBody = SKPhysicsBody(texture: head.texture!, size: head.frame.size)
        head.physicsBody?.categoryBitMask = bitMask.head.rawValue
        head.physicsBody?.contactTestBitMask = bitMask.defender.rawValue
        addChild(head)
    }

    /// Removes bodyA's node when a defender/head contact occurs.
    func didBeginContact(contact: SKPhysicsContact) {
        let contactMask = contact.bodyA.categoryBitMask | contact.bodyB.categoryBitMask
        switch(contactMask) {
        case bitMask.defender.rawValue | bitMask.head.rawValue:
            let secondNode = contact.bodyB.node
            secondNode?.physicsBody?.allowsRotation = true
            let firstNode = contact.bodyA.node
            firstNode?.physicsBody?.allowsRotation = true
            firstNode?.removeFromParent()
        default:
            return
        }
    }
}
I have tried cleaning & rebuilding, restarting Xcode, and moving sections of the code around, but the error does not go away. I appreciate your support!
Hmm. Tried both. Now on the second line this
let spawnRandomHead = SKAction.runBlock({ [unowned self] () -> Void in
self.spawnHead()
})
let waitTime = SKAction.waitForDuration(1.0)
let sequence = SKAction.sequence([spawnRandomHead,waitTime])
runAction(SKAction.repeatActionForever(sequence))
a new error pops up saying " Value of type 'GameScene' has no member 'spawnHead' ".
The runBlock requires closure as an argument, so replace
let spawnRandomHead = SKAction.runBlock(spawnHead)
with
let spawnRandomHead = SKAction.runBlock({ [unowned self] () -> Void in
self.spawnHead()
})
or simply
let spawnRandomHead = SKAction.runBlock { [unowned self] in
self.spawnHead()
}
I'm currently making a small app that timelapses the webcam on my mac, saves the captured frame to png, and I am looking into exporting the captured frames as a single video.
I use CGImage to handle the original images and have them set in an array but I'm unsure on there to go from there. I gather from my own research that I have to use AVAssetWriter and AVAssetWriterInput somehow.
I've had a look about on here, read the apple docs and searched google. But all the guides etc, are in obj-c rather than swift which is making it really difficult to understand (As I have no experience in Obj-C).
Any help would be very much appreciated.
Many Thanks,
Luke.
I solved the same problem in Swift. Starting from an array of UIImage, try this (it's a little long :-) but it works):
var choosenPhotos: [UIImage] = [] // your array of UIImages
var outputSize = CGSizeMake(1280, 720)
/// Swift 2 version: encodes self.choosenPhotos into an H.264 MP4
/// ("OutputVideo.mp4" in the Documents directory) at 1 fps, aspect-fitting
/// each photo into `outputSize`. Photos are consumed (removed) as they are
/// encoded; writing happens asynchronously on a private dispatch queue.
func build(outputSize outputSize: CGSize) {
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("OutputVideo.mp4")
// AVAssetWriter cannot overwrite an existing file, so delete any previous output.
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4) else {
fatalError("AVAssetWriter error")
}
// H.264 at the requested pixel dimensions.
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
// 32ARGB buffers matching the output dimensions; these feed the adaptor's pool.
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAddInput(videoWriterInput) {
videoWriter.addInput(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
// The block is re-invoked by AVFoundation whenever the input can accept data.
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
// One frame per second of output video.
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
var frameCount: Int64 = 0
var appendSucceeded = true
// Consume the photo array until empty or an append fails.
while (!self.choosenPhotos.isEmpty) {
if (videoWriterInput.readyForMoreMediaData) {
let nextPhoto = self.choosenPhotos.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
// Draw the photo into the pixel buffer via a bitmap context over its memory.
let context = CGBitmapContextCreate(data, Int(self.outputSize.width), Int(self.outputSize.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.outputSize.width), CGFloat(self.outputSize.height)))
// Aspect-fit scaling: shrink to fit, then center with the x/y offsets below.
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
//aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize:CGSize = CGSizeMake(nextPhoto.size.width * aspectRatio, nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), nextPhoto.CGImage)
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount++
}
// No more photos (or a failure): close the input and finalize the file.
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
print("FINISHED!!!!!")
}
})
}
}
The following code generates a video from images and works in Xcode 11.3.1 with Swift 5.1. It is adapted from @aleciufs's Sep 25 '15 answer. The function assumes the images are already loaded and available in a `var images: [UIImage]` array.
/// Encodes `self.images` into an H.264 MP4 ("OutputVideo.mp4" in the caches
/// directory) at 2 fps, letter-boxing (aspect-fit) each image into
/// `outputSize`, then saves the finished file to the photo library.
/// Images are consumed (removed) from the array as they are encoded.
/// - Parameter outputSize: Pixel dimensions of the output video.
func build(outputSize: CGSize) {
    let fileManager = FileManager.default
    let urls = fileManager.urls(for: .cachesDirectory, in: .userDomainMask)
    guard let documentDirectory = urls.first else {
        fatalError("documentDir Error")
    }
    let videoOutputURL = documentDirectory.appendingPathComponent("OutputVideo.mp4")
    // AVAssetWriter refuses to overwrite, so remove any previous output first.
    if FileManager.default.fileExists(atPath: videoOutputURL.path) {
        do {
            try FileManager.default.removeItem(atPath: videoOutputURL.path)
        } catch {
            fatalError("Unable to delete file: \(error) : \(#function).")
        }
    }
    guard let videoWriter = try? AVAssetWriter(outputURL: videoOutputURL, fileType: AVFileType.mp4) else {
        fatalError("AVAssetWriter error")
    }
    let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
    guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
        fatalError("Negative : Can't apply the Output settings...")
    }
    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
    // 32ARGB buffers matching the output dimensions; these feed the adaptor's pool.
    let sourcePixelBufferAttributesDictionary = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB),
        kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)),
        kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))
    ]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    if videoWriter.canAdd(videoWriterInput) {
        videoWriter.add(videoWriterInput)
    }
    if videoWriter.startWriting() {
        videoWriter.startSession(atSourceTime: CMTime.zero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        // Fix: the original used the private bridging initializer
        // DispatchQueue(__label:attr:); the public API is DispatchQueue(label:).
        let media_queue = DispatchQueue(label: "mediaInputQueue")
        videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
            let fps: Int32 = 2
            let frameDuration = CMTimeMake(value: 1, timescale: fps)
            var frameCount: Int64 = 0
            var appendSucceeded = true
            while (!self.images.isEmpty) {
                if (videoWriterInput.isReadyForMoreMediaData) {
                    let nextPhoto = self.images.remove(at: 0)
                    let lastFrameTime = CMTimeMake(value: frameCount, timescale: fps)
                    // NOTE(review): for frameCount > 0 this yields frameCount + 1
                    // frame durations, skipping time slot 1 — presumably harmless
                    // for a timelapse, but confirm the intended timing.
                    let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
                    var pixelBuffer: CVPixelBuffer? = nil
                    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
                    if let pixelBuffer = pixelBuffer, status == 0 {
                        let managedPixelBuffer = pixelBuffer
                        CVPixelBufferLockBaseAddress(managedPixelBuffer, [])
                        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                        // Draw the photo into the pixel buffer via a bitmap context
                        // over the buffer's memory.
                        let context = CGContext(data: data, width: Int(outputSize.width), height: Int(outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                        context?.clear(CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height))
                        // Aspect-fit: shrink to fit, then center with x/y offsets.
                        let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
                        let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
                        let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                        let newSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
                        let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
                        let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
                        context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, [])
                        appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                    } else {
                        print("Failed to allocate pixel buffer")
                        appendSucceeded = false
                    }
                }
                if !appendSucceeded {
                    break
                }
                frameCount += 1
            }
            // Out of images (or an append failed): close the input, finalize the
            // file, then hand it to the photo library.
            videoWriterInput.markAsFinished()
            videoWriter.finishWriting { () -> Void in
                print("FINISHED!!!!!")
                saveVideoToLibrary(videoURL: videoOutputURL)
            }
        })
    }
}
The extra function I provide is:
/// Saves the video file at `videoURL` into the user's photo library and logs
/// the outcome of the change request.
/// - Parameter videoURL: File URL of a finished video (e.g. AVAssetWriter output).
func saveVideoToLibrary(videoURL: URL) {
    PHPhotoLibrary.shared().performChanges({
        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
    }) { saved, error in
        // Fixed typos in both log messages ("librayr", "Video save to").
        if let error = error {
            print("Error saving video to library: \(error.localizedDescription)")
        }
        if saved {
            print("Video saved to library")
        }
    }
}
// Infinitives shown to the user; indexes align with correctConjugation's outer array.
let verbList: [String] = ["hacer", "ser", "estar"]
// Grammatical persons; indexes align with each inner conjugation array.
let POVList: [String] = ["él / usted","ella / usted","ellas / ustedes","ellos / ustedes","tú","yo","nosotros",]
// correctConjugation[verb][person]. NOTE(review): several "estar" forms lack
// accents ("estan"/"estas" vs "están"/"estás") — confirm whether answers typed
// with accents should be accepted.
let correctConjugation: [[String]] = [["hace","hace","hacen","hacen","haces","hago","hacemos"], ["es","es","son","son","eres","soy","somos"], ["está","está","estan","estan","estas","estoy","estamos"]]
/// Returns a random index (0..<3) into the verb arrays
/// (`verbList` / `correctConjugation`). The original comment mislabeled this
/// as the prefix array; it selects the verb.
func randomVerb() -> Int {
    // Return directly — the mutable local served no purpose.
    return Int(arc4random_uniform(3))
}
/// Returns a random index (0..<7) into the person arrays
/// (`POVList` / each inner `correctConjugation` array). The original comment
/// mislabeled this as the verb array; it selects the person/prefix.
func randomPrefix() -> Int {
    // Return directly — the mutable local served no purpose.
    return Int(arc4random_uniform(7))
}
// Shows a new random verb/person pair and resets the answer field.
// NOTE(review): "#IBAction" is not valid Swift — action attributes are spelled
// "@IBAction"; kept as quoted. Verb/POV/userResponse are outlets declared
// elsewhere. The random indexes chosen here are not stored, which is the root
// of the bug discussed below.
#IBAction func changeVerb(sender: AnyObject) {
Verb.text = verbList[randomVerb()]
POV.text = POVList[randomPrefix()]
userResponse.backgroundColor = UIColor.whiteColor()
userResponse.text = ""
}
// Checks the user's answer against the conjugation table, coloring the field
// green (correct) or red (wrong).
// NOTE(review): two bugs here, per the discussion below:
// 1) `correctConjugation[randomVerb()[randomPrefix()]]` subscripts the Int
//    returned by randomVerb() — nested arrays need separate subscripts:
//    `correctConjugation[randomVerb()][randomPrefix()]`.
// 2) Even with fixed syntax, calling randomVerb()/randomPrefix() again draws
//    NEW random indexes, so the check rarely matches what changeVerb displayed;
//    the indexes must be stored when the verb is shown.
#IBAction func checkResponse(sender: AnyObject) {
var userResponseA: String
userResponseA = userResponse.text
if (userResponseA == correctConjugation[randomVerb()[randomPrefix()]]){
userResponse.backgroundColor = UIColor.greenColor()
} else {
userResponse.backgroundColor = UIColor.redColor()
}
}
So I get two errors here (in the if statement in checkResponse): first, "int does not have a member named 'subscript'" and if I just take out the call for the function in the if statement I get: "'String' is not convertible to 'Mirror Disposition'"
I really have no idea why this is not working. Bear with me, as I am an Xcode noob just trying to get a better grade in spanish.
Very close - just need to have your subscripts separated:
if (userResponseA == correctConjugation[randomVerb()][randomPrefix()]) {
// ...
}
When working with an array of arrays (in this case correctConjugation), each subscript takes you one level down.
For the other issue, you want a couple variables to hold the current verb and prefix indexes:
class VC: UIViewController {
    // list declarations here

    /// Index of the verb currently displayed; set in changeVerb so that
    /// checkResponse validates against exactly what the user was shown.
    var verbIndex = 0
    /// Index of the person/point-of-view currently displayed.
    var povIndex = 0

    // Fix: the original snippet used "#IBAction", which is not valid Swift;
    // action attributes are spelled "@IBAction".
    @IBAction func changeVerb(sender: AnyObject) {
        verbIndex = randomVerb()
        povIndex = randomPrefix()
        Verb.text = verbList[verbIndex]
        POV.text = POVList[povIndex]
        userResponse.backgroundColor = UIColor.whiteColor()
        userResponse.text = ""
    }

    @IBAction func checkResponse(sender: AnyObject) {
        // `let` — the response is never mutated after assignment.
        let userResponseA = userResponse.text
        if (userResponseA == correctConjugation[verbIndex][povIndex]) {
            userResponse.backgroundColor = UIColor.greenColor()
        } else {
            userResponse.backgroundColor = UIColor.redColor()
        }
    }
}