subclassing TTTableMessageItemCell, anything wrong? - three20

I subclassing TTTableMessageItemCell, got EXC_BAD_ACCESS runtime error. Anythign wrong?
CustomTTTableSubtitleItemCell.h
#import "Three20/Three20.h"
#interface CustomTTTableSubtitleItemCell : TTTableMessageItemCell {
TTButton *_rightButton;
}
#end
CustomTTTableSubtitleItemCell.m
#import "CustomTTTableSubtitleItemCell.h"
#import "CustomTTTableSubtitleItem.h"
#import "XYDefaultStyleSheet.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
static CGFloat kHPadding = 10;
static CGFloat kVPadding = 15;
#interface ButtonStyleSheet : TTDefaultStyleSheet
#end
#implementation ButtonStyleSheet
- (TTStyle*)blueToolbarButton:(UIControlState)state {
TTShape* shape = [TTRoundedRectangleShape shapeWithRadius:4.5];
UIColor* tintColor = RGBCOLOR(30, 110, 255);
return [TTSTYLESHEET toolbarButtonForState:state shape:shape tintColor:tintColor font:nil];
}
#end
#implementation CustomTTTableSubtitleItemCell
+ (CGFloat)tableView:(UITableView*)tableView rowHeightForItem:(id)item {
CustomTTTableSubtitleItem* captionedItem = item;
CGFloat maxWidth = tableView.width - kHPadding*2;
CGSize titleSize = [captionedItem.title sizeWithFont:TTSTYLEVAR(myTitleFont)
constrainedToSize:CGSizeMake(maxWidth, CGFLOAT_MAX)
lineBreakMode:UILineBreakModeWordWrap];
CGSize textSize = [captionedItem.text sizeWithFont:TTSTYLEVAR(myHeadingFont)
constrainedToSize:CGSizeMake(maxWidth, CGFLOAT_MAX)
lineBreakMode:UILineBreakModeWordWrap];
CGSize subtextSize = [captionedItem.caption sizeWithFont:TTSTYLEVAR(mySubtextFont)
constrainedToSize:CGSizeMake(maxWidth, CGFLOAT_MAX) lineBreakMode:UILineBreakModeWordWrap];
return kVPadding*2 + titleSize.height + textSize.height + subtextSize.height + kVPadding;
}
- (id)initWithStyle:(UITableViewCellStyle)style reuseIdentifier:(NSString*)identifier {
if (self = [super initWithStyle:UITableViewCellStyleValue2 reuseIdentifier:identifier]) {
_item = nil;
[TTStyleSheet setGlobalStyleSheet:[[[ButtonStyleSheet alloc] init] autorelease]];
}
return self;
}
- (void)layoutSubviews {
[super layoutSubviews];
[self.detailTextLabel sizeToFit];
self.detailTextLabel.top = kVPadding;
self.textLabel.height = self.detailTextLabel.height;
//_rightButton.frame = CGRectMake(20, self.detailTextLabel.bottom + kVPadding, kImageWidth, kImageHeight);
//_rightButton.alpha = !self.showingDeleteConfirmation;
[_rightButton sizeToFit];
_rightButton.left = self.contentView.width - (_timestampLabel.width + kHPadding);
_rightButton.top = self.height/2;
}
- (id)object {
return _item;
}
- (void)setObject:(id)object {
if (_item != object) {
[super setObject:object];
CustomTTTableSubtitleItem* item = object;
//self.textLabel.textColor = TTSTYLEVAR(myHeadingColor);
// self.textLabel.font = TTSTYLEVAR(myHeadingFont);
// self.textLabel.textAlignment = UITextAlignmentRight;
// self.textLabel.contentMode = UIViewContentModeCenter;
// self.textLabel.lineBreakMode = UILineBreakModeWordWrap;
// self.textLabel.numberOfLines = 0;
//
// self.detailTextLabel.textColor = TTSTYLEVAR(mySubtextColor);
// self.detailTextLabel.font = TTSTYLEVAR(mySubtextFont);
// self.detailTextLabel.textAlignment = UITextAlignmentLeft;
// self.detailTextLabel.contentMode = UIViewContentModeTop;
// self.detailTextLabel.lineBreakMode = UILineBreakModeWordWrap;
_rightButton = [TTButton
buttonWithStyle:#"blueToolbarButton:" title:item.rightButtonTitle];
}
}
- (void)dealloc {
TT_RELEASE_SAFELY(_rightButton);
[super dealloc];
}
#end

You're creating the TTButton using autorelease, while you're releasing it in your dealloc function. So both the release pool and the dealloc are trying to release your _rightButton TTButton.
in your header file, try adding:
#property (nonatomic, readonly, retain) TTButton* rightButton;
And then create the TTButton using his get function in your source file:
///////////////////////////////////////////////////////////////////////////////////////////////////
- (TTButton*)rightButton {
if (!_rightButton) {
_rightButton = [[TTButton
buttonWithStyle:#"blueToolbarButton:" title:item.rightButtonTitle] retain];
[self.contentView addSubview:rightButton];
}
return rightButton;
}
When using the rightButton, make sure to use self.rightBotton and not _rightButton, such as in the layout function (Because you need to object to be created).
self.rightButton.frame = CGRectMake(20, self.detailTextLabel.bottom + kVPadding, kImageWidth, kImageHeight);
I suggest opening the Three20UI/TTTableMessageItemCell.h & source file and trying to copy the behavior of one of the elements. That's what I did.

Related

Metal Framework on macOS

I am creating a simple Texture display that essentially renders the Video frames in BGRA format through Metal display. I follow the same steps as told in Metal WWDC session. But I have problems in creating the render encoder. My code is
id <MTLDevice> device = MTLCreateSystemDefaultDevice();
id<MTLCommandQueue> commandQueue = [device newCommandQueue];
id<MTLLibrary> library = [device newDefaultLibrary];
// Create Render Command Descriptor.
MTLRenderPipelineDescriptor* renderPipelineDesc = [MTLRenderPipelineDescriptor new];
renderPipelineDesc.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
renderPipelineDesc.vertexFunction = [library newFunctionWithName:#"basic_vertex"];
renderPipelineDesc.fragmentFunction = [library newFunctionWithName:#"basic_fragment"];
NSError* error = nil;
id<MTLRenderPipelineState> renderPipelineState = [device newRenderPipelineStateWithDescriptor:renderPipelineDesc
error:&error];
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
MTLRenderPassDescriptor* renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
id<CAMetalDrawable> drawable = [_metalLayer nextDrawable];
MTLRenderPassColorAttachmentDescriptor* colorAttachmentDesc = [MTLRenderPassColorAttachmentDescriptor new];
colorAttachmentDesc.texture = drawable.texture;
colorAttachmentDesc.loadAction = MTLLoadActionLoad;
colorAttachmentDesc.storeAction = MTLStoreActionStore;
colorAttachmentDesc.clearColor = MTLClearColorMake(0, 0, 0, 1);
[renderPassDesc.colorAttachments setObject:colorAttachmentDesc atIndexedSubscript:0];
[inTexture replaceRegion:region
mipmapLevel:0
withBytes:imageBytes
bytesPerRow:CVPixelBufferGetBytesPerRow(_image)];
id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc];
[renderCmdEncoder setRenderPipelineState:_renderPipelineState];
[renderCmdEncoder endEncoding];
This code crashes in the line saying "No Render Targets Found"
id renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc];
I am not able to figure out where and how to set the render target.
This will work perfectly; if you need help implementing it, let me know:
#import UIKit;
#import AVFoundation;
#import CoreMedia;
#import <MetalKit/MetalKit.h>
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#interface ViewController : UIViewController <MTKViewDelegate, AVCaptureVideoDataOutputSampleBufferDelegate> {
NSString *_displayName;
NSString *serviceType;
}
#property (retain, nonatomic) SessionContainer *session;
#property (retain, nonatomic) AVCaptureSession *avSession;
#end;
#import "ViewController.h"
#interface ViewController () {
MTKView *_metalView;
id<MTLDevice> _device;
id<MTLCommandQueue> _commandQueue;
id<MTLTexture> _texture;
CVMetalTextureCacheRef _textureCache;
}
#property (strong, nonatomic) AVCaptureDevice *videoDevice;
#property (nonatomic) dispatch_queue_t sessionQueue;
#end
#implementation ViewController
- (void)viewDidLoad {
NSLog(#"%s", __PRETTY_FUNCTION__);
[super viewDidLoad];
_device = MTLCreateSystemDefaultDevice();
_metalView = [[MTKView alloc] initWithFrame:self.view.bounds];
[_metalView setContentMode:UIViewContentModeScaleAspectFit];
_metalView.device = _device;
_metalView.delegate = self;
_metalView.clearColor = MTLClearColorMake(1, 1, 1, 1);
_metalView.colorPixelFormat = MTLPixelFormatBGRA8Unorm;
_metalView.framebufferOnly = NO;
_metalView.autoResizeDrawable = NO;
CVMetalTextureCacheCreate(NULL, NULL, _device, NULL, &_textureCache);
[self.view addSubview:_metalView];
self.sessionQueue = dispatch_queue_create( "session queue", DISPATCH_QUEUE_SERIAL );
if ([self setupCamera]) {
[_avSession startRunning];
}
}
- (BOOL)setupCamera {
NSLog(#"%s", __PRETTY_FUNCTION__);
#try {
NSError * error;
_avSession = [[AVCaptureSession alloc] init];
[_avSession beginConfiguration];
[_avSession setSessionPreset:AVCaptureSessionPreset640x480];
// get list of devices; connect to front-facing camera
self.videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if (self.videoDevice == nil) return FALSE;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:self.videoDevice error:&error];
[_avSession addInput:input];
dispatch_queue_t sampleBufferQueue = dispatch_queue_create("CameraMulticaster", DISPATCH_QUEUE_SERIAL);
AVCaptureVideoDataOutput * dataOutput = [[AVCaptureVideoDataOutput alloc] init];
[dataOutput setAlwaysDiscardsLateVideoFrames:YES];
[dataOutput setVideoSettings:#{(id)kCVPixelBufferPixelFormatTypeKey: #(kCVPixelFormatType_32BGRA)}];
[dataOutput setSampleBufferDelegate:self queue:sampleBufferQueue];
[_avSession addOutput:dataOutput];
[_avSession commitConfiguration];
} #catch (NSException *exception) {
NSLog(#"%s - %#", __PRETTY_FUNCTION__, exception.description);
return FALSE;
} #finally {
return TRUE;
}
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
{
size_t width = CVPixelBufferGetWidth(pixelBuffer);
size_t height = CVPixelBufferGetHeight(pixelBuffer);
CVMetalTextureRef texture = NULL;
CVReturn status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, _textureCache, pixelBuffer, NULL, MTLPixelFormatBGRA8Unorm, width, height, 0, &texture);
if(status == kCVReturnSuccess)
{
_metalView.drawableSize = CGSizeMake(width, height);
_texture = CVMetalTextureGetTexture(texture);
_commandQueue = [_device newCommandQueue];
CFRelease(texture);
}
}
}
- (void)drawInMTKView:(MTKView *)view {
// creating command encoder
if (_texture) {
id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
id<MTLTexture> drawingTexture = view.currentDrawable.texture;
// set up and encode the filter
MPSImageGaussianBlur *filter = [[MPSImageGaussianBlur alloc] initWithDevice:_device sigma:5];
[filter encodeToCommandBuffer:commandBuffer sourceTexture:_texture destinationTexture:drawingTexture];
// committing the drawing
[commandBuffer presentDrawable:view.currentDrawable];
[commandBuffer commit];
_texture = nil;
}
}
- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}
#end
you should try one of following points
1.Instead of creating new render pass descriptor,use current render pass descriptor object from MTKView object.this render pass descriptor already will be configured.you need not set anything.try the sample code given below-
if let currentPassDesc = view.currentRenderPassDescriptor,
let currentDrawable = view.currentDrawable
{
let renderCommandEncoder =
commandBuffer.makeRenderCommandEncoder(descriptor: currentPassDesc)
renderCommandEncoder.setRenderPipelineState(renderPipeline)
//set vertex buffers and call draw apis
.......
.......
commandBuffer.present(currentDrawable)
}
2.you are creating a new render pass descriptor and then setting its color attachment by the texture of drawable object so instead of doing this you should create a new texture object and then set usage of this texture as render target.then you will get content rendered in your new texture but it will be not displayed on screen so to get displayed the content of your textue you have to copy the content of your texture in drawable texture and then present drawable.
below is the code of making render target -
renderPassDescriptor.colorAttachments[0].clearColor =
MTLClearColor(red:
0.0,green: 0.0,blue: 0.0,alpha: 1.0)
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].storeAction = .store
renderPassDescriptor.depthAttachment.clearDepth = 1.0
renderPassDescriptor.depthAttachment.loadAction = .clear
renderPassDescriptor.depthAttachment.storeAction = .dontCare
let view = self.view as!MTKView
let textDesc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat:
.bgra8Unorm, width: Int(view.frame.width),
height: Int(view.frame.height), mipmapped: false)
textDesc.depth = 1
//see below line
textDesc.usage =
[MTLTextureUsage.renderTarget,MTLTextureUsage.shaderRead]
textDesc.storageMode = .private
mainPassFrameBuffer = device.makeTexture(descriptor: textDesc)
renderPassDescriptor.colorAttachments[0].texture = mainPassFrameBuffer

What would be the condition in a while loop using the FaceRecognizer

How do I make an while loop with FaceRecognizer so that while a face is recognized, a command will happen? I am not sure what the condition would be. What variable do I use and what to I equate it to?
Here is my code
#import "ViewController.h"
NSString* const faceCascadeFilename = #"haarcascade_frontalface_alt2";
const int HaarOptions = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
#interface ViewController ()
#end
#implementation ViewController
#synthesize videoCamera;
- (void)viewDidLoad
{
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView];
self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;
self.videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
self.videoCamera.defaultFPS = 30;
self.videoCamera.grayscaleMode = NO;
self.videoCamera.delegate = self;
NSString* faceCascadePath = [[NSBundle mainBundle] pathForResource:faceCascadeFilename ofType:#"xml"];
faceCascade.load([faceCascadePath UTF8String]);
Label1.hidden=YES;
}
- (void)didReceiveMemoryWarning
{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
#pragma mark - Protocol CvVideoCameraDelegate
#ifdef __cplusplus
- (void)processImage:(Mat&)image;
{
Mat grayscaleFrame;
cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
equalizeHist(grayscaleFrame, grayscaleFrame);
std::vector<cv::Rect> faces;
faceCascade.detectMultiScale(grayscaleFrame, faces, 1.1, 2, HaarOptions, cv::Size(60, 60));
for (int i = 0; i < faces.size(); i++)
{
cv::Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
cv::Point pt2(faces[i].x, faces[i].y);
cv::rectangle(image, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8 ,0);
}
}
//#endif
#pragma mark - UI Actions
- (IBAction)startCamera:(id)sender
{
[self.videoCamera start];
imageView.hidden=YES;
while (FaceRecognizer...) {
Label1.hidden=NO;
}
}

How to make an if statement with FaceRecognizer

How do I make an if statement with FaceRecognizer so that if a face is recognized, a command will happen?
I am not sure what the condition would be. What variable do I use and what to I equate it to?
Here is my code
#import "ViewController.h"
NSString* const faceCascadeFilename = #"haarcascade_frontalface_alt2";
const int HaarOptions = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
#interface ViewController ()
#end
#implementation ViewController
#synthesize videoCamera;
- (void)viewDidLoad
{
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
self.videoCamera = [[CvVideoCamera alloc] initWithParentView:imageView];
self.videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;
self.videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
self.videoCamera.defaultFPS = 30;
self.videoCamera.grayscaleMode = NO;
self.videoCamera.delegate = self;
NSString* faceCascadePath = [[NSBundle mainBundle] pathForResource:faceCascadeFilename ofType:#"xml"];
faceCascade.load([faceCascadePath UTF8String]);
Label1.hidden=YES;
}
- (void)didReceiveMemoryWarning
{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
#pragma mark - Protocol CvVideoCameraDelegate
#ifdef __cplusplus
- (void)processImage:(Mat&)image;
{
Mat grayscaleFrame;
cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
equalizeHist(grayscaleFrame, grayscaleFrame);
std::vector<cv::Rect> faces;
faceCascade.detectMultiScale(grayscaleFrame, faces, 1.1, 2, HaarOptions, cv::Size(60, 60));
for (int i = 0; i < faces.size(); i++)
{
cv::Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
cv::Point pt2(faces[i].x, faces[i].y);
cv::rectangle(image, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8 ,0);
}
}
//#endif
#pragma mark - UI Actions
- (IBAction)startCamera:(id)sender
{
[self.videoCamera start];
imageView.hidden=YES;
while (FaceRecognizer...) {
Label1.hidden=NO;
}
}

How make [UIView animateWithDuration: ..] work in application porting by apportable?

On portal application by apportable I need to make some animation (move/scale/change alpha) of UIView *object via call:
[UIView
animateWithDuration:1.f
delay:0.5f
options:UIViewAnimationOptionAllowUserInteraction
animations:^(void)
{
myview.center = moveTo;
myview.transform = transformTo;
myview.alpha = alphaTo;
}
completion:^(BOOL finished)
{
[self animationFinished];
}];
For now it's only make delay, then execute animation code & completion code immediately.
Thank you for answer.
But I need animation "today", so I make next class.
It's work not good enoght, but it's much better then nothing.
Maybe for one it will be helpful
AOTAnimate.h
//
// AOTAnimate.h
//
// Created by Andrei Bakulin on 18/11/2013.
//
#import <Foundation/Foundation.h>
#interface AOTAnimate : NSObject
{
UIView *view;
NSInteger animationTicksLeft;
CGFloat scaleX;
CGFloat scaleY;
CGPoint moveDelta;
CGSize scaleCurrent;
CGSize scaleDelta;
CGFloat alphaDelta;
void (^completeAction)();
}
#property (nonatomic, assign) CGFloat duration;
#property (nonatomic, assign) CGFloat delay;
#property (nonatomic, assign) CGFloat frequency;
#property (nonatomic, assign) UIViewAnimationOptions options;
#property (nonatomic, assign) CGPoint moveFrom;
#property (nonatomic, assign) CGAffineTransform transformFrom;
#property (nonatomic, assign) CGFloat alphaFrom;
#property (nonatomic, assign) CGPoint moveTo;
#property (nonatomic, assign) CGAffineTransform transformTo;
#property (nonatomic, assign) CGFloat alphaTo;
+ (AOTAnimate*)makeAnimationOnView:(UIView*)view_ duration:(CGFloat)duration_;
+ (AOTAnimate*)makeAnimationOnView:(UIView*)view_ duration:(CGFloat)duration_ delay:(CGFloat)delay_;
- (void)run;
- (void)runWithCompleteAction:(void (^)(void))complete_;
#end
AOTAnimate.m
//
// AOTAnimate.m
//
// Created by Andrei Bakulin on 18/11/2013.
//
#import "AOTAnimate.h"
#implementation AOTAnimate
#synthesize duration, delay, frequency, options;
#synthesize moveFrom, transformFrom, alphaFrom;
#synthesize moveTo, transformTo, alphaTo;
+ (AOTAnimate*)makeAnimationOnView:(UIView*)view_ duration:(CGFloat)duration_
{
return [self makeAnimationOnView:view_ duration:duration_ delay:0.f];
}
+ (AOTAnimate*)makeAnimationOnView:(UIView*)view_ duration:(CGFloat)duration_ delay:(CGFloat)delay_
{
return [[AOTAnimate alloc] initWithView:view_ duration:(CGFloat)duration_ delay:(CGFloat)delay_];
}
//----------------------------------
- (void)dealloc
{
[view release];
if( completeAction )
Block_release(completeAction);
[super dealloc];
}
- (id)initWithView:(UIView*)view_ duration:(CGFloat)duration_ delay:(CGFloat)delay_
{
self = [super init];
if (self)
{
view = [view_ retain];
duration = duration_;
delay = delay_;
frequency = 0.025f;
options = UIViewAnimationOptionAllowUserInteraction;
moveFrom = view.center;
transformFrom = view.transform;
alphaFrom = view.alpha;
moveTo = view.center;
transformTo = view.transform;
alphaTo = view.alpha;
}
return self;
}
//----------------------------------
#pragma mark - Run animation
- (void)run
{
[self runWithCompleteAction:nil];
}
- (void)runWithCompleteAction:(void (^)(void))complete_
{
view.center = moveFrom;
view.transform = transformFrom;
view.alpha = alphaFrom;
#ifndef ANDROID
[UIView
animateWithDuration:duration
delay:delay
options:options
animations:^(void)
{
view.center = moveTo;
view.transform = transformTo;
view.alpha = alphaTo;
}
completion:^(BOOL finished)
{
if( complete_ )
complete_();
}];
#else
if( duration <= 0.f )
{
[self doAnimationComplete];
return;
}
animationTicksLeft = ceil( duration / frequency );
if( animationTicksLeft == 0 )
{
[self doAnimationComplete];
return;
}
moveDelta = CGPointMake( (moveTo.x-moveFrom.x)/animationTicksLeft, (moveTo.y-moveFrom.y)/animationTicksLeft );
alphaDelta = (alphaTo-alphaFrom)/animationTicksLeft;
CGSize scaleFrom = CGSizeMake( [self scaleX:transformFrom], [self scaleY:transformFrom] );
CGSize scaleTo = CGSizeMake( [self scaleX:transformTo], [self scaleY:transformTo] );
scaleDelta = CGSizeMake((scaleTo.width - scaleFrom.width)/animationTicksLeft,
(scaleTo.height - scaleFrom.height)/animationTicksLeft );
scaleCurrent = scaleFrom;
if( complete_ )
{
completeAction = Block_copy(complete_);
}
[self performSelector:#selector(doAnimationTick) withObject:nil afterDelay:delay];
#endif
}
//----------------------------------
#pragma mark - Manual animation
#ifdef ANDROID
- (void)doAnimationTick
{
if( CGPointEqualToPoint( moveDelta, CGPointZero ) == NO )
{
view.center = CGPointMake( view.center.x + moveDelta.x, view.center.y + moveDelta.y );
}
if( CGSizeEqualToSize( scaleDelta, CGSizeZero) == NO )
{
view.transform = CGAffineTransformMakeScale( scaleCurrent.width, scaleCurrent.height );
scaleCurrent.width += scaleDelta.width;
scaleCurrent.height += scaleDelta.height;
}
if( alphaDelta != 0.f )
{
view.alpha = view.alpha + alphaDelta;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - -
animationTicksLeft--;
if( animationTicksLeft > 0 )
{
[self performSelector:#selector(doAnimationTick) withObject:nil afterDelay:frequency];
}
else
{
[self doAnimationComplete];
}
}
- (void)doAnimationComplete
{
view.center = moveTo;
view.transform = transformTo;
view.alpha = alphaTo;
if( completeAction )
completeAction();
}
//----------------------------------
#pragma mark - Helpers
- (CGFloat)scaleX:(CGAffineTransform)t
{
return sqrt(t.a * t.a + t.c * t.c);
}
- (CGFloat)scaleY:(CGAffineTransform)t
{
return sqrt(t.b * t.b + t.d * t.d);
}
#endif
#end
Use like this:
UIView *someview;
AOTAnimate *animate = [AOTAnimate makeAnimationOnView:someview duration:1.f delay:0.5f];
// allow to assign - animate.moveFrom / .tranfromFrom / alphaFrom properties,
// but by default they are copy from UIView* object
animate.moveTo = CGPointMake( 100, 200 ); // new point where need to move
animate.transformTo = CGAffineTransformScale( CGAffineTransformIdentity, 1.5f, 1.5f );
animate.alphaTo = 0.5f;
[animate runWithCompleteAction:^{
NSLog(#"Animation done..);
}];
If this method will run on iOS device - it'll use normal [UIView animateWithDuration:...] method
PS: This class do only "move" from one center point to another. Transform use only to scale object (not move). Alpha on my 2 test devices not supported, but maybe some where it does.
Animations do not work on the current version of Apportable's UIKit. We have fully functioning animations coming in the next version of UIKit, though. We will be releasing that once we are satisfied with the quality and coverage.

how to show UI objects code in xcode?

in one part of my project I need to create some UI objects programmatically, can I just customize my UI Objects like labels,... visually in storyboard then simply copy/paste generated code relevant to that object?
I searched in xcode menu but I couldn't find this but once I saw it in a tutorial in youtube.
Thanks in Advance
Yes you can customize the UI Classes or any other class, Like I have customize UILabel Class as UILabelExtended
UILabelExtended.h
#import <Foundation/Foundation.h>
/* **********************************************************************************************
This class inherit the class UILabel and extend the features of UILabel.
********************************************************************************************** */
#interface UILabelExtended : UILabel {
__unsafe_unretained id customDelegate;
id objectInfo;
SEL selector;
}
#property (nonatomic,assign) SEL selector;;
#property (nonatomic,assign) id customDelegate;
#property (nonatomic,retain) id objectInfo;
#end
#interface UILabel(UILabelCategory)
- (void)setHeightOfLabel;
- (void)setWidthOfLabel;
- (void)setHeightOfLabelWithMaxHeight:(float)maxHeight;
- (void)setWidthOfLabelWithMaxWidth:(float)maxWidth ;
#end
UILabelExtended.m
#import "UILabelExtended.h"
#implementation UILabelExtended
#synthesize selector,customDelegate, objectInfo;
- (void) touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
if(self.selector)
if([self.customDelegate respondsToSelector:self.selector]) {
[self.customDelegate performSelector:self.selector withObject:self];
return;
}
}
- (void)dealloc {
self.customDelegate = nil;
self.selector = NULL;
self.objectInfo = nil;
}
#end
#implementation UILabel(UILabelCategory)
- (void)setHeightOfLabel {
UILabel* label = self;
//get the height of label content
CGFloat height = [label.text sizeWithFont:label.font constrainedToSize:CGSizeMake(label.bounds.size.width, 99999) lineBreakMode:NSLineBreakByWordWrapping].height;
//set the frame according to calculated height
CGRect frame = label.frame;
if([label.text length] > 0) {
frame.size.height = height;
}
else {
frame.size.height = 0;
}
label.frame = frame;
}
- (void)setWidthOfLabel {
UILabel* label = self;
//get the height of label content
CGFloat width = [label.text sizeWithFont:label.font constrainedToSize:CGSizeMake(99999, label.bounds.size.height) lineBreakMode:NSLineBreakByWordWrapping].width;
//set the frame according to calculated height
CGRect frame = label.frame;
if([label.text length] > 0) {
frame.size.width = width+5;
}
else {
frame.size.width = 0;
}
label.frame = frame;
}
- (void)setHeightOfLabelWithMaxHeight:(float)maxHeight {
UILabel* label = self;
//get the height of label content
CGFloat height = [label.text sizeWithFont:label.font constrainedToSize:CGSizeMake(label.bounds.size.width, maxHeight) lineBreakMode:NSLineBreakByWordWrapping].height;
//set the frame according to calculated height
CGRect frame = label.frame;
if([label.text length] > 0) {
if (height > maxHeight) {
frame.size.height = maxHeight;
}
else {
frame.size.height = height;
}
}
else {
frame.size.height = 0;
}
label.frame = frame;
}
- (void)setWidthOfLabelWithMaxWidth:(float)maxWidth {
UILabel* label = self;
//get the height of label content
CGFloat width = [label.text sizeWithFont:label.font constrainedToSize:CGSizeMake(99999, label.bounds.size.height) lineBreakMode:NSLineBreakByWordWrapping].width;
//set the frame according to calculated height
CGRect frame = label.frame;
if([label.text length] > 0) {
if (width > maxWidth) {
frame.size.width = maxWidth;
}
else {
frame.size.width = width;
}
}
else {
frame.size.width = 0;
}
label.frame = frame;
}
#end

Resources