I have been stuck on this problem for a long time. As I asked, I can print the scroll view, but only a part of it, not the full-size content view.
The code is like this:
if ([UIPrintInteractionController isPrintingAvailable])
{
UIPrintInteractionController *pic = [UIPrintInteractionController sharedPrintController];
UIGraphicsBeginImageContext(sv.contentSize);
[sv.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
NSData *myData = [NSData dataWithData:UIImagePNGRepresentation(viewImage)];
if(pic && [UIPrintInteractionController canPrintData: myData] ) {
pic.delegate =(id<UIPrintInteractionControllerDelegate>) self;
UIPrintInfo *printInfo = [UIPrintInfo printInfo];
printInfo.outputType = UIPrintInfoOutputGeneral;
printInfo.jobName = [NSString stringWithFormat:@"QFTicket-%@", self.payment.id];
printInfo.duplex = UIPrintInfoDuplexLongEdge;
pic.printInfo = printInfo;
pic.showsPageRange = YES;
pic.printingItem = myData;
pic.printFormatter=[sv viewPrintFormatter];
void (^completionHandler)(UIPrintInteractionController *, BOOL, NSError *) = ^(UIPrintInteractionController *printController, BOOL completed, NSError *error) {
if (!completed && error) {
NSLog(#"FAILED! due to error in domain %# with error code %u", error.domain, error.code);
}
};
[pic presentAnimated:YES completionHandler:completionHandler];
}
}
Am I missing something?
Thanks.
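For reference, one commonly suggested workaround (a sketch only, not verified against this exact view hierarchy) is to temporarily grow the scroll view to its full contentSize before rendering, so renderInContext: captures more than the visible region, and then restore the original geometry:
// Sketch: capture the entire contentSize of the scroll view "sv".
// Assumption: the offscreen content stays laid out when the frame grows.
UIGraphicsBeginImageContextWithOptions(sv.contentSize, NO, 0);
CGPoint savedOffset = sv.contentOffset;
CGRect savedFrame = sv.frame;
sv.contentOffset = CGPointZero;
sv.frame = CGRectMake(0, 0, sv.contentSize.width, sv.contentSize.height);
[sv.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *fullImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
sv.frame = savedFrame;
sv.contentOffset = savedOffset;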
I am working on capturing the Mac screen in JPEG format and then getting the pixelBuffer and imageBuffer from the captured JPEG sample buffer.
However, the pixelBuffer is always nil, while converting the JPEG buffer to an NSImage works: the image is obtained and displayed successfully.
-(void)createSession
{
if(self.session == nil)
{
self.session = [[AVCaptureSession alloc] init];
self.session.sessionPreset = AVCaptureSessionPresetPhoto;
CGDirectDisplayID displayId = [self getDisplayID];
self.input = [[AVCaptureScreenInput alloc] initWithDisplayID:displayId];
[self.session addInput:self.input];
self.imageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = @{AVVideoCodecKey : AVVideoCodecJPEG};
[self.imageOutput setOutputSettings:outputSettings];
[self.session addOutput:self.imageOutput];
[self.session startRunning];
}
}
-(void)processSampleBuffer
{
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in self.imageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([[port mediaType] isEqual:AVMediaTypeVideo] ) {
videoConnection = connection;
break;
}
}
if (videoConnection) { break; }
}
[self.imageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError * error) {
if(imageDataSampleBuffer != nil)
{
NSData * imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
self.image = [[NSImage alloc] initWithData:imageData];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(imageDataSampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
NSLog(#"width %zu height %zu", width,height);
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(imageDataSampleBuffer);
float width1 = CVPixelBufferGetWidth(pixelBuffer);
float height1 = CVPixelBufferGetHeight(pixelBuffer);
NSLog(#"Pixelbuffer width %f height %f", width1,height1);
}
else
{
NSLog(#"error");
}
}];
}
In processSampleBuffer, self.image gets a valid NSImage that is displayed in an NSImageView successfully,
but imageBuffer and pixelBuffer are both nil.
This confuses me a lot; could someone take a look?
OK, I finally found the answer here; I hope it can help others.
https://developer.apple.com/documentation/avfoundation/avcapturephoto/2873914-pixelbuffer?language=objc
Discussion
If you requested photo capture in a RAW format, or in a processed format without compression such as TIFF, you can use this property to access the underlying sample buffer.
If you requested capture in a compressed format such as JPEG or HEVC/HEIF, this property's value is nil. Use the fileDataRepresentation or CGImageRepresentation method to obtain compressed image data.
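In other words, a compressed (JPEG/HEIC) sample buffer carries no CVPixelBuffer. A minimal sketch of one way around this for the AVCaptureStillImageOutput code above (an assumption on my part, not something I have tested here) is to request an uncompressed pixel format instead of JPEG, so CMSampleBufferGetImageBuffer returns a real pixel buffer:
// Sketch: ask the still-image output for uncompressed 32BGRA frames
// instead of JPEG, so the sample buffer contains a CVPixelBufferRef.
NSDictionary *outputSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
[self.imageOutput setOutputSettings:outputSettings];
With that change, CMSampleBufferGetImageBuffer(imageDataSampleBuffer) should no longer be nil; if JPEG output is required, stick with jpegStillImageNSDataRepresentation: and decode the NSData whenever a bitmap is needed.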
I'm working on a project that searches the content of a lot of files. I planned to use SearchKit, but I can't figure out how to make Apple's sample code work, and I can't find any other resources (the NSHipster code didn't work either). Here's my code:
#define kSearchMax 1000
@interface ViewController ()
@property (nonatomic) SKIndexRef mySKIndex;
@end
@implementation ViewController
@synthesize mySKIndex;
- (void)viewDidLoad {
[super viewDidLoad];
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0), ^{
[self openIndex];
[self addDoc];
SKIndexFlush(self.mySKIndex);
// I thought the indexing might need some time ...
sleep(2);
dispatch_async(dispatch_get_main_queue(), ^{
[self searchterm:#"var"];
});
});
}
- (void) openIndex {
NSString *path = [[NSHomeDirectory() stringByAppendingPathComponent:@"index"] stringByAppendingPathExtension:@"txt"]; // 1
NSURL *url = [NSURL fileURLWithPath:path];
NSString *name = #"extension_index";
if ([name length] == 0) name = nil;
SKIndexType type = kSKIndexInverted;
if ([[NSFileManager defaultManager] fileExistsAtPath:path]) {
mySKIndex = SKIndexOpenWithURL ((__bridge CFURLRef) url,
(__bridge CFStringRef) name,
true
);
}else{
self.mySKIndex = SKIndexCreateWithURL((__bridge CFURLRef) url,
(__bridge CFStringRef) name,
(SKIndexType) type,
(CFDictionaryRef) NULL);
}
}
- (void) addDoc {
SKLoadDefaultExtractorPlugIns ();
NSString *path = [NSBundle.mainBundle pathForResource:@"Products" ofType:@"rtf"]; // 1
NSURL *url = [NSURL fileURLWithPath: path]; // 2
SKDocumentRef doc = SKDocumentCreateWithURL ((__bridge CFURLRef) url);
NSString *mimeTypeHint = #"text/rtf";
BOOL added = SKIndexAddDocument ((SKIndexRef) mySKIndex,
(SKDocumentRef) doc,
(__bridge CFStringRef)mimeTypeHint,
(Boolean) true
);
NSLog(added ? @"added" : @"not added");
}
- (void) searchterm:(NSString*)query{
SKSearchOptions options = kSKSearchOptionDefault;
BOOL more = YES;
UInt32 totalCount = 0;
SKSearchRef search = SKSearchCreate (mySKIndex,
(__bridge CFStringRef) query,
options);
while (more) {
SKDocumentID foundDocIDs [kSearchMax];
float foundScores [kSearchMax];
float *scores;
Boolean unranked =
options & kSKSearchOptionNoRelevanceScores;
if (unranked) {
scores = NULL;
} else {
scores = foundScores;
}
CFIndex foundCount = 0;
more = SKSearchFindMatches (
search,
kSearchMax,
foundDocIDs,
scores,
100,
&foundCount
);
NSLog(#"%#", [NSString stringWithFormat:#"current count = %i", totalCount]);
totalCount += foundCount;
}
}
#end
It always prints "current count = 0", and the loop executes only once.
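For debugging, a minimal sketch (assuming the openIndex/addDoc setup above) that verifies the document actually landed in the index, and shows how found document IDs map back to URLs:
// Sketch: sanity-check the index contents after flushing.
SKIndexFlush(self.mySKIndex);
NSLog(@"indexed documents: %ld", (long)SKIndexGetDocumentCount(self.mySKIndex));
// Once SKSearchFindMatches returns hits, the document IDs can be mapped to URLs:
CFURLRef foundURLs[kSearchMax];
SKIndexCopyDocumentURLsForDocumentIDs(self.mySKIndex, foundCount, foundDocIDs, foundURLs);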
I am creating a simple texture display that essentially renders video frames in BGRA format through Metal. I follow the same steps as described in the Metal WWDC session, but I have problems creating the render encoder. My code is:
id <MTLDevice> device = MTLCreateSystemDefaultDevice();
id<MTLCommandQueue> commandQueue = [device newCommandQueue];
id<MTLLibrary> library = [device newDefaultLibrary];
// Create Render Command Descriptor.
MTLRenderPipelineDescriptor* renderPipelineDesc = [MTLRenderPipelineDescriptor new];
renderPipelineDesc.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
renderPipelineDesc.vertexFunction = [library newFunctionWithName:@"basic_vertex"];
renderPipelineDesc.fragmentFunction = [library newFunctionWithName:@"basic_fragment"];
NSError* error = nil;
id<MTLRenderPipelineState> renderPipelineState = [device newRenderPipelineStateWithDescriptor:renderPipelineDesc
error:&error];
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
MTLRenderPassDescriptor* renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
id<CAMetalDrawable> drawable = [_metalLayer nextDrawable];
MTLRenderPassColorAttachmentDescriptor* colorAttachmentDesc = [MTLRenderPassColorAttachmentDescriptor new];
colorAttachmentDesc.texture = drawable.texture;
colorAttachmentDesc.loadAction = MTLLoadActionLoad;
colorAttachmentDesc.storeAction = MTLStoreActionStore;
colorAttachmentDesc.clearColor = MTLClearColorMake(0, 0, 0, 1);
[renderPassDesc.colorAttachments setObject:colorAttachmentDesc atIndexedSubscript:0];
[inTexture replaceRegion:region
mipmapLevel:0
withBytes:imageBytes
bytesPerRow:CVPixelBufferGetBytesPerRow(_image)];
id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc];
[renderCmdEncoder setRenderPipelineState:_renderPipelineState];
[renderCmdEncoder endEncoding];
This code crashes on the following line with the message "No Render Targets Found":
id renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc];
I am not able to figure out where and how to set the render target.
This will work perfectly; if you need help implementing it, let me know:
@import UIKit;
@import AVFoundation;
@import CoreMedia;
#import <MetalKit/MetalKit.h>
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
@interface ViewController : UIViewController <MTKViewDelegate, AVCaptureVideoDataOutputSampleBufferDelegate> {
NSString *_displayName;
NSString *serviceType;
}
@property (retain, nonatomic) SessionContainer *session;
@property (retain, nonatomic) AVCaptureSession *avSession;
@end
#import "ViewController.h"
@interface ViewController () {
MTKView *_metalView;
id<MTLDevice> _device;
id<MTLCommandQueue> _commandQueue;
id<MTLTexture> _texture;
CVMetalTextureCacheRef _textureCache;
}
@property (strong, nonatomic) AVCaptureDevice *videoDevice;
@property (nonatomic) dispatch_queue_t sessionQueue;
@end
@implementation ViewController
- (void)viewDidLoad {
NSLog(#"%s", __PRETTY_FUNCTION__);
[super viewDidLoad];
_device = MTLCreateSystemDefaultDevice();
_metalView = [[MTKView alloc] initWithFrame:self.view.bounds];
[_metalView setContentMode:UIViewContentModeScaleAspectFit];
_metalView.device = _device;
_metalView.delegate = self;
_metalView.clearColor = MTLClearColorMake(1, 1, 1, 1);
_metalView.colorPixelFormat = MTLPixelFormatBGRA8Unorm;
_metalView.framebufferOnly = NO;
_metalView.autoResizeDrawable = NO;
CVMetalTextureCacheCreate(NULL, NULL, _device, NULL, &_textureCache);
[self.view addSubview:_metalView];
self.sessionQueue = dispatch_queue_create( "session queue", DISPATCH_QUEUE_SERIAL );
if ([self setupCamera]) {
[_avSession startRunning];
}
}
- (BOOL)setupCamera {
NSLog(#"%s", __PRETTY_FUNCTION__);
#try {
NSError * error;
_avSession = [[AVCaptureSession alloc] init];
[_avSession beginConfiguration];
[_avSession setSessionPreset:AVCaptureSessionPreset640x480];
// get list of devices; connect to front-facing camera
self.videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if (self.videoDevice == nil) return FALSE;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:self.videoDevice error:&error];
[_avSession addInput:input];
dispatch_queue_t sampleBufferQueue = dispatch_queue_create("CameraMulticaster", DISPATCH_QUEUE_SERIAL);
AVCaptureVideoDataOutput * dataOutput = [[AVCaptureVideoDataOutput alloc] init];
[dataOutput setAlwaysDiscardsLateVideoFrames:YES];
[dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)}];
[dataOutput setSampleBufferDelegate:self queue:sampleBufferQueue];
[_avSession addOutput:dataOutput];
[_avSession commitConfiguration];
} @catch (NSException *exception) {
NSLog(@"%s - %@", __PRETTY_FUNCTION__, exception.description);
return FALSE;
} @finally {
return TRUE;
}
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
{
size_t width = CVPixelBufferGetWidth(pixelBuffer);
size_t height = CVPixelBufferGetHeight(pixelBuffer);
CVMetalTextureRef texture = NULL;
CVReturn status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, _textureCache, pixelBuffer, NULL, MTLPixelFormatBGRA8Unorm, width, height, 0, &texture);
if(status == kCVReturnSuccess)
{
_metalView.drawableSize = CGSizeMake(width, height);
_texture = CVMetalTextureGetTexture(texture);
_commandQueue = [_device newCommandQueue];
CFRelease(texture);
}
}
}
- (void)drawInMTKView:(MTKView *)view {
// creating command encoder
if (_texture) {
id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer];
id<MTLTexture> drawingTexture = view.currentDrawable.texture;
// set up and encode the filter
MPSImageGaussianBlur *filter = [[MPSImageGaussianBlur alloc] initWithDevice:_device sigma:5];
[filter encodeToCommandBuffer:commandBuffer sourceTexture:_texture destinationTexture:drawingTexture];
// committing the drawing
[commandBuffer presentDrawable:view.currentDrawable];
[commandBuffer commit];
_texture = nil;
}
}
- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}
#end
You should try one of the following approaches.
1. Instead of creating a new render pass descriptor, use the current render pass descriptor object from the MTKView. That render pass descriptor is already configured; you don't need to set anything. Try the sample code given below:
if let currentPassDesc = view.currentRenderPassDescriptor,
let currentDrawable = view.currentDrawable
{
let renderCommandEncoder =
commandBuffer.makeRenderCommandEncoder(descriptor: currentPassDesc)
renderCommandEncoder.setRenderPipelineState(renderPipeline)
//set vertex buffers and call draw apis
.......
.......
commandBuffer.present(currentDrawable)
}
2. You are creating a new render pass descriptor and then setting its color attachment to the drawable's texture. Instead, create a new texture object and set its usage to render target. The content will then be rendered into your new texture, but it won't be displayed on screen; to display it, you have to copy the contents of your texture into the drawable's texture and then present the drawable (a copy sketch follows the code below).
Below is the code for creating the render target:
renderPassDescriptor.colorAttachments[0].clearColor =
MTLClearColor(red:
0.0,green: 0.0,blue: 0.0,alpha: 1.0)
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].storeAction = .store
renderPassDescriptor.depthAttachment.clearDepth = 1.0
renderPassDescriptor.depthAttachment.loadAction = .clear
renderPassDescriptor.depthAttachment.storeAction = .dontCare
let view = self.view as! MTKView
let textDesc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat:
.bgra8Unorm, width: Int(view.frame.width),
height: Int(view.frame.height), mipmapped: false)
textDesc.depth = 1
//see below line
textDesc.usage =
[MTLTextureUsage.renderTarget,MTLTextureUsage.shaderRead]
textDesc.storageMode = .private
mainPassFrameBuffer = device.makeTexture(descriptor: textDesc)
renderPassDescriptor.colorAttachments[0].texture = mainPassFrameBuffer
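The copy-to-drawable step mentioned in point 2 is not shown above; here is a rough sketch in Objective-C (to match the question's code) using a blit encoder. It assumes offscreenTexture corresponds to mainPassFrameBuffer in the Swift snippet, that it shares the drawable's size and pixel format, that commandBuffer and drawable are already in scope, and that the layer was created with framebufferOnly set to NO:
// Sketch: copy the offscreen render target into the drawable, then present it.
id<MTLBlitCommandEncoder> blit = [commandBuffer blitCommandEncoder];
[blit copyFromTexture:offscreenTexture
          sourceSlice:0
          sourceLevel:0
         sourceOrigin:MTLOriginMake(0, 0, 0)
           sourceSize:MTLSizeMake(offscreenTexture.width, offscreenTexture.height, 1)
            toTexture:drawable.texture
     destinationSlice:0
     destinationLevel:0
    destinationOrigin:MTLOriginMake(0, 0, 0)];
[blit endEncoding];
[commandBuffer presentDrawable:drawable];
[commandBuffer commit];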
I'm trying to build a simple chat in iOS with Obj-C. When I press the "send" button, I save the object to Parse and then retrieve all the messages.
When the save succeeds, I retrieve all the messages, so the last one (mine) should be retrieved as well, but the last one is never returned. What should I do?
This is my code:
NSMutableArray *messagesArray;
- (IBAction)sendButtonTapped:(id)sender {
self.sendButton.enabled = false;
self.textField.enabled = false;
PFObject *objectChat = [PFObject objectWithClassName:@"ChatClass"];
objectChat[@"text"] = self.textField.text;
[objectChat saveInBackgroundWithBlock:^(BOOL succeeded, NSError *error) {
self.sendButton.enabled = true;
self.textField.enabled = true;
if (succeeded) {
self.textField.text = #"";
[self retrieveMessages];
NSLog(#"success!");
} else {
NSLog(@"%@", error.description);
}
}];
}
-(void) retrieveMessages {
NSDate *now = [NSDate date];
PFQuery *query = [PFQuery queryWithClassName:@"ChatClass"];
[query whereKey:@"createdAt" lessThanOrEqualTo:now];
[query findObjectsInBackgroundWithBlock:^(NSArray *objects, NSError *error) {
if (error == nil) {
messagesArray = [[NSMutableArray alloc] init];
for (int i = 0; i < [objects count]; i++) {
[messagesArray addObject:[[objects objectAtIndex:i] objectForKey:@"text"]];
}
runOnMainQueueWithoutDeadlocking(^{
[self.messageTableView reloadData];
});
}
}];
}
void runOnMainQueueWithoutDeadlocking(void (^block)(void))
{
if ([NSThread isMainThread])
{
block();
}
else
{
dispatch_async(dispatch_get_main_queue(), block);
}
}
Thank you very much in advance.
Regards.
Perhaps there's enough of a clock difference with the Parse server that the createdAt qualification misses the object you just created. Removing that line should do the trick.
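Concretely, retrieveMessages would then look roughly like this (a sketch; the rest of the method stays as in the question, and ordering by createdAt is an extra assumption so messages come back in chat order):
PFQuery *query = [PFQuery queryWithClassName:@"ChatClass"];
// No createdAt upper bound, so a just-saved message can't fall outside the window
// because of clock skew between the device and the Parse server.
[query orderByAscending:@"createdAt"];
[query findObjectsInBackgroundWithBlock:^(NSArray *objects, NSError *error) {
    // ... same handling as above ...
}];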
I am writing a text editor for Mac OS X. I need to display hidden characters in an NSTextView (such as spaces, tabs, and special characters). I have spent a lot of time searching for how to do this but so far I have not found an answer. If anyone could point me in the right direction I would be grateful.
Here's a fully working and clean implementation:
@interface GILayoutManager : NSLayoutManager
@end
@implementation GILayoutManager
- (void)drawGlyphsForGlyphRange:(NSRange)range atPoint:(NSPoint)point {
NSTextStorage* storage = self.textStorage;
NSString* string = storage.string;
for (NSUInteger glyphIndex = range.location; glyphIndex < range.location + range.length; glyphIndex++) {
NSUInteger characterIndex = [self characterIndexForGlyphAtIndex: glyphIndex];
switch ([string characterAtIndex:characterIndex]) {
case ' ': {
NSFont* font = [storage attribute:NSFontAttributeName atIndex:characterIndex effectiveRange:NULL];
[self replaceGlyphAtIndex:glyphIndex withGlyph:[font glyphWithName:#"periodcentered"]];
break;
}
case '\n': {
NSFont* font = [storage attribute:NSFontAttributeName atIndex:characterIndex effectiveRange:NULL];
[self replaceGlyphAtIndex:glyphIndex withGlyph:[font glyphWithName:#"carriagereturn"]];
break;
}
}
}
[super drawGlyphsForGlyphRange:range atPoint:point];
}
@end
To install, use:
[myTextView.textContainer replaceLayoutManager:[[GILayoutManager alloc] init]];
To find font glyph names, you have to go to CoreGraphics:
CGFontRef font = CGFontCreateWithFontName(CFSTR("Menlo-Regular"));
for (size_t i = 0; i < CGFontGetNumberOfGlyphs(font); ++i) {
printf("%s\n", [CFBridgingRelease(CGFontCopyGlyphNameForGlyph(font, i)) UTF8String]);
}
Have a look at the NSLayoutManager class. Your NSTextView will have a layout manager associated with it, and the layout manager is responsible for associating a character (space, tab, etc.) with a glyph (the image of that character drawn on the screen).
In your case, you would probably be most interested in the replaceGlyphAtIndex:withGlyph: method, which would allow you to replace individual glyphs.
I wrote a text editor a few years back - here's some meaningless code that should get you looking in (hopefully) the right direction (this is an NSLayoutManager subclass btw - and yes I know it's leaking like the proverbial kitchen sink):
- (void)drawGlyphsForGlyphRange:(NSRange)glyphRange atPoint:(NSPoint)containerOrigin
{
if ([[[[MJDocumentController sharedDocumentController] currentDocument] editor] showInvisibles])
{
//init glyphs
unichar crlf = 0x00B6;
NSString *CRLF = [[NSString alloc] initWithCharacters:&crlf length:1];
unichar space = 0x00B7;
NSString *SPACE = [[NSString alloc] initWithCharacters:&space length:1];
unichar tab = 0x2192;
NSString *TAB = [[NSString alloc] initWithCharacters:&tab length:1];
NSString *docContents = [[self textStorage] string];
NSString *glyph;
NSPoint glyphPoint;
NSRect glyphRect;
NSDictionary *attr = [[NSDictionary alloc] initWithObjectsAndKeys:[NSUnarchiver unarchiveObjectWithData:[[NSUserDefaults standardUserDefaults] objectForKey:@"invisiblesColor"]], NSForegroundColorAttributeName, nil];
//loop thru current range, drawing glyphs
int i;
for (i = glyphRange.location; i < NSMaxRange(glyphRange); i++)
{
glyph = #"";
//look for special chars
switch ([docContents characterAtIndex:i])
{
//space
case ' ':
glyph = SPACE;
break;
//tab
case '\t':
glyph = TAB;
break;
//eol
case 0x2028:
case 0x2029:
case '\n':
case '\r':
glyph = CRLF;
break;
//do nothing
default:
glyph = #"";
break;
}
//should we draw?
if ([glyph length])
{
glyphPoint = [self locationForGlyphAtIndex:i];
glyphRect = [self lineFragmentRectForGlyphAtIndex:i effectiveRange:NULL];
glyphPoint.x += glyphRect.origin.x;
glyphPoint.y = glyphRect.origin.y;
[glyph drawAtPoint:glyphPoint withAttributes:attr];
}
}
}
[super drawGlyphsForGlyphRange:glyphRange atPoint:containerOrigin];
}
I solved the problem of converting between NSGlyphs and the corresponding unichar in the NSTextView. The code below works beautifully and replaces spaces with bullets for visible text:
- (void)drawGlyphsForGlyphRange:(NSRange)range atPoint:(NSPoint)origin
{
NSFont *font = [[CURRENT_TEXT_VIEW typingAttributes]
objectForKey:NSFontAttributeName];
NSGlyph bullet = [font glyphWithName:@"bullet"];
for (int i = range.location; i != range.location + range.length; i++)
{
unsigned charIndex = [self characterIndexForGlyphAtIndex:i];
unichar c =[[[self textStorage] string] characterAtIndex:charIndex];
if (c == ' ')
[self replaceGlyphAtIndex:charIndex withGlyph:bullet];
}
[super drawGlyphsForGlyphRange:range atPoint:origin];
}
Perhaps -[NSLayoutManager setShowsControlCharacters:] and/or -[NSLayoutManager setShowsInvisibleCharacters:] will do what you want.
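If those flags do what you need, it's a one-liner on the text view's layout manager (worth trying first, though their rendering support has historically been limited, which is why the subclassing approaches above exist):
// Sketch: toggle the built-in invisible/control character display flags.
myTextView.layoutManager.showsInvisibleCharacters = YES;
myTextView.layoutManager.showsControlCharacters = YES;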
Here is Pol's solution in Swift:
class MyLayoutManager: NSLayoutManager {
override func drawGlyphsForGlyphRange(glyphsToShow: NSRange, atPoint origin: NSPoint) {
if let storage = self.textStorage {
let s = storage.string
let startIndex = s.startIndex
for var glyphIndex = glyphsToShow.location; glyphIndex < glyphsToShow.location + glyphsToShow.length; glyphIndex++ {
let characterIndex = self.characterIndexForGlyphAtIndex(glyphIndex)
let ch = s[startIndex.advancedBy(characterIndex)]
switch ch {
case " ":
let attrs = storage.attributesAtIndex(characterIndex, effectiveRange: nil)
if let font = attrs[NSFontAttributeName] {
let g = font.glyphWithName("periodcentered")
self.replaceGlyphAtIndex(glyphIndex, withGlyph: g)
}
case "\n":
let attrs = storage.attributesAtIndex(characterIndex, effectiveRange: nil)
if let font = attrs[NSFontAttributeName] {
// let g = font.glyphWithName("carriagereturn")
let g = font.glyphWithName("paragraph")
self.replaceGlyphAtIndex(glyphIndex, withGlyph: g)
}
case "\t":
let attrs = storage.attributesAtIndex(characterIndex, effectiveRange: nil)
if let font = attrs[NSFontAttributeName] {
let g = font.glyphWithName("arrowdblright")
self.replaceGlyphAtIndex(glyphIndex, withGlyph: g)
}
default:
break
}
}
}
super.drawGlyphsForGlyphRange(glyphsToShow, atPoint: origin)
}
}
And to list the glyph names:
func listFonts() {
let font = CGFontCreateWithFontName("Menlo-Regular")
for var i:UInt16 = 0; i < UInt16(CGFontGetNumberOfGlyphs(font)); i++ {
if let name = CGFontCopyGlyphNameForGlyph(font, i) {
print("name: \(name) at index \(i)")
}
}
}