offline rendering with a lowpass filter causes aliasing and clipping - core-audio

I have a buffer of 8 kHz samples, and I am trying to simply apply a lowpass filter to that buffer. In other words, I start with a buffer of 8 kHz samples and I want to end up with a buffer of 8 kHz LOWPASSED samples. If I hook up a lowpass unit, connect it to the default output unit, and supply my buffer, it sounds perfect and properly low-passed. However, as soon as I remove the output and call AudioUnitRender on the lowpass audio unit directly, the resulting samples are aliased and clipped.
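For context, the online hookup that sounds correct is just the lowpass unit connected straight into the default output unit. Roughly like this (a sketch from memory, not my exact code; outputUnit stands for a kAudioUnitSubType_DefaultOutput instance, and the lowpass unit and render callback are set up as in the code below):
// Working online chain: render callback -> lowpass -> default output (sketch).
AudioUnitConnection connection = {0};
connection.sourceAudioUnit = player.lowPassUnit;
connection.sourceOutputNumber = 0;
connection.destInputNumber = 0;
CheckError(AudioUnitSetProperty(outputUnit,
kAudioUnitProperty_MakeConnection,
kAudioUnitScope_Input,
0,
&connection,
sizeof(connection)),
"Couldn't connect lowpass to default output");
CheckError(AudioUnitInitialize(outputUnit), "Couldn't initialize output unit");
CheckError(AudioOutputUnitStart(outputUnit), "Couldn't start output unit");
The offline version, which produces the aliased and clipped samples, is below: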
#import "EffectMachine.h"
#import <AudioToolbox/AudioToolbox.h>
#import "AudioHelpers.h"
#import "Buffer.h"
@interface EffectMachine ()
@property (nonatomic, strong) Buffer *buffer;
@end
typedef struct EffectPlayer {
NSUInteger index;
AudioUnit lowPassUnit;
__unsafe_unretained Buffer *buffer;
} EffectPlayer;
OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData);
OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData) {
struct EffectPlayer *player = (struct EffectPlayer *)inRefCon;
for (int i = 0; i < inNumberFrames; i++) {
float sample;
if (player->index < player->buffer.size) {
sample = (float)player->buffer.samples[player->index];
player->index += 1;
} else {
sample = 0;
}
((float *)ioData->mBuffers[0].mData)[i] = sample;
((float *)ioData->mBuffers[1].mData)[i] = sample;
}
return noErr;
}
@implementation EffectMachine {
EffectPlayer player;
}
-(instancetype)initWithBuffer:(Buffer *)buffer {
if (self = [super init]) {
self.buffer = buffer;
}
return self;
}
-(Buffer *)process {
struct EffectPlayer initialized = {0};
player = initialized;
player.buffer = self.buffer;
[self setupAudioUnits];
Buffer *buffer = [self processedBuffer];
[self cleanup];
return buffer;
}
-(void)setupAudioUnits {
AudioComponentDescription lowpasscd = {0};
lowpasscd.componentType = kAudioUnitType_Effect;
lowpasscd.componentSubType = kAudioUnitSubType_LowPassFilter;
lowpasscd.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent comp = AudioComponentFindNext(NULL, &lowpasscd);
if (comp == NULL) NSLog(@"can't get lowpass unit");
AudioComponentInstanceNew(comp, &player.lowPassUnit);
AURenderCallbackStruct input;
input.inputProc = EffectMachineCallbackRenderProc;
input.inputProcRefCon = &player;
CheckError(AudioUnitSetProperty(player.lowPassUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input)),
"AudioUnitSetProperty for callback failed");
CheckError(AudioUnitSetParameter(player.lowPassUnit,
kLowPassParam_CutoffFrequency,
kAudioUnitScope_Global,
0,
1500,
0), "AudioUnitSetParameter cutoff for lowpass failed");
CheckError(AudioUnitSetParameter(player.lowPassUnit,
kLowPassParam_Resonance,
kAudioUnitScope_Global,
0,
0,
0), "AudioUnitSetParameter resonance for lowpass failed");
CheckError(AudioUnitInitialize(player.lowPassUnit),
"Couldn't initialize lowpass unit");
}
-(Buffer *)processedBuffer {
AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList));
UInt32 blockSize = 1024;
float *left = malloc(sizeof(float) * blockSize);
float *right = malloc(sizeof(float) * blockSize);
bufferlist->mBuffers[0].mData = left;
bufferlist->mBuffers[1].mData = right;
UInt32 size = sizeof(float) * blockSize;
AudioTimeStamp inTimeStamp;
memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
inTimeStamp.mSampleTime = 0;
AudioUnitRenderActionFlags flag = 0;
NSUInteger length = ceil(self.buffer.size / (float)blockSize);
double *processed = malloc(sizeof(double) * blockSize * length);
for (int i = 0; i < length; i++) {
bufferlist->mBuffers[0].mDataByteSize = size;
bufferlist->mBuffers[1].mDataByteSize = size;
bufferlist->mNumberBuffers = 2;
inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
AudioUnitRender(player.lowPassUnit, &flag, &inTimeStamp, 0, blockSize, bufferlist);
for (NSUInteger j = 0; j < blockSize; j++) {
processed[j + (blockSize * i)] = left[j];
}
inTimeStamp.mSampleTime += blockSize;
}
Buffer *buffer = [[Buffer alloc] initWithSamples:processed size:self.buffer.size sampleRate:self.buffer.sampleRate];
free(bufferlist);
free(left);
free(right);
free(processed);
return buffer;
}
-(void)cleanup {
AudioOutputUnitStop(player.lowPassUnit);
AudioUnitUninitialize(player.lowPassUnit);
AudioComponentInstanceDispose(player.lowPassUnit);
}
@end
If I add a generic output and try to set an 8 kHz ASBD on its input, then I just get garbage noise for output. It looks like: 0,0,0,0,0,17438231945853048031929171968.000000,0,0,0,-2548199532257382185315640279040.000000... Yikes!
I tried adding ASBDs to the input and output of the lowpass unit, giving it an 8 kHz sample rate property, and it did nothing. I tried adding converter units (with ASBDs set to 8 kHz) before, then after, then before AND after the lowpass filter (in a chain); this also did not work.
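For reference, the kind of ASBD I was setting on the lowpass unit looked roughly like this (a sketch of what I tried, not necessarily verbatim):
AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate = 8000.0;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
asbd.mBitsPerChannel = 32;
asbd.mChannelsPerFrame = 2;
asbd.mFramesPerPacket = 1;
asbd.mBytesPerFrame = 4; // one 32-bit float per channel per frame (non-interleaved)
asbd.mBytesPerPacket = 4;
CheckError(AudioUnitSetProperty(player.lowPassUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&asbd,
sizeof(asbd)),
"Couldn't set 8kHz stream format on lowpass input");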
As a side question, my buffer is mono 8 kHz samples, and if I make my buffer list have mNumberBuffers set to 1, then my lowpass input render proc is never called... Is there a way to not have to use stereo channels?
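This is roughly how the buffer list looked when I tried mono (blockSize and left are the same as in the code above):
AudioBufferList *monoList = malloc(sizeof(AudioBufferList)); // the struct already contains one AudioBuffer
monoList->mNumberBuffers = 1;
monoList->mBuffers[0].mNumberChannels = 1;
monoList->mBuffers[0].mDataByteSize = sizeof(float) * blockSize;
monoList->mBuffers[0].mData = left;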

I got this working by using converter units at both ends, i.e. a chain of render callback (8 kHz mono) -> input converter -> lowpass (44100 Hz stereo) -> output converter. The ASBD is set to 8000 Hz mono floats on the input of the input converter and on the output of the output converter, while the lowpass unit's input and output use 44100 Hz stereo. For the offline render I call AudioUnitRender on the final converter, with no I/O unit involved. For the online render I put a converter unit before the I/O unit, so the render callback pulls from the 8 kHz buffers for playback too. It appears that the lower sample rate on the output ASBD requires a higher maximum frames per slice and a smaller slice (the AudioUnitRender inNumberFrames), and that's why it wouldn't render before.
#import "ViewController.h"
#import <AudioToolbox/AudioToolbox.h>
@implementation ViewController {
int sampleCount;
int renderBufferHead;
float *renderBuffer;
}
- (void)viewDidLoad {
[super viewDidLoad];
float sampleRate = 8000;
int bufferSeconds = 3;
sampleCount = sampleRate * bufferSeconds;//seconds
float *originalSaw = generateSawWaveBuffer(440, sampleRate, sampleCount);
renderBuffer = originalSaw;
renderBufferHead = 0;
AURenderCallbackStruct cbStruct = {renderCallback,(__bridge void *)self};
//this will do offline render using the render callback, callback just reads from renderBuffer at samplerate
float *processedBuffer = offlineRender(sampleCount, sampleRate, &cbStruct);
renderBufferHead = 0;//rewind render buffer after processing
//set up audio units to do live render using the render callback at sample rate then self destruct after delay
//it will play originalSaw for bufferSeconds, then after delay will switch renderBuffer to point at processedBuffer
float secondsToPlayAudio = (bufferSeconds + 1) * 2;
onlineRender(sampleRate, &cbStruct,secondsToPlayAudio);
//wait for original to finish playing, then change render callback source buffer to processed buffer
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)((secondsToPlayAudio / 2) * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
renderBuffer = processedBuffer;
renderBufferHead = 0;//rewind render buffer
});
//destroy after all rendering done
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(secondsToPlayAudio * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
free(originalSaw);
free(processedBuffer);
});
}
float * offlineRender(int count, double sampleRate, AURenderCallbackStruct *cbStruct){
AudioComponentInstance inConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioComponentInstance lowPass = getComponentInstance(kAudioUnitType_Effect, kAudioUnitSubType_LowPassFilter);
AudioComponentInstance outConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
AudioUnitSetProperty(inConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(outConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(inConverter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
formatAndConnect(inConverter, lowPass);
formatAndConnect(lowPass, outConverter);
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(inConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitSetProperty(lowPass, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitSetProperty(outConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitInitialize(inConverter);
AudioUnitInitialize(lowPass);
AudioUnitInitialize(outConverter);
AudioUnitSetParameter(lowPass, kLowPassParam_CutoffFrequency, kAudioUnitScope_Global, 0, 500, 0);
AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBufferList)); // over-allocates so there is room for a second AudioBuffer (a stereo bufferlist) if needed
float *left = malloc(sizeof(float) * 4096);
bufferlist->mBuffers[0].mData = left;
bufferlist->mNumberBuffers = 1;
AudioTimeStamp inTimeStamp;
memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
inTimeStamp.mSampleTime = 0;
float *buffer = malloc(sizeof(float) * count);
int inNumberframes = 512;
AudioUnitRenderActionFlags flag = 0;
int framesRead = 0;
while (count){
inNumberframes = MIN(inNumberframes, count);
bufferlist->mBuffers[0].mDataByteSize = sizeof(float) * inNumberframes;
printf("Offline Render %i frames\n",inNumberframes);
AudioUnitRender(outConverter, &flag, &inTimeStamp, 0, inNumberframes, bufferlist);
memcpy(buffer + framesRead, left, sizeof(float) * inNumberframes);
inTimeStamp.mSampleTime += inNumberframes;
count -= inNumberframes;
framesRead += inNumberframes;
}
free(left);
// free(right);
free(bufferlist);
AudioUnitUninitialize(inConverter);
AudioUnitUninitialize(lowPass);
AudioUnitUninitialize(outConverter);
return buffer;
}
OSStatus renderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData){
ViewController *self = (__bridge ViewController*)inRefCon;
float *left = ioData->mBuffers[0].mData;
for (int i = 0; i < inNumberFrames; i++) {
if (self->renderBufferHead >= self->sampleCount) {
left[i] = 0;
}
else{
left[i] = self->renderBuffer[self->renderBufferHead++];
}
}
if(ioData->mNumberBuffers == 2){
memcpy(ioData->mBuffers[1].mData, left, sizeof(float) * inNumberFrames);
}
printf("render %f to %f\n",inTimeStamp->mSampleTime,inTimeStamp->mSampleTime + inNumberFrames);
return noErr;
}
void onlineRender(double sampleRate, AURenderCallbackStruct *cbStruct,float duration){
AudioComponentInstance converter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioComponentInstance ioUnit = getComponentInstance(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput);
AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
AudioUnitSetProperty(converter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(converter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
formatAndConnect(converter, ioUnit);
AudioUnitInitialize(converter);
AudioUnitInitialize(ioUnit);
AudioOutputUnitStart(ioUnit);
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(duration * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
AudioOutputUnitStop(ioUnit);
AudioUnitUninitialize(ioUnit);
AudioUnitUninitialize(converter);
});
}
float * generateSawWaveBuffer(float frequency,float sampleRate, int sampleCount){
float *buffer = malloc(sizeof(float) * sampleCount);
float increment = (frequency / sampleRate) * 2;
int increasing = 1;
float sample = 0;
for (int i = 0; i < sampleCount; i++) {
if (increasing) {
sample += increment;
if (sample >= 1) {
increasing = 0;
}
}
else{
sample -= increment;
if (sample < -1) {
increasing = 1;
}
}
buffer[i] = sample;
}
return buffer;
}
AudioComponentInstance getComponentInstance(OSType type,OSType subType){
AudioComponentDescription desc = {0};
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentSubType = subType;
desc.componentType = type;
AudioComponent ioComponent = AudioComponentFindNext(NULL, &desc);
AudioComponentInstance unit;
AudioComponentInstanceNew(ioComponent, &unit);
return unit;
}
AudioStreamBasicDescription getMonoFloatASBD(double sampleRate){
AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate = sampleRate;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
asbd.mFramesPerPacket = 1;
asbd.mChannelsPerFrame = 1;
asbd.mBitsPerChannel = 32;
asbd.mBytesPerPacket = 4;
asbd.mBytesPerFrame = 4;
return asbd;
}
void formatAndConnect(AudioComponentInstance src,AudioComponentInstance dst){
AudioStreamBasicDescription asbd;
UInt32 propsize = sizeof(AudioStreamBasicDescription);
AudioUnitGetProperty(dst, kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&asbd,&propsize);
AudioUnitSetProperty(src, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitConnection connection = {0};
connection.destInputNumber = 0;
connection.sourceAudioUnit = src;
connection.sourceOutputNumber = 0;
AudioUnitSetProperty(dst, kAudioUnitProperty_MakeConnection, kAudioUnitScope_Input, 0, &connection, sizeof(AudioUnitConnection));
}
@end

Related

How do I return a CGImageRef from CGWindowListCreateImage to C#?

I'm currently attempting to write a plugin for macOS for Unity. I am taking a screenshot of the desktop with CGWindowListCreateImage. I'm trying to figure out how to return the byte[] data to C# so I can create a Texture2D from it. Any help would be greatly appreciated, thank you.
It doesn't want me to return an NSArray*. The .h file is at the bottom.
NSArray* getScreenshot()
{
CGImageRef screenShot = CGWindowListCreateImage( CGRectInfinite, kCGWindowListOptionOnScreenOnly, kCGNullWindowID, kCGWindowImageDefault);
return getRGBAsFromImage(screenShot);
}
NSArray* getRGBAsFromImage(CGImageRef imageRef)
{
// First get the image into your data buffer
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
NSUInteger bytesPerPixel = 4;
unsigned long count = width * height * bytesPerPixel;
NSMutableArray *result = [NSMutableArray arrayWithCapacity:count];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = (unsigned char *)malloc(height * width * 4); // heap allocation, so the free() below is valid (alloca memory must not be freed)
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
// Now your rawData contains the image data in the RGBA8888 pixel format.
int x = 0, y = 0;
NSUInteger byteIndex = (bytesPerRow * y) + x * bytesPerPixel;
for (NSUInteger i = 0 ; i < width * height ; ++i) // iterate per pixel; count is a byte count
{
CGFloat alpha = ((CGFloat) rawData[byteIndex + 3] ) / 255.0f;
CGFloat red = ((CGFloat) rawData[byteIndex] ) / alpha;
CGFloat green = ((CGFloat) rawData[byteIndex + 1] ) / alpha;
CGFloat blue = ((CGFloat) rawData[byteIndex + 2] ) / alpha;
byteIndex += bytesPerPixel;
NSColor *acolor = [NSColor colorWithRed:red green:green blue:blue alpha:alpha];
[result addObject:acolor]; // append; insertObject:atIndex:count would throw on an empty array
}
free(rawData);
return result;
}
#ifndef TestMethods_hpp
#define TestMethods_hpp
#import <Foundation/Foundation.h>
#include <Carbon/Carbon.h>
#include <stdio.h>
#include <AppKit/AppKit.h>
typedef void (*Unity_Callback1)(char * message);
extern "C" {
NSArray* getScreenshot();
}
#endif /* TestMethods_h */

Data corruption when replacing a GLSL constant with a uniform value

Follow up to this recent question.
I am doing GPGPU programming in WebGL2, and I'm passing in a large 4-dimensional square array to my shaders by packing it into a texture to bypass the uniform count limits. Having freed myself from having to use a relatively small fixed-size array, I would like to be able to specify the size of the data that is actually being passed in programmatically.
Previously, I had hard-coded the size of the data to read using a const int as follows:
const int SIZE = 5;
const int SIZE2 = SIZE*SIZE;
const int SIZE3 = SIZE2*SIZE;
uniform sampler2D u_map;
int get_cell(vec4 m){
ivec4 i = ivec4(mod(m,float(SIZE)));
float r = texelFetch(u_map, ivec2(i.x*SIZE3+i.y*SIZE2+i.z*SIZE+i.w, 0), 0).r;
return int(r * 255.0);
}
If I update SIZE2 and SIZE3 to be non-constant and initialized in main, it still works:
const int SIZE = 5;
int SIZE2;
int SIZE3;
uniform sampler2D u_map;
int get_cell(vec4 m){
ivec4 i = ivec4(mod(m,float(SIZE)));
float r = texelFetch(u_map, ivec2(i.x*SIZE3+i.y*SIZE2+i.z*SIZE+i.w, 0), 0).r;
return int(r * 255.0);
}
...
void main(){
SIZE2 = SIZE*SIZE;
SIZE3 = SIZE*SIZE2;
...
}
However, if I then replace const int SIZE = 5; with uniform int SIZE;, and then add
const size_loc = gl.getUniformLocation(program, "SIZE");
gl.uniform1i(size_loc, 5);
to the JavaScript side to set it to the same integer value that used to be hardcoded, I start seeing incorrect values being read from the texture. What am I doing wrong?
UPDATE 1: I did a little experiment where I keep the constant SIZE specification, but also pass in a uniform int alongside it. If they are not equal, I have the shader bail out and return all zeroes. This way, I could verify that the correct integer values are in fact being set on the uniform variable. But if I then make SIZE non-constant and set it to the value of the uniform variable with which it was just compared and found to be equal, things break. What the heck?
UPDATE 2:
This works:
int SIZE = 5;
uniform int u_size;
....
void main() {
if (u_size != SIZE) return;
SIZE = u_size;
...
}
This doesn't:
int SIZE = 5;
uniform int u_size;
....
void main() {
SIZE = u_size;
...
}
I'm not able to reproduce your issue. Post a minimal, complete, verifiable example in a snippet.
Here's a working example:
const vs = `#version 300 es
void main() {
gl_PointSize = 1.0;
gl_Position = vec4(0, 0, 0, 1);
}
`;
const fs = `#version 300 es
precision highp float;
uniform ivec4 cell;
uniform int SIZE;
int SIZE2;
int SIZE3;
uniform highp isampler2D u_map;
int get_cell(ivec4 m){
ivec4 i = m % SIZE;
int r = texelFetch(u_map, ivec2(i.x*SIZE3 + i.y*SIZE2 + i.z*SIZE + i.w, 0), 0).r;
return r;
}
out int result;
void main(){
SIZE2 = SIZE*SIZE;
SIZE3 = SIZE*SIZE2;
result = get_cell(cell);
}
`;
const gl = document.createElement('canvas').getContext('webgl2');
// compile shaders, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make a 1x1 R32I texture and attach to framebuffer
const framebufferInfo = twgl.createFramebufferInfo(gl, [
{ internalFormat: gl.R32I, minMag: gl.NEAREST, },
], 1, 1);
const size = 5;
const totalSize = size * size * size * size;
const data = new Int32Array(totalSize);
for (let i = 0; i < data.length; ++i) {
data[i] = 5 + i * 3;
}
// create a size*size*size*size by 1
// R32I texture
const tex = twgl.createTexture(gl, {
width: totalSize,
src: data,
minMag: gl.NEAREST,
internalFormat: gl.R32I,
});
gl.bindFramebuffer(gl.FRAMEBUFFER, framebufferInfo.framebuffer);
gl.viewport(0, 0, 1, 1);
gl.useProgram(programInfo.program);
const result = new Int32Array(1);
for (let w = 0; w < size; ++w) {
for (let z = 0; z < size; ++z) {
for (let y = 0; y < size; ++y) {
for (let x = 0; x < size; ++x) {
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
cell: [x, y, z, w],
u_map: tex,
SIZE: size,
});
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
gl.readPixels(0, 0, 1, 1, gl.RED_INTEGER, gl.INT, result);
log(x, y, z, w, ':', result[0], data[x * size * size * size + y * size * size + z * size + w]);
}
}
}
}
function log(...args) {
const elem = document.createElement('pre');
elem.textContent = [...args].join(' ');
document.body.appendChild(elem);
}
pre { margin: 0; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Trying with the code you posted, I see no issues either:
const vs = `#version 300 es
void main() {
gl_PointSize = 1.0;
gl_Position = vec4(0, 0, 0, 1);
}
`;
const fs = `#version 300 es
precision highp float;
uniform vec4 cell;
uniform int SIZE;
int SIZE2;
int SIZE3;
uniform sampler2D u_map;
int get_cell(vec4 m){
ivec4 i = ivec4(mod(m,float(SIZE)));
float r = texelFetch(u_map, ivec2(i.x*SIZE3+i.y*SIZE2+i.z*SIZE+i.w, 0), 0).r;
return int(r * 255.0);
}
out float result;
void main(){
SIZE2 = SIZE*SIZE;
SIZE3 = SIZE*SIZE2;
// output to texture is normalized float
result = float(get_cell(cell)) / 255.0;
}
`;
const gl = document.createElement('canvas').getContext('webgl2');
// compile shaders, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const size = 5;
const totalSize = size * size * size * size;
const data = new Uint8Array(totalSize);
for (let i = 0; i < data.length; ++i) {
data[i] = (5 + i * 3) % 256;
}
// create a size*size*size*size by 1
// R8 texture
const tex = twgl.createTexture(gl, {
width: totalSize,
src: data,
minMag: gl.NEAREST,
internalFormat: gl.R8,
});
gl.viewport(0, 0, 1, 1);
gl.useProgram(programInfo.program);
const result = new Uint8Array(4);
for (let w = 0; w < size; ++w) {
for (let z = 0; z < size; ++z) {
for (let y = 0; y < size; ++y) {
for (let x = 0; x < size; ++x) {
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
cell: [x, y, z, w],
u_map: tex,
SIZE: size,
});
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, result);
log(x, y, z, w, ':', result[0], data[x * size * size * size + y * size * size + z * size + w]);
}
}
}
}
function log(...args) {
const elem = document.createElement('pre');
elem.textContent = [...args].join(' ');
document.body.appendChild(elem);
}
pre { margin: 0; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Note that I wouldn't use a 1-dimensional texture, since there is a limit on texture dimensions. I'd use a 3-dimensional texture to increase the limit:
const vs = `#version 300 es
void main() {
gl_PointSize = 1.0;
gl_Position = vec4(0, 0, 0, 1);
}
`;
const fs = `#version 300 es
precision highp float;
uniform ivec4 cell;
uniform int SIZE;
uniform highp isampler3D u_map;
int get_cell(ivec4 m){
// no idea why you made x major
ivec4 i = m % SIZE;
int r = texelFetch(
u_map,
ivec3(
i.z * SIZE + i.w,
i.yx),
0).r;
return r;
}
out int result;
void main(){
result = get_cell(cell);
}
`;
const gl = document.createElement('canvas').getContext('webgl2');
// compile shaders, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make a 1x1 R32I texture and attach to framebuffer
const framebufferInfo = twgl.createFramebufferInfo(gl, [
{ internalFormat: gl.R32I, minMag: gl.NEAREST, },
], 1, 1);
const size = 5;
const totalSize = size * size * size * size;
const data = new Int32Array(totalSize);
for (let i = 0; i < data.length; ++i) {
data[i] = 5 + i * 3;
}
// create a size*size*size*size by 1
// R32I texture 3D
const tex = twgl.createTexture(gl, {
target: gl.TEXTURE_3D,
width: size * size,
height: size,
src: data,
minMag: gl.NEAREST,
internalFormat: gl.R32I,
});
gl.bindFramebuffer(gl.FRAMEBUFFER, framebufferInfo.framebuffer);
gl.viewport(0, 0, 1, 1);
gl.useProgram(programInfo.program);
const result = new Int32Array(1);
for (let w = 0; w < size; ++w) {
for (let z = 0; z < size; ++z) {
for (let y = 0; y < size; ++y) {
for (let x = 0; x < size; ++x) {
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
cell: [x, y, z, w],
u_map: tex,
SIZE: size,
});
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
gl.readPixels(0, 0, 1, 1, gl.RED_INTEGER, gl.INT, result);
log(x, y, z, w, ':', result[0], data[x * size * size * size + y * size * size + z * size + w]);
}
}
}
}
function log(...args) {
const elem = document.createElement('pre');
elem.textContent = [...args].join(' ');
document.body.appendChild(elem);
}
pre { margin: 0; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

How to change the clarity of an H.264 video decoded by FFmpeg and rendered by OpenGL

I'm writing a movie player that uses FFmpeg and OpenGL ES. The movie decodes successfully, but when I use the AVFrame as a texture to draw on screen, it comes out fuzzy. I don't know where my code is wrong. If I convert the AVFrame from YUV to an RGB image, it is clear.
Does anyone know why drawing with the YUV textures is not clear?
My render code:
#import "SJGLView.h"
#import <GLKit/GLKit.h>
#import "SJDecoder.h"
#include "libavutil/pixfmt.h"
// MARK: - C Function
static void sj_logShaderError(GLuint shader) {
GLint info_len = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_len);
if (info_len == 0) NSLog(@"Empty info");
else {
GLchar *log = (GLchar *)malloc(info_len);
glGetShaderInfoLog(shader, info_len, &info_len, log);
NSLog(#"Shader compile log: %s", log);
}
}
static void sj_logProgramError(GLuint program) {
int info_length;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &info_length);
if (info_length) {
GLchar *log = (GLchar *)malloc(info_length);
glGetProgramInfoLog(program, info_length, &info_length, log);
NSLog(#"Program link log: %s", log);
}
}
GLuint sj_loadShader(GLenum shader_type, const char* shader_source) {
GLuint shader = glCreateShader(shader_type);
glShaderSource(shader, 1, &shader_source, NULL);
glCompileShader(shader);
GLint compile_status = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
if (!compile_status) goto fail;
return shader;
fail:
if (shader) {
sj_logShaderError(shader);
glDeleteShader(shader);
}
return 0;
}
void loadOrtho(float *matrix, float left, float right, float bottom, float top, float near, float far) {
float r_l = right - left;
float t_b = top - bottom;
float f_n = far - near;
float tx = (right + left)/(right - left);
float ty = (top + bottom)/(top - bottom);
float tz = (far + near)/(far - near);
matrix[0] = 2.0f / r_l;
matrix[1] = 0.0f;
matrix[2] = 0.0f;
matrix[3] = 0.0f;
matrix[4] = 0.0f;
matrix[5] = 2.0f / t_b;
matrix[6] = 0.0f;
matrix[7] = 0.0f;
matrix[8] = 0.0f;
matrix[9] = 0.0f;
matrix[10] = -2.0f / f_n;
matrix[11] = 0.0f;
matrix[12] = tx;
matrix[13] = ty;
matrix[14] = tz;
matrix[15] = 1.0f;
}
// BT.709, standard for HDTV
static const GLfloat g_bt709[] = {
1.164, 1.164, 1.164,
0.0, -0.213, 2.112,
1.793, -0.533, 0.0,
};
const GLfloat *getColorMatrix_bt709() {
return g_bt709;
}
enum {
ATTRIBUTE_VERTEX,
ATTRIBUTE_TEXCOORD,
};
@implementation SJGLView {
EAGLContext *_context;
GLuint _framebuffer;
GLuint _renderbuffer;
GLint _backingWidth;
GLint _backingHeight;
GLfloat _vertices[8];
GLuint _program;
GLuint _av4Position;
GLuint _av2Texcoord;
GLuint _um4Mvp;
GLfloat _texcoords[8];
GLuint _us2Sampler[3];
GLuint _um3ColorConversion;
GLuint _textures[3];
SJDecoder *_decoder;
}
+ (Class)layerClass {
return [CAEAGLLayer class];
}
- (instancetype)initWithFrame:(CGRect)frame decoder:(SJDecoder *)decoder {
self = [super initWithFrame:frame];
if (self) {
_decoder = decoder;
[self setupGL];
}
return self;
}
- (void)layoutSubviews {
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer*)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
[self updateVertices];
[self render: nil];
}
- (void)setContentMode:(UIViewContentMode)contentMode
{
[super setContentMode:contentMode];
[self updateVertices];
[self render:nil];
}
- (void)setupGL {
_context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
NSAssert(_context != nil, @"Failed to init EAGLContext");
CAEAGLLayer *eaglLayer= (CAEAGLLayer *)self.layer;
eaglLayer.opaque = YES;
eaglLayer.drawableProperties = @{
kEAGLDrawablePropertyRetainedBacking: [NSNumber numberWithBool:YES],
kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
};
[EAGLContext setCurrentContext:_context];
if ([self setupEAGLContext]) {
NSLog(#"Success to setup EAGLContext");
if ([self loadShaders]) {
NSLog(#"Success to load shader");
_us2Sampler[0] = glGetUniformLocation(_program, "us2_SamplerX");
_us2Sampler[1] = glGetUniformLocation(_program, "us2_SamplerY");
_us2Sampler[2] = glGetUniformLocation(_program, "us2_SamplerZ");
_um3ColorConversion = glGetUniformLocation(_program, "um3_ColorConversion");
}
}
}
- (BOOL)setupEAGLContext {
glGenFramebuffers(1, &_framebuffer);
glGenRenderbuffers(1, &_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, _framebuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer *)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _renderbuffer);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
NSLog(#"Failed to make complete framebuffer object: %x", status);
return NO;
}
GLenum glError = glGetError();
if (glError != GL_NO_ERROR) {
NSLog(#"Failed to setup EAGLContext: %x", glError);
return NO;
}
return YES;
}
- (BOOL)loadShaders {
NSString *vertexPath = [[NSBundle mainBundle] pathForResource:@"vertex" ofType:@"vsh"];
const char *vertexString = [[NSString stringWithContentsOfFile:vertexPath encoding:NSUTF8StringEncoding error:nil] UTF8String];
NSString *fragmentPath = _decoder.format == SJVideoFrameFormatYUV ? [[NSBundle mainBundle] pathForResource:@"yuv420p" ofType:@"fsh"] :
[[NSBundle mainBundle] pathForResource:@"rgb" ofType:@"fsh"];
const char *fragmentString = [[NSString stringWithContentsOfFile:fragmentPath encoding:NSUTF8StringEncoding error:nil] UTF8String];
GLuint vertexShader = sj_loadShader(GL_VERTEX_SHADER, vertexString);
GLuint fragmentShader = sj_loadShader(GL_FRAGMENT_SHADER, fragmentString);
_program = glCreateProgram();
glAttachShader(_program, vertexShader);
glAttachShader(_program, fragmentShader);
glLinkProgram(_program);
GLint link_status = GL_FALSE;
glGetProgramiv(_program, GL_LINK_STATUS, &link_status);
if(!link_status) goto fail;
_av4Position = glGetAttribLocation(_program, "av4_Position");
_av2Texcoord = glGetAttribLocation(_program, "av2_Texcoord");
_um4Mvp = glGetUniformLocation(_program, "um4_ModelViewProjection");
return YES;
fail:
sj_logProgramError(_program);
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
glDeleteProgram(_program);
return NO;
}
- (void)useRenderer {
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glUseProgram(_program);
if (0 == _textures[0]) glGenTextures(3, _textures);
for (int i = 0; i < 3; i++) {
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glUniform1i(_us2Sampler[i], i);
}
glUniformMatrix3fv(_um3ColorConversion, 1, GL_FALSE, getColorMatrix_bt709());
}
- (void)uploadTexture:(SJVideoFrame *)frame {
if (frame.format == SJVideoFrameFormatYUV) {
SJVideoYUVFrame *yuvFrame = (SJVideoYUVFrame *)frame;
const GLubyte *pixel[3] = { yuvFrame.luma.bytes, yuvFrame.chromaB.bytes, yuvFrame.chromaR.bytes };
const GLsizei widths[3] = { yuvFrame.width, yuvFrame.width/2, yuvFrame.width/2 };
const GLsizei heights[3] = { yuvFrame.height, yuvFrame.height/2, yuvFrame.height/2 };
for (int i = 0; i < 3; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, widths[i], heights[i], 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, pixel[i]);
}
}
}
- (void)render:(SJVideoFrame *)frame {
[EAGLContext setCurrentContext:_context];
glUseProgram(_program);
[self useRenderer];
GLfloat modelviewProj[16];
loadOrtho(modelviewProj, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f);
glUniformMatrix4fv(_um4Mvp, 1, GL_FALSE, modelviewProj);
[self updateVertices];
[self updateTexcoords];
glBindFramebuffer(GL_FRAMEBUFFER, _framebuffer);
glViewport(0, 0, _backingWidth, _backingHeight);
[self uploadTexture:frame];
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
- (void)updateVertices {
[self resetVertices];
BOOL fit = (self.contentMode == UIViewContentModeScaleAspectFit);
float width = _decoder.frameWidth;
float height = _decoder.frameHeight;
const float dW = (float)_backingWidth / width;
const float dH = (float)_backingHeight / height;
float dd = fit ? MIN(dH, dW) : MAX(dH, dW);
float nW = (width * dd / (float)_backingWidth);
float nH = (height * dd / (float)_backingHeight);
_vertices[0] = -nW;
_vertices[1] = -nH;
_vertices[2] = nW;
_vertices[3] = -nH;
_vertices[4] = -nW;
_vertices[5] = nH;
_vertices[6] = nW;
_vertices[7] = nH;
glVertexAttribPointer(_av4Position, 2, GL_FLOAT, GL_FALSE, 0, _vertices);
glEnableVertexAttribArray(_av4Position);
}
- (void)resetVertices {
_vertices[0] = -1.0f;
_vertices[1] = -1.0f;
_vertices[2] = 1.0f;
_vertices[3] = -1.0f;
_vertices[4] = -1.0f;
_vertices[5] = 1.0f;
_vertices[6] = 1.0f;
_vertices[7] = 1.0f;
}
- (void)updateTexcoords {
[self resetTexcoords];
glVertexAttribPointer(_av2Texcoord, 2, GL_FLOAT, GL_FALSE, 0, _texcoords);
glEnableVertexAttribArray(_av2Texcoord);
}
- (void)resetTexcoords {
_texcoords[0] = 0.0f;
_texcoords[1] = 1.0f;
_texcoords[2] = 1.0f;
_texcoords[3] = 1.0f;
_texcoords[4] = 0.0f;
_texcoords[5] = 0.0f;
_texcoords[6] = 1.0f;
_texcoords[7] = 0.0f;
}
The .fsh file:
precision highp float;
varying highp vec2 vv2_Texcoord;
uniform mat3 um3_ColorConversion;
uniform lowp sampler2D us2_SamplerX;
uniform lowp sampler2D us2_SamplerY;
uniform lowp sampler2D us2_SamplerZ;
void main() {
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = (texture2D(us2_SamplerX, vv2_Texcoord).r - (16.0/255.0));
yuv.y = (texture2D(us2_SamplerY, vv2_Texcoord).r - 0.5);
yuv.z = (texture2D(us2_SamplerZ, vv2_Texcoord).r - 0.5);
rgb = um3_ColorConversion * yuv;
gl_FragColor = vec4(rgb, 1.0);
}
.vsh file:
precision highp float;
varying highp vec2 vv2_Texcoord;
uniform lowp sampler2D us2_SamplerX;
void main() {
gl_FragColor = vec4(texture2D(us2_SamplerX, vv2_Texcoord).rgb, 1);
}
The RGB image: (screenshot omitted)
Update, adding an image rendered with GL_NEAREST: (screenshot omitted)

Separate Effects Superpowered

I'm working with the SuperpoweredSDK. The SDK comes with an example called SuperpoweredCrossExample which shows off a few of the DJ-esque features. I'm trying to modify it to allow fx (such as filters, rolls, and flanger) to be applied to one player at a time instead of both simultaneously.
I've attempted in the code below to have a different buffer for each player and then merge them using the SuperpoweredStereoMixer. Although this seems to work for many others in sample code I've found, for me it continues to crash on the "mixer" line. Any help would be very much appreciated; I've been spinning on this for the last few weeks.
#import "ViewController.h"
#import "SuperpoweredAdvancedAudioPlayer.h"
#import "SuperpoweredFilter.h"
#import "SuperpoweredRoll.h"
#import "SuperpoweredFlanger.h"
#import "SuperpoweredIOSAudioIO.h"
#import "SuperpoweredSimple.h"
#import "SuperpoweredMixer.h"
#import <stdlib.h>
#import <pthread.h>
#define HEADROOM_DECIBEL 3.0f
static const float headroom = powf(10.0f, -HEADROOM_DECIBEL * 0.025);
/*
This is a .mm file, meaning it's Objective-C++.
You can perfectly mix it with Objective-C or Swift, as long as you keep the member variables and C++-related includes here.
Yes, the header file (.h) isn't the only place for member variables.
*/
@implementation ViewController {
SuperpoweredAdvancedAudioPlayer *playerA, *playerB;
SuperpoweredIOSAudioIO *output;
SuperpoweredRoll *roll;
SuperpoweredFilter *filter;
SuperpoweredFlanger *flanger;
SuperpoweredStereoMixer *mixer;
unsigned char activeFx;
float *stereoBufferA, *stereoBufferB, crossValue, volA, volB;
unsigned int lastSamplerate;
pthread_mutex_t mutex;
}
void playerEventCallbackA(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
ViewController *self = (__bridge ViewController *)clientData;
self->playerA->setBpm(126.0f);
self->playerA->setFirstBeatMs(353);
self->playerA->setPosition(self->playerA->firstBeatMs, false, false);
};
}
void playerEventCallbackB(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
ViewController *self = (__bridge ViewController *)clientData;
self->playerB->setBpm(123.0f);
self->playerB->setFirstBeatMs(40);
self->playerB->setPosition(self->playerB->firstBeatMs, false, false);
};
}
- (void)viewDidLoad {
[super viewDidLoad];
lastSamplerate = activeFx = 0;
crossValue = volB = 0.0f;
volA = 1.0f * headroom;
pthread_mutex_init(&mutex, NULL); // This will keep our player volumes and playback states in sync.
if (posix_memalign((void **)&stereoBufferA, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
if (posix_memalign((void **)&stereoBufferB, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
playerA = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackA, 44100, 0);
playerA->open([[[NSBundle mainBundle] pathForResource:@"lycka" ofType:@"mp3"] fileSystemRepresentation]);
playerB = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackB, 44100, 0);
playerB->open([[[NSBundle mainBundle] pathForResource:@"nuyorica" ofType:@"m4a"] fileSystemRepresentation]);
playerA->syncMode = playerB->syncMode = SuperpoweredAdvancedAudioPlayerSyncMode_TempoAndBeat;
roll = new SuperpoweredRoll(44100);
filter = new SuperpoweredFilter(SuperpoweredFilter_Resonant_Lowpass, 44100);
flanger = new SuperpoweredFlanger(44100);
output = [[SuperpoweredIOSAudioIO alloc] initWithDelegate:(id<SuperpoweredIOSAudioIODelegate>)self preferredBufferSize:12 preferredMinimumSamplerate:44100 audioSessionCategory:AVAudioSessionCategoryPlayback channels:2];
[output start];
}
- (void)dealloc {
delete playerA;
delete playerB;
free(stereoBufferA);
free(stereoBufferB);
pthread_mutex_destroy(&mutex);
#if !__has_feature(objc_arc)
[output release];
[super dealloc];
#endif
}
- (void)interruptionStarted {}
- (void)recordPermissionRefused {}
- (void)interruptionEnded { // If a player plays Apple Lossless audio files, then we need this. Otherwise unnecessary.
playerA->onMediaserverInterrupt();
playerB->onMediaserverInterrupt();
}
// This is where the Superpowered magic happens.
- (bool)audioProcessingCallback:(float **)buffers inputChannels:(unsigned int)inputChannels outputChannels:(unsigned int)outputChannels numberOfSamples:(unsigned int)numberOfSamples samplerate:(unsigned int)samplerate hostTime:(UInt64)hostTime {
if (samplerate != lastSamplerate) { // Has samplerate changed?
lastSamplerate = samplerate;
playerA->setSamplerate(samplerate);
playerB->setSamplerate(samplerate);
roll->setSamplerate(samplerate);
filter->setSamplerate(samplerate);
flanger->setSamplerate(samplerate);
};
pthread_mutex_lock(&mutex);
bool masterIsA = (crossValue <= 0.5f);
float masterBpm = masterIsA ? playerA->currentBpm : playerB->currentBpm; // Players will sync to this tempo.
double msElapsedSinceLastBeatA = playerA->msElapsedSinceLastBeat; // When playerB needs it, playerA has already stepped this value, so save it now.
bool silence = !playerA->process(stereoBufferA, false, numberOfSamples, volA, masterBpm, playerB->msElapsedSinceLastBeat);
if (playerB->process(stereoBufferB, !silence, numberOfSamples, volB, masterBpm, msElapsedSinceLastBeatA)) silence = false;
roll->bpm = flanger->bpm = masterBpm; // Syncing fx is one line.
if (roll->process(silence ? NULL : stereoBufferA, stereoBufferA, numberOfSamples) && silence) silence = false;
if (!silence) {
filter->process(stereoBufferB, stereoBufferB, numberOfSamples);
flanger->process(stereoBufferB, stereoBufferB, numberOfSamples);
};
pthread_mutex_unlock(&mutex);
float *mixerInputs[4] = { stereoBufferA, stereoBufferB, NULL, NULL };
float mixerInputLevels[8] = { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f };
float mixerOutputLevels[2] = { 1.0f, 1.0f };
if (!silence) mixer->process(mixerInputs, buffers, mixerInputLevels, mixerOutputLevels, NULL, NULL, numberOfSamples);
/*if (!silence) SuperpoweredDeInterleave(stereoBuffer, buffers[0], buffers[1], numberOfSamples); // The stereoBuffer is ready now, let's put the finished audio into the requested buffers.*/
return !silence;
}
- (IBAction)onPlayPause:(id)sender {
UIButton *button = (UIButton *)sender;
pthread_mutex_lock(&mutex);
if (playerA->playing) {
playerA->pause();
playerB->pause();
} else {
bool masterIsA = (crossValue <= 0.5f);
playerA->play(!masterIsA);
playerB->play(masterIsA);
};
pthread_mutex_unlock(&mutex);
button.selected = playerA->playing;
}
- (IBAction)onCrossFader:(id)sender {
pthread_mutex_lock(&mutex);
crossValue = ((UISlider *)sender).value;
if (crossValue < 0.01f) {
volA = 1.0f * headroom;
volB = 0.0f;
} else if (crossValue > 0.99f) {
volA = 0.0f;
volB = 1.0f * headroom;
} else { // constant power curve
volA = cosf(M_PI_2 * crossValue) * headroom;
volB = cosf(M_PI_2 * (1.0f - crossValue)) * headroom;
};
pthread_mutex_unlock(&mutex);
}
static inline float floatToFrequency(float value) {
static const float min = logf(20.0f) / logf(10.0f);
static const float max = logf(20000.0f) / logf(10.0f);
static const float range = max - min;
return powf(10.0f, value * range + min);
}
- (IBAction)onFxSelect:(id)sender {
activeFx = ((UISegmentedControl *)sender).selectedSegmentIndex;
}
- (IBAction)onFxValue:(id)sender {
float value = ((UISlider *)sender).value;
switch (activeFx) {
case 1:
filter->setResonantParameters(floatToFrequency(1.0f - value), 0.1f);
filter->enable(true);
flanger->enable(false);
roll->enable(false);
break;
case 2:
if (value > 0.8f) roll->beats = 0.0625f;
else if (value > 0.6f) roll->beats = 0.125f;
else if (value > 0.4f) roll->beats = 0.25f;
else if (value > 0.2f) roll->beats = 0.5f;
else roll->beats = 1.0f;
roll->enable(true);
filter->enable(false);
flanger->enable(false);
break;
default:
flanger->setWet(value);
flanger->enable(true);
filter->enable(false);
roll->enable(false);
};
}
- (IBAction)onFxOff:(id)sender {
filter->enable(false);
roll->enable(false);
flanger->enable(false);
}
@end

UInt8 EXC_BAD_ACCESS

I have a method that will add a filter to an image. This worked fine until a couple of months ago; now, when I try to use this method, the application crashes on the image's buffer. I create the buffer and set it to the image's data, and accessing a specific index later causes a bad access crash. I have looked for the past hour or two, and I am now convinced there is something I'm overlooking. I think something is being released that should not be. I am using the iOS DP 4 preview of Xcode, and I think this problem started with the update to the beta, but I am really not sure.
This is the line it crashes on, located near the middle of the first for loop:
m_PixelBuf[index+2] = m_PixelBuf[index+2]/*aRed*/;
Normally it is set to aRed, which I have checked, and it should not go outside the buffer's boundaries.
-(void)contrastWithContrast:(float )contrast colorWithColor:(float )color{
drawImage.image = original;
UIImage * unfilteredImage2 = [[[UIImage alloc]initWithCGImage:drawImage.image.CGImage] autorelease];
CGImageRef inImage = unfilteredImage2.CGImage;
CGContextRef ctx;
CFDataRef m_DataRef;
m_DataRef = CGDataProviderCopyData(CGImageGetDataProvider(inImage));
UInt8 * m_PixelBuf = (UInt8 *) CFDataGetBytePtr(m_DataRef);
int length = CFDataGetLength(m_DataRef);
NSLog(#"Photo Length: %i",length);
//////Contrast/////////////
//NSLog(#"Contrast:%f",contrast);
int aRed;
int aGreen;
int aBlue;
for (int index = 0; index < length; index += 4){
aRed = m_PixelBuf[index+2];
aGreen = m_PixelBuf[index+1];
aBlue = m_PixelBuf[index];
aRed = (((aRed-128)*(contrast+100) )/100) + 128;
if (aRed < 0) aRed = 0; if (aRed>255) aRed=255;
m_PixelBuf[index+2] = m_PixelBuf[index+2]/*aRed*/;//Always crashes here
aGreen = (((aGreen-128)*(contrast+100) )/100) + 128;
if (aGreen < 0) aGreen = 0; if (aGreen>255) aGreen=255;
m_PixelBuf[index+1] = aGreen;
aBlue = (((aBlue-128)*(contrast+100) )/100) + 128;
if (aBlue < 0) aBlue = 0; if (aBlue>255) aBlue=255;
m_PixelBuf[index] = aBlue;
}
ctx = CGBitmapContextCreate(m_PixelBuf,
CGImageGetWidth( inImage ),
CGImageGetHeight( inImage ),
CGImageGetBitsPerComponent(inImage),
CGImageGetBytesPerRow(inImage ),
CGImageGetColorSpace(inImage ),
CGImageGetBitmapInfo(inImage) );
CGImageRef imageRef = CGBitmapContextCreateImage (ctx);
UIImage* rawImage = [[UIImage alloc]initWithCGImage:imageRef];
drawImage.image = rawImage;
[rawImage release];
CGContextRelease(ctx);
CFRelease(imageRef);
CFRelease(m_DataRef);
unfilteredImage2 = [[[UIImage alloc]initWithCGImage:drawImage.image.CGImage] autorelease];
inImage = unfilteredImage2.CGImage;
m_DataRef = CGDataProviderCopyData(CGImageGetDataProvider(inImage));
m_PixelBuf = (UInt8 *) CFDataGetBytePtr(m_DataRef);
length = CFDataGetLength(m_DataRef);
///////Color////////////////
for (int index = 0; index < length; index += 4)
{
//Blue
if((m_PixelBuf[index] + ((int)color * 2))>255){
m_PixelBuf[index] = 255;
}else if((m_PixelBuf[index] + ((int)color * 2))<0){
m_PixelBuf[index] = 0;
}
else{
m_PixelBuf[index]=m_PixelBuf[index] + ((int)color * 2);
}
//Green
if((m_PixelBuf[index+1] + ((int)color * 2))>255){
m_PixelBuf[index+1] = 255;
}else if((m_PixelBuf[index+1] + ((int)color * 2))<0){
m_PixelBuf[index+1] = 0;
}
else{
m_PixelBuf[index+1]=m_PixelBuf[index+1] + ((int)color * 2);
}
//Red
if((m_PixelBuf[index+2] + ((int)color * 2))>255){
m_PixelBuf[index+2] = 255;
}else if((m_PixelBuf[index+2] + ((int)color * 2))<0){
m_PixelBuf[index+2] = 0;
}
else{
m_PixelBuf[index+2]=m_PixelBuf[index+2] + ((int)color * 2);
}
//m_PixelBuf[index+3]=255;//Alpha
}
ctx = CGBitmapContextCreate(m_PixelBuf,
CGImageGetWidth( inImage ),
CGImageGetHeight( inImage ),
CGImageGetBitsPerComponent(inImage),
CGImageGetBytesPerRow(inImage ),
CGImageGetColorSpace(inImage ),
CGImageGetBitmapInfo(inImage) );
imageRef = CGBitmapContextCreateImage (ctx);
rawImage = [[UIImage alloc]initWithCGImage:imageRef];
drawImage.image = rawImage;
[rawImage release];
CGContextRelease(ctx);
CFRelease(imageRef);
CFRelease(m_DataRef);
//drawImage.image = unfilteredImage2;
willUpdate = YES;
}
Sorry for any extra comments/info, I just copied the whole method in.
Thanks,
Storealutes
I had the same problem.
You should use the code below to get a pointer to the pixel buffer instead of CFDataGetBytePtr():
CGImageRef cgImage = originalImage.CGImage;
size_t width = CGImageGetWidth(cgImage);
size_t height = CGImageGetHeight(cgImage);
char *buffer = (char*)malloc(sizeof(char) * width * height * 4);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef cgContext = CGBitmapContextCreate(buffer, width, height, 8, width * 4, colorSpace, kCGImageAlphaPremultipliedLast);
CGContextSetBlendMode(cgContext, kCGBlendModeCopy);
CGContextDrawImage(cgContext, CGRectMake(0.0f, 0.0f, width, height), cgImage);
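// At this point buffer holds the image's RGBA8888 pixel data (width * height * 4 bytes);
// read or modify the pixels here before freeing the buffer below.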
free(buffer);
CGContextRelease(cgContext);
CGColorSpaceRelease(colorSpace);
