Separate Effects Superpowered - Xcode

I'm working with the Superpowered SDK. The SDK ships with an example called SuperpoweredCrossExample, which shows off a few of its DJ-style features. I'm trying to modify it to allow fx (such as filters, rolls, and flanger) to be applied to one player at a time instead of both simultaneously.
In the code below I've attempted to give each player its own buffer and then merge the two using the SuperpoweredStereoMixer. Although this approach seems to work in other sample code I've found, for me it continues to crash on the "mixer" line. Any help would be very much appreciated; I've been spinning on this for the last few weeks.
#import "ViewController.h"
#import "SuperpoweredAdvancedAudioPlayer.h"
#import "SuperpoweredFilter.h"
#import "SuperpoweredRoll.h"
#import "SuperpoweredFlanger.h"
#import "SuperpoweredIOSAudioIO.h"
#import "SuperpoweredSimple.h"
#import "SuperpoweredMixer.h"
#import <stdlib.h>
#import <pthread.h>
#define HEADROOM_DECIBEL 3.0f
static const float headroom = powf(10.0f, -HEADROOM_DECIBEL * 0.025);
/*
This is a .mm file, meaning it's Objective-C++.
You can perfectly mix it with Objective-C or Swift, as long as you keep the member variables and C++ related includes here.
Yes, the header file (.h) isn't the only place for member variables.
*/
@implementation ViewController {
SuperpoweredAdvancedAudioPlayer *playerA, *playerB;
SuperpoweredIOSAudioIO *output;
SuperpoweredRoll *roll;
SuperpoweredFilter *filter;
SuperpoweredFlanger *flanger;
SuperpoweredStereoMixer *mixer;
unsigned char activeFx;
float *stereoBufferA, *stereoBufferB, crossValue, volA, volB;
unsigned int lastSamplerate;
pthread_mutex_t mutex;
}
void playerEventCallbackA(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
ViewController *self = (__bridge ViewController *)clientData;
self->playerA->setBpm(126.0f);
self->playerA->setFirstBeatMs(353);
self->playerA->setPosition(self->playerA->firstBeatMs, false, false);
};
}
void playerEventCallbackB(void *clientData, SuperpoweredAdvancedAudioPlayerEvent event, void *value) {
if (event == SuperpoweredAdvancedAudioPlayerEvent_LoadSuccess) {
ViewController *self = (__bridge ViewController *)clientData;
self->playerB->setBpm(123.0f);
self->playerB->setFirstBeatMs(40);
self->playerB->setPosition(self->playerB->firstBeatMs, false, false);
};
}
- (void)viewDidLoad {
[super viewDidLoad];
lastSamplerate = activeFx = 0;
crossValue = volB = 0.0f;
volA = 1.0f * headroom;
pthread_mutex_init(&mutex, NULL); // This will keep our player volumes and playback states in sync.
if (posix_memalign((void **)&stereoBufferA, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
if (posix_memalign((void **)&stereoBufferB, 16, 4096 + 128) != 0) abort(); // Allocating memory, aligned to 16.
playerA = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackA, 44100, 0);
playerA->open([[[NSBundle mainBundle] pathForResource:@"lycka" ofType:@"mp3"] fileSystemRepresentation]);
playerB = new SuperpoweredAdvancedAudioPlayer((__bridge void *)self, playerEventCallbackB, 44100, 0);
playerB->open([[[NSBundle mainBundle] pathForResource:@"nuyorica" ofType:@"m4a"] fileSystemRepresentation]);
playerA->syncMode = playerB->syncMode = SuperpoweredAdvancedAudioPlayerSyncMode_TempoAndBeat;
roll = new SuperpoweredRoll(44100);
filter = new SuperpoweredFilter(SuperpoweredFilter_Resonant_Lowpass, 44100);
flanger = new SuperpoweredFlanger(44100);
output = [[SuperpoweredIOSAudioIO alloc] initWithDelegate:(id<SuperpoweredIOSAudioIODelegate>)self preferredBufferSize:12 preferredMinimumSamplerate:44100 audioSessionCategory:AVAudioSessionCategoryPlayback channels:2];
[output start];
}
- (void)dealloc {
delete playerA;
delete playerB;
free(stereoBufferA);
free(stereoBufferB);
pthread_mutex_destroy(&mutex);
#if !__has_feature(objc_arc)
[output release];
[super dealloc];
#endif
}
- (void)interruptionStarted {}
- (void)recordPermissionRefused {}
- (void)interruptionEnded { // If a player plays Apple Lossless audio files, then we need this. Otherwise unnecessary.
playerA->onMediaserverInterrupt();
playerB->onMediaserverInterrupt();
}
// This is where the Superpowered magic happens.
- (bool)audioProcessingCallback:(float **)buffers inputChannels:(unsigned int)inputChannels outputChannels:(unsigned int)outputChannels numberOfSamples:(unsigned int)numberOfSamples samplerate:(unsigned int)samplerate hostTime:(UInt64)hostTime {
if (samplerate != lastSamplerate) { // Has samplerate changed?
lastSamplerate = samplerate;
playerA->setSamplerate(samplerate);
playerB->setSamplerate(samplerate);
roll->setSamplerate(samplerate);
filter->setSamplerate(samplerate);
flanger->setSamplerate(samplerate);
};
pthread_mutex_lock(&mutex);
bool masterIsA = (crossValue <= 0.5f);
float masterBpm = masterIsA ? playerA->currentBpm : playerB->currentBpm; // Players will sync to this tempo.
double msElapsedSinceLastBeatA = playerA->msElapsedSinceLastBeat; // When playerB needs it, playerA has already stepped this value, so save it now.
bool silence = !playerA->process(stereoBufferA, false, numberOfSamples, volA, masterBpm, playerB->msElapsedSinceLastBeat);
if (playerB->process(stereoBufferB, !silence, numberOfSamples, volB, masterBpm, msElapsedSinceLastBeatA)) silence = false;
roll->bpm = flanger->bpm = masterBpm; // Syncing fx is one line.
if (roll->process(silence ? NULL : stereoBufferA, stereoBufferA, numberOfSamples) && silence) silence = false;
if (!silence) {
filter->process(stereoBufferB, stereoBufferB, numberOfSamples);
flanger->process(stereoBufferB, stereoBufferB, numberOfSamples);
};
pthread_mutex_unlock(&mutex);
float *mixerInputs[4] = { stereoBufferA, stereoBufferB, NULL, NULL };
float mixerInputLevels[8] = { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f };
float mixerOutputLevels[2] = { 1.0f, 1.0f };
if (!silence) mixer->process(mixerInputs, buffers, mixerInputLevels, mixerOutputLevels, NULL, NULL, numberOfSamples);
/*if (!silence) SuperpoweredDeInterleave(stereoBuffer, buffers[0], buffers[1], numberOfSamples); // The stereoBuffer is ready now, let's put the finished audio into the requested buffers.*/
return !silence;
}
- (IBAction)onPlayPause:(id)sender {
UIButton *button = (UIButton *)sender;
pthread_mutex_lock(&mutex);
if (playerA->playing) {
playerA->pause();
playerB->pause();
} else {
bool masterIsA = (crossValue <= 0.5f);
playerA->play(!masterIsA);
playerB->play(masterIsA);
};
pthread_mutex_unlock(&mutex);
button.selected = playerA->playing;
}
- (IBAction)onCrossFader:(id)sender {
pthread_mutex_lock(&mutex);
crossValue = ((UISlider *)sender).value;
if (crossValue < 0.01f) {
volA = 1.0f * headroom;
volB = 0.0f;
} else if (crossValue > 0.99f) {
volA = 0.0f;
volB = 1.0f * headroom;
} else { // constant power curve
volA = cosf(M_PI_2 * crossValue) * headroom;
volB = cosf(M_PI_2 * (1.0f - crossValue)) * headroom;
};
pthread_mutex_unlock(&mutex);
}
static inline float floatToFrequency(float value) {
static const float min = logf(20.0f) / logf(10.0f);
static const float max = logf(20000.0f) / logf(10.0f);
static const float range = max - min;
return powf(10.0f, value * range + min);
}
- (IBAction)onFxSelect:(id)sender {
activeFx = ((UISegmentedControl *)sender).selectedSegmentIndex;
}
- (IBAction)onFxValue:(id)sender {
float value = ((UISlider *)sender).value;
switch (activeFx) {
case 1:
filter->setResonantParameters(floatToFrequency(1.0f - value), 0.1f);
filter->enable(true);
flanger->enable(false);
roll->enable(false);
break;
case 2:
if (value > 0.8f) roll->beats = 0.0625f;
else if (value > 0.6f) roll->beats = 0.125f;
else if (value > 0.4f) roll->beats = 0.25f;
else if (value > 0.2f) roll->beats = 0.5f;
else roll->beats = 1.0f;
roll->enable(true);
filter->enable(false);
flanger->enable(false);
break;
default:
flanger->setWet(value);
flanger->enable(true);
filter->enable(false);
roll->enable(false);
};
}
- (IBAction)onFxOff:(id)sender {
filter->enable(false);
roll->enable(false);
flanger->enable(false);
}
@end
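One thing jumps out of the listing: mixer is dereferenced in the audio processing callback but is never allocated anywhere, so the crash on the "mixer" line is most likely a call through an uninitialized pointer. A minimal sketch of the missing setup, assuming the parameterless SuperpoweredStereoMixer constructor declared in SuperpoweredMixer.h (worth verifying against your SDK version):
mixer = new SuperpoweredStereoMixer(); // in viewDidLoad, next to the players and effects
...
delete mixer; // and in dealloc, next to the players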

Related

OpenGL 3.3 (mac) Error validating program: Validation Failed: No vertex array object bound

The following code compiles and runs without errors on Linux, but on macOS 10.14.2 (Mojave) it gives the error
"Error validating program: 'Validation Failed: No vertex array object bound.'"
Note that the program compiles successfully but has a problem at runtime.
MacBook Pro (Retina, 15-inch, Mid 2015)
I am compiling using g++ -std=c++11 test.cpp -w -framework OpenGL -lglfw -lGLEW -o p
test.cpp
#include <bits/stdc++.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
using namespace std;
#define cout(a) cout<<a<<endl
// IDs
GLuint VAO, VBO, VAO2, VBO2, shaderID, uniformModel;
float scale = 1.0, x = 0.0, y = 0.0;
const int numPoints = 50000;
const char* vShader = "shader.vert";
const char* fShader = "shader.frag";
void createSierpinskiGasket()
{
GLfloat points[3 * numPoints];
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f
};
points[0] = 0.25f; points[1] = 0.50f; points[2] = 0.0f;
for(int i = 3; i < numPoints * 3; i += 3)
{
int j = rand() % 3;
points[i] = (points[i - 3] + vertices[j * 3]) / 2.0;
points[i + 1] = (points[i - 2] + vertices[j * 3 + 1]) / 2.0;
points[i + 2] = (points[i - 1] + vertices[j * 3 + 2]) / 2.0;
}
glGenVertexArrays(1, &VAO2);
glBindVertexArray(VAO2);
glGenBuffers(1, &VBO2);
glBindBuffer(GL_ARRAY_BUFFER, VBO2);
glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void createTriangle()
{
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glGenVertexArrays(1, &VAO);
// Subsequent code will be associated with this VAO
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
// GL_ARRAY_BUFFER = Vertex data
glBindBuffer(GL_ARRAY_BUFFER, VBO);
// GL_STATIC_DRAW = Not going to change the data (transforms are OK)
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
// Location, number, type, normalize, stride, offset
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Enable location 0
glEnableVertexAttribArray(0);
// Unbinding
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void addShader(const char* shaderIDCode, GLenum shaderIDType)
{
GLuint theShader = glCreateShader(shaderIDType);
const GLchar* theCode[1];
theCode[0] = shaderIDCode;
GLint codeLength[1];
codeLength[0] = strlen(shaderIDCode);
glShaderSource(theShader, 1, theCode, codeLength);
glCompileShader(theShader);
GLint result = 0;
GLchar eLog[1024] = { 0 };
glGetShaderiv(theShader, GL_COMPILE_STATUS, &result);
if (!result)
{
glGetShaderInfoLog(theShader, sizeof(eLog), NULL, eLog);
printf("Error compiling the %d shaderID: '%s'\n", shaderIDType, eLog);
return;
}
glAttachShader(shaderID, theShader);
}
void compileShader(const char* vertexCode, const char* fragmentCode)
{
// Creating shaderID program
shaderID = glCreateProgram();
if(!shaderID)
{
cout("Error creating shaderID.");
return;
}
addShader(vertexCode, GL_VERTEX_SHADER);
addShader(fragmentCode, GL_FRAGMENT_SHADER);
GLint result = 0;
GLchar eLog[1024] = { 0 };
glLinkProgram(shaderID);
glGetProgramiv(shaderID, GL_LINK_STATUS, &result);
if (!result)
{
glGetProgramInfoLog(shaderID, sizeof(eLog), NULL, eLog);
printf("Error linking program: '%s'\n", eLog);
return;
}
glValidateProgram(shaderID);
glGetProgramiv(shaderID, GL_VALIDATE_STATUS, &result);
if (!result)
{
glGetProgramInfoLog(shaderID, sizeof(eLog), NULL, eLog);
printf("Error validating program: '%s'\n", eLog);
return;
}
}
string readFile(const char* fileLocation)
{
string content;
ifstream fileStream(fileLocation, ios::in);
if (!fileStream.is_open()) {
printf("Failed to read %s! File doesn't exist.", fileLocation);
return "";
}
string line = "";
while (!fileStream.eof())
{
getline(fileStream, line);
content.append(line + "\n");
}
fileStream.close();
return content;
}
void createShader(const char* vertexLocation, const char* fragmentLocation)
{
string vertexString = readFile(vertexLocation);
string fragmentString = readFile(fragmentLocation);
const char* vertexCode = vertexString.c_str();
const char* fragmentCode = fragmentString.c_str();
compileShader(vertexCode, fragmentCode);
}
void handleKeys(GLFWwindow* window, int key, int code, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
if (key == GLFW_KEY_EQUAL && action == GLFW_PRESS)
{
scale += 0.05;
}
if (key == GLFW_KEY_MINUS && action == GLFW_PRESS)
{
scale -= 0.05;
}
if (key == GLFW_KEY_LEFT && action == GLFW_PRESS)
{
x -= 0.05;
}
if (key == GLFW_KEY_RIGHT && action == GLFW_PRESS)
{
x += 0.05;
}
if (key == GLFW_KEY_UP && action == GLFW_PRESS)
{
y += 0.05;
}
if (key == GLFW_KEY_DOWN && action == GLFW_PRESS)
{
y -= 0.05;
}
}
int main(void)
{
const GLint WIDTH = 800, HEIGHT = 600;
// Initializing GLFW
if(!glfwInit())
{
cout("GLFW initialization failed.");
glfwTerminate();
return 1;
}
// Setup GLFW window properties
// OpenGL version
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
// Not backwards compatible
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Allow forward compatibility
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow* mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "Test Window", NULL, NULL);
if(!mainWindow)
{
cout("GLFW window creation failed.");
glfwTerminate();
return 1;
}
// Get buffer size information
int bufferWidth, bufferHeight;
glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);
// Set context for GLEW to use
glfwMakeContextCurrent(mainWindow);
// Allow modern extension features
glewExperimental = GL_TRUE;
if(glewInit() != GLEW_OK)
{
cout("GLEW initialization failed.");
glfwDestroyWindow(mainWindow);
glfwTerminate();
return 1;
}
// Setup viewport size
glViewport(0, 0, bufferWidth, bufferHeight);
createTriangle();
createShader(vShader, fShader);
createSierpinskiGasket();
uniformModel = glGetUniformLocation(shaderID, "model");
// Loop until window is closed
while(!glfwWindowShouldClose(mainWindow))
{
// Get and handle user input
glfwPollEvents();
glfwSetKeyCallback(mainWindow, handleKeys);
// Clear window
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
// Clear colour buffer before next frame
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shaderID);
glm::mat4 model = glm::mat4();
model = glm::translate(model, glm::vec3(x, y, 0));
//model = glm::rotate(model, rotX * toRadians, glm::vec3(1, 0, 0));
//model = glm::rotate(model, rotY * toRadians, glm::vec3(0, 1, 0));
//model = glm::rotate(model, rotZ * toRadians, glm::vec3(0, 0, 1));
model = glm::scale(model, glm::vec3(scale, scale, scale));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE, glm::value_ptr(model));
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
/*glBindVertexArray(VAO2);
glDrawArrays(GL_POINTS, 0, numPoints);
glBindVertexArray(0);*/
glUseProgram(0);
glfwSwapBuffers(mainWindow);
}
return 0;
}
shader.frag
#version 330
in vec4 vCol;
uniform mat4 model;
out vec4 color;
void main()
{
//color = vec4(1.0f, 1.0f, 0.0f, 1.0f);
color = vec4(vCol.x, vCol.y, 0.5, 1.0);
}
shader.vert
#version 330
layout (location = 0) in vec3 pos;
uniform mat4 model;
out vec4 vCol;
void main()
{
gl_Position = model * vec4(pos.x, pos.y, pos.z, 1.0f);
vCol = vec4(clamp(pos, 0.0f, 1.0f), 1.0f);
}
The message
Validation Failed: No vertex array object bound.
means that the validation of the program could not be performed, because no Vertex Array Object was bound when glValidateProgram was called.
See OpenGL 4.6 API Core Profile Specification; 11.1. VERTEX SHADERS; page 402
[...] As a development aid, use the command
void ValidateProgram( uint program );
to validate the program object program against the current GL state.
This means that the VAO which is to be drawn by the shader program has to be bound before glValidateProgram is called.
Bind the "triangle" VAO, before the shader program is validated:
createTriangle();
glBindVertexArray(VAO);
createShader(vShader, fShader);
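Alternatively, if you'd rather leave createShader untouched, the validation can be moved to the point in the render loop where the state it checks is actually complete. A sketch, reusing the shaderID and VAO globals from the listing above:
glUseProgram(shaderID);
glBindVertexArray(VAO);
glValidateProgram(shaderID); // validates against the now-complete state
GLint valid = 0;
glGetProgramiv(shaderID, GL_VALIDATE_STATUS, &valid);
if (!valid) printf("program failed validation\n");
glDrawArrays(GL_TRIANGLES, 0, 3);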

How to change clarity of an h264 video which decoded by FFMPEG and rendered by opengl

I'm writing a movie player that uses FFmpeg and OpenGL ES. The movie decodes successfully, but when I use the AVFrame as a texture to draw on screen, the image is very fuzzy. I don't know what is wrong in my code. If I convert the AVFrame from YUV to an RGB image, it renders clearly.
Does anyone know why drawing with the YUV planes as textures is not clear?
My render code:
#import "SJGLView.h"
#import <GLKit/GLKit.h>
#import "SJDecoder.h"
#include "libavutil/pixfmt.h"
// MARK: - C Function
static void sj_logShaderError(GLuint shader) {
GLint info_len = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_len);
if (info_len == 0) NSLog(@"Empty info");
else {
GLchar *log = (GLchar *)malloc(info_len);
glGetShaderInfoLog(shader, info_len, &info_len, log);
NSLog(@"Shader compile log: %s", log);
free(log);
}
}
static void sj_logProgramError(GLuint program) {
int info_length;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &info_length);
if (info_length) {
GLchar *log = (GLchar *)malloc(info_length);
glGetProgramInfoLog(program, info_length, &info_length, log);
NSLog(@"Program link log: %s", log);
free(log);
}
}
GLuint sj_loadShader(GLenum shader_type, const char* shader_source) {
GLuint shader = glCreateShader(shader_type);
glShaderSource(shader, 1, &shader_source, NULL);
glCompileShader(shader);
GLint compile_status = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
if (!compile_status) goto fail;
return shader;
fail:
if (shader) {
sj_logShaderError(shader);
glDeleteShader(shader);
}
return 0;
}
void loadOrtho(float *matrix, float left, float right, float bottom, float top, float near, float far) {
float r_l = right - left;
float t_b = top - bottom;
float f_n = far - near;
float tx = -(right + left)/(right - left); // note the minus signs: the translation terms of an
float ty = -(top + bottom)/(top - bottom); // orthographic projection matrix are negated
float tz = -(far + near)/(far - near);
matrix[0] = 2.0f / r_l;
matrix[1] = 0.0f;
matrix[2] = 0.0f;
matrix[3] = 0.0f;
matrix[4] = 0.0f;
matrix[5] = 2.0f / t_b;
matrix[6] = 0.0f;
matrix[7] = 0.0f;
matrix[8] = 0.0f;
matrix[9] = 0.0f;
matrix[10] = -2.0f / f_n;
matrix[11] = 0.0f;
matrix[12] = tx;
matrix[13] = ty;
matrix[14] = tz;
matrix[15] = 1.0f;
}
// BT.709, standard for HDTV
static const GLfloat g_bt709[] = {
1.164, 1.164, 1.164,
0.0, -0.213, 2.112,
1.793, -0.533, 0.0,
};
const GLfloat *getColorMatrix_bt709() {
return g_bt709;
}
enum {
ATTRIBUTE_VERTEX,
ATTRIBUTE_TEXCOORD,
};
@implementation SJGLView {
EAGLContext *_context;
GLuint _framebuffer;
GLuint _renderbuffer;
GLint _backingWidth;
GLint _backingHeight;
GLfloat _vertices[8];
GLuint _program;
GLuint _av4Position;
GLuint _av2Texcoord;
GLuint _um4Mvp;
GLfloat _texcoords[8];
GLuint _us2Sampler[3];
GLuint _um3ColorConversion;
GLuint _textures[3];
SJDecoder *_decoder;
}
+ (Class)layerClass {
return [CAEAGLLayer class];
}
- (instancetype)initWithFrame:(CGRect)frame decoder:(SJDecoder *)decoder {
self = [super initWithFrame:frame];
if (self) {
_decoder = decoder;
[self setupGL];
}
return self;
}
- (void)layoutSubviews {
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer*)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
[self updateVertices];
[self render: nil];
}
- (void)setContentMode:(UIViewContentMode)contentMode
{
[super setContentMode:contentMode];
[self updateVertices];
[self render:nil];
}
- (void)setupGL {
_context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
NSAssert(_context != nil, @"Failed to init EAGLContext");
CAEAGLLayer *eaglLayer= (CAEAGLLayer *)self.layer;
eaglLayer.opaque = YES;
eaglLayer.drawableProperties = @{
kEAGLDrawablePropertyRetainedBacking: [NSNumber numberWithBool:YES],
kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
};
[EAGLContext setCurrentContext:_context];
if ([self setupEAGLContext]) {
NSLog(@"EAGLContext set up successfully");
if ([self loadShaders]) {
NSLog(@"Shaders loaded successfully");
_us2Sampler[0] = glGetUniformLocation(_program, "us2_SamplerX");
_us2Sampler[1] = glGetUniformLocation(_program, "us2_SamplerY");
_us2Sampler[2] = glGetUniformLocation(_program, "us2_SamplerZ");
_um3ColorConversion = glGetUniformLocation(_program, "um3_ColorConversion");
}
}
}
- (BOOL)setupEAGLContext {
glGenFramebuffers(1, &_framebuffer);
glGenRenderbuffers(1, &_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, _framebuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer *)self.layer];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _renderbuffer);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
NSLog(@"Failed to make complete framebuffer object: %x", status);
return NO;
}
GLenum glError = glGetError();
if (glError != GL_NO_ERROR) {
NSLog(@"Failed to setup EAGLContext: %x", glError);
return NO;
}
return YES;
}
- (BOOL)loadShaders {
NSString *vertexPath = [[NSBundle mainBundle] pathForResource:@"vertex" ofType:@"vsh"];
const char *vertexString = [[NSString stringWithContentsOfFile:vertexPath encoding:NSUTF8StringEncoding error:nil] UTF8String];
NSString *fragmentPath = _decoder.format == SJVideoFrameFormatYUV ? [[NSBundle mainBundle] pathForResource:@"yuv420p" ofType:@"fsh"] :
[[NSBundle mainBundle] pathForResource:@"rgb" ofType:@"fsh"];
const char *fragmentString = [[NSString stringWithContentsOfFile:fragmentPath encoding:NSUTF8StringEncoding error:nil] UTF8String];
GLuint vertexShader = sj_loadShader(GL_VERTEX_SHADER, vertexString);
GLuint fragmentShader = sj_loadShader(GL_FRAGMENT_SHADER, fragmentString);
_program = glCreateProgram();
glAttachShader(_program, vertexShader);
glAttachShader(_program, fragmentShader);
glLinkProgram(_program);
GLint link_status = GL_FALSE;
glGetProgramiv(_program, GL_LINK_STATUS, &link_status);
if(!link_status) goto fail;
_av4Position = glGetAttribLocation(_program, "av4_Position");
_av2Texcoord = glGetAttribLocation(_program, "av2_Texcoord");
_um4Mvp = glGetUniformLocation(_program, "um4_ModelViewProjection");
return YES;
fail:
sj_logProgramError(_program);
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
glDeleteProgram(_program);
return NO;
}
- (void)useRenderer {
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glUseProgram(_program);
if (0 == _textures[0]) glGenTextures(3, _textures);
for (int i = 0; i < 3; i++) {
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glUniform1i(_us2Sampler[i], i);
}
glUniformMatrix3fv(_um3ColorConversion, 1, GL_FALSE, getColorMatrix_bt709());
}
- (void)uploadTexture:(SJVideoFrame *)frame {
if (frame.format == SJVideoFrameFormatYUV) {
SJVideoYUVFrame *yuvFrame = (SJVideoYUVFrame *)frame;
const GLubyte *pixel[3] = { yuvFrame.luma.bytes, yuvFrame.chromaB.bytes, yuvFrame.chromaR.bytes };
const GLsizei widths[3] = { yuvFrame.width, yuvFrame.width/2, yuvFrame.width/2 };
const GLsizei heights[3] = { yuvFrame.height, yuvFrame.height/2, yuvFrame.height/2 };
for (int i = 0; i < 3; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, widths[i], heights[i], 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, pixel[i]);
}
}
}
- (void)render:(SJVideoFrame *)frame {
[EAGLContext setCurrentContext:_context];
glUseProgram(_program);
[self useRenderer];
GLfloat modelviewProj[16];
loadOrtho(modelviewProj, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f);
glUniformMatrix4fv(_um4Mvp, 1, GL_FALSE, modelviewProj);
[self updateVertices];
[self updateTexcoords];
glBindFramebuffer(GL_FRAMEBUFFER, _framebuffer);
glViewport(0, 0, _backingWidth, _backingHeight);
[self uploadTexture:frame];
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindRenderbuffer(GL_RENDERBUFFER, _renderbuffer);
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
- (void)updateVertices {
[self resetVertices];
BOOL fit = (self.contentMode == UIViewContentModeScaleAspectFit);
float width = _decoder.frameWidth;
float height = _decoder.frameHeight;
const float dW = (float)_backingWidth / width;
const float dH = (float)_backingHeight / height;
float dd = fit ? MIN(dH, dW) : MAX(dH, dW);
float nW = (width * dd / (float)_backingWidth);
float nH = (height * dd / (float)_backingHeight);
_vertices[0] = -nW;
_vertices[1] = -nH;
_vertices[2] = nW;
_vertices[3] = -nH;
_vertices[4] = -nW;
_vertices[5] = nH;
_vertices[6] = nW;
_vertices[7] = nH;
glVertexAttribPointer(_av4Position, 2, GL_FLOAT, GL_FALSE, 0, _vertices);
glEnableVertexAttribArray(_av4Position);
}
- (void)resetVertices {
_vertices[0] = -1.0f;
_vertices[1] = -1.0f;
_vertices[2] = 1.0f;
_vertices[3] = -1.0f;
_vertices[4] = -1.0f;
_vertices[5] = 1.0f;
_vertices[6] = 1.0f;
_vertices[7] = 1.0f;
}
- (void)updateTexcoords {
[self resetTexcoords];
glVertexAttribPointer(_av2Texcoord, 2, GL_FLOAT, GL_FALSE, 0, _texcoords);
glEnableVertexAttribArray(_av2Texcoord);
}
- (void)resetTexcoords {
_texcoords[0] = 0.0f;
_texcoords[1] = 1.0f;
_texcoords[2] = 1.0f;
_texcoords[3] = 1.0f;
_texcoords[4] = 0.0f;
_texcoords[5] = 0.0f;
_texcoords[6] = 1.0f;
_texcoords[7] = 0.0f;
}
.fsh file:
precision highp float;
varying highp vec2 vv2_Texcoord;
uniform mat3 um3_ColorConversion;
uniform lowp sampler2D us2_SamplerX;
uniform lowp sampler2D us2_SamplerY;
uniform lowp sampler2D us2_SamplerZ;
void main() {
mediump vec3 yuv;
lowp vec3 rgb;
yuv.x = (texture2D(us2_SamplerX, vv2_Texcoord).r - (16.0/255.0));
yuv.y = (texture2D(us2_SamplerY, vv2_Texcoord).r - 0.5);
yuv.z = (texture2D(us2_SamplerZ, vv2_Texcoord).r - 0.5);
rgb = um3_ColorConversion * yuv;
gl_FragColor = vec4(rgb, 1.0);
}
.vsh file:
precision highp float;
varying highp vec2 vv2_Texcoord;
uniform lowp sampler2D us2_SamplerX;
void main() {
gl_FragColor = vec4(texture2D(us2_SamplerX, vv2_Texcoord).rgb, 1);
}
The RGB image: [image]
Update, a capture using GL_NEAREST filtering: [image]
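One thing not shown in the listing that commonly produces exactly this kind of blur on Retina devices: a CAEAGLLayer-backed view defaults to a backing store sized in points, not pixels, so the renderbuffer ends up at a half or a third of the native resolution and is upscaled by the compositor. A sketch worth trying (contentScaleFactor is standard UIKit; placing it in initWithFrame:decoder: before setupGL is an assumption):
self.contentScaleFactor = [UIScreen mainScreen].scale; // size the EAGL backing store in native pixels
Since renderbufferStorage:fromDrawable: reads the layer's size and scale, this has to happen before the renderbuffer is (re)allocated.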

offline rendering with a lowpass filter causes aliasing and clipping

I have a buffer of 8 kHz samples, and I am trying to simply apply a lowpass filter to it. That is, I start with a buffer of 8 kHz samples and I want to end up with a buffer of 8 kHz LOWPASSED samples. If I hook up a lowpass unit, connect it to the default output unit, and supply my buffer, it sounds perfect and properly lowpassed. However, as soon as I remove the output and call AudioUnitRender on the lowpass audio unit directly, the resulting samples are aliased and clipped.
#import "EffectMachine.h"
#import <AudioToolbox/AudioToolbox.h>
#import "AudioHelpers.h"
#import "Buffer.h"
@interface EffectMachine ()
@property (nonatomic, strong) Buffer *buffer;
@end
typedef struct EffectPlayer {
NSUInteger index;
AudioUnit lowPassUnit;
__unsafe_unretained Buffer *buffer;
} EffectPlayer;
OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData);
OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData) {
struct EffectPlayer *player = (struct EffectPlayer *)inRefCon;
for (int i = 0; i < inNumberFrames; i++) {
float sample;
if (player->index < player->buffer.size) {
sample = (float)player->buffer.samples[player->index];
player->index += 1;
} else {
sample = 0;
}
((float *)ioData->mBuffers[0].mData)[i] = sample;
((float *)ioData->mBuffers[1].mData)[i] = sample;
}
return noErr;
}
@implementation EffectMachine {
EffectPlayer player;
}
-(instancetype)initWithBuffer:(Buffer *)buffer {
if (self = [super init]) {
self.buffer = buffer;
}
return self;
}
-(Buffer *)process {
struct EffectPlayer initialized = {0};
player = initialized;
player.buffer = self.buffer;
[self setupAudioUnits];
Buffer *buffer = [self processedBuffer];
[self cleanup];
return buffer;
}
-(void)setupAudioUnits {
AudioComponentDescription lowpasscd = {0};
lowpasscd.componentType = kAudioUnitType_Effect;
lowpasscd.componentSubType = kAudioUnitSubType_LowPassFilter;
lowpasscd.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent comp = AudioComponentFindNext(NULL, &lowpasscd);
if (comp == NULL) NSLog(@"can't get lowpass unit");
AudioComponentInstanceNew(comp, &player.lowPassUnit);
AURenderCallbackStruct input;
input.inputProc = EffectMachineCallbackRenderProc;
input.inputProcRefCon = &player;
CheckError(AudioUnitSetProperty(player.lowPassUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input)),
"AudioUnitSetProperty for callback failed");
CheckError(AudioUnitSetParameter(player.lowPassUnit,
kLowPassParam_CutoffFrequency,
kAudioUnitScope_Global,
0,
1500,
0), "AudioUnitSetParameter cutoff for lowpass failed");
CheckError(AudioUnitSetParameter(player.lowPassUnit,
kLowPassParam_Resonance,
kAudioUnitScope_Global,
0,
0,
0), "AudioUnitSetParameter resonance for lowpass failed");
CheckError(AudioUnitInitialize(player.lowPassUnit),
"Couldn't initialize lowpass unit");
}
-(Buffer *)processedBuffer {
AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer)); // AudioBufferList declares only mBuffers[1]; reserve room for the second buffer filled below
UInt32 blockSize = 1024;
float *left = malloc(sizeof(float) * blockSize);
float *right = malloc(sizeof(float) * blockSize);
bufferlist->mBuffers[0].mData = left;
bufferlist->mBuffers[1].mData = right;
UInt32 size = sizeof(float) * blockSize;
AudioTimeStamp inTimeStamp;
memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
inTimeStamp.mSampleTime = 0;
AudioUnitRenderActionFlags flag = 0;
NSUInteger length = ceil(self.buffer.size / (float)blockSize);
double *processed = malloc(sizeof(double) * blockSize * length);
for (int i = 0; i < length; i++) {
bufferlist->mBuffers[0].mDataByteSize = size;
bufferlist->mBuffers[1].mDataByteSize = size;
bufferlist->mNumberBuffers = 2;
inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
AudioUnitRender(player.lowPassUnit, &flag, &inTimeStamp, 0, blockSize, bufferlist);
for (NSUInteger j = 0; j < blockSize; j++) {
processed[j + (blockSize * i)] = left[j];
}
inTimeStamp.mSampleTime += blockSize;
}
Buffer *buffer = [[Buffer alloc] initWithSamples:processed size:self.buffer.size sampleRate:self.buffer.sampleRate];
free(bufferlist);
free(left);
free(right);
free(processed);
return buffer;
}
-(void)cleanup {
AudioOutputUnitStop(player.lowPassUnit);
AudioUnitUninitialize(player.lowPassUnit);
AudioComponentInstanceDispose(player.lowPassUnit);
}
@end
If I add a generic output and try to set an 8 kHz ASBD on its input, then I just get garbage noise for output. It looks like 0,0,0,0,0,17438231945853048031929171968.000000,0,0,0,-2548199532257382185315640279040.000000... Yikes!
I tried adding ASBDs to the input and output of the lowpass unit, giving it an 8 kHz sample rate property, and it did nothing. I tried adding converter units (with ASBDs set to 8 kHz) before, then after, then before AND after the lowpass filter (in a chain); this also did not work.
As a side question: my buffer is mono 8 kHz samples, and if I set mNumberBuffers to 1 in my buffer list, then my lowpass input render proc is never called... Is there a way to avoid having to use stereo channels?
I am using converters at both ends: the ASBD is set to 8000-sample-rate mono floats for the input of the input converter and the output of the output converter, while 44100.0 stereo is used for the input and output of the lowpass unit, and AudioUnitRender is called on the end converter with no IO unit for the offline render. For the online render I put a converter unit before the IO unit, so the render callback pulls from the buffers at 8K for playback too. It appears that the lower sample rate on the output ASBD requires a higher maximum frames per slice and a smaller slice (the AudioUnitRender inNumberFrames); that's why it wouldn't render before.
#import "ViewController.h"
#import <AudioToolbox/AudioToolbox.h>
@implementation ViewController {
int sampleCount;
int renderBufferHead;
float *renderBuffer;
}
- (void)viewDidLoad {
[super viewDidLoad];
float sampleRate = 8000;
int bufferSeconds = 3;
sampleCount = sampleRate * bufferSeconds;//seconds
float *originalSaw = generateSawWaveBuffer(440, sampleRate, sampleCount);
renderBuffer = originalSaw;
renderBufferHead = 0;
AURenderCallbackStruct cbStruct = {renderCallback,(__bridge void *)self};
//this will do offline render using the render callback, callback just reads from renderBuffer at samplerate
float *processedBuffer = offlineRender(sampleCount, sampleRate, &cbStruct);
renderBufferHead = 0;//rewind render buffer after processing
//set up audio units to do live render using the render callback at sample rate then self destruct after delay
//it will play originalSaw for bufferSeconds, then after delay will switch renderBuffer to point at processedBuffer
float secondsToPlayAudio = (bufferSeconds + 1) * 2;
onlineRender(sampleRate, &cbStruct,secondsToPlayAudio);
//wait for original to finish playing, then change render callback source buffer to processed buffer
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)((secondsToPlayAudio / 2) * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
renderBuffer = processedBuffer;
renderBufferHead = 0;//rewind render buffer
});
//destroy after all rendering done
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(secondsToPlayAudio * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
free(originalSaw);
free(processedBuffer);
});
}
float * offlineRender(int count, double sampleRate, AURenderCallbackStruct *cbStruct){
AudioComponentInstance inConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioComponentInstance lowPass = getComponentInstance(kAudioUnitType_Effect, kAudioUnitSubType_LowPassFilter);
AudioComponentInstance outConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
AudioUnitSetProperty(inConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(outConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(inConverter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
formatAndConnect(inConverter, lowPass);
formatAndConnect(lowPass, outConverter);
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(inConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitSetProperty(lowPass, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitSetProperty(outConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
AudioUnitInitialize(inConverter);
AudioUnitInitialize(lowPass);
AudioUnitInitialize(outConverter);
AudioUnitSetParameter(lowPass, kLowPassParam_CutoffFrequency, kAudioUnitScope_Global, 0, 500, 0);
AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer)); // stereo-capable buffer list: room for a second AudioBuffer, though only mBuffers[0] is used here
float *left = malloc(sizeof(float) * 4096);
bufferlist->mBuffers[0].mData = left;
bufferlist->mNumberBuffers = 1;
AudioTimeStamp inTimeStamp;
memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
inTimeStamp.mSampleTime = 0;
float *buffer = malloc(sizeof(float) * count);
int inNumberframes = 512;
AudioUnitRenderActionFlags flag = 0;
int framesRead = 0;
while (count){
inNumberframes = MIN(inNumberframes, count);
bufferlist->mBuffers[0].mDataByteSize = sizeof(float) * inNumberframes;
printf("Offline Render %i frames\n",inNumberframes);
AudioUnitRender(outConverter, &flag, &inTimeStamp, 0, inNumberframes, bufferlist);
memcpy(buffer + framesRead, left, sizeof(float) * inNumberframes);
inTimeStamp.mSampleTime += inNumberframes;
count -= inNumberframes;
framesRead += inNumberframes;
}
free(left);
// free(right);
free(bufferlist);
AudioUnitUninitialize(inConverter);
AudioUnitUninitialize(lowPass);
AudioUnitUninitialize(outConverter);
return buffer;
}
OSStatus renderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData){
ViewController *self = (__bridge ViewController*)inRefCon;
float *left = ioData->mBuffers[0].mData;
for (int i = 0; i < inNumberFrames; i++) {
if (self->renderBufferHead >= self->sampleCount) {
left[i] = 0;
}
else{
left[i] = self->renderBuffer[self->renderBufferHead++];
}
}
if(ioData->mNumberBuffers == 2){
memcpy(ioData->mBuffers[1].mData, left, sizeof(float) * inNumberFrames);
}
printf("render %f to %f\n",inTimeStamp->mSampleTime,inTimeStamp->mSampleTime + inNumberFrames);
return noErr;
}
void onlineRender(double sampleRate, AURenderCallbackStruct *cbStruct,float duration){
AudioComponentInstance converter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
AudioComponentInstance ioUnit = getComponentInstance(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput);
AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
AudioUnitSetProperty(converter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitSetProperty(converter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));
formatAndConnect(converter, ioUnit);
AudioUnitInitialize(converter);
AudioUnitInitialize(ioUnit);
AudioOutputUnitStart(ioUnit);
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(duration * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
AudioOutputUnitStop(ioUnit);
AudioUnitUninitialize(ioUnit);
AudioUnitUninitialize(converter);
});
}
float * generateSawWaveBuffer(float frequency,float sampleRate, int sampleCount){
float *buffer = malloc(sizeof(float) * sampleCount);
float increment = (frequency / sampleRate) * 2;
int increasing = 1;
float sample = 0;
for (int i = 0; i < sampleCount; i++) {
if (increasing) {
sample += increment;
if (sample >= 1) {
increasing = 0;
}
}
else{
sample -= increment;
if (sample < -1) {
increasing = 1;
}
}
buffer[i] = sample;
}
return buffer;
}
AudioComponentInstance getComponentInstance(OSType type,OSType subType){
AudioComponentDescription desc = {0};
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentSubType = subType;
desc.componentType = type;
AudioComponent ioComponent = AudioComponentFindNext(NULL, &desc);
AudioComponentInstance unit;
AudioComponentInstanceNew(ioComponent, &unit);
return unit;
}
AudioStreamBasicDescription getMonoFloatASBD(double sampleRate){
AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate = sampleRate;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
asbd.mFramesPerPacket = 1;
asbd.mChannelsPerFrame = 1;
asbd.mBitsPerChannel = 32;
asbd.mBytesPerPacket = 4;
asbd.mBytesPerFrame = 4;
return asbd;
}
void formatAndConnect(AudioComponentInstance src,AudioComponentInstance dst){
AudioStreamBasicDescription asbd;
UInt32 propsize = sizeof(AudioStreamBasicDescription);
AudioUnitGetProperty(dst, kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&asbd,&propsize);
AudioUnitSetProperty(src, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));
AudioUnitConnection connection = {0};
connection.destInputNumber = 0;
connection.sourceAudioUnit = src;
connection.sourceOutputNumber = 0;
AudioUnitSetProperty(dst, kAudioUnitProperty_MakeConnection, kAudioUnitScope_Input, 0, &connection, sizeof(AudioUnitConnection));
}
@end

Why does OpenGL ES cover the whole Android View? I just want off-screen rendering

I am trying to get images using off-screen rendering on the Android NDK, with OpenGL ES 2.0.
Android version: 4.4.2
Device: Samsung Galaxy Tab SM-T705
I made a simple button example in Android:
If you push the button, the program creates a Pbuffer surface (CreatePbufferWindow) in the NDK.
The program draws a triangle.
glReadPixels() gets the result image,
which is saved into the Bitmap class.
The program then draws the result image in the Android window.
I don't want the bar and button to be covered,
and I don't expect the OpenGL ES view to be displayed at all.
NDK C code
#include <jni.h>
#include <android/log.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <EGL/eglplatform.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <android/bitmap.h>
#include "tga.h"
#include "jpge.h"
#define LOG_TAG "libgl2jni"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
//static void printGLString(const char *name, GLenum s) {
// const char *v = (const char *) glGetString(s);
// LOGI("GL %s = %s\n", name, v);
//}
static void checkGlError(const char* op) {
for (GLint error = glGetError(); error; error
= glGetError()) {
LOGI("after %s() glError (0x%x)\n", op, error);
}
}
static const char gVertexShader[] =
"attribute vec4 vPosition;\n"
"void main() {\n"
" gl_Position = vPosition;\n"
"}\n";
static const char gFragmentShader[] =
"precision mediump float;\n"
"void main() {\n"
" gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);\n"
"}\n";
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(int width, int height) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
//EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_BLUE_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_ALPHA_SIZE, 8,
EGL_NONE
};
EGLint w, h, dummy, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
checkGlError("eglGetDisplay");
eglInitialize(display, 0, 0);
checkGlError("eglInitialize");
/* Here, the application chooses the configuration it desires. In this
* sample, we have a very simplified selection process, where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display, attribs, &config, 1, &numConfigs);
checkGlError("eglChooseConfig");
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
checkGlError("eglGetConfigAttrib");
// using PexelBuffer
EGLint attribList[] =
{
EGL_WIDTH, width,
EGL_HEIGHT, height,
EGL_LARGEST_PBUFFER, EGL_TRUE,
EGL_NONE
};
surface = eglCreatePbufferSurface(display, config, attribList);
checkGlError("eglCreatePbufferSurface");
// surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
const EGLint attrib_list[] = {
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE
};
context = eglCreateContext(display, config, NULL, attrib_list);
checkGlError("eglCreateContext");
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
// LOGW("Unable to eglMakeCurrent");
return -1;
}
// eglQuerySurface(display, surface, EGL_WIDTH, &w);
// eglQuerySurface(display, surface, EGL_HEIGHT, &h);
// Initialize GL state.
glEnable(GL_CULL_FACE);
// glDisable(GL_DEPTH_TEST);
glEnable(GL_DEPTH_TEST);
return 0;
}
GLuint loadShader(GLenum shaderType, const char* pSource) {
GLuint shader = glCreateShader(shaderType);
if (shader) {
glShaderSource(shader, 1, &pSource, NULL);
glCompileShader(shader);
GLint compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (!compiled) {
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if (infoLen) {
char* buf = (char*) malloc(infoLen);
if (buf) {
glGetShaderInfoLog(shader, infoLen, NULL, buf);
LOGE("Could not compile shader %d:\n%s\n",
shaderType, buf);
free(buf);
}
glDeleteShader(shader);
shader = 0;
}
}
}
return shader;
}
GLuint createProgram(const char* pVertexSource, const char* pFragmentSource) {
GLuint vertexShader = loadShader(GL_VERTEX_SHADER, pVertexSource);
if (!vertexShader) {
return 0;
}
GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pFragmentSource);
if (!pixelShader) {
return 0;
}
GLuint program = glCreateProgram();
if (program) {
glAttachShader(program, vertexShader);
checkGlError("glAttachShader");
glAttachShader(program, pixelShader);
checkGlError("glAttachShader");
glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus != GL_TRUE) {
GLint bufLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
if (bufLength) {
char* buf = (char*) malloc(bufLength);
if (buf) {
glGetProgramInfoLog(program, bufLength, NULL, buf);
LOGE("Could not link program:\n%s\n", buf);
free(buf);
}
}
glDeleteProgram(program);
program = 0;
}
}
return program;
}
GLuint gProgram;
GLuint gvPositionHandle;
bool setupGraphics(int w, int h) {
// printGLString("Version", GL_VERSION);
// printGLString("Vendor", GL_VENDOR);
// printGLString("Renderer", GL_RENDERER);
// printGLString("Extensions", GL_EXTENSIONS);
LOGI("setupGraphics(%d, %d)", w, h);
gProgram = createProgram(gVertexShader, gFragmentShader);
if (!gProgram) {
LOGE("Could not create program.");
return false;
}
gvPositionHandle = glGetAttribLocation(gProgram, "vPosition");
checkGlError("glGetAttribLocation");
LOGI("glGetAttribLocation(\"vPosition\") = %d\n",
gvPositionHandle);
glViewport(0, 0, w, h);
checkGlError("glViewport");
return true;
}
GLfloat gTriangleVertices[] = { 1.0f, 1.0f, 1.0f, 0.5f, 0.5f, 0.5f,
-1.5f, 1.5f, 1.5f };
char* renderFrame(int width, int height) {
static float grey;
grey += 0.01f;
if (grey > 1.0f) {
grey = 0.0f;
}
// scale adjustment: shrink the triangle a little each call
for(int count = 0; count < 9; ++count)
{
gTriangleVertices[count] *= 0.8f;
}
char* pixelData = (char*)malloc(4 * width * height * sizeof(char));
for(int count = 0; count < width * height * 4; ++count) {
pixelData[count] = 0;
}
// enable the depth buffer
glEnable(GL_DEPTH_TEST);
// set the depth buffer clear value
glClearDepthf(1.F);
// glClearColor(grey, grey, grey, 1.0f);
glClearColor(1.0, 0.0, 0.0, 1.0f);
checkGlError("glClearColor");
glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
checkGlError("glClear");
glUseProgram(gProgram);
checkGlError("glUseProgram");
glVertexAttribPointer(gvPositionHandle, 3, GL_FLOAT, GL_FALSE, 0, gTriangleVertices);
checkGlError("glVertexAttribPointer");
glEnableVertexAttribArray(gvPositionHandle);
checkGlError("glEnableVertexAttribArray");
glDrawArrays(GL_TRIANGLES, 0, 3);
checkGlError("glDrawArrays");
glReadPixels(
0, 0,
width, height,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixelData
);
// int jpgCount = 0;
// for(int count = 0; count < width * height * 4; ++count) {
// if (count % 4 != 3) {
// pixelJpgData[jpgCount] = pixelData[count];
// ++jpgCount;
// }
// }
// tgaGrabScreenSeries("/storage/emulated/0/Pictures/CTgaTest", 0, 0, width, height);
jpge::compress_image_to_jpeg_file(
"/storage/emulated/0/Pictures/CJpgTest.jpg",
width, height,
4,
(jpge::uint8*)pixelData
);
// LOGI("%s", (char const *)pixelData);
// for(int count = 0; count < width*height*4; ++count) {
// LOGI("%x", pixelData[count]);
// }
return pixelData;
// if(pixelData) {
// free(pixelData);
//// delete[] pixelData;
// }
}
int
decodeMemory(JNIEnv* env, const void* data, size_t len, jobject* bitmap)
{
jclass clazz = env->FindClass("android/graphics/BitmapFactory");
if (env->ExceptionCheck()) {
env->ExceptionClear();
return 2;
}
jmethodID mid = env->GetStaticMethodID(clazz, "decodeByteArray", // BitmapFactory's static method is decodeByteArray
"([BII)Landroid/graphics/Bitmap;");
if (env->ExceptionCheck()) {
env->ExceptionClear();
return 2;
}
jbyteArray jarray = env->NewByteArray(len);
env->SetByteArrayRegion(jarray, 0, len, (jbyte*)data);
*bitmap = env->CallStaticObjectMethod(clazz, mid, jarray, 0, len);
return 1;
}
extern "C"
{
JNIEXPORT void JNICALL Java_com_javacodegeeks_android_buttonexample_GL2JNILib_init
(JNIEnv * env, jobject obj, jint width, jint height)
{
engine_init_display(width, height);
setupGraphics(width, height);
}
}
extern "C"
{
JNIEXPORT void JNICALL Java_com_javacodegeeks_android_buttonexample_GL2JNILib_step
(JNIEnv * env, jobject obj, jobject jBitmap, jint width, jint height)
//(JNIEnv * env, jobject obj, jint width, jint height)
{
char* pixelData = NULL;
pixelData = renderFrame(width, height);
// renderFrame(width, height);
decodeMemory(env, pixelData, width * height * 4, &jBitmap);
if (pixelData)
{
free(pixelData);
}
}
}
java Code
package com.javacodegeeks.android.buttonexample;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.os.Environment;
import android.util.Log;
import android.view.View;
class GL2View extends View
{
public GL2View(Context context, int w, int h) {
super(context);
// TODO Auto-generated constructor stub
width = w;
height = h;
// // OpenGL test section: start /////////////////////////////////////////////////////
mBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
GL2JNILib.init(width, height);
mCanvas = new Canvas();
mCanvas.setBitmap(mBitmap);
// // OpenGL test section: end /////////////////////////////////////////////////////
mPaint = new Paint();
}
/* Image SDCard Save (input Bitmap -> saved JPEG file)
* Writer intruder(Kwangseob Kim)
* @param bitmap : input bitmap file
* @param folder : input folder name
* @param name : output file name
*/
public static void saveBitmaptoJpeg(Bitmap bitmap,String folder, String name){
// String ex_storage =Environment.getExternalStorageDirectory().getAbsolutePath(); // absolute path
String ex_storage = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES).getAbsolutePath(); // pictures path
// Get Absolute Path in External Sdcard
String foler_name = "/"+folder+"/";
String file_name = name+".jpg";
String string_path = ex_storage+foler_name;
Log.d(VIEW_LOG_TAG, ex_storage);
File file_path;
try{
file_path = new File(string_path);
if(!file_path.isDirectory()){
file_path.mkdirs();
}
FileOutputStream out = new FileOutputStream(string_path+file_name);
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, out);
out.close();
}catch(FileNotFoundException exception){
Log.e("FileNotFoundException", exception.getMessage());
}catch(IOException exception){
Log.e("IOException", exception.getMessage());
}
}
public void SaveBitmapToSDcard()
{
// File path = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES);
saveBitmaptoJpeg(mBitmap, "./", "JavaTest");
}
// @Override
// protected void onSizeChanged(int w, int h, int oldw, int oldh)
// {
// mBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
// mCanvas = new Canvas();
// mCanvas.setBitmap(mBitmap);
//
// testDrawing();
// }
//
// private void testDrawing()
// {
// mCanvas.drawColor(Color.WHITE);
// mPaint.setColor(Color.RED);
// mCanvas.drawRect(100, 100, 200, 200, mPaint);
// }
//
@Override
protected void onDraw(Canvas canvas)
{
GL2JNILib.step(mBitmap, width, height);
// GL2JNILib.step(width, height);
if(mBitmap != null)
{
canvas.drawBitmap(mBitmap, 0, 0, null);
}
SaveBitmapToSDcard();
}
private Bitmap mBitmap;
private Canvas mCanvas;
private Paint mPaint;
int width;
int height;
}
Native Call
//package com.android.gl2jni;
package com.javacodegeeks.android.buttonexample;
import android.graphics.Bitmap;
// Wrapper for native library
public class GL2JNILib {
static {
System.loadLibrary("gl2jni");
}
/**
* #param width the current view width
* #param height the current view height
*/
public static native void init(int width, int height);
public static native void step(Bitmap bitmap, int width, int height);
// public static native void step(int width, int height);
}
Button image: [image]
Result image: [image]
If you are asking how to export pixel data from an offscreen FBO where you draw your objects, you have to bind the offscreen FBO before calling glReadPixels().
Problem 1. You never bind & unbind FBOs, so you draw your objects into the on-screen buffer.
Problem 2. You never generate an offscreen FBO and bind it before glReadPixels().
Solution: generate an offscreen FBO, bind it before drawing your objects, and keep it bound (or rebind it) when calling glReadPixels().
Update
Here is code that generates an FBO and RBO and attaches the RBO to the FBO:
glGenFramebuffers(1, &fbo[object_id]);
glBindFramebuffer(GL_FRAMEBUFFER, fbo[object_id]);
glGenRenderbuffers(1, &rboColor[object_id]);
glBindRenderbuffer(GL_RENDERBUFFER, rboColor[object_id]);
glRenderbufferStorage(GL_RENDERBUFFER,GL_RGBA8_OES, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, fbo[object_id]);
// Attach ColorRenderbuffers
glFramebufferRenderbuffer(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_RENDERBUFFER,rboColor[object_id]);
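A sketch of the readback path that would follow (the completeness check and the read/restore calls are standard GLES2; where exactly they sit in your render code is an assumption):
// verify the offscreen FBO before drawing into it
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
LOGE("offscreen FBO incomplete");
}
// ... draw the triangle as in renderFrame() ...
// read back while the offscreen FBO is still bound
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixelData);
// restore the default framebuffer so nothing covers the Android View
glBindFramebuffer(GL_FRAMEBUFFER, 0);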

SKPhysicsJoint is joining at what appears to be incorrect coordinates

In my simple archery game, I have defined an arrow node and a target node with associated physics bodies.
When I attempt to join them together using an SKPhysicsJointFixed (I have also tried other types), the behaviour is inaccurate, with the joint seemingly created at random points before the arrow actually hits the target node.
I have played with friction and restitution values, and with both SKShapeNode (with a CGPath) and SKSpriteNode (with a rectangle around the sprite) to define the target, but the problem occurs either way.
When just using collisions, the arrows bounce off the correct locations on the target, which seems OK. It is only when the joint is created that the results become random on-screen, usually 10-20 points away from the target node "surface".
static const uint32_t arrowCategory = 0x1 << 1;
static const uint32_t targetCategory = 0x1 << 2;
- (SKSpriteNode *)createArrowNode
{
SKSpriteNode *arrow = [[SKSpriteNode alloc] initWithImageNamed:@"Arrow.png"];
arrow.position = CGPointMake(165, 110);
arrow.name = @"arrowNode";
arrow.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:arrow.frame.size];
arrow.physicsBody.angularVelocity = -0.25;
arrow.physicsBody.usesPreciseCollisionDetection = YES;
arrow.physicsBody.restitution = 0.0;
arrow.physicsBody.friction = 0.0;
arrow.physicsBody.categoryBitMask = arrowCategory;
arrow.physicsBody.collisionBitMask = targetCategory;
arrow.physicsBody.contactTestBitMask = /*arrowCategory | */targetCategory | bullseyeCategory;
return arrow;
}
- (void)createTargetNode
{
SKSpriteNode *sprite = [[SKSpriteNode alloc] initWithImageNamed:@"TargetOutline.png"];
sprite.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:sprite.size];
sprite.position = CGPointMake(610, 100);
sprite.name = @"targetNode";
sprite.physicsBody.usesPreciseCollisionDetection = NO;
// sprite.physicsBody.affectedByGravity = NO;
// sprite.physicsBody.mass = 20000;
sprite.physicsBody.dynamic = NO;
sprite.physicsBody.friction = 0.0;
sprite.physicsBody.restitution = 0.0;
sprite.physicsBody.categoryBitMask = targetCategory;
sprite.physicsBody.contactTestBitMask = targetCategory | arrowCategory;
[self addChild:sprite];
}
- (void)didBeginContact:(SKPhysicsContact *)contact
{
SKPhysicsBody *firstBody, *secondBody;
if (contact.bodyA.categoryBitMask < contact.bodyB.categoryBitMask)
{
firstBody = contact.bodyA;
secondBody = contact.bodyB;
}
else
{
firstBody = contact.bodyB;
secondBody = contact.bodyA;
}
if ((firstBody.categoryBitMask & arrowCategory) != 0 &&
(secondBody.categoryBitMask & targetCategory) != 0)
{
CGPoint contactPoint = contact.contactPoint;
NSLog(@"contactPoint is %@", NSStringFromCGPoint(contactPoint));
float contact_x = contactPoint.x;
float contact_y = contactPoint.y;
SKPhysicsJoint *joint = [SKPhysicsJointFixed jointWithBodyA:firstBody bodyB:secondBody anchor:contactPoint];
[self.physicsWorld addJoint:joint];
CGPoint bullseye = CGPointMake(590, 102.5);
NSLog(@"Center is %@", NSStringFromCGPoint(bullseye));
CGFloat distance = SDistanceBetweenPoints(contactPoint, bullseye);
NSLog(@"Distance to bullseye is %f", distance);
}
}
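One pattern worth trying (a sketch, not from the original post): didBeginContact: fires mid-simulation, and by the time the fixed joint takes effect the bodies have usually moved past the reported contactPoint, which can look like a joint anchored at a random offset. Deferring joint creation until didSimulatePhysics, after the physics step has settled, is a common workaround; the _pendingJoint ivar here is hypothetical:
// in didBeginContact: remember the joint instead of adding it immediately
_pendingJoint = [SKPhysicsJointFixed jointWithBodyA:firstBody bodyB:secondBody anchor:contact.contactPoint];

- (void)didSimulatePhysics
{
if (_pendingJoint) {
[self.physicsWorld addJoint:_pendingJoint]; // bodies are now at their post-step positions
_pendingJoint = nil;
}
}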
