How to debug "ArrayIndexOutOfBoundsException: 0" in Processing - processing

I'm trying to make Kim Asendorf's pixel sorting code work with input from a webcam in processing.
I get the error: "ArrayIndexOutOfBoundsException: 0". Any ideas of how I can fix this?
//BLACK
int getFirstNotBlackY(int _x, int _y) {
// Walks down column _x starting at row _y and returns the first row whose
// pixel no longer satisfies the "black" test, or -1 when the bottom of the
// image is reached first.
// NOTE(review): this paste is truncated -- the closing braces and the final
// `return y;` are missing. Also, cam.pixels is only populated after a frame
// has been read (cam.read()/loadPixels()); calling this before the first
// captured frame leaves cam.pixels empty, which is the likely cause of the
// reported "ArrayIndexOutOfBoundsException: 0".
int x = _x;
int y = _y;
color c;
if (y < height) {
// Keep descending while the current pixel still counts as "black"
// (signed ARGB int below blackValue).
while ( (c = cam.pixels[x + y * cam.width]) < blackValue) {
y++;
if (y >= height) return -1;
}
import processing.video.*;
// Sketch-level state for the pixel-sorting sketch.
Capture cam;
String[] cameras = Capture.list();
// Current sorting mode (see table below).
int mode = 0;
//MODE:
//0 -> black
//1 -> bright
//2 -> white
//b(16777216)
int loops = 1;
// Threshold below which a pixel counts as "black" (signed ARGB int value).
int blackValue = -10000000;
// NOTE(review): name is misspelled ("brigthness"); kept as-is because other
// pasted fragments may reference it.
int brigthnessValue = 60;
int whiteValue = -6000000;
int row = 0;
// NOTE(review): incomplete declaration -- the initializer is missing from
// the paste (presumably `0`), so this will not compile as shown.
int column =
boolean saved = false;
void setup() {
size(640, 480);
// NOTE(review): `import` statements are illegal inside a method in
// Processing/Java -- the two import lines below are paste artifacts and
// must live at the top of the sketch for this to compile.
import processing.video.*;
// NOTE(review): this local declaration shadows the sketch-level `cameras`
// variable declared above.
String[] cameras = Capture.list();
import processing.video.*;
if (cameras.length == 0) {
println("There are no cameras available for capture.");
exit();
} else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
// The camera can be initialized directly using an
// element from the array returned by list():
cam = new Capture(this, cameras[0]);
cam.start();
}
}
I've added the code I'm using to initialize the webcam.

Related

Triangle rasterizer using change of basis

My triangle rasterization program works as-is, but I would like to make it easier to use by specifying explicit coordinates instead of differentials. Unfortunately, I don't know how to calculate the necessary change-of-basis matrix. This can be seen by the empty "rasterizeCnvTri" function.
The following section is my code, using the PixelToaster library for a framebuffer:
// TrueColor Example
// How to open a display in truecolor mode and work with 32 bit integer pixels.
// Copyright © Glenn Fiedler, 2004-2005. http://www.pixeltoaster.com
#include "pt/source/PixelToaster.h"
using namespace PixelToaster;
// 256x256 software framebuffer (one int per R/G/B channel) and depth buffer
// holding the fixed-point inverse depth (w) of the nearest fragment so far.
int fbuf[256][256][3];
int zbuf[256][256];
// Rasterize a triangle described by two edge functions (u, v) and an
// interpolated inverse depth (w), all in 4096-scaled fixed point. The start
// values and their per-column (d*dx) / per-row (d*dy) increments are walked
// across the whole 256x256 buffer; fragments inside the triangle that pass
// the z-test are written with the flat color (r, g, b).
void rasterizeTriangle(int u,int dudx,int dudy,int v,int dvdx,int dvdy,int w,int dwdx,int dwdy,int r,int g,int b)
{
    int uRow = u;
    int vRow = v;
    int wRow = w;
    for (int y = 0; y < 256; y++)
    {
        int uCur = uRow;
        int vCur = vRow;
        int wCur = wRow;
        for (int x = 0; x < 256; x++)
        {
            // Inside test: both edge coordinates non-negative and their sum
            // below 4096 (i.e. scaled 1.0); z-test keeps the nearest fragment.
            bool inside = uCur >= 0 && vCur >= 0 && (uCur + vCur) < 4096;
            if (inside && wCur >= zbuf[x][y])
            {
                fbuf[x][y][0] = r;
                fbuf[x][y][1] = g;
                fbuf[x][y][2] = b;
                zbuf[x][y] = wCur;
            }
            uCur += dudx;
            vCur += dvdx;
            wCur += dwdx;
        }
        uRow += dudy;
        vRow += dvdy;
        wRow += dwdy;
    }
}
// Placeholder: convert three explicit vertices (screen x/y plus fixed-point
// inverse depth w) into the differential form rasterizeTriangle() expects.
// Intentionally unimplemented -- this is the question being asked; a working
// version appears later in the post.
void rasterizeCnvTri(int x1,int x2,int x3,int y1,int y2,int y3,int w1,int w2,int w3,int r,int g,int b)
{
//how to do this?
}
// Opens a 256x256 truecolor PixelToaster window, rasterizes two hard-coded
// test triangles into the global fbuf, then copies fbuf into the display
// every frame until the window is closed.
int main()
{
const int width = 256;
const int height = 256;
Display display( "Triangle Rasterizer", width, height, Output::Default, Mode::TrueColor );
// NOTE(review): relies on `vector` being visible without std:: -- presumably
// pulled in by the PixelToaster header; verify if this fails to compile.
vector<TrueColorPixel> pixels( width * height );
// Two triangles already expressed in the differential (u/v/w gradient) form.
rasterizeTriangle(-2048,32,0,-2048,0,32,2048,0,0,255,0,0);
rasterizeTriangle(0,32,-32,-2048,0,32,4096,-16,0,255,255,255);
while ( display.open() )
{
unsigned int index = 0;
// Copy the framebuffer row-by-row into the pixel vector for display.
for ( int y = 0; y < height; ++y )
{
for ( int x = 0; x < width; ++x )
{
pixels[index].r = fbuf[x][y][0];
pixels[index].g = fbuf[x][y][1];
pixels[index].b = fbuf[x][y][2];
++index;
}
}
display.update( pixels );
}
}
U and V are the axes along two of the sides, and W is the inverse depth. All three coordinates are scaled by 4096, so integers are sufficient.
[EDIT]
The following code works for me (also does basic perspective divide):
// Converts three explicit vertices (screen x/y and fixed-point inverse depth
// w, all scaled by 4096) into the differential form consumed by
// rasterizeTriangle(), including a basic perspective divide.
void rasterizeCnvTri(int x1,int x2,int x3,int y1,int y2,int y3,int w1,int w2,int w3,int r,int g,int b)
{
// Perspective-project each vertex around the screen center (128, 128).
int xp1 = (((x1-128)*w1)>>8)+128;
int yp1 = (((y1-128)*w1)>>8)+128;
int xp2 = (((x2-128)*w2)>>8)+128;
int yp2 = (((y2-128)*w2)>>8)+128;
int xp3 = (((x3-128)*w3)>>8)+128;
int yp3 = (((y3-128)*w3)>>8)+128;
// Edge vectors from vertex 1 to vertices 2 and 3 (projected x/y plus w).
int xd2 = xp2-xp1;
int xd3 = xp3-xp1;
int yd2 = yp2-yp1;
int yd3 = yp3-yp1;
int wd2 = w2-w1;
int wd3 = w3-w1;
// Plane through the three (x, y, w) points: plna*x + plnb*y + plnc*w + plnd
// = 0, later solved for w to interpolate inverse depth per pixel.
int plna = (yd2*wd3)-(yd3*wd2);
int plnb = (wd2*xd3)-(wd3*xd2);
int plnc = (xd2*yd3)-(xd3*yd2);
int plnd = -(plna*xp1)-(plnb*yp1)-(plnc*w1);
// 2x2 change-of-basis determinant (screen -> edge coordinates).
// NOTE(review): despite the name this is the determinant itself, not its
// inverse; a degenerate (zero-area) triangle makes it 0 and the divisions
// below would fault -- consider guarding.
int invdet = (xd2*yd3)-(yd2*xd3);
// u gradient (scaled by 4096 via << 12) and its value at the screen origin.
int dudx = (-yd3<<12)/invdet;
int dudy = (yd2<<12)/invdet;
int u = (-xp1*dudx)-(yp1*dudy);
// Same for v.
int dvdx = (xd3<<12)/invdet;
int dvdy = (-xd2<<12)/invdet;
int v = (-xp1*dvdx)-(yp1*dvdy);
// Signs are flipped to match rasterizeTriangle's inside test; the w terms
// come from solving the plane equation for w.
rasterizeTriangle(-u,-dudx,-dudy,-v,-dvdx,-dvdy,-plnd/plnc,-plna/plnc,-plnb/plnc,r,g,b);
}

Image IO - Why when converting Image to red scale, blue scale does it result in black images?

I have a problem with my current programming project. The assignment calls to take an image from my computer (I use one of the sample pictures that the computer provides) and convert it to grayscale, green, red, and blue. I wrote the code for the program and it successfully converts the first two pictures (the original => grayscale and the original => green), but when it converts to red and blue, it produces an all-black image (what should happen is the picture should be converted from original => red and original => blue along with the previous two conversions, gray and green). Here is my code:
import java.io.File;
import java.io.IOException;
import java.awt.image.BufferedImage;
import javax.imageio.ImageIO;
public class Prog3a
{
    /**
     * Loads a sample image and writes four variants next to it: grayscale,
     * green-only, red-only and blue-only.
     *
     * Bug fix: the original code applied every conversion to the SAME
     * BufferedImage in sequence, so by the time the red pass ran the green
     * pass had already zeroed the red channel (and the red pass zeroed the
     * blue channel), producing all-black red and blue images. Each variant
     * is now computed from an untouched copy of the original image.
     */
    public static void main(String args[]) throws IOException
    {
        BufferedImage img = null;
        //read image
        try
        {
            File f = new File("C:\\Users\\Public\\Pictures\\Sample Pictures\\Penguins.jpg");
            img = ImageIO.read(f);
        }
        catch(IOException exception)
        {
            System.out.println(exception);
        }
        // Nothing to convert if the read failed (the exception was printed).
        if (img == null)
        {
            return;
        }
        writeImage(toGrayscale(img), "C:\\Users\\Public\\Pictures\\Sample Pictures\\CS108toGray");
        writeImage(toChannel(img, 8), "C:\\Users\\Public\\Pictures\\Sample Pictures\\CS108toGreen");
        writeImage(toChannel(img, 16), "C:\\Users\\Public\\Pictures\\Sample Pictures\\CS108toRed");
        writeImage(toChannel(img, 0), "C:\\Users\\Public\\Pictures\\Sample Pictures\\CS108toBlue");
    }

    /** Returns a NEW image whose RGB channels are all the per-pixel average. */
    private static BufferedImage toGrayscale(BufferedImage src)
    {
        BufferedImage out = new BufferedImage(src.getWidth(), src.getHeight(), BufferedImage.TYPE_INT_RGB);
        for(int y = 0; y < src.getHeight(); y++)
        {
            for(int x = 0; x < src.getWidth(); x++)
            {
                int p = src.getRGB(x, y);
                int r = (p >> 16) & 0xff;
                int g = (p >> 8) & 0xff;
                int b = p & 0xff;
                //calculate average and replicate it into all three channels
                int avg = (r + g + b) / 3;
                out.setRGB(x, y, (avg << 16) | (avg << 8) | avg);
            }
        }
        return out;
    }

    /**
     * Returns a NEW image keeping only one 8-bit channel of the source.
     *
     * @param shift bit offset of the channel: 16 = red, 8 = green, 0 = blue
     */
    private static BufferedImage toChannel(BufferedImage src, int shift)
    {
        BufferedImage out = new BufferedImage(src.getWidth(), src.getHeight(), BufferedImage.TYPE_INT_RGB);
        for(int y = 0; y < src.getHeight(); y++)
        {
            for(int x = 0; x < src.getWidth(); x++)
            {
                int channel = (src.getRGB(x, y) >> shift) & 0xff;
                out.setRGB(x, y, channel << shift);
            }
        }
        return out;
    }

    /** Writes the image as JPEG; errors are printed, matching the original behavior. */
    private static void writeImage(BufferedImage img, String path)
    {
        try
        {
            ImageIO.write(img, "jpg", new File(path));
        }
        catch(IOException exception)
        {
            System.out.println(exception);
        }
    }
}
How can I modify my program so that the red and blue image converter correctly converts the image to its respective colors? As a beginning programmer, any and all advice/suggestions are appreciated, thank you.

Processing - deprecated OpenKinect library

I am trying to replicate a project for Kinect for this music video, but the code is seriously outdated.
After weeks searching, I have not found anything about this.
I would be greatly thankful to anyone who points out to me what is deprecated in the following code:
(I'm using Processing 3)
import org.openkinect.*;
import org.openkinect.processing.*;
import java.io.*;
// Kinect library object (old OpenKinect API).
Kinect kinect;
// Current rotation angle of the point cloud around the Y axis.
float a = 0;
// Size of the kinect depth image.
int w = 640;
int h = 480;
// Recording state indicator (true while frames are being captured).
boolean write = false;
// Threshold filter initial value (raw depth units, 0 - 2047).
int fltValue = 950;
// "Recording" object: each element holds one frame's array of world-space points.
Vector <Object> recording = new Vector<Object>();
// Lookup table so the raw-depth -> meters math isn't repeated per pixel.
float[] depthLookUp = new float[2048];
void setup() {
size(800,600,P3D);
kinect = new Kinect(this);
kinect.start();
kinect.enableDepth(true);
// We don't need the grayscale image in this example
// so this makes it more efficient
kinect.processDepthImage(false);
// NOTE(review): start()/enableDepth()/processDepthImage() belong to the old
// OpenKinect library; the updated "Open Kinect for Processing" library
// changed this initialization API -- verify against its current docs.
// Lookup table for all possible depth values (0 - 2047)
for (int i = 0; i < depthLookUp.length; i++) {
depthLookUp[i] = rawDepthToMeters(i);
}
}
void draw() {
background(0);
fill(255);
// NOTE(review): textMode(SCREEN) comes from older Processing versions and
// may not exist in Processing 3 -- verify; likely one of the deprecation
// errors being asked about.
textMode(SCREEN);
text("Kinect FR: " + (int)kinect.getDepthFPS() + "\nProcessing FR: " + (int)frameRate,10,16);
// Get the raw depth as array of integers
int[] depth = kinect.getRawDepth();
// We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
int skip = 4;
// Translate and rotate
translate(width/2,height/2,-50);
rotateY(a);
//noStroke();
//lights();
int index = 0;
// One world-space point per sampled depth pixel (160 * 120 = 19200).
PVector[] frame = new PVector[19200];
for(int x=0; x<w; x+=skip) {
for(int y=0; y<h; y+=skip) {
int offset = x+y*w;
// Convert kinect data to world xyz coordinate
int rawDepth = depth[offset];
boolean flt = true;
PVector v = depthToWorld(x,y,rawDepth);
// Threshold filter: clamp far samples to the "no reading" depth (2047).
if (flt && rawDepth > fltValue)
{
v = depthToWorld(x,y,2047);
}
frame[index] = v;
index++;
// Shade the point by its raw depth value.
stroke(map(rawDepth,0,2048,0,256));
pushMatrix();
// Scale up by `factor` (400)
float factor = 400;
translate(v.x*factor,v.y*factor,factor-v.z*factor);
//sphere(1);
point(0,0);
//line (0,0,1,1);
popMatrix();
}
}
// While recording, keep this frame's point array.
if (write == true) {
recording.add(frame);
}
// Rotate
//a += 0.015f;
}
// These functions come from:http://graphics.stanford.edu/~mdfisher/Kinect.html
// Convert a raw 11-bit Kinect disparity value to meters using the Stanford
// calibration (http://graphics.stanford.edu/~mdfisher/Kinect.html).
// 2047 is the sensor's "no reading" sentinel and maps to 0.
float rawDepthToMeters(int depthValue) {
  if (depthValue >= 2047) {
    return 0.0f;
  }
  double meters = 1.0 / (depthValue * -0.0030711016 + 3.3309495161);
  return (float) meters;
}
// Back-project a depth-image pixel (x, y, raw depth) into camera-space meters
// using fixed Kinect intrinsics (inverse focal lengths and the principal
// point), per http://graphics.stanford.edu/~mdfisher/Kinect.html
PVector depthToWorld(int x, int y, int depthValue) {
  final double fx_d = 1.0 / 5.9421434211923247e+02;
  final double fy_d = 1.0 / 5.9104053696870778e+02;
  final double cx_d = 3.3930780975300314e+02;
  final double cy_d = 2.4273913761751615e+02;
  // Depth in meters comes from the precomputed lookup table.
  double meters = depthLookUp[depthValue];
  PVector world = new PVector();
  world.x = (float)((x - cx_d) * meters * fx_d);
  world.y = (float)((y - cy_d) * meters * fy_d);
  world.z = (float) meters;
  return world;
}
// Sketch shutdown hook: release the Kinect before stopping the applet.
// NOTE(review): overriding stop() is the old Processing 1/2 lifecycle;
// presumably dispose() is needed in Processing 3 -- verify.
void stop() {
kinect.quit();
super.stop();
}
// Index of the next output directory ("out0", "out1", ...).
int currentFile = 0;
// Unused stub -- saving is done inline in the keyPressed() '4' handler.
void saveFile() {
}
void keyPressed() { // Press a key to save the data
// '1'/'2' adjust the depth threshold; '3' starts recording; '4' stops
// recording and dumps every recorded frame to out<N>/frame<i>.txt as
// "j, x, y, z" lines.
if (key == '1')
{
fltValue += 50;
println("fltValue: " + fltValue);
}
else if (key == '2')
{
fltValue -= 50;
println("fltValue: " + fltValue);
}
else if (key=='4'){
if (write == true) {
write = false;
println( "recorded " + recording.size() + " frames.");
// saveFile();
// save
// NOTE(review): raw Enumeration -- requires java.util.Enumeration (and
// java.util.Vector) imports, which the answer below points out are missing.
Enumeration e = recording.elements();
println("Stopped Recording " + currentFile);
int i = 0;
while (e.hasMoreElements()) {
// Create one directory
// NOTE(review): mkdir() is re-invoked for every frame (harmless, it fails
// after the first) and `success` is never checked.
boolean success = (new File("out"+currentFile)).mkdir();
PrintWriter output = createWriter("out"+currentFile+"/frame" + i++ +".txt");
PVector [] frame = (PVector []) e.nextElement();
for (int j = 0; j < frame.length; j++) {
output.println(j + ", " + frame[j].x + ", " + frame[j].y + ", " + frame[j].z );
}
output.flush(); // Write the remaining data
output.close();
}
currentFile++;
}
}
else if (key == '3') {
println("Started Recording "+currentFile);
recording.clear();
write = true;
}
}
If the code works, then I wouldn't worry too much about it. Deprecated can just mean that a newer version is available, not that the older version stopped working.
However, if the code does not work, then updating to a newer library is probably a good idea anyway. Check out the library section of the Processing homepage, which lists several Kinect libraries.
In fact, one of those libraries is the updated version of the old library you're using: Open Kinect for Processing.
Edit: It looks like both of the errors you mentioned are due to missing import statements. You need to import both Vector and Enumeration to use them:
import java.util.Vector;
import java.util.Enumeration;

Save image with Processing

I'm trying to save an image after a certain time. The problem is that the image size is bigger than the display, so when I use the save or saveFrame function it only saves the part of the image that I can see in the display. Is there any other way to save the whole image?
This is my code:
// Source photo, filter image (its red channel is used as a mask in draw()),
// and a scratch image buffer.
PImage picture, pictureFilter, img;
// Total pixel count, pixels remaining, and the last saved percentage.
int total, cont, current;
// One list of not-yet-visited pixel positions per image column.
ArrayList<ArrayList<Position>> columns;
String[] fontList;
// Simple mutable 2D pixel coordinate (plain x/y storage, no behavior).
public class Position {
public int x;
public int y;
}
void setup() {
fontList = PFont.list();
picture = loadImage("DSC05920b.JPG");
pictureFilter = loadImage("filtrePort2.jpg");
frame.setResizable(true);
// NOTE(review): size() with non-constant arguments (and the `frame` field)
// is only accepted by older Processing versions -- verify on Processing 3.
size(picture.width, picture.height);
// Build, for every column, the list of pixel positions not yet drawn.
columns = new ArrayList<ArrayList<Position>>();
for(int i = 0; i < picture.width; i++) {
ArrayList<Position> row = new ArrayList<Position>();
for(int j = 0; j < picture.height; j++){
Position p = new Position();
p.x = i;
p.y = j;
row.add(p);
}
columns.add(row);
}
total = picture.width * picture.height;
cont = total;
current = 0;
img = createImage(picture.width, picture.height, RGB);
}
// Draw random float codes in [48, 122) until one falls inside the ASCII
// ranges for digits, uppercase or lowercase letters, then return it.
float randomLetter() {
  while (true) {
    float candidate = random(48, 122);
    boolean isDigit = candidate > 48 && candidate < 58;
    boolean isUpper = candidate > 65 && candidate < 91;
    boolean isLower = candidate > 97 && candidate < 123;
    if (isDigit || isUpper || isLower) {
      return candidate;
    }
  }
}
void draw() {
// Pick a random not-yet-visited pixel: a random column, then a random
// remaining position within it.
int x = int(random(0, columns.size()));
ArrayList<Position> rows = columns.get(x);
int y = int(random(0, rows.size()));
Position p = rows.get(y);
color c = pictureFilter.get(p.x, p.y);
int r = (c >> 16) & 0xFF; // Faster way of getting red(argb)
// Only draw a glyph where the filter image's red channel is dark enough.
if(r < 240) {
PFont f = createFont(fontList[int(random(0,fontList.length))],random(5, 24),true);
textFont(f);
fill(picture.get(p.x,p.y));
char letter = (char) int(randomLetter());
text(letter, p.x, p.y);
}
// Remove the visited position; drop the whole column once it's empty.
if(rows.size() == 1) {
if(columns.size() == 1) {
// Last pixel of the whole image: save the final frame.
saveFrame("lol.jpg");
columns.remove(x);
} else {
columns.remove(x);
}
} else {
println(rows.size());
rows.remove(y);
}
--cont;
// Save a snapshot every time the completed percentage crosses a new
// integer value.
// NOTE(review): save()/saveFrame() capture only the visible display, which
// is why the full-size image is lost -- see the PGraphics answer below.
float percent = float(total-cont)/float(total)*100;
if(int(percent) != current) {
current = int(percent);
save("image_" + current + ".jpg");
}
println("DONE: " + (total-cont) + "/" + total + " Progress: " + percent + "%");
}
The code does a lot of stuff, but the part that is not working well is at the end, where I check whether the percentage has increased in order to save the image.
You can write this into a PGraphics context - aka a graphics buffer.
The buffer can be as big as you need it to be, and you can choose whether to draw it on the screen or not..
// Create the buffer at the size you need, and choose the renderer
PGraphics pg = createGraphics(myImage.width, myImage.height, P2D);
// Wrap all your drawing functions in the pg context - e.g.
PFont f = createFont(fontList[int(random(0,fontList.length))],random(5, 24),true);
// NOTE(review): this sets the font on the main canvas; presumably
// pg.textFont(f) is intended so the buffer uses it -- verify.
textFont(f);
pg.beginDraw();
pg.fill(picture.get(p.x,p.y));
char letter = (char) int(randomLetter());
pg.text(letter, p.x, p.y);
pg.endDraw();
// Draw your PG to the screen and resize the representation of it to the screen bounds
image(pg, 0, 0, width, height); // <-- this won't actually clip/resize the image
// Save the buffer at full resolution, independent of the window size
pg.save("image_" + current + ".jpg");
The PImage class contains a save() function that exports to file. The API should be your first stop for questions like this.

Video Delay/Buffer in Processing 2.0

I'm having a ton of trouble making a simple video delay in processing. I looked around on the internet and I keep finding the same bit of code and I can't get it to work at all. When I first tried it, it did nothing (at all). Here's my modified version (which at least seems to load frames into the buffer), I really have no idea why it doesn't work and I'm getting really tired of pulling out my hair. Please... please, for the love of god, please somebody point out the stupid mistake I'm making here.
And now, without further delay (hah, get it?), the code:
import processing.video.*;
// Delay ring buffer, a Movie and a `seconds` value (neither referenced in
// the code shown), and the capture device.
VideoBuffer vb;
Movie myMovie;
Capture cam;
float seconds = 1;
void setup() {
size(320,240, P3D);
frameRate(30);
String[] cameras = Capture.list();
if (cameras.length == 0) {
println("There are no cameras available for capture.");
exit();
} else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
// NOTE(review): hard-coded camera index 3 assumes at least four devices are
// listed -- cameras[0] is the safer default.
cam = new Capture(this, cameras[3]);
cam.start();
}
// 90-frame ring buffer at the sketch resolution (3 seconds at 30 fps).
vb = new VideoBuffer(90, width, height);
}
void draw() {
// Push each newly captured frame into the delay buffer.
if (cam.available() == true) {
cam.read();
vb.addFrame(cam);
}
// Live image on the left, delayed image offset to the right.
image(cam, 0, 0);
image( vb.getFrame(), 150, 0 );
}
// Fixed-size ring buffer of video frames: addFrame() writes at inputFrame,
// getFrame() reads at outputFrame, which trails by (buffer length - 1)
// slots, producing the video delay.
class VideoBuffer
{
    PImage[] buffer;      // ring of frame snapshots
    int inputFrame = 0;   // next slot to write
    int outputFrame = 0;  // slot currently played back
    int frameWidth = 0;
    int frameHeight = 0;

    // frames = delay capacity; vWidth/vHeight = frame size in pixels.
    VideoBuffer( int frames, int vWidth, int vHeight )
    {
        buffer = new PImage[frames];
        // Pre-fill with blank images so getFrame() never returns null.
        for(int i = 0; i < frames; i++)
        {
            this.buffer[i] = new PImage(vWidth, vHeight);
        }
        this.inputFrame = 0;
        this.outputFrame = 1;
        this.frameWidth = vWidth;
        this.frameHeight = vHeight;
    }

    // return the current "playback" frame.
    PImage getFrame()
    {
        return this.buffer[this.outputFrame];
    }

    // Add a new frame to the buffer.
    void addFrame( PImage frame )
    {
        // BUG FIX: store a COPY of the frame. The original code stored the
        // reference itself; since the capture object's pixels are rewritten
        // on every cam.read(), every slot then pointed at the same, always-
        // current image and no delay was visible. get() returns a snapshot.
        this.buffer[this.inputFrame] = frame.get();
        // advance the input and output indexes
        this.inputFrame++;
        this.outputFrame++;
        println(this.inputFrame + " " + this.outputFrame);
        // wrap the indexes at the end of the ring
        if(this.inputFrame >= this.buffer.length)
        {
            this.inputFrame = 0;
        }
        if(this.outputFrame >= this.buffer.length)
        {
            this.outputFrame = 0;
        }
    }
}
This works in Processing 2.0.1.
import processing.video.*;
Capture cam;
// Ring buffer holding the last nFrames captured frames.
PImage[] buffer;
int w = 640;
int h = 360;
int nFrames = 60;
// Write index and read index; the reader starts one slot ahead so it lags
// the writer by roughly the buffer length, producing the delay.
int iWrite = 0, iRead = 1;
void setup(){
size(w, h);
cam = new Capture(this, w, h);
cam.start();
// Slots start null; draw() skips display until the read slot is filled.
buffer = new PImage[nFrames];
}
void draw() {
if(cam.available()) {
cam.read();
// get() snapshots the frame -- storing `cam` itself would make every slot
// alias the same, always-current image.
buffer[iWrite] = cam.get();
if(buffer[iRead] != null){
image(buffer[iRead], 0, 0);
}
iWrite++;
iRead++;
// NOTE(review): wrapping at nFrames-1 means slot nFrames-1 is never used,
// so the effective delay is one frame shorter than nFrames -- harmless,
// but `>= nFrames` would use the whole buffer.
if(iRead >= nFrames-1){
iRead = 0;
}
if(iWrite >= nFrames-1){
iWrite = 0;
}
}
}
There is a problem inside your addFrame method. You just store a reference to the PImage object, whose pixels get overwritten all the time. You have to use buffer[inputFrame] = frame.get() instead of buffer[inputFrame] = frame. The get() method returns a copy of the image.

Resources