I'm new to Processing and I need to make a program that captures the main monitor, shows the average color on the second screen, and draws a spiral using another color (the perceptual dominant color) obtained from a function.
The problem is that the program is very slow (it lags at about 1 FPS). I think it's because it has too many things to do every time I take a screenshot, but I have no idea how to make it faster.
There could be other problems too, but that's the main one.
Thank you very much!
Here's the code:
import java.awt.Robot;
import java.awt.AWTException;
import java.awt.Rectangle;
import java.awt.color.ColorSpace;
PImage screenshot;
float a = 0;
int blockSize = 20;
int avg_c;
int per_c;
void setup() {
fullScreen(2); // 1920x1080
noStroke();
frame.removeNotify();
}
void draw() {
screenshot();
avg_c = extractColorFromImage(screenshot);
per_c = extractAverageColorFromImage(screenshot);
background(avg_c); // Average color
spiral();
}
void screenshot() {
try{
Robot robot_Screenshot = new Robot();
screenshot = new PImage(robot_Screenshot.createScreenCapture
(new Rectangle(0, 0, displayWidth, displayHeight)));
}
catch (AWTException e){ }
frame.setLocation(displayWidth/2, 0);
}
void spiral() {
fill (per_c);
for (int i = blockSize; i < width; i += blockSize*2)
{
ellipse(i, height/2+sin(a+i)*100, blockSize+cos(a+i)*5, blockSize+cos(a+i)*5);
a += 0.001;
}
}
color extractColorFromImage(PImage screenshot) { // Get average color
screenshot.loadPixels();
int r = 0, g = 0, b = 0;
for (int i = 0; i < screenshot.pixels.length; i++) {
color c = screenshot.pixels[i];
r += c>>16&0xFF;
g += c>>8&0xFF;
b += c&0xFF;
}
r /= screenshot.pixels.length; g /= screenshot.pixels.length; b /= screenshot.pixels.length;
return color(r, g, b);
}
color extractAverageColorFromImage(PImage screenshot) { // Get lab average color (perceptual)
float[] average = new float[3];
CIELab lab = new CIELab();
int numPixels = screenshot.pixels.length;
for (int i = 0; i < numPixels; i++) {
color rgb = screenshot.pixels[i];
float[] labValues = lab.fromRGB(new float[]{red(rgb),green(rgb),blue(rgb)});
average[0] += labValues[0];
average[1] += labValues[1];
average[2] += labValues[2];
}
average[0] /= numPixels;
average[1] /= numPixels;
average[2] /= numPixels;
float[] rgb = lab.toRGB(average);
return color(rgb[0] * 255,rgb[1] * 255, rgb[2] * 255);
}
public class CIELab extends ColorSpace {
@Override
public float[] fromCIEXYZ(float[] colorvalue) {
double l = f(colorvalue[1]);
double L = 116.0 * l - 16.0;
double a = 500.0 * (f(colorvalue[0]) - l);
double b = 200.0 * (l - f(colorvalue[2]));
return new float[] {(float) L, (float) a, (float) b};
}
@Override
public float[] fromRGB(float[] rgbvalue) {
float[] xyz = CIEXYZ.fromRGB(rgbvalue);
return fromCIEXYZ(xyz);
}
@Override
public float getMaxValue(int component) {
return 128f;
}
@Override
public float getMinValue(int component) {
return (component == 0)? 0f: -128f;
}
@Override
public String getName(int idx) {
return String.valueOf("Lab".charAt(idx));
}
@Override
public float[] toCIEXYZ(float[] colorvalue) {
double i = (colorvalue[0] + 16.0) * (1.0 / 116.0);
double X = fInv(i + colorvalue[1] * (1.0 / 500.0));
double Y = fInv(i);
double Z = fInv(i - colorvalue[2] * (1.0 / 200.0));
return new float[] {(float) X, (float) Y, (float) Z};
}
@Override
public float[] toRGB(float[] colorvalue) {
float[] xyz = toCIEXYZ(colorvalue);
return CIEXYZ.toRGB(xyz);
}
CIELab() {
super(ColorSpace.TYPE_Lab, 3);
}
private double f(double x) {
if (x > 216.0 / 24389.0) {
return Math.cbrt(x);
} else {
return (841.0 / 108.0) * x + N;
}
}
private double fInv(double x) {
if (x > 6.0 / 29.0) {
return x*x*x;
} else {
return (108.0 / 841.0) * (x - N);
}
}
private final ColorSpace CIEXYZ =
ColorSpace.getInstance(ColorSpace.CS_CIEXYZ);
private final double N = 4.0 / 29.0;
}
There's lots that can be done, even beyond what's already been mentioned.
Iteration & Threading
After taking the screenshot, immediately iterate over every Nth pixel (perhaps every 4th or 8th) of the buffered image. Then, during this iteration, calculate the LAB value for each pixel (as you have each pixel channel directly available), and meanwhile increment the running total of each RGB channel.
This saves us from iterating over the same pixels twice and avoids unnecessary conversions (BufferedImage → PImage; and composing then decomposing pixel channels from PImage pixels).
Likewise, we avoid Processing's expensive resize() call (as suggested in another answer), which is not something we want to call every frame (even though it does speed the program up, it's not an efficient method).
Now, on top of the iteration change, we can wrap the iteration in a Callable to easily run the workload across multiple system threads concurrently (after all, pixel iteration is embarrassingly parallel); the example below does this with 2 threads, each screenshotting and processing half of the display's pixels.
Optimise RGB→XYZ→LAB conversion
(We're not so concerned about the backward conversion, since that's only done for one value per frame.)
It looks like you've implemented XYZ→LAB yourself and are using the RGB→XYZ converter from java.awt.color.
As has been identified, the forward conversion XYZ→LAB uses cbrt(), which is a bottleneck. I also imagine that the RGB→XYZ implementation makes 3 calls to Math.pow(x, 2.4); 3 non-integer exponents per pixel add considerably to the computation. The solution is faster math...
Jafama
Jafama is a drop-in replacement for java.lang.Math: simply import the library and replace any Math.__() calls with FastMath.__() for a free speedup (you could go even further by trading Jafama's 1e-15 accuracy for its less accurate but even faster dedicated LUT-based classes).
So at the very least, swap out Math.cbrt() for FastMath.cbrt(). Then consider implementing RGB→XYZ yourself (example), again using Jafama in place of java.lang.Math.
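As a minimal sketch of the swap, this is the f() helper from the CIELab class above with the one-line change, assuming the Jafama jar has been added to the sketch (its FastMath class lives in the net.jafama package):
import net.jafama.FastMath;

private double f(double x) {
  if (x > 216.0 / 24389.0) {
    return FastMath.cbrt(x); // was Math.cbrt(x); same result, computed faster
  } else {
    return (841.0 / 108.0) * x + N;
  }
}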
You may even find that for such a project, XYZ alone is a sufficient color space to overcome the well-known weaknesses of RGB (and therefore save yourself the XYZ→LAB conversion).
Cache LAB Calculation
Unless most pixels are changing every frame, consider caching the LAB value for every pixel, recalculating it only when the pixel has changed between the current and previous frames. The tradeoff here is the overhead of checking every pixel against its previous value, versus how much calculation positive checks will save. Given that the LAB calculation is much more expensive, it's very worthwhile here. The example below uses this technique.
Screen Capture
No matter how well optimised the rest of the program is, a considerable bottleneck is the AWT Robot's createScreenCapture(). It struggles to go past 30 FPS on large enough displays. I can't offer any exact advice, but it's worth looking at other screen capture methods in Java.
Reworked code with iteration changes & threading
This code implements what has been discussed above, minus any changes to the LAB calculation.
import java.awt.Robot;
import java.awt.AWTException;
import java.awt.Rectangle;
import java.awt.image.BufferedImage;

float a = 0;
int blockSize = 20;
int avg_c;
int per_c;
java.util.concurrent.ExecutorService threadPool = java.util.concurrent.Executors.newFixedThreadPool(4);
List<java.util.concurrent.Callable<Boolean>> taskList;
float[] averageLAB;
int totalR = 0, totalG = 0, totalB = 0;
CIELab lab = new CIELab();
final int pixelStride = 8; // look at every 8th pixel
void setup() {
size(800, 800, FX2D);
noStroke();
frame.removeNotify();
taskList = new ArrayList<java.util.concurrent.Callable<Boolean>>();
Compute thread1 = new Compute(0, 0, width, height/2);
Compute thread2 = new Compute(0, height/2, width, height/2);
taskList.add(thread1);
taskList.add(thread2);
}
void draw() {
totalR = 0; // re init
totalG = 0; // re init
totalB = 0; // re init
averageLAB = new float[3]; // re init
final int numPixels = (width*height)/pixelStride;
try {
threadPool.invokeAll(taskList); // run threads now and block until completion of all
}
catch (Exception e) {
e.printStackTrace();
}
// calculate average LAB
averageLAB[0]/=numPixels;
averageLAB[1]/=numPixels;
averageLAB[2]/=numPixels;
final float[] rgb = lab.toRGB(averageLAB);
per_c = color(rgb[0] * 255, rgb[1] * 255, rgb[2] * 255);
// calculate average RGB
totalR/=numPixels;
totalG/=numPixels;
totalB/=numPixels;
avg_c = color(totalR, totalG, totalB);
background(avg_c); // Average color
spiral();
fill(255, 0, 0);
text(frameRate, 10, 20);
}
class Compute implements java.util.concurrent.Callable<Boolean> {
private final Rectangle screenRegion;
private Robot robot_Screenshot;
private final int[] previousRGB;
private float[][] previousLAB;
Compute(int x, int y, int w, int h) {
screenRegion = new Rectangle(x, y, w, h);
previousRGB = new int[w*h];
previousLAB = new float[w*h][3];
try {
robot_Screenshot = new Robot();
}
catch (AWTException e1) {
e1.printStackTrace();
}
}
@Override
public Boolean call() {
BufferedImage rawScreenshot = robot_Screenshot.createScreenCapture(screenRegion);
int[] ssPixels = new int[rawScreenshot.getWidth()*rawScreenshot.getHeight()]; // screenshot pixels
rawScreenshot.getRGB(0, 0, rawScreenshot.getWidth(), rawScreenshot.getHeight(), ssPixels, 0, rawScreenshot.getWidth()); // copy buffer to int[] array
for (int pixel = 0; pixel < ssPixels.length; pixel+=pixelStride) {
// get individual colour channels
final int pixelColor = ssPixels[pixel];
final int R = pixelColor >> 16 & 0xFF;
final int G = pixelColor >> 8 & 0xFF;
final int B = pixelColor & 0xFF;
if (pixelColor != previousRGB[pixel]) { // if pixel has changed recalculate LAB value
float[] labValues = lab.fromRGB(new float[]{R/255f, G/255f, B/255f}); // note that I've fixed this; beforehand you were missing the /255, so it was always white.
previousLAB[pixel] = labValues;
}
averageLAB[0] += previousLAB[pixel][0];
averageLAB[1] += previousLAB[pixel][1];
averageLAB[2] += previousLAB[pixel][2];
totalR+=R;
totalG+=G;
totalB+=B;
previousRGB[pixel] = pixelColor; // cache last result
}
return true;
}
}
800x800px; pixelStride = 4; fairly static screen background
Yeesh, about 1 FPS on my machine:
Optimizing code can be really hard, so instead of reading everything looking for things to improve, I started by testing where you were losing so much processing power. The answer was at this line:
per_c = extractAverageColorFromImage(screenshot);
The extractAverageColorFromImage method is well written, but it underestimates the amount of work it has to do. The number of pixels grows quadratically with the size of the screen, so the bigger the screen, the worse the situation. And this method processes every pixel of the screenshot all the time, several times per screenshot.
This is a lot of work for an average color. Now, if there were a way to cut some corners... maybe a smaller screen, or a smaller screenshot... oh! there is! Let's resize the screenshot. After all, we don't need to go into as much detail as individual pixels for an average. In the screenshot method, add this line:
void screenshot() {
try {
Robot robot_Screenshot = new Robot();
screenshot = new PImage(robot_Screenshot.createScreenCapture(new Rectangle(0, 0, displayWidth, displayHeight)));
// ADD THE NEXT LINE
screenshot.resize(width/4, height/4);
}
catch (AWTException e) {
}
frame.setLocation(displayWidth/2, 0);
}
I divided each dimension by 4 (so roughly 16x fewer pixels to process), but I encourage you to tweak this number until you have the fastest satisfying result you can. This is just a proof of concept:
As you can see, resizing the screenshot and making it 4x smaller gives me 10x more speed. That's not a miracle, but it's much better, and I can't see a difference in the end result - but about that part, you'll have to use your own judgement, as you are the one who knows what your project is about. Hope it'll help!
Have fun!
Unfortunately I can't provide a detailed answer like laancelot (+1), but hopefully I can provide a few tips:
Resizing the image is definitely a good direction. Bear in mind you can also skip a number of pixels instead of visiting every single one. (If you handle the pixel indices correctly, you can get a similar effect to resize without calling resize, though that won't save you a lot of CPU time.)
Don't create a new Robot instance multiple times a second. Create it once in setup and re-use it. (This is more of a good habit to get into)
Use a CPU profiler, such as the one in VisualVM to see what exactly is slow and aim to optimise the slowest stuff first.
point 1 example:
for (int i = 0; i < numPixels; i+= 100)
point 2 example:
Robot robot_Screenshot;
...
void setup() {
fullScreen(2); // 1920x1080
noStroke();
frame.removeNotify();
try{
robot_Screenshot = new Robot();
}catch(AWTException e){
println("error setting up screenshot Robot instance");
e.printStackTrace();
}
}
...
void screenshot() {
screenshot = new PImage(robot_Screenshot.createScreenCapture
(new Rectangle(0, 0, displayWidth, displayHeight)));
frame.setLocation(displayWidth/2, 0);
}
point 3 example:
Notice the slowest bits are actually AWT's fromRGB and Math.cbrt().
I'd suggest finding an alternative RGB -> XYZ -> L*a*b* conversion method that is simpler (mainly functions, fewer classes, no AWT or other dependencies) and hopefully faster.
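For reference, here's a minimal dependency-free sketch of such a conversion, using the standard sRGB (D65) constants rather than anything taken from the code above; the rgbToLab, srgbToLinear and labF names are just illustrative:
// sRGB (0-255 per channel) -> CIE L*a*b*, D65 white point, no AWT needed
float[] rgbToLab(int r8, int g8, int b8) {
  double r = srgbToLinear(r8 / 255.0);
  double g = srgbToLinear(g8 / 255.0);
  double b = srgbToLinear(b8 / 255.0);
  // linear RGB -> XYZ (standard sRGB/D65 matrix)
  double X = 0.4124 * r + 0.3576 * g + 0.1805 * b;
  double Y = 0.2126 * r + 0.7152 * g + 0.0722 * b;
  double Z = 0.0193 * r + 0.1192 * g + 0.9505 * b;
  // XYZ -> Lab, normalised by the D65 white point
  double fx = labF(X / 0.95047), fy = labF(Y / 1.00000), fz = labF(Z / 1.08883);
  return new float[] { (float) (116 * fy - 16), (float) (500 * (fx - fy)), (float) (200 * (fy - fz)) };
}

double srgbToLinear(double c) { // gamma expansion; this is the pow(x, 2.4) mentioned earlier
  return c <= 0.04045 ? c / 12.92 : Math.pow((c + 0.055) / 1.055, 2.4);
}

double labF(double t) { // same piecewise f as in the CIELab class above
  return t > 216.0 / 24389.0 ? Math.cbrt(t) : (841.0 / 108.0) * t + 4.0 / 29.0;
}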
I'm trying to create a program that receives a photograph of a surface from a certain angle and position, and generates an image of what an isometric projection of the plane would look like. For example, given a photo of a checkerboard
and information about the positioning and properties of the camera, it could reconstruct a section of the undistorted pattern
My approach has been divided into two parts. The first part is to create four rays, coming from the camera, following the four corners of its field of view. I compute where these rays intersect with the plane, to form the quadrangle of the area of the plane that the camera can see, like this:
The second part is to render an isometric projection of the plane with the textured quadrangle. I divide the quadrangle into two triangles, then for each pixel of the rendering, I convert the Cartesian coordinates into barycentric coordinates relative to each triangle, then convert them back into Cartesian coordinates relative to a corresponding triangle that occupies half of the photograph, so that I can sample a color.
(I am aware that this could be done more efficiently with OpenGL, but I would like to not use it for logistical reasons. I am also aware that the quality will be affected by the lack of interpolation; that does not matter for this task.)
I am testing the program with some data, but the rendering does not occur as intended. Here is the photograph:
And here is the program output:
I believe that the problem is occurring in the quadrangle rendering, because I have graphed the projected vertices, and they appear to be correct:
I am by no means an expert in computer graphics, so I would very much appreciate if someone had any idea what would cause this problem. Here is the relevant code:
public class ImageProjector {
private static final EquationSystem ground = new EquationSystem(0, 1, 0, 0);
private double fov;
private double aspectRatio;
private vec3d position;
private double xAngle;
private double yAngle;
private double zAngle;
public ImageProjector(double fov, double aspectRatio, vec3d position, double xAngle, double yAngle, double zAngle) {
this.fov = fov;
this.aspectRatio = aspectRatio;
this.position = position;
this.xAngle = xAngle;
this.yAngle = yAngle;
this.zAngle = zAngle;
}
public vec3d[] computeVertices() {
return new vec3d[] {
computeVertex(1, 1),
computeVertex(1, -1),
computeVertex(-1, -1),
computeVertex(-1, 1)
};
}
private vec3d computeVertex(int horizCoef, int vertCoef) {
vec3d p2 = new vec3d(tan(fov / 2) * horizCoef, tan((fov / 2) / aspectRatio) * vertCoef, 1);
p2 = p2.rotateXAxis(xAngle);
p2 = p2.rotateYAxis(yAngle);
p2 = p2.rotateZAxis(zAngle);
if (p2.y > 0) {
throw new RuntimeException("sky is visible to camera: " + p2);
}
p2 = p2.plus(position);
//System.out.println("passing through " + p2);
EquationSystem line = new LineBuilder(position, p2).build();
return new vec3d(line.add(ground).solveVariables());
}
}
public class barypoint {
public barypoint(double u, double v, double w) {
this.u = u;
this.v = v;
this.w = w;
}
public final double u;
public final double v;
public final double w;
public barypoint(vec2d p, vec2d a, vec2d b, vec2d c) {
vec2d v0 = b.minus(a);
vec2d v1 = c.minus(a);
vec2d v2 = p.minus(a);
double d00 = v0.dotProduct(v0);
double d01 = v0.dotProduct(v1);
double d11 = v1.dotProduct(v1);
double d20 = v2.dotProduct(v0);
double d21 = v2.dotProduct(v1);
double denom = d00 * d11 - d01 * d01;
v = (d11 * d20 - d01 * d21) / denom;
w = (d00 * d21 - d01 * d20) / denom;
u = 1.0 - v - w;
}
public barypoint(vec2d p, Triangle triangle) {
this(p, triangle.a, triangle.b, triangle.c);
}
public vec2d toCartesian(vec2d a, vec2d b, vec2d c) {
return new vec2d(
u * a.x + v * b.x + w * c.x,
u * a.y + v * b.y + w * c.y
);
}
public vec2d toCartesian(Triangle triangle) {
return toCartesian(triangle.a, triangle.b, triangle.c);
}
}
public class ImageTransposer {
private BufferedImage source;
private BufferedImage receiver;
public ImageTransposer(BufferedImage source, BufferedImage receiver) {
this.source = source;
this.receiver = receiver;
}
public void transpose(Triangle sourceCoords, Triangle receiverCoords) {
int xMin = (int) Double.min(Double.min(receiverCoords.a.x, receiverCoords.b.x), receiverCoords.c.x);
int xMax = (int) Double.max(Double.max(receiverCoords.a.x, receiverCoords.b.x), receiverCoords.c.x);
int yMin = (int) Double.min(Double.min(receiverCoords.a.y, receiverCoords.b.y), receiverCoords.c.y);
int yMax = (int) Double.max(Double.max(receiverCoords.a.y, receiverCoords.b.y), receiverCoords.c.y);
for (int x = xMin; x <= xMax; x++) {
for (int y = yMin; y <= yMax; y++) {
vec2d p = new vec2d(x, y);
if (receiverCoords.contains(p) && p.x >= 0 && p.y >= 0 && p.x < receiver.getWidth() && y < receiver.getHeight()) {
barypoint bary = new barypoint(p, receiverCoords);
vec2d sp = bary.toCartesian(sourceCoords);
if (sp.x >= 0 && sp.y >= 0 && sp.x < source.getWidth() && sp.y < source.getHeight()) {
receiver.setRGB(x, y, source.getRGB((int) sp.x, (int) sp.y));
}
}
}
}
}
}
public class ProjectionRenderer {
private String imagePath;
private BufferedImage mat;
private vec3d[] vertices;
private vec2d pos;
private double scale;
private int width;
private int height;
public boolean error = false;
public ProjectionRenderer(String image, BufferedImage mat, vec3d[] vertices, vec3d pos, double scale, int width, int height) {
this.imagePath = image;
this.mat = mat;
this.vertices = vertices;
this.pos = new vec2d(pos.x, pos.z);
this.scale = scale;
this.width = width;
this.height = height;
}
public void run() {
try {
BufferedImage image = ImageIO.read(new File(imagePath));
vec2d[] transVerts = Arrays.stream(vertices)
.map(v -> new vec2d(v.x, v.z))
.map(v -> v.minus(pos))
.map(v -> v.multiply(scale))
.map(v -> v.plus(new vec2d(mat.getWidth() / 2, mat.getHeight() / 2)))
// this fixes the image being upside down
.map(v -> new vec2d(v.x, mat.getHeight() / 2 + (mat.getHeight() / 2 - v.y)))
.toArray(vec2d[]::new);
System.out.println(Arrays.toString(transVerts));
Triangle sourceTri1 = new Triangle(
new vec2d(0, 0),
new vec2d(image.getWidth(), 0),
new vec2d(0, image.getHeight())
);
Triangle sourceTri2 = new Triangle(
new vec2d(image.getWidth(), image.getHeight()),
new vec2d(0, image.getHeight()),
new vec2d(image.getWidth(), 0)
);
Triangle destTri1 = new Triangle(
transVerts[3],
transVerts[0],
transVerts[2]
);
Triangle destTri2 = new Triangle(
transVerts[1],
transVerts[2],
transVerts[0]
);
ImageTransposer transposer = new ImageTransposer(image, mat);
System.out.println("transposing " + sourceTri1 + " -> " + destTri1);
transposer.transpose(sourceTri1, destTri1);
System.out.println("transposing " + sourceTri2 + " -> " + destTri2);
transposer.transpose(sourceTri2, destTri2);
} catch (IOException e) {
e.printStackTrace();
error = true;
}
}
}
The reason it's not working is that your transpose function works entirely with 2D co-ordinates, so it cannot compensate for the image distortion resulting from 3D perspective. You have effectively implemented a 2D affine transformation: parallel lines remain parallel, which they do not under a 3D perspective transform. If you draw a straight line between two points on your triangle, you can linearly interpolate between them by linearly interpolating the barycentric co-ordinates, and vice versa.
To take Z into account, you can keep the barycentric co-ordinate approach, but provide a Z co-ordinate for each point in sourceCoords. The trick is to interpolate between 1/Z values (which can be linearly interpolated in a perspective image) instead of interpolating Z itself. So instead of interpolating what are effectively the texture co-ordinates for each point, interpolate the texture co-ordinate divided by Z, along with inverse Z, and interpolate all of those using your barycentric system. Then divide by inverse Z before doing your texture lookup to get texture co-ordinates back.
You could do that like this (assume a, b and c contain an extra z co-ordinate giving distance from the camera):
public vec3d toCartesianInvZ(vec3d a, vec3d b, vec3d c) {
// put some asserts in to check for z = 0 to avoid div by zero
return new vec3d(
u * a.x/a.z + v * b.x/b.z + w * c.x/c.z,
u * a.y/a.z + v * b.y/b.z + w * c.y/c.z,
u * 1/a.z + v * 1/b.z + w * 1/c.z
);
}
(You could obviously speed up/simplify this by pre-computing all those divides and storing in sourceCoords, and just doing regular barycentric interpolation in 3D)
Then after you call it in transpose, divide by inv Z to get the texture co-ords back:
vec3d spInvZ = bary.toCartesianInvZ(sourceCoords);
vec2d sp = new vec2d(spInvZ.x / spInvZ.z, spInvZ.y / spInvZ.z);
etc. The Z co-ordinate that you need is the distance of the point in 3D space from the camera position, in the direction the camera is pointing. You can compute it with a dot product if you aren't getting it some other way:
float z = point.subtract(camera_pos).dot(camera_direction);
etc
I have the following code working on Processing 2, with Kinect:
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// Kinect Library object
Kinect kinect;
// Angle for rotation
float a = 0;
// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];
void setup() {
// Rendering in P3D
size(1200, 800, P3D);
kinect = new Kinect(this);
kinect.initDepth();
// Lookup table for all possible depth values (0 - 2047)
for (int i = 0; i < depthLookUp.length; i++) {
depthLookUp[i] = rawDepthToMeters(i);
}
}
void draw() {
background(0);
// Get the raw depth as array of integers
int[] depth = kinect.getRawDepth();
// We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
int skip = 4;
// Translate and rotate
translate(width/2, height/2, -50);
rotateY(a);
for (int x = 0; x < kinect.width; x += skip) {
for (int y = 0; y < kinect.height; y += skip) {
int offset = x + y*kinect.width;
// Convert kinect data to world xyz coordinate
int rawDepth = depth[offset];
PVector v = depthToWorld(x, y, rawDepth);
stroke(255);
pushMatrix();
// Scale up by 200
float factor = 200;
translate(v.x*factor, v.y*factor, factor-v.z*factor);
// Draw a point
point(0, 0);
popMatrix();
}
}
// Rotate
a += 0.015f;
}
// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
if (depthValue < 2047) {
return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
}
return 0.0f;
}
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
PVector result = new PVector();
double depth = depthLookUp[depthValue];//rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}
I would like to save the point cloud data to a file, so I can import it later into another program, such as Cinema 4D.
How do I create this file?
Processing has several functions for saving data to file, the simplest of which is saveStrings().
To use the saveStrings() function, you would simply store whatever you wanted to save into a String array, and then pass that into the function along with a filename.
You can then use the loadStrings() function to read the data from a file back into a String array.
How you format the data into a String is entirely up to you. You might store it as comma separated values.
More info can be found in the reference.
If you want to store the data into a file that another program can read, you have to first look up exactly what format that file needs to be in. I'd start by opening up some example files in a basic text editor.
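For example, here's a minimal sketch of that idea applied to the point cloud above, writing one comma separated "x,y,z" row per point; the cloud list, the 's' key and the pointcloud.csv filename are just illustrative choices:
// collects the current frame's points; refill it each frame in draw()
ArrayList<PVector> cloud = new ArrayList<PVector>();

// in draw(), call cloud.clear() before the nested loops,
// then cloud.add(new PVector(v.x, v.y, v.z)); right after computing v

void keyPressed() {
  if (key == 's') {
    String[] lines = new String[cloud.size()];
    for (int i = 0; i < cloud.size(); i++) {
      PVector v = cloud.get(i);
      lines[i] = v.x + "," + v.y + "," + v.z; // one "x,y,z" row per point
    }
    saveStrings("pointcloud.csv", lines); // saved into the sketch folder
  }
}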
I have an application where the user draws a line, and the software should then automatically create a rectangle such that the line intersects the midpoints of two adjacent sides of it. See below: the red line is user drawn and the purple rectangle is calculated. My algorithm almost works, but in some directions the rectangles aren't right-angled. I want to know why. There is a fair amount of approximation in my code, which should be fine at the scales I use (a few km); I shouldn't have to worry about the curvature of the earth.
Anybody know how I can improve my code?
Here are my references:
calculate points
lat and long to meters
public static MapRectangle CreatMapRectangle(BalingZone zone, double thickness)
{
MapsCoordinate rectpoint2;
MapsCoordinate rectPoint1 ;
MapsCoordinate rectPoint3 ;
MapsCoordinate rectPoint4 ;
var point1 = zone.Coordinates[0];
var point2 = zone.Coordinates[1];
var latitudeDiff = LatitudeDiffToMeters(point2.Latitude - point1.Latitude);
var longitudeDiff = LongitudeDiffToMeters(point1.Longitude - point2.Longitude, point1.Latitude);
var slopeB = longitudeDiff / latitudeDiff;
double latOffset = thickness * (slopeB / Math.Sqrt(1 + slopeB * slopeB));
double longOffset = thickness * (1 / Math.Sqrt(1 + slopeB * slopeB));
double p3Lat = CalculateLatitude(point1.Latitude, latOffset);
double p3Long = CalculateLongitude( point1.Longitude, p3Lat , longOffset);
rectPoint1 = new MapsCoordinate(p3Lat, p3Long);
double p4Lat = CalculateLatitude(point1.Latitude, -latOffset);
double p4Long = CalculateLongitude(point1.Longitude, p4Lat, -longOffset);
rectpoint2 = new MapsCoordinate(p4Lat, p4Long);
double p5Lat = CalculateLatitude(point2.Latitude, latOffset);
double p5Long = CalculateLongitude( point2.Longitude, p5Lat , longOffset);
rectPoint4 = new MapsCoordinate(p5Lat, p5Long);
double p6Lat = CalculateLatitude(point2.Latitude, -latOffset);
double p6Long = CalculateLongitude( point2.Longitude, p6Lat , -longOffset);
rectPoint3 = new MapsCoordinate(p6Lat, p6Long);
return new MapRectangle(rectPoint4, rectPoint3, rectPoint1, rectpoint2, thickness);
}
//use the quick and dirty estimate that 111,111 meters (111.111 km) in the y direction is 1 degree (of latitude)
// and 111,111 * cos(latitude) meters in the x direction is 1 degree (of longitude).
private static double LatitudeDiffToMeters(double latitudeDiff)
{
return 111111.0 * latitudeDiff;
}
private static double LongitudeDiffToMeters(double longitudeDiff, double latitude)
{
return 111111.0*Math.Cos(latitude)*longitudeDiff;
}
private static double CalculateLatitude(double latitude, double offset)
{
return latitude + offset/111111.0;
}
private static double CalculateLongitude(double longitude, double latitude, double offset)
{
return longitude + offset/(111111.0*Math.Cos(latitude));
}
}
Not sure why the question was voted down!
Anyway, there was an error in my code: I was using degrees instead of radians in the math functions. I also updated the calculations to assume a spherical earth (to work better closer to the poles).
private const double R = 6371000; // mean Earth radius in metres (assumed; R isn't defined in the original snippet)
private static double LatitudeDiffToMeters(double latitudeDiff)
{
return (R*latitudeDiff*Math.PI)/180;
}
private static double LongitudeDiffToMeters(double longitudeDiff, double latitude)
{
return (longitudeDiff*Math.PI* R * Math.Cos (Math.PI * latitude / 180))/180;
}
private static double CalculateLatitude(double latitude, double offset)
{
return latitude + ((offset/R) * (180/Math.PI));
}
private static double CalculateLongitude(double longitude, double latitude, double offset)
{
return longitude + offset / (R * Math.Cos (Math.PI * latitude / 180)) * (180 / Math.PI);
}
In the context of a game program, I have a moving circle and a fixed line segment. The segment can have an arbitrary size and orientation.
I know the radius of the circle: r
I know the coordinates of the circle before the move: (xC1, yC1)
I know the coordinates of the circle after the move: (xC2, yC2)
I know the coordinates of the extremities of the line segment: (xL1, yL1) - (xL2, yL2)
I am having difficulties trying to compute:
A boolean: If any part of the circle hits the line segment while moving from (xC1, yC1) to (xC2, yC2)
If the boolean is true, the coordinates (x, y) of the center of the circle when it hits the line segment (I mean when the circle is tangent to the segment for the first time).
I'm going to answer with a pseudo-algorithm, without any code. The way I see it, there are two cases in which we might return true, as per the image below:
Here in blue are your circles, the dashed line is the trajectory line and the red line is your given line.
We build a helper trajectory line between the centers of the two circles. If this trajectory line intersects the given line, return true. See this question on how to compute that intersection.
In the second case the first test has failed us, but it might just so happen that the circles nudged the line as they passed along the trajectory anyway. We will need the following construction:
From the trajectory we build normal lines to each point A and B. These lines are then chopped or extended into helper lines (Ha and Hb), so that their length from A and B is exactly the radius of the circle. Then we check if either of these helper lines intersects the trajectory line. If one does, return true.
Otherwise return false.
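For completeness, the check in the first case reduces to a standard segment-segment intersection test; here's a minimal sketch using the usual orientation (cross product) approach, ignoring the collinear-overlap corner case for brevity (the helper names are mine, not from the linked question):
// true if segment p1-p2 properly intersects segment p3-p4
static boolean segmentsIntersect(double x1, double y1, double x2, double y2,
                                 double x3, double y3, double x4, double y4) {
  double d1 = cross(x3, y3, x4, y4, x1, y1); // which side of p3-p4 is p1 on?
  double d2 = cross(x3, y3, x4, y4, x2, y2); // which side of p3-p4 is p2 on?
  double d3 = cross(x1, y1, x2, y2, x3, y3); // which side of p1-p2 is p3 on?
  double d4 = cross(x1, y1, x2, y2, x4, y4); // which side of p1-p2 is p4 on?
  return ((d1 > 0) != (d2 > 0)) && ((d3 > 0) != (d4 > 0));
}

// cross product (b - a) x (c - a); its sign tells which side of line a-b the point c lies on
static double cross(double ax, double ay, double bx, double by, double cx, double cy) {
  return (bx - ax) * (cy - ay) - (by - ay) * (cx - ax);
}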
Look here:
Line segment / Circle intersection
If the value you get under the square root of either the computation of x or y is negative, then the segment does not intersect. Aside from that, you can stop your computation after you have x and y (note: you may get two answers)
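To make that concrete, here's a minimal sketch of the check under my own parameterisation (not the exact formulas from the link): writing points on the segment as P(t) = A + t(B - A) and substituting into |P - C|^2 = r^2 gives a quadratic in t, and the value under the square root is its discriminant:
// true if segment A-B touches or crosses the circle's boundary (centre C, radius r); assumes A != B
static boolean segmentIntersectsCircle(double ax, double ay, double bx, double by,
                                       double cx, double cy, double r) {
  double dx = bx - ax, dy = by - ay;     // segment direction D = B - A
  double fx = ax - cx, fy = ay - cy;     // F = A - C
  double a = dx * dx + dy * dy;
  double b = 2 * (fx * dx + fy * dy);
  double c = fx * fx + fy * fy - r * r;
  double disc = b * b - 4 * a * c;       // the value under the square root
  if (disc < 0) return false;            // the infinite line misses the circle entirely
  double s = Math.sqrt(disc);
  double t1 = (-b - s) / (2 * a);        // the (up to) two solutions
  double t2 = (-b + s) / (2 * a);
  // the segment only covers t in [0, 1]; note that a segment fully inside the circle
  // never crosses the boundary, so it returns false here
  return (t1 >= 0 && t1 <= 1) || (t2 >= 0 && t2 <= 1);
}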
Update: I've revised my answer to very specifically address your problem. I give credit to Doswa for this solution, as I pretty much followed along and wrote it for C#. The basic strategy is that we are going to locate the closest point of your line segment to the center of the circle. Based on that, we'll look at the distance from that closest point, and if it is within the radius, locate the point along the direction to the closest point that lies right at the radius of the circle.
// I'll bet you already have one of these.
public class Vec : Tuple<double, double>
{
public Vec(double item1, double item2) : base(item1, item2) { }
public double Dot(Vec other)
{ return Item1*other.Item1 + Item2*other.Item2; }
public static Vec operator-(Vec first, Vec second)
{ return new Vec(first.Item1 - second.Item1, first.Item2 - second.Item2);}
public static Vec operator+(Vec first, Vec second)
{ return new Vec(first.Item1 + second.Item1, first.Item2 + second.Item2);}
public static Vec operator*(double first, Vec second)
{ return new Vec(first * second.Item1, first * second.Item2);}
public double Length() { return Math.Sqrt(Dot(this)); }
public Vec Normalize() { return (1 / Length()) * this; }
}
public bool IntersectCircle(Vec origin, Vec lineStart,
Vec lineEnd, Vec circle, double radius, out Vec circleWhenHit)
{
circleWhenHit = null;
// find the closest point on the line segment to the center of the circle
var line = lineEnd - lineStart;
var lineLength = line.Length();
var lineNorm = (1/lineLength)*line;
var segmentToCircle = circle - lineStart;
var closestPointOnSegment = segmentToCircle.Dot(line) / lineLength;
// Special cases where the closest point happens to be the end points
Vec closest;
if (closestPointOnSegment < 0) closest = lineStart;
else if (closestPointOnSegment > lineLength) closest = lineEnd;
else closest = lineStart + closestPointOnSegment*lineNorm;
// Find that distance. If it is less than the radius, then we
// are within the circle
var distanceFromClosest = circle - closest;
var distanceFromClosestLength = distanceFromClosest.Length();
if (distanceFromClosestLength > radius) return false;
// So find the distance that places the intersection point right at
// the radius. This is the center of the circle at the time of collision
// and is different than the result from Doswa
var offset = (radius - distanceFromClosestLength) *
((1/distanceFromClosestLength)*distanceFromClosest);
circleWhenHit = circle - offset;
return true;
}
Here is some Java that calculates the distance from a point to a line (this is not complete, but will give you the basic picture). The code comes from a class called
'Vector'. The assumption is that the vector object is initialized to the line vector. The method 'distance' accepts the point that the line vector starts at (called 'at' of course), and the point of interest. It calculates and returns the distance from that point to the line.
public class Vector
{
double x_ = 0;
double y_ = 0;
double magnitude_ = 1;
public Vector()
{
}
public Vector(double x,double y)
{
x_ = x;
y_ = y;
}
public Vector(Vector other)
{
x_ = other.x_;
y_ = other.y_;
}
public void add(Vector other)
{
x_ += other.x_;
y_ += other.y_;
}
public void scale(double val)
{
x_ *= val;
y_ *= val;
}
public double dot(Vector other)
{
return x_*other.x_+y_*other.y_;
}
public void cross(Vector other)
{
x_ = x_*other.y_ - y_*other.x_;
}
public void unit()
{
magnitude_ = Math.sqrt(x_*x_+y_*y_);
x_/=magnitude_;
y_/=magnitude_;
}
public double distance(Vector at,Vector point)
{
//
// Create a perpendicular vector
//
Vector perp = new Vector();
perp.perpendicular(this);
perp.unit();
Vector offset = new Vector(point.x_ - at.x_,point.y_ - at.y_);
double d = Math.abs(offset.dot(perp));
double m = magnitude();
double t = dot(offset)/(m*m);
if(t < 0)
{
// closest point is the segment start; offset is already point - at
d = offset.magnitude();
}
if(t > 1)
{
// closest point is the segment end (at + this), so remove the line vector from offset
offset.x_ -= x_;
offset.y_ -= y_;
d = offset.magnitude();
}
return d;
}
private void perpendicular(Vector other)
{
x_ = -other.y_;
y_ = other.x_;
}
public double magnitude()
{
magnitude_ = Math.sqrt(x_*x_+y_*y_);
return magnitude_;
}
}