Get location (x,y) of matched features in two images using Emgu - algorithm

I have two images taken by two cameras. I want to use the SURF algorithm (or any algorithm in Emgu) to get just the locations of the matched features in the two images, so I can estimate the real distance between the cameras and these features (objects). I found an example using the SURF algorithm in the Emgu examples, but it draws lines between matched features; I want to get x and y for the start and end of each line.
[Sample image: features matched by the SURF algorithm]
I tried to add some code to the SURF example's Draw method, but it does not work as expected:
long num_matches = matches.Size;
float lower = matches[0][0].Distance;
List<PointF> matched_points1= new List<PointF>();
List<PointF> matched_points2=new List<PointF>();
for (int i = 0; i < num_matches; i++)
{
if (matches[i][0].Distance < 0.095)
{
int idx1 = matches[i][0].TrainIdx;
int idx2 = matches[i][0].QueryIdx;
matched_points1.Add(observedKeyPoints[idx1].Point);
matched_points2.Add(observedKeyPoints[idx2].Point);
CvInvoke.Circle(result, new Point((int)observedKeyPoints[idx2].Point.X , (int)observedKeyPoints[idx2].Point.Y), 1, new MCvScalar(255, 0, 0));
CvInvoke.Circle(result, new Point((int)modelKeyPoints[idx1].Point.X + modelImage.Width, (int)modelKeyPoints[idx1].Point.Y), 1, new MCvScalar(255, 0, 0));
}
if (lower > matches[i][0].Distance)
lower = matches[i][0].Distance;
}

public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
{
Mat homography;
VectorOfKeyPoint modelKeyPoints;
VectorOfKeyPoint observedKeyPoints;
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
{
Mat mask;
FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,out mask, out homography);
//Draw the matched keypoints
Mat result = new Mat();
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
Point[] points = null;
if (homography != null)
{
//draw a rectangle along the projected model
Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
PointF[] pts = new PointF[]
{
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
new PointF(rect.Right, rect.Top),
new PointF(rect.Left, rect.Top)
};
pts = CvInvoke.PerspectiveTransform(pts, homography);
points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
}
return points;
}
}
Add the code below in the Draw function. TrainIdx indexes the model keypoints and QueryIdx indexes the observed keypoints, so the coordinates of each matched pair come from modelKeyPoints and observedKeyPoints:
for (int i = 0; i < matches.Size; i++)
{
    if (mask.GetData(i)[0] == 0) continue;
    foreach (MDMatch m in matches[i].ToArray())
    {
        // TrainIdx points into modelKeyPoints, QueryIdx into observedKeyPoints
        PointF modelPoint = modelKeyPoints[m.TrainIdx].Point;
        PointF observedPoint = observedKeyPoints[m.QueryIdx].Point;
        Console.WriteLine(string.Format("Model: {0}  Observed: {1}", modelPoint, observedPoint));
    }
    Console.WriteLine("-----------------------");
}

Related

Separate shapes as lines split the canvas + Select points to draw shapes from arrays

I want to detect the separate shapes created as random-generated lines split the canvas. I saved the line intersection points in separate arrays for x and y positions (same order), but I don't know how to connect the points that make up each of the resulting shapes.
1) Is there any way to detect nearby points to close a minimal possible shape, whether it be a triangle, rectangle, or polygon (e.g., by using beginShape and endShape)?
2) If 1) is too complicated, is there any method to select 3 or more random points from an array?
Here's a sample image that has 4 lines splitting the canvas with their intersection points marked in red. I also saved the top and bottom points (marked in black) of each random-generated line, plus the four corners of the canvas in the same arrays for x and y positions separately (px, py).
Multiple lines split the canvas.
How to get shapes split by lines in Processing?
I was able to get all the intersection points, but I'm having a problem connecting them into separate shapes. Here's the Processing code that I am working on:
//Run in Processing.
//Press r to refresh.
//Top and bottom points are added to px and py when refreshed (filled in black).
//Intersection points are added to px and py when detected (filled in red).
int l = 4; //set number of lines
float[] r1 = new float[l];
float[] r2 = new float[l];
float[] px = {}; //array to save x positions of all possible points
float[] py = {}; //array to save y positions of all possible points
boolean added = false;
void setup(){
size(800, 800);
background(255);
refresh();
}
void draw(){
background(255);
stroke(0, 150, 255, 150);
strokeWeight(1);
for(int i=0; i < r1.length; i++){
for(int j=0; j < r1.length; j++){
if(i>j){
boolean hit = lineLine(r1[i], 0, r2[i], height, r1[j], 0, r2[j], height);
if (hit) stroke(255, 150, 0, 150);
else stroke(0, 150, 255, 150);
}
line(r1[i], 0, r2[i], height);
}
}
added = true;
print(px.length);
}
//source: http://jeffreythompson.org/collision-detection/line-line.php
boolean lineLine(float x1, float y1, float x2, float y2, float x3, float y3, float x4, float y4) {
// calculate the distance to intersection point
float uA = ((x4-x3)*(y1-y3) - (y4-y3)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1));
float uB = ((x2-x1)*(y1-y3) - (y2-y1)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1));
// if uA and uB are between 0-1, lines are colliding
if (uA >= 0 && uA <= 1 && uB >= 0 && uB <= 1) {
// optionally, draw a circle where the lines meet
float intersectionX = x1 + (uA * (x2-x1));
float intersectionY = y1 + (uA * (y2-y1));
fill(255,0,0);
noStroke();
ellipse(intersectionX,intersectionY, 20,20);
if(added==false){
px = append(px, intersectionX);
py = append(py, intersectionY);
}
return true;
}
return false;
}
void refresh(){
added = false;
px = new float[0];
py = new float[0];
r1 = new float[l];
r2 = new float[l];
px = append(px, 0);
py = append(py, 0);
px = append(px, 0);
py = append(py, height);
px = append(px, width);
py = append(py, 0);
px = append(px, width);
py = append(py, height);
for(int i=0; i< r1.length; i++){
r1[i] = random(800);
}
for(int i=0; i< r2.length; i++){
r2[i] = random(800);
}
for(int i=0; i < r1.length; i++){
stroke(0);
line(r1[i], 0, r2[i], height);
px = append(px, r1[i]);
py = append(py, 0);
px = append(px, r2[i]);
py = append(py, height);
}
}
void keyReleased() {
if (key == 'r') refresh();
}
If you want to draw a shape made of the intersection points only, you're on the right track with beginShape()/endShape().
Currently it looks like you're placing all the points in px, py: the intersection points and also the points defining the lines used to compute the intersections in the first place.
You might want to separate the two: for example, a couple of arrays for the points defining lines only and another pair of x,y arrays for the intersection points only. You'd then only need to iterate through the intersection coordinates to place vertex(x, y) calls between beginShape()/endShape(). Here's a modified version of your code to illustrate the idea:
//Run in Processing.
//Press r to refresh.
//Top and bottom points are added to px and py when refreshed (filled in black).
//Intersection points are added to px and py when detected (filled in red).
int l = 4; //set number of lines
float[] r1 = new float[l];
float[] r2 = new float[l];
float[] px = {}; //array to save x positions of all possible points
float[] py = {}; //array to save y positions of all possible points
float[] ipx = {}; // array to save x for intersections only
float[] ipy = {}; // array to save y for intersections only
boolean added = false;
void setup(){
size(800, 800);
background(255);
refresh();
}
void draw(){
background(255);
stroke(0, 150, 255, 150);
strokeWeight(1);
for(int i=0; i < r1.length; i++){
for(int j=0; j < r1.length; j++){
if(i>j){
boolean hit = lineLine(r1[i], 0, r2[i], height, r1[j], 0, r2[j], height);
if (hit) stroke(255, 150, 0, 150);
else stroke(0, 150, 255, 150);
}
line(r1[i], 0, r2[i], height);
}
}
added = true;
// draw intersections
beginShape();
for(int i = 0 ; i < ipx.length; i++){
vertex(ipx[i], ipy[i]);
}
endShape();
//print(px.length);
//println(px.length, py.length);
}
//source: http://jeffreythompson.org/collision-detection/line-line.php
boolean lineLine(float x1, float y1, float x2, float y2, float x3, float y3, float x4, float y4) {
// calculate the distance to intersection point
float uA = ((x4-x3)*(y1-y3) - (y4-y3)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1));
float uB = ((x2-x1)*(y1-y3) - (y2-y1)*(x1-x3)) / ((y4-y3)*(x2-x1) - (x4-x3)*(y2-y1));
// if uA and uB are between 0-1, lines are colliding
if (uA >= 0 && uA <= 1 && uB >= 0 && uB <= 1) {
// optionally, draw a circle where the lines meet
float intersectionX = x1 + (uA * (x2-x1));
float intersectionY = y1 + (uA * (y2-y1));
fill(255,0,0);
noStroke();
ellipse(intersectionX,intersectionY, 20,20);
if(added==false){
px = append(px, intersectionX);
py = append(py, intersectionY);
// store intersections
ipx = append(ipx, intersectionX);
ipy = append(ipy, intersectionY);
}
return true;
}
return false;
}
void refresh(){
added = false;
px = new float[0];
py = new float[0];
ipx = new float[0];
ipy = new float[0];
r1 = new float[l];
r2 = new float[l];
px = append(px, 0);
py = append(py, 0);
px = append(px, 0);
py = append(py, height);
px = append(px, width);
py = append(py, 0);
px = append(px, width);
py = append(py, height);
for(int i=0; i< r1.length; i++){
r1[i] = random(800);
}
for(int i=0; i< r2.length; i++){
r2[i] = random(800);
}
for(int i=0; i < r1.length; i++){
stroke(0);
line(r1[i], 0, r2[i], height);
px = append(px, r1[i]);
py = append(py, 0);
px = append(px, r2[i]);
py = append(py, height);
}
}
void keyReleased() {
if (key == 'r') refresh();
}
Bear in mind this simply draws the points in the order in which the intersections were computed. On a good day you'll get something like this:
It doesn't exclude the possibility of polygons with the wrong vertex order (winding):
and you might get concave polygons too.
If you only need the outer 'shell' of these intersection points, you might need something like a convex hull algorithm.
One option to at least visually split shapes might be to use beginShape(TRIANGLES); with endShape(CLOSE); which should iterate through points and draw a triangle for every coordinate triplet. However, given random points and numbers of intersections you might end up with a missing triangle or two (e.g. 6 points = 2 triangles, 7 points = 2 triangles with 1 point left over).
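One way to get a consistent vertex order (and, for convex arrangements, effectively the outer shell) is to sort the intersection points by angle around their centroid before drawing them. This is only a minimal sketch, assuming the ipx/ipy arrays from the modified code above; the helper function name is mine:
import java.util.Arrays;
import java.util.Comparator;

PVector[] sortByAngleAroundCentroid(float[] xs, float[] ys) {
  int n = xs.length;
  PVector[] pts = new PVector[n];
  if (n == 0) return pts;
  float cx = 0, cy = 0;
  for (int i = 0; i < n; i++) {
    pts[i] = new PVector(xs[i], ys[i]);
    cx += xs[i];
    cy += ys[i];
  }
  final float centroidX = cx / n;
  final float centroidY = cy / n;
  // order the points by the angle each one makes around the centroid
  Arrays.sort(pts, new Comparator<PVector>() {
    public int compare(PVector a, PVector b) {
      float angleA = atan2(a.y - centroidY, a.x - centroidX);
      float angleB = atan2(b.y - centroidY, b.x - centroidX);
      return Float.compare(angleA, angleB);
    }
  });
  return pts;
}
Then, inside draw(), instead of looping over ipx/ipy directly:
PVector[] ordered = sortByAngleAroundCentroid(ipx, ipy);
beginShape();
for (PVector p : ordered) vertex(p.x, p.y);
endShape(CLOSE);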
The only other note I have is around syntax: arrays are ok to get started with but you might want to look into ArrayList and PVector. This would allow you to use a single dynamic array of PVector instances which have x, y properties.
Update
Overall the code can be simplified. If we take out the line intersection related code we can get away with something like:
int l = 4; //set number of random lines
float[] r1 = new float[l]; // random x top
float[] r2 = new float[l]; // random x bottom
void setup() {
size(800, 800);
strokeWeight(3);
stroke(0, 150, 255, 150);
refresh();
}
void draw() {
background(255);
// random lines
for (int i=0; i < r1.length; i++) {
line(r1[i], 0, r2[i], height);
}
// borders
line(0, 0, width, 0);
line(width, 0, width - 1, height - 1);
line(0, height - 1, width - 1, height - 1);
line(0, 0, 0, height - 1);
}
void refresh() {
r1 = new float[l];
r2 = new float[l];
for (int i=0; i< r1.length; i++) {
r1[i] = random(800);
r2[i] = random(800);
}
}
void keyReleased() {
if (key == 'r') refresh();
}
If we were to use a basic Line class and make use of PVector and ArrayList we could rewrite the above as:
int numRandomLines = 4;
ArrayList<PVector> points = new ArrayList<PVector>();
void setup() {
size(800, 800);
stroke(0, 150, 255, 150);
strokeWeight(3);
refresh();
}
void refresh(){
// remove previous points
points.clear();
//add borders
points.add(new PVector(0, 0)); points.add(new PVector(width, 0));
points.add(new PVector(width, 0));points.add(new PVector(width - 1, height - 1));
points.add(new PVector(0, height - 1));points.add(new PVector(width - 1, height - 1));
points.add(new PVector(0, 0)); points.add(new PVector(0, height - 1));
// add random lines
for (int i=0; i< numRandomLines; i++) {
points.add(new PVector(random(800), 0)); points.add(new PVector(random(800), height));
}
}
void draw(){
background(255);
beginShape(LINES);
for(PVector point : points) vertex(point.x, point.y);
endShape();
}
void keyReleased() {
if (key == 'r') refresh();
}
and grouping a pair of points (PVector) into a Line class:
int numRandomLines = 4;
ArrayList<Line> lines = new ArrayList<Line>();
void setup() {
size(800, 800);
stroke(0, 150, 255, 150);
strokeWeight(3);
refresh();
}
void refresh(){
// remove previous points
lines.clear();
//add borders
lines.add(new Line(new PVector(0, 0), new PVector(width, 0)));
lines.add(new Line(new PVector(width, 0), new PVector(width - 1, height - 1)));
lines.add(new Line(new PVector(0, height - 1), new PVector(width - 1, height - 1)));
lines.add(new Line(new PVector(0, 0), new PVector(0, height - 1)));
// add random lines
for (int i=0; i< numRandomLines; i++) {
lines.add(new Line(new PVector(random(800), 0), new PVector(random(800), height)));
}
}
void draw(){
background(255);
for(Line line : lines) line.draw();
}
void keyReleased() {
if (key == 'r') refresh();
}
class Line{
PVector start;
PVector end;
Line(PVector start, PVector end){
this.start = start;
this.end = end;
}
void draw(){
line(start.x, start.y, end.x, end.y);
}
}
At this stage, to get the individual shapes as your diagram describes, we could cheat and use a computer vision library like OpenCV. This is of course overkill (we'd get() a PImage copy of the drawing and convert that to an OpenCV image), then simply use findContours() to get each shape/contour.
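For completeness, here is a rough sketch of that "cheat", assuming Greg Borenstein's OpenCV for Processing library (gab.opencv) is installed. The key binding, the threshold value and the overall flow are just illustrative assumptions; depending on your stroke colours you may need to invert or tweak the threshold.
import gab.opencv.*;

void keyPressed() {
  if (key == 'c') {
    PImage snapshot = get();                    // copy of the current drawing
    OpenCV opencv = new OpenCV(this, snapshot); // wrap it in an OpenCV image
    opencv.gray();
    opencv.threshold(128);                      // dark lines -> black, regions -> white
    ArrayList<Contour> contours = opencv.findContours();
    println("found " + contours.size() + " contours");
    noFill();
    stroke(255, 0, 0);
    for (Contour contour : contours) {
      contour.draw();                           // outline each detected region
    }
  }
}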
Going back to the original approach, the line to line intersection function could be integrated into the Line class:
int numRandomLines = 4;
ArrayList<Line> lines = new ArrayList<Line>();
ArrayList<PVector> intersections = new ArrayList<PVector>();
void setup() {
size(800, 800);
strokeWeight(3);
refresh();
}
void refresh(){
// remove previous points
lines.clear();
intersections.clear();
//add borders
lines.add(new Line(new PVector(0, 0), new PVector(width, 0)));
lines.add(new Line(new PVector(width, 0), new PVector(width - 1, height - 1)));
lines.add(new Line(new PVector(0, height - 1), new PVector(width - 1, height - 1)));
lines.add(new Line(new PVector(0, 0), new PVector(0, height - 1)));
// add random lines
for (int i=0; i< numRandomLines; i++) {
lines.add(new Line(new PVector(random(800), 0), new PVector(random(800), height)));
}
// compute intersections
int numLines = lines.size();
// when looping only check if lineA intersects lineB but not also if lineB intersects lineA (redundant)
for (int i = 0; i < numLines - 1; i++){
Line lineA = lines.get(i);
for (int j = i + 1; j < numLines; j++){
Line lineB = lines.get(j);
// check intersection
PVector intersection = lineA.intersect(lineB);
// if there is one, append the intersection point to the list
if(intersection != null){
intersections.add(intersection);
}
}
}
}
void draw(){
background(255);
stroke(0, 150, 255, 150);
// draw lines
for(Line line : lines) line.draw();
stroke(255, 0, 0, 150);
// draw intersections
for(PVector intersection : intersections) ellipse(intersection.x, intersection.y, 9, 9);
}
void keyReleased() {
if (key == 'r') refresh();
}
class Line{
PVector start;
PVector end;
Line(PVector start, PVector end){
this.start = start;
this.end = end;
}
void draw(){
line(start.x, start.y, end.x, end.y);
}
//source: http://jeffreythompson.org/collision-detection/line-line.php
//boolean lineLine(float this.start.x, float this.start.y, float this.end.x, float this.end.y,
//float other.start.x, float other.start.y, float other.end.x, float other.end.y) {
PVector intersect(Line other) {
// calculate the distance to intersection point
float uA = ((other.end.x-other.start.x)*(this.start.y-other.start.y) - (other.end.y-other.start.y)*(this.start.x-other.start.x)) / ((other.end.y-other.start.y)*(this.end.x-this.start.x) - (other.end.x-other.start.x)*(this.end.y-this.start.y));
float uB = ((this.end.x-this.start.x)*(this.start.y-other.start.y) - (this.end.y-this.start.y)*(this.start.x-other.start.x)) / ((other.end.y-other.start.y)*(this.end.x-this.start.x) - (other.end.x-other.start.x)*(this.end.y-this.start.y));
// if uA and uB are between 0-1, lines are colliding
if (uA >= 0 && uA <= 1 && uB >= 0 && uB <= 1) {
// optionally, draw a circle where the lines meet
float intersectionX = this.start.x + (uA * (this.end.x-this.start.x));
float intersectionY = this.start.y + (uA * (this.end.y-this.start.y));
return new PVector(intersectionX, intersectionY);
}
return null;
}
}
The next step would be a more complex algorithm to sort the points based on x, y position (e.g. top to bottom, left to right), iterate through the points comparing each one to the rest by distance and angle, and try to work out whether consecutive points with minimal distance and angle changes connect.
Having a quick look online, I can see such algorithms, for example:
Polygon Detection from a Set of Lines
The Bentley-Ottmann algorithm (one of the algorithms mentioned in the paper above) is actually implemented in CGAL. (While there are CGAL Java bindings, building these and either interfacing with them or making a wrapper for Processing isn't trivial.)
I can see your code isn't JavaScript, but since you didn't specify a language I assume you just want a method and can convert it to your language.
The way I handled this was to assign each line a line number. If I can identify 2 adjacent points on one line, then I will know whether the third point exists by checking whether there is a point at the crossing of the two lines they are not sharing.
Example:
There are 3 lines (lines 1, 2, 3).
I have an intersection point between lines 3 & 1. Now I walk down line 3 for an adjacent point. I find one, and its intersection is 3 & 2. The only way I could have a triangle is if lines 1 & 2 cross somewhere, so we can programmatically check that.
Keep in mind that I never actually use any angles for this. I do calculate them in the functions but decided not to use them, as I went with the method explained above. I have colored the triangles using an alpha value of 0.1 so you can see where there is overlap.
This only checks for triangles.
let canvas = document.getElementById("canvas");
let ctx = canvas.getContext("2d");
canvas.width = 400;
canvas.height = 400;
let lines = []; //holds each line
let points = []; //all intersection point are pushed here [{x: num, y: num}, {x: num, y: num},...]
let sortedPts = []; //all points sorted by first number are pushed here in a 2d array.
let lineNum = 15;
class Lines {
constructor(num) {
this.x = Math.round(Math.random() * canvas.width);
this.x2 = Math.round(Math.random() * canvas.width);
this.pt1 = {
x: this.x,
y: 0
};
this.pt2 = {
x: this.x2,
y: canvas.height
};
this.num = num;
this.rads = Math.atan2(this.pt2.y - this.pt1.y, this.pt2.x - this.pt1.x);
this.angle = this.rads * (180 / Math.PI);
}
draw() {
ctx.beginPath();
ctx.moveTo(this.pt1.x, this.pt1.y);
ctx.lineTo(this.pt2.x, this.pt2.y);
ctx.stroke();
}
}
//creates the lines. I also use this function to prepare the 2d array by pushing an empty array for each line into sortedPts.
function createLines() {
for (let i = 0; i < lineNum; i++) {
lines.push(new Lines(i + 1));
sortedPts.push([])
}
}
createLines();
//Visually draws lines on screen
function drawLines() {
for (let i = 0; i < lines.length; i++) {
lines[i].draw();
}
}
drawLines();
//intersecting formula
function lineSegmentsIntersect(line1, line2) {
let a_dx = line1.pt2.x - line1.pt1.x;
let a_dy = line1.pt2.y - line1.pt1.y;
let b_dx = line2.pt2.x - line2.pt1.x;
let b_dy = line2.pt2.y - line2.pt1.y;
let s =
(-a_dy * (line1.pt1.x - line2.pt1.x) + a_dx * (line1.pt1.y - line2.pt1.y)) /
(-b_dx * a_dy + a_dx * b_dy);
let t =
(+b_dx * (line1.pt1.y - line2.pt1.y) - b_dy * (line1.pt1.x - line2.pt1.x)) /
(-b_dx * a_dy + a_dx * b_dy);
if (s >= 0 && s <= 1 && t >= 0 && t <= 1) {
//this is where we create our array but we also add the line number of where each point intersects. I also add the angle but have not used it throughout the rest of this...yet.
points.push({
x: Math.round(line1.pt1.x + t * (line1.pt2.x - line1.pt1.x)),
y: Math.round(line1.pt1.y + t * (line1.pt2.y - line1.pt1.y)),
num: {
first: line1.num,
second: line2.num
},
angle: {
a1: line1.angle,
a2: line2.angle
}
});
}
}
//just checks each line against the others by passing to lineSegmentsIntersect() function
function callIntersect() {
for (let i = 0; i < lines.length; i++) {
for (let j = i + 1; j < lines.length; j++) {
lineSegmentsIntersect(lines[i], lines[j]);
}
}
}
callIntersect();
function drawPoints() {
//just draws the black points for reference
for (let i = 0; i < points.length; i++) {
ctx.beginPath();
ctx.arc(points[i].x, points[i].y, 2, 0, Math.PI * 2);
ctx.fill();
}
}
drawPoints();
function createSortedArray() {
//Now we take the points array and sort the points by the first number to make using i and j below possible
points.sort((a, b) => a.num.first - b.num.first)
//We push each group of points into an array inside sortedPts creating the 2d array
for (let i = 0; i < lineNum; i++) {
for (let j = 0; j < points.length; j++) {
if (points[j].num.first == (i + 1)) {
sortedPts[i].push(points[j]);
}
}
}
//now sort the 2d arrays by y value. This allows our next check to go in order from point to point per line.
sortedPts.forEach(arr => arr.sort((a, b) => a.y - b.y));
fillTriangles();
}
createSortedArray();
/*
The last step iterates through each point in the original points array
and checks whether either the first or second number matches the second
number of a point in our sortedPts array AND whether the first or second number
matches the next point in the sortedPts array. If so then we must have a
triangle.
Quick breakdown: if we have 3 lines (lines 1, 2, 3) and I have a point on lines
2 & 3, and I also have another point on lines 2 & 1, then in order to have a triangle
the last point must be on lines 1 & 3.
That's all this is doing.
*/
function fillTriangles() {
//iterate through each array inside sortedPts array
for (let i = 0; i < sortedPts.length; i++) {
//iterate through all points inside each array of points inside the sortedPts array
for (let j = 0; j < sortedPts[i].length - 1; j++) {
//iterate over the original points and compare
for (let k = 0; k < points.length; k++) {
if (
(points[k].num.first == sortedPts[i][j].num.second ||
points[k].num.second == sortedPts[i][j].num.second) &&
(points[k].num.first == sortedPts[i][j + 1].num.second ||
points[k].num.second == sortedPts[i][j + 1].num.second)
) {
ctx.fillStyle = "rgba(200, 100, 0, 0.1)";
ctx.beginPath();
ctx.moveTo(sortedPts[i][j].x, sortedPts[i][j].y);
ctx.lineTo(sortedPts[i][j + 1].x, sortedPts[i][j + 1].y);
ctx.lineTo(points[k].x, points[k].y);
ctx.closePath();
ctx.fill();
}
}
}
}
}
<canvas id="canvas"></canvas>
I also think there's a good way to do this with the angles of the crossing lines and am working on something to do it that way. I am hoping I can get it to determine the type of shape based on the number of sides but I don't see that being a quick project.
Your goal is not clear to me. You can connect any arbitrary set of points in any arbitrary order and call it a shape. What are your criteria?
If you want to find the shortest path that connects all the points of a given subset, I suggest looking into the travelling salesman problem.

Processing(Java) to p5js - glitch effect

I'm new to p5.js and I want to create a noise effect on an image with it. I created a working sketch with Java in Processing, but when I port it to p5.js something is wrong.
The image shows up on the HTML page, but the pixel loc stuff doesn't work.
Can anyone help me?
This is my sketch:
function setup()
{
createCanvas(400,300);
img = loadImage("data/monja.jpg");
//surface.setResizable(true);
//surface.setSize(img.width, img.height);
background(0);
}
function draw()
{
loadPixels();
img.loadPixels();
for (let x = 0; x < img.width; x++)
{
for (let y = 0; y < img.height; y++)
{
let loc = x+y*width;
let c = brightness(img.pixels[loc]);
let r = red(img.pixels[loc]);
let g = green(img.pixels[loc]);
let b = blue(img.pixels[loc]);
if (c < 70){
img.pixels[loc]= color(random(255));
}
else {
img.pixels[loc] = color(r, g, b);
}
}
}
updatePixels();
//image(img, 0, 0);
}
To modify the color of certain pixels in an image, here are some things to keep in mind.
When we call loadPixels the pixels array is an array of numbers.
How many numbers each pixel gets is determined by the pixel density.
If the pixel density is 1, then each pixel gets 4 numbers in the array, each with a value from 0 to 255.
The first number determines the amount of red in the pixel, the second green, the third blue, and the fourth is the alpha value for transparency.
Here is an example that changes pixels with a high red value to a random grayscale value to create a glitch effect.
var img;
var c;
function preload(){
img = loadImage("https://i.imgur.com/rpQdRoY.jpeg");
}
function setup()
{
createCanvas(img.width, img.height);
background(0);
let d = pixelDensity();
img.loadPixels();
for (let i = 0; i < 4 * (img.width*d * img.height*d); i += 4) {
if (img.pixels[i] > 150 && img.pixels[i+1] <100&&img.pixels[i+2] < 100){
let rColor = random(255);
img.pixels[i] = rColor;
img.pixels[i + 1] = rColor;
img.pixels[i + 2] = rColor;
img.pixels[i + 3] = rColor;
}
}
img.updatePixels();
}
function draw() {
image(img,0,0);
}
<script src="https://cdn.jsdelivr.net/npm/p5@1.3.0/lib/p5.js"></script>

Real-time object recognition in complex background

I'm trying to build real-time advertisement billboard detection for road scenes using an Android smartphone. The goal is to crop the area of the billboard object (region of interest) and save it to a database.
For preprocessing, I used grayscaling and Canny edge detection (Otsu thresholding is used to set the upper and lower thresholds). Then I used a contour-based method to detect whether the object is rectangular by checking its points. I use Java OpenCV in Android Studio for the implementation. When I run the program, it only detects rectangular objects against a plain background, and only if the rectangle has high contrast with the background. Currently it can only detect rectangles with 90-degree corners, and it fails to detect objects with rounded-rectangle shapes. Furthermore, my program fails completely to detect rectangular objects in a more complex background, like a road scene where the object I'm trying to detect has a similar color to the background (low contrast) or where there are many occlusions such as trees, traffic lights, and cables, which cause the detection to fail.
This is the code I use for edge detection
Mat destination = new Mat(oriMat.rows(), oriMat.cols(), oriMat.type());
Imgproc.cvtColor(oriMat, destination, Imgproc.COLOR_RGBA2GRAY);
Imgproc.GaussianBlur(destination, destination, new Size(3,3), 0, 0, Imgproc.BORDER_DEFAULT);
double otsuThresholdValue = Imgproc.threshold(destination, destination, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
double lowerThreshold = otsuThresholdValue*0.5;
double upperThreshold = otsuThresholdValue;
Mat canny = new Mat();
Imgproc.Canny(destination, canny, lowerThreshold, upperThreshold);
Mat abs = new Mat();
Core.convertScaleAbs(canny, abs);
Mat result = new Mat();
Core.addWeighted(abs, 0.5, abs, 0.5, 0, result);
Here is the code I use for contour-based detection
ArrayList<MatOfPoint> contours = new ArrayList<>();
// find contours and store them all as a list
Imgproc.findContours(matData.monoChrome.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
final int width = matData.monoChrome.rows();
final int height = matData.monoChrome.cols();
int matArea = width * height;
for (int i = 0; i < contours.size(); i++) {
double contoursArea = Imgproc.contourArea(contours.get(i));
MatOfPoint2f approx = new MatOfPoint2f();
MatOfPoint2f contour = new MatOfPoint2f(contours.get(i).toArray());
double epsilon = Imgproc.arcLength(contour, true) * 0.1;
// Imgproc.minAreaRect(contour);
// approximate contour with accuracy proportional to the contour perimeter
Imgproc.approxPolyDP(contour, approx, epsilon, true);
if (Math.abs(contoursArea) < matArea * 0.01 ||
!Imgproc.isContourConvex(new MatOfPoint(approx.toArray()))) {
continue;
}
Imgproc.drawContours(matData.resizeMat, contours, i, new Scalar(0, 255, 0));
List<Point> points = approx.toList();
int pointCount = points.size();
LinkedList<Double> cos = new LinkedList<>();
for (int j = 2; j < pointCount + 1; j++) {
cos.addLast(angle(points.get(j % pointCount), points.get(j - 2), points.get(j - 1)));
}
Collections.sort(cos, (lhs, rhs) -> lhs.intValue() - rhs.intValue());
double mincos = cos.getFirst();
double maxcos = cos.getLast();
if (points.size() == 4 && mincos >= -0.3 && maxcos <= 0.5) {
for (int j = 0; j < points.size(); j++) {
Core.circle(matData.resizeMat, points.get(j), 6, new Scalar(255, 0, 0), 6);
}
matData.points = points;
break;
}
}
Is there any method I can use to recognize advertisement billboards in road scenes?
I would appreciate any answers and ideas. Thank you!

Saving point cloud data into a file

I have the following code working on Processing 2, with Kinect:
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// Kinect Library object
Kinect kinect;
// Angle for rotation
float a = 0;
// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];
void setup() {
// Rendering in P3D
size(1200, 800, P3D);
kinect = new Kinect(this);
kinect.initDepth();
// Lookup table for all possible depth values (0 - 2047)
for (int i = 0; i < depthLookUp.length; i++) {
depthLookUp[i] = rawDepthToMeters(i);
}
}
void draw() {
background(0);
// Get the raw depth as array of integers
int[] depth = kinect.getRawDepth();
// We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
int skip = 4;
// Translate and rotate
translate(width/2, height/2, -50);
rotateY(a);
for (int x = 0; x < kinect.width; x += skip) {
for (int y = 0; y < kinect.height; y += skip) {
int offset = x + y*kinect.width;
// Convert kinect data to world xyz coordinate
int rawDepth = depth[offset];
PVector v = depthToWorld(x, y, rawDepth);
stroke(255);
pushMatrix();
// Scale up by 200
float factor = 200;
translate(v.x*factor, v.y*factor, factor-v.z*factor);
// Draw a point
point(0, 0);
popMatrix();
}
}
// Rotate
a += 0.015f;
}
// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
if (depthValue < 2047) {
return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
}
return 0.0f;
}
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
PVector result = new PVector();
double depth = depthLookUp[depthValue];//rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}
I would like to save the point cloud data to a file, so I can import it later into another program, such as Cinema 4D.
How do I create this file?
Processing has several functions for saving data to file, the simplest of which is saveStrings().
To use the saveStrings() function, you would simply store whatever you wanted to save into a String array, and then pass that into the function along with a filename.
You can then use the loadStrings() function to read the data from a file back into a String array.
How you format the data into a String is entirely up to you. You might store it as comma separated values.
More info can be found in the reference.
If you want to store the data into a file that another program can read, you have to first look up exactly what format that file needs to be in. I'd start by opening up some example files in a basic text editor.
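As a minimal sketch of that idea (assuming the kinect object, the skip step and the depthToWorld() function from the question, and a made-up file name), pressing 's' could write one comma separated x,y,z line per point. Whether Cinema 4D reads such a file directly or needs a quick conversion to its preferred point format is something to check on the Cinema 4D side.
void keyPressed() {
  if (key == 's') savePointCloud();
}

void savePointCloud() {
  int[] depth = kinect.getRawDepth();
  int skip = 4;
  ArrayList<String> rows = new ArrayList<String>();
  for (int x = 0; x < kinect.width; x += skip) {
    for (int y = 0; y < kinect.height; y += skip) {
      int rawDepth = depth[x + y * kinect.width];
      PVector v = depthToWorld(x, y, rawDepth);
      // one "x,y,z" line per point
      rows.add(v.x + "," + v.y + "," + v.z);
    }
  }
  // saveStrings() takes a String[]; the file is written into the sketch folder
  saveStrings("pointcloud.csv", rows.toArray(new String[0]));
  println("saved " + rows.size() + " points");
}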

Problems rendering a Polar Zonohedron in Processing

I've recently been looking into zonohedra, and Rob Bell made beautiful ones. I had a play with the free Polar Zonohedron SketchUp plugin and thought about playing with the geometry using Processing. So far I've opened up the plugin/Ruby script and tried to port it directly, but I am not experienced with Ruby and have been using the SketchUp Ruby API reference.
The geometry part of the code is mostly in the polar_zonohedron function:
def polar_zonohedron #frequency, pitch = atan(sqrt(2)/2), len = 1.0 # frequency,pitch,length
mo = Sketchup.active_model
mo.start_operation "polar_zonohedron"
prompts = ["Frequency", "Pitch in radians", "Length"]
values = [8, "atan( sqrt(2)/2 )", 12.inch]
results = inputbox prompts, values, "Polar Zonohedron"
return if not results # This means that the user canceld the operation
ents = mo.active_entities
grp = ents.add_group
ents = grp.entities
grp.frequency = results[0]
grp.pitch = eval( results[1] )
grp.length = results[2]
pts=[]
#we begin by setting pts[0] to the origin
pts[0] = Geom::Point3d.new(0,0,0)
vector = Geom::Vector3d.new(cos(grp.pitch),0,sin(grp.pitch) ) #tilt pitch vector up the xz plane
vector.length = grp.length
#Using the origin as the initial generator we iterate thru each zone of the zonohedron
#our first task is to define the four points of the base rhomb for this zone
#at the end the pts[3] becomes our new origin for the rhomb of the next zone
1.upto(grp.frequency-1){ |i|
p_rotate = Geom::Transformation.rotation( pts[0] , Geom::Vector3d.new(0,0,1), i*2*PI/grp.frequency )
#obtain the other three points of the rhomb face
pts[1] = pts[0].transform vector
pts[3] = pts[1].transform( p_rotate )
pts[2] = pts[3].transform( vector )
#we now have the 4 points which make this zone's base rhomb
#so we rotate around the origin frequency times making a star pattern of faces
0.upto(grp.frequency-1){ |j|
f_rotate = Geom::Transformation.rotation( Geom::Point3d.new(0,0,0) , Geom::Vector3d.new(0,0,1), j*2*PI/grp.frequency )
ents.add_face( pts.collect{|p| p.transform(f_rotate)} )
}
#set the origin for the rhomb of the next zone
pts[0] = pts[3]
}
mo.commit_operation
end
I've understood the loops but am slightly confused by transforms:
pts[1] = pts[0].transform vector
pts[3] = pts[1].transform( p_rotate )
pts[2] = pts[3].transform( vector )
As far as I can tell pts[1] is the vector addition of pts[0] and vector,
and pts[3] is pts[1] multiplied by the p_rotate rotation matrix. Would pts[2] also be an addition (between pts[3] and vector)?
Here's what my attempt looks like so far:
//a port attempt of Rob Bell's polar_zonohedron.rb script - http://zomebuilder.com/
int frequency = 3;
float pitch = atan(sqrt(2)/2);
float length = 24;
ArrayList<Face> faces = new ArrayList<Face>();
void setup(){
size(400,400,P3D);
strokeWeight(3);
setupZome();
}
void setupZome(){
faces.clear();
PVector[] pts = new PVector[4];
pts[0] = new PVector();
PVector vector = new PVector(cos(pitch),0,sin(pitch));
vector.mult(length);
for(int i = 1 ; i < frequency; i++){
PMatrix3D p_rotate = new PMatrix3D();
p_rotate.rotate(i * TWO_PI / frequency, 0,0,1);
//PVector v = new PVector();
//p_rotate.mult(pts[0],v);
//pts[0] = v;
pts[1] = PVector.add(pts[0],vector);
pts[3] = new PVector();
p_rotate.mult(pts[1],pts[3]);
pts[2] = PVector.add(pts[3],vector);
for(int j = 0; j < frequency; j++){
PMatrix3D f_rotate = new PMatrix3D();
f_rotate.rotate(j*2*PI/frequency , 0,0,1);
Face f = new Face();
for(PVector pt : pts){
PVector p = new PVector();
f_rotate.mult(pt,p);
f.add(p.get());
}
faces.add(f);
}
pts[0] = pts[3];
}
}
void draw(){
background(255);
lights();
translate(width * .5, height * .5,0);
rotateY(map(mouseX,0,width,-PI,PI));
rotateX(map(mouseY,0,height,-PI,PI));
drawAxes(100);
pushMatrix();
translate(0,0,-frequency * length * .25);
for(Face f : faces){
beginShape(mousePressed ? QUADS : POINTS);
for(PVector p : f.pts) vertex(p.x,p.y,p.z);
endShape();
}
popMatrix();
}
void keyPressed(){
if(keyCode == UP && frequency < 32) frequency++;
if(keyCode == DOWN && frequency > 2) frequency--;
setupZome();
}
void drawAxes(int size){
stroke(192,0,0);
line(0,0,0,size,0,0);
stroke(0,192,0);
line(0,0,0,0,size,0);
stroke(0,0,192);
line(0,0,0,0,0,size);
}
class Face{
ArrayList<PVector> pts = new ArrayList<PVector>();
Face(){}
void add(PVector p){
if(pts.size() <= 4) pts.add(p);
}
}
I feel I'm close, but I'm getting the loop conditionals and vertex indices wrong.
Any tips on how to fix this?
I was very close, but wasn't paying attention to all the details.
Turns out I get the correct mesh if I don't increment the rotation on p_rotate:
p_rotate.rotate(TWO_PI / frequency, 0,0,1);
instead of
p_rotate.rotate(i * TWO_PI / frequency, 0,0,1);
Here is the full code listing:
//a port attempt of Rob Bell's polar_zonohedron.rb script - http://zomebuilder.com/
int frequency = 3;
float pitch = atan(sqrt(2)/2);
float length = 24;
ArrayList<Face> faces = new ArrayList<Face>();
void setup(){
size(400,400,P3D);
strokeWeight(3);
setupZome();
}
void setupZome(){
faces.clear();
PVector[] pts = new PVector[4];
pts[0] = new PVector();
PVector vector = new PVector(cos(pitch),0,sin(pitch));
vector.mult(length);
for(int i = 1 ; i < frequency-1; i++){
PMatrix3D p_rotate = new PMatrix3D();
p_rotate.rotate(TWO_PI / frequency, 0,0,1);
pts[1] = PVector.add(pts[0],vector);
pts[3] = new PVector();
p_rotate.mult(pts[1],pts[3]);
pts[2] = PVector.add(pts[3],vector);
for(int j = 0; j < frequency; j++){
PMatrix3D f_rotate = new PMatrix3D();
f_rotate.rotate(j*2*PI/frequency , 0,0,1);
Face f = new Face();
for(PVector pt : pts){
PVector p = new PVector();
f_rotate.mult(pt,p);
f.add(p.get());
}
faces.add(f);
}
pts[0] = pts[3];
}
}
void draw(){
background(255);
lights();
translate(width * .5, height * .5,0);
rotateY(map(mouseX,0,width,-PI,PI));
rotateX(map(mouseY,0,height,-PI,PI));
drawAxes(100);
pushMatrix();
translate(0,0,-frequency * length * .25);
for(Face f : faces){
beginShape(mousePressed ? QUADS : POINTS);
for(PVector p : f.pts) vertex(p.x,p.y,p.z);
endShape();
}
popMatrix();
}
void keyPressed(){
if(keyCode == UP && frequency < 32) frequency++;
if(keyCode == DOWN && frequency > 3) frequency--;
setupZome();
}
void drawAxes(int size){
stroke(192,0,0);
line(0,0,0,size,0,0);
stroke(0,192,0);
line(0,0,0,0,size,0);
stroke(0,0,192);
line(0,0,0,0,0,size);
}
class Face{
ArrayList<PVector> pts = new ArrayList<PVector>();
Face(){}
void add(PVector p){
if(pts.size() <= 4) pts.add(p);
}
}
And here are a couple of screenshots:
