Space utilization algorithm or mesh creation algorithm

We are facing a space utilization problem, though I am not sure what name to give it.
Basically it is a mesh problem.
I have tried to explain the problem using an image.
The problem statement is roughly as follows.
The box with the diagonal line is an item which has to be distributed in the best proportion so that it fits across all the available containers.
Containers are shown in different colors.
All containers are rectangular.
Each container can be placed either in portrait or in landscape orientation.
Both the containers and the item are measured in width and height; for the program these are pixels.
Based on the comments of fellow members Spektre and Lasse V. Karlsen, here are some clarifications:
It's a 2D arrangement.
Yes, we can rearrange the containers to achieve the best possible pattern.
No part of the item should lie in blank space; every part of the item has to belong to some container.
The item can overlap the containers, and height and width can vary from container to container. The item's height and width can also vary, but its shape will always remain a rectangle.
Preferably the item sticks to the top-left.
Yes, it is somewhat like the bin packing problem, but the difference is that in bin packing the items are many and the container is one, while in our case the item is one and the containers are many. So basically it is a distribution problem.
The idea is that we have the sizes of the containers and need to place the containers so that together they form that rectangle.
The program should give the following output:
the position of each container,
the part of the item each container holds,
and the arrangement pattern.

Here is something unsophisticated and suboptimal, but easy as a starting point.
Based on my comments, it exploits the common container size of 480 px.
Algorithm:
rotate all containers (bins) so their height is 480
sort the bins by width (after rotation) in descending order
we need ceil(1080/480) = 3 lines of 480 px bins
use the widest bins to fill the lines, never crossing 1920 px
they are sorted, so use the first ones
mark all used bins as used
continue with unused bins only
arrange the rest of the bins into lines (each goes to the shortest line):
take an unused bin
determine which line is currently the shortest
if the shortest line is already 1920 px wide or more, stop
otherwise move the bin to that line and mark it as used
C++ source code (ugly static allocation, but simple and no libraries used):
//---------------------------------------------------------------------------
struct _rec { int x,y,xs,ys,_used; };
_rec bin[128],item; int bins=0;
//---------------------------------------------------------------------------
void bin_generate(int n) // generate problem
{
int i;
Randomize();
item.x=0;
item.y=0;
item.xs=1920;
item.ys=1080;
for (bins=0;bins<n;bins++)
{
bin[bins].x=0;
bin[bins].y=0;
i=Random(2);
if (i==0) { bin[bins].xs=320; bin[bins].ys=480; }
else if (i==1) { bin[bins].xs=480; bin[bins].ys=800; }
else i=i;
// if (i==2) { bin[bins].xs=1920; bin[bins].ys=1080; }
}
}
//---------------------------------------------------------------------------
void bin_solve() // try to solve problem
{
int i,e,n,x,y,x0[128],y0[128],common=480;
_rec *r,*s,t;
// rotate bins to ys=480
for (r=bin,i=0;i<bins;i++,r++) if (r->xs==common) { x=r->xs; r->xs=r->ys; r->ys=x; }
// sort bins by xs desc
for (e=1;e;) for (e=0,r=bin,s=r+1,i=1;i<bins;i++,r++,s++) if (r->xs<s->xs) { t=*r; *r=*s; *s=t; e=1; }
// prepare lines needed ... n = number of common-height (480 px) lines needed to cover the item height
n=item.ys/common; if (item.ys%common) n++; item.x=0; item.y=0;
for (i=0;i<n;i++) { x0[i]=0; y0[i]=common*i; }
for (r=bin,i=0;i<bins;i++,r++) r->_used=0;
// arrange wide bins to lines
for (e=0;e<n;e++)
for (r=bin,i=0;i<bins;i++,r++)
if (!r->_used)
if (x0[e]+r->xs<=item.xs)
{
r->x=x0[e];
r->y=y0[e];
r->_used=1;
x0[e]+=r->xs;
if (x0[e]>=item.xs) break;
}
// arrange rest bins to lines (goes to the shortest line)
for (r=bin,i=0;i<bins;i++,r++)
if (!r->_used)
{
// find shortest line
for (e=0,x=0;x<n;x++) if (x0[e]>x0[x]) e=x;
// stop if shortest line is already wide enough
if (x0[e]>=item.xs) break;
// fit the bin in it
r->x=x0[e];
r->y=y0[e];
r->_used=1;
x0[e]+=r->xs;
}
// arrange the unused rest below
for (x=0,y=n*common+40,r=bin,i=0;i<bins;i++,r++) if (!r->_used) { r->x=x; r->y=y; x+=r->xs; }
}
//---------------------------------------------------------------------------
Usage:
bin_generate(7); // generate n random devices to bin[bins] array of rectangles
bin_solve(); // try to solve problem ... just rearrange the bin[bins] values
This is not optimal, but with some tweaks it could be enough.
For example, the last 2 lines need 600 px of height together, so if you have devices of that size or slightly larger you can use them to fill the last 2 lines as a single line ...
If not, then maybe some graph or tree approach would be better (feasible due to the low container count).
[Edit1] universal sizes
When you do not have a guaranteed fixed common container size, you have to compute it instead ...
//---------------------------------------------------------------------------
struct _rec { int x,y,xs,ys,_used; _rec(){}; _rec(_rec& a){ *this=a; }; ~_rec(){}; _rec* operator = (const _rec *a) { *this=*a; return this; }; /*_rec* operator = (const _rec &a) { ...copy... return this; };*/ };
List<_rec> bin,bintype;
_rec item;
//---------------------------------------------------------------------------
void bin_generate(int n) // generate problem
{
int i;
_rec r;
Randomize();
// target resolution
item.x=0; item.xs=1920;
item.y=0; item.ys=1080;
// all used device sizes in portrait start orientation
bintype.num=0; r.x=0; r.y=0; r._used=0;
r.xs= 320; r.ys= 480; bintype.add(r);
r.xs= 480; r.ys= 800; bintype.add(r);
r.xs= 540; r.ys= 960; bintype.add(r);
// r.xs=1080; r.ys=1920; bintype.add(r);
// create test case
bin.num=0; for (i=0;i<n;i++) bin.add(bintype[Random(bintype.num)]);
}
//---------------------------------------------------------------------------
void bin_solve() // try to solve problem
{
int i,j,k,e,x,y;
_rec *r,s;
List<int> hsiz,hcnt; // histogram of sizes
List< List<int> > lin; // line of bins with common size
// compute histogram of sizes
hsiz.num=0; hcnt.num=0;
for (r=bin.dat,i=0;i<bin.num;i++,r++)
{
x=r->xs; for (j=0;j<hsiz.num;j++) if (x==hsiz[j]) { hcnt[j]++; j=-1; break; } if (j>=0) { hsiz.add(x); hcnt.add(1); }
x=r->ys; for (j=0;j<hsiz.num;j++) if (x==hsiz[j]) { hcnt[j]++; j=-1; break; } if (j>=0) { hsiz.add(x); hcnt.add(1); }
}
// sort histogram by cnt desc (most occurent sizes are first)
for (e=1;e;) for (e=0,j=0,i=1;i<hsiz.num;i++,j++) if (hcnt[j]<hcnt[i])
{
x=hsiz[i]; hsiz[i]=hsiz[j]; hsiz[j]=x;
x=hcnt[i]; hcnt[i]=hcnt[j]; hcnt[j]=x; e=1;
}
// create lin[][]; with ys as common size (separate/rotate bins with common sizes from histogram)
lin.num=0;
for (r=bin.dat,i=0;i<bin.num;i++,r++) r->_used=0;
for (i=0;i<hsiz.num;i++)
{
lin.add(); lin[i].num=0; x=hsiz[i];
for (r=bin.dat,j=0;j<bin.num;j++,r++)
{
if ((!r->_used)&&(x==r->xs)) { lin[i].add(j); r->_used=1; y=r->xs; r->xs=r->ys; r->ys=y; }
if ((!r->_used)&&(x==r->ys)) { lin[i].add(j); r->_used=1; }
}
}
for (i=0;i<lin.num;i++) if (!lin[i].num) { lin.del(i); i--; }
// sort lin[][] by xs desc (widest bins are first)
for (i=0;i<lin.num;i++)
for (e=1;e;) for (e=0,k=0,j=1;j<lin[i].num;j++,k++)
if (bin[lin[i][k]].xs<bin[lin[i][j]].xs)
{ s=bin[lin[i][j]]; bin[lin[i][j]]=bin[lin[i][k]]; bin[lin[i][k]]=s; e=1; }
// arrange lines to visually check previous code (debug) ... and also compute the total line length (width)
for (y=item.ys+600,i=0;i<lin.num;i++,y+=r->ys) for (x=0,j=0;j<lin[i].num;j++) { r=&bin[lin[i][j]]; r->x=x; r->y=y; x+=r->xs; }
for (i=0;i<lin.num;i++)
{
j=lin[i][lin[i].num-1]; // last bin in line
hsiz[i]=bin[j].x+bin[j].xs; // total width
hcnt[i]=bin[j].ys; // line height
}
// now compute solution
for (r=bin.dat,i=0;i<bin.num;i++,r++) r->_used=0; // reset usage first
for (y=0,k=1,i=0;i<lin.num;i++) // process lines with common size
while(hsiz[i]>=item.xs) // stop if line shorter than needed
{
x=0;
// arrange wide bins to line
for (j=0;j<lin[i].num;j++)
{
r=&bin[lin[i][j]];
if ((!r->_used)&&(x+r->xs<=item.xs))
{
r->x=x; hsiz[i]-=x; x+=r->xs;
r->y=y; r->_used=k;
if (x>=item.xs) break;
}
}
// arrange short bins to finish line
if (x<item.xs)
for (j=lin[i].num-1;j>=0;j--)
{
r=&bin[lin[i][j]];
if (!r->_used)
{
r->x=x; hsiz[i]-=x; x+=r->xs;
r->y=y; r->_used=k;
if (x>=item.xs) break;
}
}
// remove unfinished line
if (x<item.xs)
{
for (j=0;j<lin[i].num;j++)
{
r=&bin[lin[i][j]];
if (r->_used==k)
{
r->x=0; r->y=0;
r->_used=0;
hsiz[i]+=r->xs;
}
}
break;
}
// next line
y+=hcnt[i];
if (y>=item.ys) break; // solution found already?
}
// rotate unused rest to have ys>=as needed but as wide as can be to form last line
e=item.ys-y; x=0;
if (e>0) for (r=bin.dat,i=0;i<bin.num;i++,r++)
if (!r->_used)
{
if ((r->xs<e)&&(r->ys<e)) continue; // skip too small bins
if (r->xs<r->ys) { j=r->xs; r->xs=r->ys; r->ys=j; }
if (r->ys< e) { j=r->xs; r->xs=r->ys; r->ys=j; }
r->x=x; x+=r->xs;
r->y=y; r->_used=1;
}
}
//---------------------------------------------------------------------------
It is almost the same as before, but prior to the solution a histogram of container sizes is computed.
Choose the most frequent sizes and form groups of compatible bins (containers),
then apply the algorithm ...
I added usage of a dynamic array template List<>, because with static allocation I would go mad before writing this ...
List<int> x; is the same as int x[];
x.num is the number of items inside x[]
x.add() adds a new item to the end of x[]
x.add(q) adds a new item = q to the end of x[]
x.del(i) deletes the i-th item from x[] ... indexing is from zero
so rewrite it to whatever you use instead ...
List< List<int> > y; is a 2D array y[][] ...
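If you do not have such a template at hand, a minimal stand-in built on std::vector is enough to compile snippets like the ones above. This is only a sketch matching the description in the text (num, dat, add, del, allocate are assumptions about the interface, not the author's actual class), and it assumes T is default-constructible and copyable:
#include <vector>
// minimal stand-in for the List<> dynamic array template described above
template <class T> struct List
{
    std::vector<T> v;      // underlying storage
    int num = 0;           // number of valid items (may be less than v.size())
    T*  dat = nullptr;     // raw pointer to the first item (refreshed after growth)

    void sync()            { dat = v.empty() ? nullptr : v.data(); }
    T& operator[](int i)   { return v[i]; }
    void allocate(int n)   { if ((int)v.size() < n) v.resize(n); sync(); }
    void add()             { add(T()); }
    void add(const T& q)   { if (num >= (int)v.size()) v.resize(num + 1); v[num] = q; num++; sync(); }
    void del(int i)        { for (int j = i; j + 1 < num; j++) v[j] = v[j + 1]; num--; }
};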
At last, the final line is formed from the unused bins ...
This is not robust nor safe, but it mostly works (it needs some tweaking, but I am too lazy for that).
The solution also depends on the input set order, so you can find more solutions for the same input set if you shuffle it a bit ... (if some common sizes have the same count).

Related

How to implement range search in KD-Tree

I have built a d dimensional KD-Tree. I want to do range search on this tree. Wikipedia mentions range search in KD-Trees, but doesn't talk about the implementation/algorithm in any way. Can someone please help me with this? If not for arbitrary d, any help at least for d = 2 and d = 3 would be great. Thanks!
There are multiple variants of kd-tree. The one I used had the following specs:
Each internal node has at most two children.
Each leaf node can hold at most maxCapacity points.
No internal node stores any points.
Side note: there are also versions where each node (irrespective of whether it's internal or a leaf) stores exactly one point. The algorithm below can be tweaked for those too; it's mainly buildTree where the key difference lies.
I wrote an algorithm for this some 2 years back, thanks to the resource pointed to by #9mat.
Suppose the task is to find the number of points which lie in a given hyper-rectangle ("d" dimensions). The task could also be to list all points, or all points which lie in the given range and satisfy some other criteria, etc., but that is a straightforward change to my code.
Define a base node class as:
template <typename T> class kdNode{
public: kdNode(){}
virtual long rangeQuery(const T* q_min, const T* q_max) const{ return 0; }
};
Then, an internal node (non-leaf node) can look like this:
template <typename T> class internalNode:public kdNode<T>{
const kdNode<T> *left = nullptr, *right = nullptr; // left and right sub trees
int axis; // the axis on which split of points is being done
T value; // the value based on which points are being split
public: internalNode(){}
void buildTree(...){
// builds the tree recursively
}
// returns the number of points in this sub tree that lie inside the hyper rectangle formed by q_min and q_max
int rangeQuery(const T* q_min, const T* q_max) const{
// num of points that satisfy range query conditions
int rangeCount = 0;
// check for left node
if(q_min[axis] <= value) {
rangeCount += left->rangeQuery(q_min, q_max);
}
// check for right node
if(q_max[axis] >= value) {
rangeCount += right->rangeQuery(q_min, q_max);
}
return rangeCount;
}
};
Finally, the leaf node would look like:
template <typename T> class leaf:public kdNode<T>{
// maxCapacity is a hyper-param, the max num of points you allow a node to hold
// (d and maxCapacity are assumed to be compile-time constants available here)
array<T, d> points[maxCapacity];
int keyCount = 0; // this is the actual num of points in this leaf (keyCount <= maxCapacity)
public: leaf(){}
public: void addPoint(const T* p){
// add a point p to the leaf node
}
// check if points[index] lies inside the hyper rectangle formed by q_min and q_max
inline bool containsPoint(const int index, const T* q_min, const T* q_max) const{
for (int i=0; i<d; i++) {
if (points[index][i] > q_max[i] || points[index][i] < q_min[i]) {
return false;
}
}
return true;
}
// returns number of points in this leaf node that lie inside the hyper rectangle formed by q_min and q_max
int rangeQuery(const T* q_min, const T* q_max) const{
// num of points that satisfy range query conditions
int rangeCount = 0;
for(int i=0; i < this->keyCount; i++) {
if(containsPoint(i, q_min, q_max)) {
rangeCount++;
}
}
return rangeCount;
}
};
For the range query inside the leaf node, it is also possible to do a "binary search" instead of a plain linear search. Since the points will be sorted along the splitting axis, you can binary-search for the l and r bounds using q_min and q_max, and then do a linear scan from l to r instead of from 0 to keyCount-1 (in the worst case it won't help, but in practice, especially if the leaf capacity is large, it can).
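A minimal sketch of that idea on a plain sorted array, using std::lower_bound/std::upper_bound on one axis (the names and the free-function form are illustrative, not taken from the leaf class above):
#include <algorithm>
#include <array>
#include <vector>

// count points whose coordinate on `axis` lies in [lo, hi],
// assuming `pts` is kept sorted by that axis (sketch only)
template <typename T, int D>
int countInAxisRange(const std::vector<std::array<T, D>>& pts, int axis, T lo, T hi)
{
    auto l = std::lower_bound(pts.begin(), pts.end(), lo,
        [axis](const std::array<T, D>& p, T v) { return p[axis] < v; });  // first point >= lo
    auto r = std::upper_bound(pts.begin(), pts.end(), hi,
        [axis](T v, const std::array<T, D>& p) { return v < p[axis]; });  // first point > hi
    // a full d-dimensional query would still scan [l, r) and test the other axes
    return int(r - l);
}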
This is my solution for a KD-tree where each node stores a point (so not just the leaves). (Note that adapting it for the case where points are stored only in the leaves is really easy.)
I leave some of the optimizations out and will explain them at the end, to reduce the complexity of the solution.
The get_range function has varargs at the end, and can be called like,
x1, y1, x2, y2 or
x1, y1, z1, x2, y2, z2 etc. Where first the low values of the range are given and then the high values.
(You can use as many dimensions as you like).
static public <T> void get_range(K_D_Tree<T> tree, List<T> result, float... range) {
if (tree.root == null) return;
float[] node_region = new float[tree.DIMENSIONS * 2];
for (int i = 0; i < tree.DIMENSIONS; i++) {
node_region[i] = -Float.MAX_VALUE;
node_region[i+tree.DIMENSIONS] = Float.MAX_VALUE;
}
_get_range(tree, result, tree.root, node_region, 0, range);
}
The node_region represents the region of the node; we start as large as possible, because for all we know this could be the region we are dealing with.
Here is the recursive _get_range implementation:
static public <T> void _get_range(K_D_Tree<T> tree, List<T> result, K_D_Tree_Node<T> node, float[] node_region, int dimension, float[] target_region) {
if (dimension == tree.DIMENSIONS) dimension = 0;
if (_contains_region(tree, node_region, target_region)) {
_add_whole_branch(node, result);
}
else {
float value = _value(tree, dimension, node);
if (node.left != null) {
float[] node_region_left = new float[tree.DIMENSIONS*2];
System.arraycopy(node_region, 0, node_region_left, 0, node_region.length);
node_region_left[dimension + tree.DIMENSIONS] = value;
if (_intersects_region(tree, node_region_left, target_region)){
_get_range(tree, result, node.left, node_region_left, dimension+1, target_region);
}
}
if (node.right != null) {
float[] node_region_right = new float[tree.DIMENSIONS*2];
System.arraycopy(node_region, 0, node_region_right, 0, node_region.length);
node_region_right[dimension] = value;
if (_intersects_region(tree, node_region_right, target_region)){
_get_range(tree, result, node.right, node_region_right, dimension+1, target_region);
}
}
if (_region_contains_node(tree, target_region, node)) {
result.add(node.point);
}
}
}
One important thing that the other answer does not provide is this part:
if (_contains_region(tree, node_region, target_region)) {
_add_whole_branch(node, result);
}
With a range search in a KD-Tree you have 3 options for a node's region; it is:
fully outside
intersecting
fully contained
Once you know a region is fully contained, you can add the whole branch without doing any further dimension checks.
To make it clearer, here is the _add_whole_branch:
static public <T> void _add_whole_branch(K_D_Tree_Node<T> node, List<T> result) {
result.add(node.point);
if (node.left != null) _add_whole_branch(node.left, result);
if (node.right != null) _add_whole_branch(node.right, result);
}
In this image, all the big white dots were added using _add_whole_branch and only for the red dots a check for all dimensions had to be done.
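The region predicates used above (_contains_region, _intersects_region, _region_contains_node) are not shown in the answer. A minimal sketch of what they could look like, written here in C++ for brevity and assuming the flat layout used above (the first DIMENSIONS entries are the minima, the next DIMENSIONS entries the maxima):
// region layout: r[0..D-1] = minima, r[D..2D-1] = maxima (matches node_region above)

// true if box `inner` lies completely inside box `outer`
// (for the branch-adding check, the node's region plays `inner` and the query range plays `outer`)
bool boxInsideBox(int D, const float* inner, const float* outer)
{
    for (int i = 0; i < D; i++)
        if (inner[i] < outer[i] || inner[i + D] > outer[i + D]) return false;
    return true;
}

// true if the two boxes overlap in every dimension
bool boxesIntersect(int D, const float* a, const float* b)
{
    for (int i = 0; i < D; i++)
        if (a[i + D] < b[i] || b[i + D] < a[i]) return false;
    return true;
}

// true if the point `p` lies inside the box `region`
bool boxContainsPoint(int D, const float* region, const float* p)
{
    for (int i = 0; i < D; i++)
        if (p[i] < region[i] || p[i] > region[i + D]) return false;
    return true;
}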
Optimization
1)
Instead of starting the _get_range function at the root node, you can find the split node first. This is the first node whose point lies within the query range. To find the split node you still need to start at the root node, but the calculations are a bit cheaper (because you only go either left or right until you reach it).
2)
Now I create the float[] node_region_left and float[] node_region_right, and since this happens in a recursive function it can lead to quite a lot of arrays. However, you can reuse the left one for the right; I didn't do that in this example for clarity reasons.
I can also imagine storing the region size in the node, but this takes quite a bit more memory and might lead to a lot of cache misses.

How to detect and save cyclic connectivity in edge vertices (hole detection)?

thanks for taking the time to read my question.
I am working on detecting holes in a triangular mesh and filling them with new triangles. I have done some of the parts, such as getting a list of edge vertices, etc. Following are the vertices/edges that make up the holes; please have a look at the image.
(9, 62) => vertex # 9 and 62 makes an edge (left hole)
(66, 9) => vertex # 66 and 9 makes an edge (left hole)
(70, 66) => vertex # 70 and 66 makes an edge (left hole)
(62, 70) => vertex # 62 and 70 makes an edge (left hole)
(147, 63) => vertex # 147 and 63 makes an edge (right hole)
(55, 148)
(63, 149)
(149, 55)
(148, 147)
The first thing I need to do is check which vertices form a cycle (meaning a hole is detected), and then save them in a separate set of cyclic vertices.
The issue is writing an algorithm that determines how many cycles the given graph (vertices/edges) contains, and then saves each cycle into a separate set.
Please suggest a simple and optimized algorithm to solve this problem.
Thank you.
mesh
Let's assume your STL mesh has n triangles. You need to convert it into indexed format, so extract all triangle points and convert them into two separate tables: one holding all the points and a second holding 3 point indexes per triangle. Let's assume you end up with m points and n triangles.
You should keep the point table sorted and use binary search, to speed this up from O(n·m) to O(n·log(m)).
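A small sketch of that conversion. Here a std::map plays the role of the sorted point table with logarithmic lookup; the types and names are illustrative, not taken from the answer's code, and exact float comparison is used (in practice a small tolerance, like the 1e-6 used in the full example below, may be needed):
#include <array>
#include <map>
#include <vector>

struct Vec3 { float x, y, z;
    bool operator<(const Vec3& o) const {
        if (x != o.x) return x < o.x;
        if (y != o.y) return y < o.y;
        return z < o.z; } };

struct IndexedMesh {
    std::vector<Vec3> points;                  // unique points
    std::vector<std::array<int,3>> triangles;  // 3 point indexes per triangle
};

// convert a triangle soup (3 raw points per triangle) into indexed form
IndexedMesh indexTriangles(const std::vector<std::array<Vec3,3>>& soup)
{
    IndexedMesh mesh;
    std::map<Vec3,int> lookup;                 // sorted point table, O(log m) per lookup
    for (const auto& tri : soup) {
        std::array<int,3> idx;
        for (int k = 0; k < 3; k++) {
            auto it = lookup.find(tri[k]);
            if (it == lookup.end()) {          // new point: append it and remember its index
                it = lookup.insert({ tri[k], (int)mesh.points.size() }).first;
                mesh.points.push_back(tri[k]);
            }
            idx[k] = it->second;
        }
        mesh.triangles.push_back(idx);
    }
    return mesh;
}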
_edge structure
create structure that holds all the edges of your mesh. Something like:
struct _edge
{
int p0,p1; // used vertexes index
int cnt; // count of edge usage
};
where p0<p1.
create _edge edge[] table O(n)
It should be a list holding all the edges (3n of them), so loop through all the triangles and add 3 edges for each one, with the count set to cnt=1. This is O(n).
Now sort the list by (p0,p1), which is O(n·log(n)). After that just join all edges with the same (p0,p1) by summing their cnt and deleting the duplicates. If coded right, this pass is O(n).
detect hole
In a regular (watertight) STL each edge must have cnt=2. If cnt=1 then a triangle is missing and you have found your hole; if cnt>2 you have a geometric error in your mesh.
So delete all edges with cnt>=2 from your edge[] table, which is O(n).
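A compact sketch of these two steps (build, sort, merge and filter the edge table) using std::sort; it assumes the IndexedMesh type from the sketch above:
#include <algorithm>
#include <vector>

struct Edge { int p0, p1, cnt; };   // p0 < p1, cnt = usage count

// build the edge[] table from the indexed triangles, merge duplicates,
// and keep only the boundary edges (cnt == 1)
std::vector<Edge> boundaryEdges(const IndexedMesh& mesh)
{
    std::vector<Edge> edges;
    edges.reserve(mesh.triangles.size() * 3);
    for (const auto& t : mesh.triangles)                        // 3 edges per triangle, O(n)
        for (int k = 0; k < 3; k++) {
            int a = t[k], b = t[(k + 1) % 3];
            edges.push_back({ std::min(a, b), std::max(a, b), 1 });
        }
    std::sort(edges.begin(), edges.end(), [](const Edge& l, const Edge& r)
        { return l.p0 != r.p0 ? l.p0 < r.p0 : l.p1 < r.p1; });  // O(n·log(n))
    std::vector<Edge> out;
    for (size_t i = 0; i < edges.size(); ) {                    // merge equal edges, O(n)
        Edge e = edges[i++];
        while (i < edges.size() && edges[i].p0 == e.p0 && edges[i].p1 == e.p1) { e.cnt++; i++; }
        if (e.cnt == 1) out.push_back(e);                       // cnt==1 -> hole boundary edge
        // cnt>2 would indicate a geometric error in the mesh
    }
    return out;
}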
detect loops
Let's assume we have k edges left in our edge[] table. Now, for every 2 edges that share a point, create a triangle. Something like:
for (i=0;i<k;i++)
for (j=i+1;j<k;j++)
{
if ((edge[i].p0==edge[j].p0)||(edge[i].p1==edge[j].p0)) add_triangle(edge[i].p0,edge[i].p1,edge[j].p1);
if ((edge[i].p0==edge[j].p1)||(edge[i].p1==edge[j].p1)) add_triangle(edge[i].p0,edge[i].p1,edge[j].p0);
}
If you use binary search for the inner loop, then this becomes O(k·log(k)). Also, you should avoid adding duplicate triangles and should correct their winding, so first add the triangles to a separate table (or remember the starting index) and then remove the duplicates (or you can do it directly in add_triangle).
Also, to handle bigger holes, do not forget to add the new edges to your edge[] table. You can either update the edges after the current edges are processed and repeat step 4, or incorporate the changes on the fly.
[Edit1] C++ example
Recently I was doing some STL coding for this QA:
Generating outside supporters into mesh for 3D printing
As I already had all the infrastructure coded, I chose to give this a shot, and here is the result:
struct STL3D_edge
{
int p0,p1,cnt,dir;
STL3D_edge() {}
STL3D_edge(STL3D_edge& a) { *this=a; }
~STL3D_edge() {}
STL3D_edge* operator = (const STL3D_edge *a) { *this=*a; return this; }
//STL3D_edge* operator = (const STL3D_edge &a) { ...copy... return this; }
int operator == (const STL3D_edge &a) { return ((p0==a.p0)&&(p1==a.p1)); }
int operator != (const STL3D_edge &a) { return ((p0!=a.p0)||(p1!=a.p1)); }
void ld(int a,int b) { cnt=1; if (a<=b) { dir=0; p0=a; p1=b; } else { dir=1; p0=b; p1=a; }}
};
List<STL3D_edge> edge;
List<float> pnt;
void edge_draw()
{
int i; STL3D_edge *e;
glBegin(GL_LINES);
for (e=edge.dat,i=0;i<edge.num;i++,e++)
{
glVertex3fv(pnt.dat+e->p0);
glVertex3fv(pnt.dat+e->p1);
}
glEnd();
}
void STL3D::holes()
{
// https://stackoverflow.com/a/45541861/2521214
int i,j,i0,i1,i2,j0,j1,j2;
float q[3];
_fac *f,ff;
STL3D_edge *e,ee,*e0,*e1,*e2;
ff.attr=31<<5; // patched triangles color/id
// create some holes for testing
if (fac.num<100) return;
for (i=0;i<10;i++) fac.del(Random(fac.num));
// compute edge table
edge.allocate(fac.num*3); edge.num=0;
for (f=fac.dat,i=0;i<fac.num;i++,f++)
{
// add/find points to/in pnt[]
for (i0=-1,j=0;j<pnt.num;j+=3){ vectorf_sub(q,pnt.dat+j,f->p[0]); if (vectorf_len2(q)<1e-6) { i0=j; break; }} if (i0<0) { i0=pnt.num; for (j=0;j<3;j++) pnt.add(f->p[0][j]); }
for (i1=-1,j=0;j<pnt.num;j+=3){ vectorf_sub(q,pnt.dat+j,f->p[1]); if (vectorf_len2(q)<1e-6) { i1=j; break; }} if (i1<0) { i1=pnt.num; for (j=0;j<3;j++) pnt.add(f->p[1][j]); }
for (i2=-1,j=0;j<pnt.num;j+=3){ vectorf_sub(q,pnt.dat+j,f->p[2]); if (vectorf_len2(q)<1e-6) { i2=j; break; }} if (i2<0) { i2=pnt.num; for (j=0;j<3;j++) pnt.add(f->p[2][j]); }
// add edges
ee.ld(i0,i1); for (e=edge.dat,j=0;j<edge.num;j++,e++) if (*e==ee) { e->cnt++; j=-1; break; } if (j>=0) edge.add(ee);
ee.ld(i1,i2); for (e=edge.dat,j=0;j<edge.num;j++,e++) if (*e==ee) { e->cnt++; j=-1; break; } if (j>=0) edge.add(ee);
ee.ld(i2,i0); for (e=edge.dat,j=0;j<edge.num;j++,e++) if (*e==ee) { e->cnt++; j=-1; break; } if (j>=0) edge.add(ee);
}
// delete even times used edges (to speed up the loops finding)
for (i0=i1=0,e0=e1=edge.dat;i0<edge.num;i0++,e0++)
if (int(e0->cnt&1)==1) { *e1=*e0; i1++; e1++; } edge.num=i1;
// find 2 edges with one comon point (j1)
for (e0=edge.dat,i0=0;i0<edge.num;i0++,e0++) if (int(e0->cnt&1)==1)
for (e1=e0+1,i1=i0+1;i1<edge.num;i1++,e1++) if (int(e1->cnt&1)==1)
{
// decide which points to use
j0=-1; j1=-1; j2=-1;
if (e0->p0==e1->p0) { j0=e0->p1; j1=e0->p0; j2=e1->p1; }
if (e0->p0==e1->p1) { j0=e0->p1; j1=e0->p0; j2=e1->p0; }
if (e0->p1==e1->p0) { j0=e0->p0; j1=e0->p1; j2=e1->p1; }
if (e0->p1==e1->p1) { j0=e0->p0; j1=e0->p1; j2=e1->p0; }
if (j2<0) continue;
// add missing triangle
if (e0->dir)
{
vectorf_copy(ff.p[0],pnt.dat+j1);
vectorf_copy(ff.p[1],pnt.dat+j0);
vectorf_copy(ff.p[2],pnt.dat+j2);
}
else{
vectorf_copy(ff.p[0],pnt.dat+j0);
vectorf_copy(ff.p[1],pnt.dat+j1);
vectorf_copy(ff.p[2],pnt.dat+j2);
}
ff.compute();
fac.add(ff);
// update edges
e0->cnt++;
e1->cnt++;
ee.ld(j0,j2); for (e=edge.dat,j=0;j<edge.num;j++,e++) if (*e==ee) { e->cnt++; j=-1; break; } if (j>=0) edge.add(ee);
break;
}
}
The full C++ code and description of the STL3D class are in the link above. I used a sphere STL mesh I found in my archive and colored the hole-patching triangles green to make them easy to recognize. Here is the result:
The black lines are the wireframe and the red ones are just a debug draw of the edge[], pnt[] arrays ...
As you can see, it works even for holes bigger than a single triangle :) ...

Efficient Nonogram Solver

So I recently saw this puzzle posted by the British GCHQ:
It involves solving a 25x25 nonogram:
A nonogram is a picture logic puzzle in which cells in a grid must be colored or left blank according to numbers at the side of the grid to reveal a hidden picture. In this puzzle type, the numbers are a form of discrete tomography that measures how many unbroken lines of filled-in squares there are in any given row or column. For example, a clue of "4 8 3" would mean there are sets of four, eight, and three filled squares, in that order, with at least one blank square between successive groups.
Naturally I had the inclination to try and write a program that would solve it for me. I was thinking of a recursive backtracking algorithm that starts at row 0 and, for each possible arrangement of that row given the information from the row clue, places a possible combination of the next row and verifies whether it is a valid placement given the column clues. If it is, it continues; if not, it backtracks, until all the rows are placed in a valid configuration or all possible row combinations have been exhausted.
I tested it on a few 5x5 puzzles and it works perfectly. The issue is that it takes too long to compute the 25x25 GCHQ puzzle. I need ways to make this algorithm more efficient - enough so that it can solve the puzzle linked above. Any ideas?
Here is my code for generating the set of row possibilities for each row, as well as the code for the solver (note: it uses some non-standard libraries, but this shouldn't detract from the point):
// The Vector<int> input is a list of the row clues eg. for row 1, input = {7,3,1,1,7}. The
// int currentElemIndex keeps track of what block of the input clue we are dealing with i.e
// it starts at input[0] which is the 7 sized block and for all possible places it can be
// placed, places the next block from the clue recursively.
// The Vector<bool> rowState is the state of the row at the current time. True indicates a
// colored in square, false indicates empty.
// The Set< Vector<bool> >& result is just the set that stores all the possible valid row
// configurations.
// The int startIndex and endIndex are the bounds on the start point and end point between
// which the function will try to place the current block. The endIndex is calculated by
// subtracting the width of the board from the sum of the remaining block sizes + number
// of blocks remaining. Ie. if for row 1 with the input {7,3,1,1,7} we were placing the
// first block, the endIndex would be (3+1+1+7)+4=16 because if the first block was placed
// further than this, it would be impossible for the other blocks to fit.
// BOARD_WIDTH = 25;
// The containsPresets function makes sure that the row configuration is only added to the
// result set if it contains the preset values of the puzzle (the given squares
// at the start of the puzzle).
void Nonogram::rowPossibilitiesHelper(int currentElemIndex, Vector<bool>& rowState,
Vector<int>& input, Set< Vector<bool> >& result,
int startIndex, int rowIndex) {
if(currentElemIndex == input.size()) {
if(containsPresets(rowState, rowIndex)) {
result += rowState;
}
} else {
int endIndex = BOARD_WIDTH - rowSum(currentElemIndex+1, input);
int blockSize = input[currentElemIndex];
for(int i=startIndex; i<=endIndex-blockSize; i++) {
for(int j=0; j<blockSize; j++) {
rowState[i+j] = true; // set block
}
rowPossibilitiesHelper(currentElemIndex+1, rowState, input, result, i+blockSize+1, rowIndex); // explore
for(int j=0; j<blockSize; j++) {
rowState[i+j] = false; // unchoose
}
}
}
}
// The function is initally passed in 0 for the rowIndex. It gets a set of all possible
// valid arrangements of the board and for each one of them, sets the board row at rowIndex
// to the current rowConfig. Is then checks if the current configuration so far is valid in
// regards to the column clues. If it is, it solves the next row, if not, it unmarks the
// current configuration from the board row at rowIndex.
void Nonogram::solveHelper(int rowIndex) {
if(rowIndex == BOARD_HEIGHT) {
printBoard();
} else {
for(Vector<bool> rowConfig : rowPossisbilities(rowIndex)) {
setBoardRow(rowConfig, rowIndex);
if(isValidConfig(rowIndex)) { // set row
solveHelper(rowIndex+1); // explore
}
unsetBoardRow(rowIndex); // unset row
}
}
}
I've made a solution in Java that solves your example puzzle (25x25) in about 50 ms.
Full code and input examples: Github
Prerequisites
Notions of Java (understanding the samples)
Bitwise operations: I depend a lot on them, so read up on them if you are not very familiar with them.
Graph traversal algorithms: DFS
Given:
R, C // number of rows, columns
int[][] rows; // for each row the block sizes from left to right (ex: rows[2][0] = first block size of the 3rd row)
int[][] cols; // for each column the block size from top to bottom
long[] grid; // bitwise representation of the board with the initially given painted blocks
Precalculate all permutations per row.
A permutation is also stored in a bitwise representation, where the first bit is set to true if it fills the first column, etc.
This is both time and space efficient. For the calculation we first count the number of extra spaces that can be added:
number_of_columns - sum_of_blocksizes - (number_of_blocks - 1)
DFS over all possible placements of the extra spaces (see calcPerms) and add a permutation to the list of possible permutations if it matches the initially given painted blocks.
rowPerms = new long[R][];
for(int r=0;r<R;r++){
LinkedList<Long> res = new LinkedList<Long>();
int spaces = C - (rows[r].length-1);
for(int i=0;i<rows[r].length;i++){
spaces -= rows[r][i];
}
calcPerms(r, 0, spaces, 0, 0,res);
rowPerms[r] = new long[res.size()];
while(!res.isEmpty()){
rowPerms[r][res.size()-1]=res.pollLast();
}
}
...
// row, current block in row, extra spaces left to add, current permutation, current number of bits to shift
static void calcPerms(int r, int cur, int spaces, long perm, int shift, LinkedList<Long> res){
if(cur == rows[r].length){
if((grid[r]&perm)==grid[r]){
res.add(perm);
}
return;
}
while(spaces>=0){
calcPerms(r, cur+1, spaces, perm|(bits(rows[r][cur])<<shift), shift+rows[r][cur]+1,res);
shift++;
spaces--;
}
}
static long bits(int b){
return (1L<<b)-1; // 1 => 1, 2 => 11, 3 => 111, ...
}
Implement validations per row
Validating the rows:
[Trivial:] We are going to use precalculated permutations so we don't need any extra validation per row.
Validating the columns:
For this I keep, for each row and column, the index of the current block size (colIx) and the position within that block (colVal).
These are calculated from the value and index of the previous row:
the value is increased by 1 if the column is painted in the current row;
the value is reset to 0 and the index increased by 1 if the column was painted in the previous row but is not in the current row.
Sample:
static void updateCols(int row){
long ixc = 1L;
for(int c=0;c<C;c++,ixc<<=1){
// copy from previous
colVal[row][c]=row==0 ? 0 : colVal[row-1][c];
colIx[row][c]=row==0 ? 0 : colIx[row-1][c];
if((grid[row]&ixc)==0){
if(row > 0 && colVal[row-1][c] > 0){
// bit not set and col is not empty at previous row => close blocksize
colVal[row][c]=0;
colIx[row][c]++;
}
}else{
colVal[row][c]++; // increase value for set bit
}
}
}
Now we can use these index/values to determine which bits are expected to be false/true in the next row.
Used datastructure for validation:
static long[] mask; // per row bitmask, bit is set to true if the bit has to be validated by the val bitmask
static long[] val; // per row bitmask with bit set to false/true for as expected for the current row
When a bit in the previous row is set, we expect the bit in the current row to be true if and only if the current size is still smaller than the expected size for the current index; otherwise it has to be 0, because you want to cut the block off at the current row.
Also, when the last block size is already used up for the column, we cannot start a new block, hence the bit has to be 0.
static void rowMask(int row){
mask[row]=val[row]=0;
if(row==0){
return;
}
long ixc=1L;
for(int c=0;c<C;c++,ixc<<=1){
if(colVal[row-1][c] > 0){
// when column at previous row is set, we know for sure what has to be the next bit according to the current size and the expected size
mask[row] |= ixc;
if(cols[c][colIx[row-1][c]] > colVal[row-1][c]){
val[row] |= ixc; // must set
}
}else if(colVal[row-1][c] == 0 && colIx[row-1][c]==cols[c].length){
// can not add anymore since out of indices
mask[row] |= ixc;
}
}
}
Dfs all rows and check if still valid
This makes the actual DFS part as easy as your own.
If the row mask fits the current permutation, we can update the column indices/values and traverse to the next row, eventually ending up at row R.
static boolean dfs(int row){
if(row==R){
return true;
}
rowMask(row); // calculate mask to stay valid in the next row
for(int i=0;i<rowPerms[row].length;i++){
if((rowPerms[row][i]&mask[row])!=val[row]){
continue;
}
grid[row] = rowPerms[row][i];
updateCols(row);
if(dfs(row+1)){
return true;
}
}
return false;
}

Determine Minimum Number of Line Segments to Solve a Maze

I have a problem where I need to define a polygon with the minimum number of vertices that intersects or contains every pixel in an image that is not transparent (let N be the number of pixels in the image). My only assumptions are that the image cannot contain transparent pixels within its boundary (holes), and that at least two pixels are non-transparent. As an example, let's say I have the following image:
My idea for an algorithm is thus:
1) Determine the edge pixels. This is done in O(N) time by iterating through each pixel and determining whether any of its neighbors (out of the four pixels to the left, above, right, and below it) are empty. Store the pixel and which neighbors were non-transparent, keyed by linear index into the array of pixels. Let there be P edge pixels, shown in orange below.
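A minimal sketch of this step; the alpha-mask layout and the helper names are illustrative assumptions, not taken from the question's code:
#include <unordered_map>
#include <vector>

// find the edge pixels of an alpha mask in O(N)
// `alpha` is row-major, width*height entries, non-zero = opaque (assumed layout)
struct EdgeInfo { unsigned char solidNeighbors; };   // bits 0..3 = left, up, right, down

std::unordered_map<int, EdgeInfo> findEdgePixels(const std::vector<unsigned char>& alpha,
                                                 int width, int height)
{
    std::unordered_map<int, EdgeInfo> edges;         // keyed by linear pixel index
    auto solid = [&](int x, int y) {
        return x >= 0 && x < width && y >= 0 && y < height && alpha[y * width + x] != 0;
    };
    const int dx[4] = { -1, 0, 1, 0 }, dy[4] = { 0, -1, 0, 1 };
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++) {
            if (!solid(x, y)) continue;
            unsigned char mask = 0; bool isEdge = false;
            for (int k = 0; k < 4; k++) {
                if (solid(x + dx[k], y + dy[k])) mask |= (unsigned char)(1 << k);
                else isEdge = true;                  // an empty 4-neighbor makes this an edge pixel
            }
            if (isEdge) edges[y * width + x] = { mask };
        }
    return edges;
}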
2) Get an adjacency list of the edge pixels. This is done in O(P) time by selecting one of the edge pixels and choosing a direction based on its empty neighbors. For example, if a pixel has a bottom and a right neighbor, then the next pixel will be either the one in the upper-right corner or the pixel immediately to the right. Select the next edge pixel from the dictionary of remaining edge pixels. Append that pixel to the list until the algorithm returns to the starting pixel. There are 27 edge pixels in the example image below (some are an edge pixel more than once).
3) Draw a maze that all edges must lie between. This is done in O(P) time by iterating through the adjacency list and adding an edge on all sides of those pixels without a neighbor. In addition, an edge is added to the interior of the shape based on the direction to the next edge pixel. If the pixel represents a peninsula of single pixel width, the inner edge is added to the middle of the edge instead of the pixel vertex. The interior of the maze is shown in red. Note that the maze boundary is a super-set of all the edge pixels found in step 2.
4) Find a polygon with almost minimal edges that does not touch the border of the maze. This is the part I need help with. Does anyone have a suggestion of how you would go from step 3 to a solution such as the following?
I have no background in image processing, but I came across the Ramer–Douglas–Peucker algorithm yesterday and I think it might be helpful.
From my quick scan of the Wikipedia article, it reduces the number of points in a curve, so I would run this algorithm on each line, where the points of the line are the end points you have, plus the borders of the squares that the line crosses added as points.
I marked out in this image two lines you could run the algorithm on, and I think it would work.
How to find each line and when to stop, I am not 100% sure, but I hope this was useful.
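For reference, a compact sketch of the Ramer–Douglas–Peucker simplification on a 2D polyline (generic, not tailored to the maze representation; eps is the maximum allowed deviation):
#include <cmath>
#include <vector>

struct Pt { double x, y; };

// perpendicular distance of p from the line through a and b
static double lineDist(const Pt& p, const Pt& a, const Pt& b)
{
    double dx = b.x - a.x, dy = b.y - a.y;
    double len = std::sqrt(dx * dx + dy * dy);
    if (len == 0.0) return std::hypot(p.x - a.x, p.y - a.y);
    return std::fabs(dy * (p.x - a.x) - dx * (p.y - a.y)) / len;
}

// Ramer-Douglas-Peucker: keep only points deviating more than eps from the chord
std::vector<Pt> rdp(const std::vector<Pt>& pts, double eps)
{
    if (pts.size() < 3) return pts;
    size_t idx = 0; double dmax = 0.0;
    for (size_t i = 1; i + 1 < pts.size(); i++) {
        double d = lineDist(pts[i], pts.front(), pts.back());
        if (d > dmax) { dmax = d; idx = i; }
    }
    if (dmax <= eps) return { pts.front(), pts.back() };     // flat enough: keep endpoints only
    std::vector<Pt> left(pts.begin(), pts.begin() + idx + 1);
    std::vector<Pt> right(pts.begin() + idx, pts.end());
    std::vector<Pt> res  = rdp(left, eps);
    std::vector<Pt> tail = rdp(right, eps);
    res.insert(res.end(), tail.begin() + 1, tail.end());     // drop the duplicated split point
    return res;
}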
See find holes in 2D point set; it is a very similar problem.
Just invert the map and use the midpoint of each grid square as a point.
That will lead to this:
As you want the inner polygon, there are 2 choices:
shrink the shape by 1 cell before applying this (can lose some detail in the shape), or
change the H/V lines so they are 1 cell shorter (half a cell from both sides).
That will lead to something like this:
After some changes in the code I can use 2x multi-sampling now, so the result is:
Now you can join connected lines with the same slope
and apply something like ear clipping on the found corners to get closer to your desired polygon.
Here is the inverted source code in C++ (there may be some leftover comments about holes):
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
/* usage:
int i;
pntcloud_polygons h;
pnt2D point[points];
h.scann_beg(); for (i=0;i<points;i++) { p=point[i]; h.scann_pnt(p.x,p.y); } h.scann_end();
h.cell_size(2.5); // or (h.x1-h.x0)*0.01 ... cell size >> avg point distance
h.holes_beg(); for (i=0;i<points;i++) { p=point[i]; h.holes_pnt(p.x,p.y); } h.holes_end();
*/
//---------------------------------------------------------------------------
class pntcloud_polygons
{
public:
int xs,ys,n; // cell grid x,y - size and points count
int **map; // points density map[xs][ys]
// i=(x-x0)*g2l; x=x0+(i*l2g);
// j=(y-y0)*g2l; y=y0+(j*l2g);
double mg2l,ml2g; // scale to/from global/map space (x,y) <-> map[i][j]
double x0,x1,y0,y1; // used area (bounding box)
struct _line
{
int id; // id of hole for segmentation/polygonize
float i0,i1,j0,j1; // index in map[][]
_line(){}; _line(_line& a){ *this=a; }; ~_line(){}; _line* operator = (const _line *a) { *this=*a; return this; }; /*_line* operator = (const _line &a) { ...copy... return this; };*/
};
List<_line> lin;
int lin_i0; // start index for perimeter lines (smaller indexes are the H,V lines inside hole)
struct _point
{
int i,j; // index in map[][]
int p0,p1; // previous next point
int used;
_point(){}; _point(_point& a){ *this=a; }; ~_point(){}; _point* operator = (const _point *a) { *this=*a; return this; }; /*_point* operator = (const _point &a) { ...copy... return this; };*/
};
List<_point> pnt;
// class init and internal stuff
pntcloud_polygons() { xs=0; ys=0; n=0; map=NULL; mg2l=1.0; ml2g=1.0; x0=0.0; y0=0.0; x1=0.0; y1=0.0; lin_i0=0; };
pntcloud_polygons(pntcloud_polygons& a){ *this=a; };
~pntcloud_polygons() { _free(); };
pntcloud_polygons* operator = (const pntcloud_polygons *a) { *this=*a; return this; };
pntcloud_polygons* operator = (const pntcloud_polygons &a)
{
xs=0; ys=0; n=a.n; map=NULL;
mg2l=a.mg2l; x0=a.x0; x1=a.x1;
ml2g=a.ml2g; y0=a.y0; y1=a.y1;
_alloc(a.xs,a.ys);
for (int i=0;i<xs;i++)
for (int j=0;j<ys;j++) map[i][j]=a.map[i][j];
return this;
}
void _free() { if (map) { for (int i=0;i<xs;i++) if (map[i]) delete[] map[i]; delete[] map; } xs=0; ys=0; }
void _alloc(int _xs,int _ys) { int i=0; _free(); xs=_xs; ys=_ys; map=new int*[xs]; if (map) for (i=0;i<xs;i++) { map[i]=new int[ys]; if (map[i]==NULL) { i=-1; break; } } else i=-1; if (i<0) _free(); }
// scann boundary box interface
void scann_beg();
void scann_pnt(double x,double y);
void scann_end();
// dynamic allocations
void cell_size(double sz); // compute/allocate grid from grid cell size = sz x sz
// scann pntcloud_polygons interface
void holes_beg();
void holes_pnt(double x,double y);
void holes_end();
// global(x,y) <- local map[i][j] + half cell offset
inline void l2g(double &x,double &y,int i,int j) { x=x0+((double(i)+0.5)*ml2g); y=y0+((double(j)+0.5)*ml2g); }
inline void l2g(double &x,double &y,float i,float j) { x=x0+((double(i)+0.5)*ml2g); y=y0+((double(j)+0.5)*ml2g); }
// local map[i][j] <- global(x,y)
inline void g2l(int &i,int &j,double x,double y) { i= double((x-x0) *mg2l); j= double((y-y0) *mg2l); }
};
//---------------------------------------------------------------------------
void pntcloud_polygons::scann_beg()
{
x0=0.0; y0=0.0; x1=0.0; y1=0.0; n=0;
}
//---------------------------------------------------------------------------
void pntcloud_polygons::scann_pnt(double x,double y)
{
if (!n) { x0=x; y0=y; x1=x; y1=y; }
if (n<0x7FFFFFFF) n++; // avoid overflow
if (x0>x) x0=x; if (x1<x) x1=x;
if (y0>y) y0=y; if (y1<y) y1=y;
}
//---------------------------------------------------------------------------
void pntcloud_polygons::scann_end()
{
}
//---------------------------------------------------------------------------
void pntcloud_polygons::cell_size(double sz)
{
int x,y;
if (sz<1e-6) sz=1e-6;
x=ceil((x1-x0)/sz);
y=ceil((y1-y0)/sz);
_alloc(x,y);
ml2g=sz; mg2l=1.0/sz;
}
//---------------------------------------------------------------------------
void pntcloud_polygons::holes_beg()
{
int i,j;
for (i=0;i<xs;i++)
for (j=0;j<ys;j++)
map[i][j]=0;
}
//---------------------------------------------------------------------------
void pntcloud_polygons::holes_pnt(double x,double y)
{
int i,j;
g2l(i,j,x,y);
if ((i>=0)&&(i<xs))
if ((j>=0)&&(j<ys))
if (map[i][j]<0x7FFFFFFF) map[i][j]++; // avoid overflow
}
//---------------------------------------------------------------------------
void pntcloud_polygons::holes_end()
{
int i,j,e,i0,i1;
List<int> ix; // hole lines start/stop indexes for speed up the polygonization
_line *a,*b,l;
_point *aa,*bb,p;
lin.num=0; lin_i0=0;// clear lines
ix.num=0; // clear indexes
// find pntcloud_polygons (map[i][j].cnt!=0) or (map[i][j].cnt>=treshold)
// and create lin[] list of H,V lines covering pntcloud_polygons
for (j=0;j<ys;j++) // search lines
for (i=0;i<xs;)
{
int i0,i1;
for (;i<xs;i++) if (map[i][j]!=0) break; i0=i-1; // find start of polygon
for (;i<xs;i++) if (map[i][j]==0) break; i1=i; // find end of polygon
if (i0< 0) continue; // skip bad circumstances (edges or no hole found)
if (i1>=xs) continue;
if (map[i0][j]!=0) continue;
if (map[i1][j]!=0) continue;
l.i0=i0+0.5;
l.i1=i1-0.5;
l.j0=j ;
l.j1=j ;
l.id=-1;
lin.add(l);
}
for (i=0;i<xs;i++) // search columns
for (j=0;j<ys;)
{
int j0,j1;
for (;j<ys;j++) if (map[i][j]!=0) break; j0=j-1; // find start of polygon
for (;j<ys;j++) if (map[i][j]==0) break; j1=j ; // find end of polygon
if (j0< 0) continue; // skip bad circumstances (edges or no hole found)
if (j1>=ys) continue;
if (map[i][j0]!=0) continue;
if (map[i][j1]!=0) continue;
l.i0=i ;
l.i1=i ;
l.j0=j0+0.5;
l.j1=j1-0.5;
l.id=-1;
lin.add(l);
}
// segmentate lin[] ... group lines of the same hole together by lin[].id
// segmentation based on vector lines data
// you can also segmentate the map[][] directly as bitmap during hole detection
for (i=0;i<lin.num;i++) lin[i].id=i; // all lines are separate
for (;;) // join what you can
{
for (e=0,a=lin.dat,i=0;i<lin.num;i++,a++)
{
for (b=a,j=i;j<lin.num;j++,b++)
if (a->id!=b->id)
{
// if a,b not intersecting or neighbouring
if (a->i0>b->i1) continue;
if (b->i0>a->i1) continue;
if (a->j0>b->j1) continue;
if (b->j0>a->j1) continue;
// if they do mark e for join groups
e=1; break;
}
if (e) break; // join found ... stop searching
}
if (!e) break; // no join found ... stop segmentation
i0=a->id; // joid ids ... rename i1 to i0
i1=b->id;
for (a=lin.dat,i=0;i<lin.num;i++,a++)
if (a->id==i1)
a->id=i0;
}
// sort lin[] by id
for (e=1;e;) for (e=0,a=&lin[0],b=&lin[1],i=1;i<lin.num;i++,a++,b++)
if (a->id>b->id) { l=*a; *a=*b; *b=l; e=1; }
// re id lin[] and prepare start/stop indexes
for (i0=-1,i1=-1,a=&lin[0],i=0;i<lin.num;i++,a++)
if (a->id==i1) a->id=i0;
else { i0++; i1=a->id; a->id=i0; ix.add(i); }
ix.add(lin.num);
// polygonize
lin_i0=lin.num;
for (j=1;j<ix.num;j++) // process hole
{
i0=ix[j-1]; i1=ix[j];
// create border pnt[] list (unique points only)
pnt.num=0; p.used=0; p.p0=-1; p.p1=-1;
for (a=&lin[i0],i=i0;i<i1;i++,a++)
{
p.i=a->i0;
p.j=a->j0;
map[p.i][p.j]=0;
for (aa=&pnt[0],e=0;e<pnt.num;e++,aa++)
if ((aa->i==p.i)&&(aa->j==p.j)) { e=-1; break; }
if (e>=0) pnt.add(p);
p.i=a->i1;
p.j=a->j1;
map[p.i][p.j]=0;
for (aa=&pnt[0],e=0;e<pnt.num;e++,aa++)
if ((aa->i==p.i)&&(aa->j==p.j)) { e=-1; break; }
if (e>=0) pnt.add(p);
}
// mark not border points
for (aa=&pnt[0],i=0;i<pnt.num;i++,aa++)
if (!aa->used) // ignore marked points
if ((aa->i>0)&&(aa->i<xs-1)) // ignore map[][] border points
if ((aa->j>0)&&(aa->j<ys-1))
{ // ignore if any non hole cell around
if (map[aa->i-1][aa->j-1]>0) continue;
if (map[aa->i-1][aa->j ]>0) continue;
if (map[aa->i-1][aa->j+1]>0) continue;
if (map[aa->i ][aa->j-1]>0) continue;
if (map[aa->i ][aa->j+1]>0) continue;
if (map[aa->i+1][aa->j-1]>0) continue;
if (map[aa->i+1][aa->j ]>0) continue;
if (map[aa->i+1][aa->j+1]>0) continue;
aa->used=1;
}
// delete marked points
for (aa=&pnt[0],e=0,i=0;i<pnt.num;i++,aa++)
if (!aa->used) { pnt[e]=*aa; e++; } pnt.num=e;
// connect neighbouring points distance=1
for (i0= 0,aa=&pnt[i0];i0<pnt.num;i0++,aa++)
if (aa->used<2)
for (i1=i0+1,bb=&pnt[i1];i1<pnt.num;i1++,bb++)
if (bb->used<2)
{
i=aa->i-bb->i; if (i<0) i=-i; e =i;
i=aa->j-bb->j; if (i<0) i=-i; e+=i;
if (e!=1) continue;
aa->used++; if (aa->p0<0) aa->p0=i1; else aa->p1=i1;
bb->used++; if (bb->p0<0) bb->p0=i0; else bb->p1=i0;
}
// try to connect neighbouring points distance=sqrt(2)
for (i0= 0,aa=&pnt[i0];i0<pnt.num;i0++,aa++)
if (aa->used<2)
for (i1=i0+1,bb=&pnt[i1];i1<pnt.num;i1++,bb++)
if (bb->used<2)
if ((aa->p0!=i1)&&(aa->p1!=i1))
if ((bb->p0!=i0)&&(bb->p1!=i0))
{
if ((aa->used)&&(aa->p0==bb->p0)) continue; // avoid small losed loops
i=aa->i-bb->i; if (i<0) i=-i; e =i*i;
i=aa->j-bb->j; if (i<0) i=-i; e+=i*i;
if (e!=2) continue;
aa->used++; if (aa->p0<0) aa->p0=i1; else aa->p1=i1;
bb->used++; if (bb->p0<0) bb->p0=i0; else bb->p1=i0;
}
// try to connect to closest point
int ii,dd;
for (i0= 0,aa=&pnt[i0];i0<pnt.num;i0++,aa++)
if (aa->used<2)
{
for (ii=-1,i1=i0+1,bb=&pnt[i1];i1<pnt.num;i1++,bb++)
if (bb->used<2)
if ((aa->p0!=i1)&&(aa->p1!=i1))
if ((bb->p0!=i0)&&(bb->p1!=i0))
{
i=aa->i-bb->i; if (i<0) i=-i; e =i*i;
i=aa->j-bb->j; if (i<0) i=-i; e+=i*i;
if ((ii<0)||(e<dd)) { ii=i1; dd=e; }
}
if (ii<0) continue;
i1=ii; bb=&pnt[i1];
aa->used++; if (aa->p0<0) aa->p0=i1; else aa->p1=i1;
bb->used++; if (bb->p0<0) bb->p0=i0; else bb->p1=i0;
}
// add connected points to lin[] ... this is hole perimeter !!!
// lines are 2 x duplicated so some additional code for sort the order of line swill be good idea
l.id=lin[ix[j-1]].id;
// add index of points instead points
int lin_i1=lin.num;
for (i0=0,aa=&pnt[i0];i0<pnt.num;i0++,aa++)
{
l.i0=i0;
if (aa->p0>i0) { l.i1=aa->p0; lin.add(l); }
if (aa->p1>i0) { l.i1=aa->p1; lin.add(l); }
}
// reorder perimeter lines
for (i0=lin_i1,a=&lin[i0];i0<lin.num-1;i0++,a++)
for (i1=i0+1 ,b=&lin[i1];i1<lin.num ;i1++,b++)
{
if (a->i1==b->i0) { a++; l=*a; *a=*b; *b=l; a--; break; }
if (a->i1==b->i1) { a++; l=*a; *a=*b; *b=l; i=a->i0; a->i0=a->i1; a->i1=i; a--; break; }
}
// convert point indexes to points
for (i0=lin_i1,a=&lin[i0];i0<lin.num;i0++,a++)
{
bb=&pnt[a->i0]; a->i0=bb->i; a->j0=bb->j;
bb=&pnt[a->i1]; a->i1=bb->i; a->j1=bb->j;
}
}
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
It is the same as the code in the holes link above,
just with the map[][] conditions inverted to search for polygons instead of holes,
and the found H/V lines are shrunk by half a cell from each side.
The _line coordinates are float now, so there is no need for 4x multi-sampling.
The best results are with 2x multi-sampling (to avoid polygons of width 1),
so add each cell in your map as 2x2 points in the cell area.
I also added 2 corner points to better align map[][] with your image.
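For the "join connected lines with the same slope" step mentioned earlier, a simple sketch (independent of the code above) is to drop every polygon vertex that lies on the straight line between its neighbours:
#include <cmath>
#include <vector>

struct P2 { float x, y; };

// remove vertices of a closed polygon that are collinear with their neighbours,
// i.e. join consecutive edges with the same slope
std::vector<P2> joinCollinear(const std::vector<P2>& poly)
{
    std::vector<P2> out;
    int n = (int)poly.size();
    for (int i = 0; i < n; i++) {
        const P2& a = poly[(i + n - 1) % n];
        const P2& b = poly[i];
        const P2& c = poly[(i + 1) % n];
        float cross = (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
        if (std::fabs(cross) > 1e-6f) out.push_back(b);   // keep only real corners
    }
    return out;
}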

Given n points on a 2D plane, find the maximum number of points that lie on the same straight line

Below is the solution I am trying to implement
/**
* Definition for a point.
* class Point {
* int x;
* int y;
* Point() { x = 0; y = 0; }
* Point(int a, int b) { x = a; y = b; }
* }
*/
public class Solution {
public int maxPoints(Point[] points) {
int max=0;
if(points.length==1)
return 1;
for(int i=0;i<points.length;i++){
for(int j=0;j<points.length;j++){
if((points[i].x!=points[j].x)||(points[i].y!=points[j].y)){
int coll=get_collinear(points[i].x,points[i].y,points[j].x,points[j].y,points);
if(coll>max)
max=coll;
}
else{
// Case where I am suffering
}
}
}
return max;
}
public int get_collinear(int x1,int y1,int x2, int y2,Point[] points)
{
int c=0;
for(int i=0;i<points.length;i++){
int k1=x1-points[i].x;
int l1=y1-points[i].y;
int k2=x2-points[i].x;
int l2=y2-points[i].y;
if((k1*l2-k2*l1)==0)
c++;
}
return c;
}
}
It runs in O(n^3). What I am basically doing is running two loops comparing pairs of points in the 2D plane, and then sending those 2 points to the get_collinear method, which checks the line formed by these 2 points against all the elements of the array to see whether the 3 points are collinear. I know this is a brute force method. However, in the case where the input is [(0,0),(0,0)] my result fails; the else branch is where I have to add a condition to handle such cases. Can someone help me with a solution to that? And does there exist a better solution to this problem with a better run time? I can't think of any.
BTW the complexity is indeed O(n^3); to lower it you need to:
sort the points somehow,
by x and/or y in ascending or descending order. Use of polar coordinates can also help sometimes;
use divide et impera (divide and conquer) algorithms:
for planar geometry algorithms it is usually a good idea to divide the area into quadrants and sub-quadrants, but these algorithms are hard to code on vector graphics.
There is also one other speedup possibility:
check against all possible directions (a limited number of them, for example only 360 angles), which leads to O(n^2). Then compute the results, which is still O(m^3) where m is the subset of points per tested direction.
OK, here is something basic I coded in C++ as an example:
void points_on_line()
{
const int dirs =360; // num of directions (accuracy)
double mdir=double(dirs)/M_PI; // conversion from angle to code
double pacc=0.01; // position acc <0,1>
double lmin=0.05; // min line size acc <0,1>
double lmax=0.25; // max line size acc <0,1>
double pacc2,lmin2,lmax2;
int n,ia,ib;
double x0,x1,y0,y1;
struct _lin
{
int dir; // dir code <0,dirs>
double ang; // dir [rad] <0,M_PI>
double dx,dy; // dir unit vector
int i0,i1; // index of points
} *lin;
glview2D::_pnt *a,*b;
glview2D::_lin q;
_lin l;
// prepare buffers
n=view.pnt.num; // n=number of points
n=((n*n)-n)>>1; // n=max number of lines
lin=new _lin[n]; n=0;
if (lin==NULL) return;
// precompute size of area and update accuracy constants ~O(N)
for (a=view.pnt.dat,ia=0;ia<view.pnt.num;ia++,a++)
{
if (!ia)
{
x0=a->p[0]; y0=a->p[1];
x1=a->p[0]; y1=a->p[1];
}
if (x0>a->p[0]) x0=a->p[0];
if (x1<a->p[0]) x1=a->p[0];
if (y0>a->p[1]) y0=a->p[1];
if (y1<a->p[1]) y1=a->p[1];
}
x1-=x0; y1-=y0; if (x1>y1) x1=y1;
pacc*=x1; pacc2=pacc*pacc;
lmin*=x1; lmin2=lmin*lmin;
lmax*=x1; lmax2=lmax*lmax;
// precompute lines ~O((N^2)/2)
for (a=view.pnt.dat,ia=0;ia<view.pnt.num;ia++,a++)
for (b=a+1,ib=ia+1;ib<view.pnt.num;ib++,b++)
{
l.i0=ia;
l.i1=ib;
x0=b->p[0]-a->p[0];
y0=b->p[1]-a->p[1];
x1=(x0*x0)+(y0*y0);
if (x1<=lmin2) continue; // ignore too small lines
if (x1>=lmax2) continue; // ignore too big lines
l.ang=atanxy(x0,y0);
if (l.ang>M_PI) l.ang-=M_PI; // 180 deg is enough lines goes both ways ...
l.dx=cos(l.ang);
l.dy=sin(l.ang);
l.dir=double(l.ang*mdir);
lin[n]=l; n++;
// q.p0=*a; q.p1=*b; view.lin.add(q); // just visualise used lines for testing
}
// test directions
int cnt,cntmax=0;
double t;
for (ia=0;ia<n;ia++)
{
cnt=1;
for (ib=ia+1;ib<n;ib++)
if (lin[ia].dir==lin[ib].dir)
{
a=&view.pnt[lin[ia].i0];
if (lin[ia].i0!=lin[ib].i0)
b=&view.pnt[lin[ib].i0];
else b=&view.pnt[lin[ib].i1];
x0=b->p[0]-a->p[0]; x0*=x0;
y0=b->p[1]-a->p[1]; y0*=y0;
t=sqrt(x0+y0);
x0=a->p[0]+(t*lin[ia].dx)-b->p[0]; x0*=x0;
y0=a->p[1]+(t*lin[ia].dy)-b->p[1]; y0*=y0;
t=x0+y0;
if (fabs(t)<=pacc2) cnt++;
}
if (cntmax<cnt) // if more points on single line found
{
cntmax=cnt; // update point count
q.p0=view.pnt[lin[ia].i0]; // copy start/end point
q.p1=q.p0;
q.p0.p[0]-=500.0*lin[ia].dx; // and set result line as very big (infinite) line
q.p0.p[1]-=500.0*lin[ia].dy;
q.p1.p[0]+=500.0*lin[ia].dx;
q.p1.p[1]+=500.0*lin[ia].dy;
}
}
if (cntmax) view.lin.add(q);
view.redraw=true;
delete[] lin;
// Caption=n; // just to see how many lines actualy survive the filtering
}
It is from my geometry engine, so here is some of the stuff explained:
glview2D::_pnt
view.pnt[] are the input 2D points (I feed it random points around a random line plus random noise points)
view.pnt.num is the number of points
glview2D::_lin
view.lin[] are the output lines (just one line is used)
accuracy
Play with the pacc, lmin, lmax constants to change the behavior and computation speed. Change dirs to change the direction accuracy and computation speed.
A complexity estimate is not possible due to the strong dependency on the input data.
But for my random test points the runtimes are like this:
[ 0.056 ms]Genere 100 random 2D points
[ 151.839 ms]Compute 100 points on line1 (unoptimized brute force O(N^3))
[ 4.385 ms]Compute 100 points on line2 (optimized direction check)
[ 0.096 ms] Genere 200 random 2D points
[1041.676 ms] Compute 200 points on line1
[ 39.561 ms] Compute 200 points on line2
[ 0.440 ms] Genere 1000 random 2D points
[29061.54 ms] Compute 1000 points on line2
