In a nutshell: I want to do a non-approximate version of Bresenham's line algorithm, but for a rectangle rather than a line, and whose points aren't necessarily aligned to the grid.
Given a square grid, and a rectangle comprising four non-grid-aligned points, I want to find a list of all grid squares that are covered, partially or completely, by the rectangle.
Bresenham's line algorithm is approximate – not all partially covered squares are identified. I'm looking for a "perfect" algorithm, that has no false positives or negatives.
It's an old question, but I have solved this issue (C++)
https://github.com/feelinfine/tracer
Maybe it will be useful for someone.
(Sorry for my poor English.)
Single line tracing
template <typename PointType>
std::set<V2i> trace_line(const PointType& _start_point, const PointType& _end_point, size_t _cell_size)
{
    auto point_to_grid_fnc = [_cell_size](const auto& _point)
    {
        return V2i(std::floor((double)_point.x / _cell_size), std::floor((double)_point.y / _cell_size));
    };

    V2i start_cell = point_to_grid_fnc(_start_point);
    V2i last_cell = point_to_grid_fnc(_end_point);
    PointType direction = _end_point - _start_point;

    //Moving direction (cells)
    int step_x = (direction.x >= 0) ? 1 : -1;
    int step_y = (direction.y >= 0) ? 1 : -1;

    //Normalize vector
    double hypot = std::hypot(direction.x, direction.y);
    V2d norm_direction(direction.x / hypot, direction.y / hypot);

    //Distance to the nearest square side
    double near_x = (step_x >= 0) ? (start_cell.x + 1)*_cell_size - _start_point.x : _start_point.x - (start_cell.x*_cell_size);
    double near_y = (step_y >= 0) ? (start_cell.y + 1)*_cell_size - _start_point.y : _start_point.y - (start_cell.y*_cell_size);

    //How far along the ray we must move to cross the first vertical (ray_step_to_vside) or horizontal (ray_step_to_hside) grid line
    double ray_step_to_vside = (norm_direction.x != 0) ? near_x / norm_direction.x : std::numeric_limits<double>::max();
    double ray_step_to_hside = (norm_direction.y != 0) ? near_y / norm_direction.y : std::numeric_limits<double>::max();

    //How far along the ray we must move for the horizontal (dx) or vertical (dy) component of such movement to equal the cell size
    double dx = (norm_direction.x != 0) ? _cell_size / norm_direction.x : std::numeric_limits<double>::max();
    double dy = (norm_direction.y != 0) ? _cell_size / norm_direction.y : std::numeric_limits<double>::max();

    //Tracing loop
    std::set<V2i> cells;
    cells.insert(start_cell);

    V2i current_cell = start_cell;

    size_t grid_bound_x = std::abs(last_cell.x - start_cell.x);
    size_t grid_bound_y = std::abs(last_cell.y - start_cell.y);

    size_t counter = 0;
    while (counter != (grid_bound_x + grid_bound_y))
    {
        if (std::abs(ray_step_to_vside) < std::abs(ray_step_to_hside))
        {
            ray_step_to_vside = ray_step_to_vside + dx; //to the next vertical grid line
            current_cell.x = current_cell.x + step_x;
        }
        else
        {
            ray_step_to_hside = ray_step_to_hside + dy; //to the next horizontal grid line
            current_cell.y = current_cell.y + step_y;
        }

        ++counter;
        cells.insert(current_cell);
    }

    return cells;
}
Get all cells
template <typename Container>
std::set<V2i> pick_cells(Container&& _points, size_t _cell_size)
{
    if (_points.size() < 2 || _cell_size <= 0)
        return std::set<V2i>();

    Container points = std::forward<Container>(_points);

    auto add_to_set = [](auto& _set, const auto& _to_append)
    {
        _set.insert(std::cbegin(_to_append), std::cend(_to_append));
    };

    //Outline
    std::set<V2i> cells;

    /*
    for (auto it = std::begin(_points); it != std::prev(std::end(_points)); ++it)
        add_to_set(cells, trace_line(*it, *std::next(it), _cell_size));
    add_to_set(cells, trace_line(_points.back(), _points.front(), _cell_size));
    */

    //Maybe this code works faster
    std::vector<std::future<std::set<V2i>>> results;

    using PointType = typename decltype(points.cbegin())::value_type;

    for (auto it = points.cbegin(); it != std::prev(points.cend()); ++it)
        results.push_back(std::async(trace_line<PointType>, *it, *std::next(it), _cell_size));
    results.push_back(std::async(trace_line<PointType>, points.back(), points.front(), _cell_size));

    for (auto& it : results)
        add_to_set(cells, it.get());

    //Inner
    std::set<V2i> to_add;

    int last_x = cells.begin()->x;
    int counter = cells.begin()->y;
    for (auto& it : cells)
    {
        if (last_x != it.x)
        {
            counter = it.y;
            last_x = it.x;
        }

        if (it.y > counter)
        {
            for (int i = counter; i < it.y; ++i)
                to_add.insert(V2i(it.x, i));
        }

        ++counter;
    }

    add_to_set(cells, to_add);

    return cells;
}
Types
template <typename _T>
struct V2
{
    _T x, y;

    V2(_T _x = 0, _T _y = 0) : x(_x), y(_y)
    {
    }

    V2 operator-(const V2& _rhs) const
    {
        return V2(x - _rhs.x, y - _rhs.y);
    }

    bool operator==(const V2& _rhs) const
    {
        return (x == _rhs.x) && (y == _rhs.y);
    }

    //for std::set sorting
    bool operator<(const V2& _rhs) const
    {
        return (x == _rhs.x) ? (y < _rhs.y) : (x < _rhs.x);
    }
};

using V2d = V2<double>;
using V2i = V2<int>;
Usage
std::vector<V2d> points = { {200, 200}, {400, 400}, {500,100} };
size_t cell_size = 30;
auto cells = pick_cells(points, cell_size);
for (auto& it : cells)
... //do something with cells
You can use a scanline approach. The rectangle is a closed convex polygon, so it is sufficient to store the leftmost and rightmost pixel for each horizontal scanline. (And the top and bottom scanlines, too.)
The Bresenham algorithm tries to draw a thin, visually pleasing line without adjacent cells in the smaller dimension. We need an algorithm that visits each cell that the edges of the polygon pass through. The basic idea is to find the starting cell (x, y) for each edge and then to adjust x whenever the edge intersects a vertical border and to adjust y when it intersects a horizontal border.
We can represent the intersections by means of a normalised coordinate s that travels along the edge and that is 0.0 at the first node n1 and 1.0 at the second node n2.
var x = Math.floor(n1.x / cellsize);
var y = Math.floor(n1.y / cellsize);
var s = 0;
The vertical intersections can then be represented as equidistant steps of width dsx from an initial sx.
var dx = n2.x - n1.x;
var sx = 10; // default value > 1.0
// first intersection
if (dx < 0) sx = (cellsize * x - n1.x) / dx;
if (dx > 0) sx = (cellsize * (x + 1) - n1.x) / dx;
var dsx = (dx != 0) ? cellsize / Math.abs(dx) : 0;
Likewise for the horizontal intersections. A default value greater than 1.0 catches the cases of horizontal and vertical lines. Add the first point to the scanline data:
add(scan, x, y);
Then we can visit the next adjacent cell by looking at the next intersection with the smallest s.
while (sx <= 1 || sy <= 1) {
    if (sx < sy) {
        sx += dsx;
        if (dx > 0) x++; else x--;
    } else {
        sy += dsy;
        if (dy > 0) y++; else y--;
    }
    add(scan, x, y);
}
Do this for all four edges and with the same scanline data. Then fill all cells:
for (var y in scan) {
    var x = scan[y].min;
    var xend = scan[y].max + 1;
    while (x < xend) {
        // do something with cell (x, y)
        x++;
    }
}
(I have only skimmed the links MBo provided. It seems that the approach presented in that paper is essentially the same as mine. If so, please excuse the redundant answer, but after working this out I thought I could as well post it.)
This is sub-optimal but might give a general idea.
First off, treat the special case of the rectangle being aligned horizontally or vertically separately. This is pretty easy to test for and makes the rest simpler.
You can represent the rectangle as a set of 4 inequalities:
a1 x + b1 y >= c1
a1 x + b1 y <= c2
a3 x + b3 y >= c3
a3 x + b3 y <= c4
As the edges of the rectangle are parallel, some of the constants are the same. You also have (up to a multiple) a3 = b1 and b3 = -a1. You can multiply each inequality by a common factor so you are working with integers.
Now consider each scan line with a fixed value of y.
For each value of y find the four points where the lines intersect the scan line, that is, find the intersection with each line above. A little bit of logic will find the minimum and maximum values of x. Plot all pixels between these values.
Your condition that you want all partially covered squares makes things a little trickier. You can solve this by considering two adjacent scan lines: you want to plot the points between the minimum x for both lines and the maximum x for both lines. Say
a1 x + b1 y >= c is the inequality for the bottom-left line in the figure. You want to find the largest x such that a1 x + b1 y < c; this will be floor((c - b1 y)/a1). Call this minx(y); also find minx(y+1), and the left-hand point will be the minimum of these two values.
There are many easy optimisations: you can find the y-values of the top and bottom corners, reducing the range of y-values to test, and you should only need to test two sides. For each end point of each line there is one multiplication, one subtraction and one division. The division is the slowest part, I think about 4 times slower than the other ops. You might be able to remove it with the Bresenham or DDA algorithms others have mentioned.
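To make the scanline idea concrete, here is a minimal sketch (my own code, not from the answer above) that, for a convex polygon such as the four rectangle corners given in order, clips every edge against each horizontal strip of cells and marks all cells between the leftmost and rightmost clipped x. Cells that are touched exactly on a boundary may need extra handling depending on how you define "partially covered".

#include <algorithm>
#include <cmath>
#include <set>
#include <utility>
#include <vector>

struct Pt { double x, y; };

// All cells (gx, gy) of size `cell` that a convex polygon overlaps.
std::set<std::pair<int, int>> cells_covered(const std::vector<Pt>& poly, double cell)
{
    std::set<std::pair<int, int>> out;
    double ymin = poly[0].y, ymax = poly[0].y;
    for (const Pt& p : poly) { ymin = std::min(ymin, p.y); ymax = std::max(ymax, p.y); }

    for (int gy = (int)std::floor(ymin / cell); gy <= (int)std::floor(ymax / cell); ++gy)
    {
        const double y0 = gy * cell, y1 = (gy + 1) * cell;   // bounds of this strip of cells
        std::vector<double> xs;                              // candidate extreme x values

        for (size_t i = 0; i < poly.size(); ++i)
        {
            const Pt& a = poly[i];
            const Pt& b = poly[(i + 1) % poly.size()];

            if (a.y >= y0 && a.y <= y1)                      // vertex inside the strip
                xs.push_back(a.x);

            const double strip[2] = { y0, y1 };              // edge crosses a strip boundary?
            for (double ys : strip)
                if ((a.y - ys) * (b.y - ys) < 0.0)
                    xs.push_back(a.x + (b.x - a.x) * (ys - a.y) / (b.y - a.y));
        }
        if (xs.empty()) continue;

        const double xmin = *std::min_element(xs.begin(), xs.end());
        const double xmax = *std::max_element(xs.begin(), xs.end());
        for (int gx = (int)std::floor(xmin / cell); gx <= (int)std::floor(xmax / cell); ++gx)
            out.insert({ gx, gy });
    }
    return out;
}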
There is a method by Amanatides and Woo to enumerate all intersected cells:
A Fast Voxel Traversal Algorithm for Ray Tracing.
Here is a practical implementation.
As a side effect you will also get the points of intersection with the grid lines - this may be useful if you need the areas of partially covered cells (for antialiasing etc.).
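For reference, here is a minimal 2D sketch of the Amanatides & Woo traversal for one segment, written by me in the spirit of the paper (the tMax/tDelta names follow the paper; this is not the implementation linked above, just an illustration of the idea):

#include <cmath>
#include <cstdlib>
#include <limits>
#include <vector>

struct Cell { int x, y; };

std::vector<Cell> amanatides_woo(double x0, double y0, double x1, double y1, double cell)
{
    int ix = (int)std::floor(x0 / cell), iy = (int)std::floor(y0 / cell);
    const int ix_end = (int)std::floor(x1 / cell), iy_end = (int)std::floor(y1 / cell);

    const double dx = x1 - x0, dy = y1 - y0;
    const int step_x = (dx >= 0) ? 1 : -1;
    const int step_y = (dy >= 0) ? 1 : -1;

    const double inf = std::numeric_limits<double>::infinity();
    // value of the segment parameter t (0 at the start, 1 at the end) at which
    // the ray crosses the next vertical / horizontal grid line
    double t_max_x = (dx != 0) ? ((ix + (step_x > 0)) * cell - x0) / dx : inf;
    double t_max_y = (dy != 0) ? ((iy + (step_y > 0)) * cell - y0) / dy : inf;
    // how much t grows per whole cell crossed in x / in y
    const double t_delta_x = (dx != 0) ? cell / std::fabs(dx) : inf;
    const double t_delta_y = (dy != 0) ? cell / std::fabs(dy) : inf;

    std::vector<Cell> cells{ { ix, iy } };
    const int steps = std::abs(ix_end - ix) + std::abs(iy_end - iy);
    for (int i = 0; i < steps; ++i)
    {
        if (t_max_x < t_max_y) { ix += step_x; t_max_x += t_delta_x; }
        else                   { iy += step_y; t_max_y += t_delta_y; }
        cells.push_back({ ix, iy });
    }
    return cells;
}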
A classic algorithm question in its 2D version is typically described as:
Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
For example, Given the input
[0,1,0,2,1,0,1,3,2,1,2,1]
the return value would be
6
The algorithm that I used to solve the above 2D problem is
int trapWaterVolume2D(vector<int> A) {
    int n = A.size();
    vector<int> leftmost(n, 0), rightmost(n, 0);

    //left exclusive scan, O(n), the highest bar to the left of each point
    int leftMaxSoFar = 0;
    for (int i = 0; i < n; i++) {
        leftmost[i] = leftMaxSoFar;
        if (A[i] > leftMaxSoFar) leftMaxSoFar = A[i];
    }

    //right exclusive scan, O(n), the highest bar to the right of each point
    int rightMaxSoFar = 0;
    for (int i = n - 1; i >= 0; i--) {
        rightmost[i] = rightMaxSoFar;
        if (A[i] > rightMaxSoFar) rightMaxSoFar = A[i];
    }

    // Summation, O(n)
    int vol = 0;
    for (int i = 0; i < n; i++) {
        vol += max(0, min(leftmost[i], rightmost[i]) - A[i]);
    }

    return vol;
}
My question is how to extend the above algorithm to the 3D version of the problem, to compute the maximum volume of water trapped in real-world 3D terrain, i.e. to implement
int trapWaterVolume3D(vector<vector<int> > A);
Sample graph:
We know the elevation at each (x, y) point and the goal is to compute the maximum volume of water that can be trapped in the shape. Any thoughts and references are welcome.
For each point on the terrain consider all paths from that point to the border of the terrain. The level of water would be the minimum of the maximum heights of the points of those paths. To find it we need to perform a slightly modified Dijkstra's algorithm, filling the water level matrix starting from the border.
For every point on the border set the water level to the point height
For every point not on the border set the water level to infinity
Put every point on the border into the set of active points
While the set of active points is not empty:
    Select the active point P with minimum level
    Remove P from the set of active points
    For every point Q adjacent to P:
        Level(Q) = max(Height(Q), min(Level(Q), Level(P)))
        If Level(Q) was changed:
            Add Q to the set of active points
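A hedged C++ sketch of the pseudocode above (my own code, not the answerer's): the water level per cell is computed by a best-first flood from the border, using a priority queue as the set of active points; the trapped volume is then the sum of Level minus Height over all cells.

#include <algorithm>
#include <functional>
#include <limits>
#include <queue>
#include <tuple>
#include <vector>

std::vector<std::vector<int>> waterLevels(const std::vector<std::vector<int>>& h)
{
    const int rows = (int)h.size(), cols = (int)h[0].size();
    const int INF = std::numeric_limits<int>::max();
    std::vector<std::vector<int>> level(rows, std::vector<int>(cols, INF));

    using Item = std::tuple<int, int, int>;   // (level, row, col)
    std::priority_queue<Item, std::vector<Item>, std::greater<Item>> active;

    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            if (r == 0 || c == 0 || r == rows - 1 || c == cols - 1) {
                level[r][c] = h[r][c];                    // border: level = height
                active.emplace(level[r][c], r, c);
            }

    const int dr[] = { 1, -1, 0, 0 }, dc[] = { 0, 0, 1, -1 };
    while (!active.empty()) {
        auto [lv, r, c] = active.top();
        active.pop();
        if (lv > level[r][c]) continue;                   // stale queue entry
        for (int k = 0; k < 4; ++k) {
            const int nr = r + dr[k], nc = c + dc[k];
            if (nr < 0 || nc < 0 || nr >= rows || nc >= cols) continue;
            const int nl = std::max(h[nr][nc], std::min(level[nr][nc], lv));
            if (nl < level[nr][nc]) {                     // Level(Q) was changed
                level[nr][nc] = nl;
                active.emplace(nl, nr, nc);
            }
        }
    }
    return level;   // trapped water at (r, c) = level[r][c] - h[r][c]
}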
user3290797's "slightly modified Dijkstra algorithm" is closer to Prim's algorithm than Dijkstra's. In minimum spanning tree terms, we prepare a graph with one vertex per tile, one vertex for the outside, and edges with weights equal to the maximum height of their two adjoining tiles (the outside has height "minus infinity").
Given a path in this graph to the outside vertex, the maximum weight of an edge in the path is the height that the water has to reach in order to escape along that path. The relevant property of a minimum spanning tree is that, for every pair of vertices, the maximum weight of an edge in the path in the spanning tree is the minimum possible among all paths between those vertices. The minimum spanning tree thus describes the most economical escape paths for water, and the water heights can be extracted in linear time with one traversal.
As a bonus, since the graph is planar, there's a linear-time algorithm for computing the minimum spanning tree, consisting of alternating Boruvka passes and simplifications. This improves on the O(n log n) running time of Prim.
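A hedged sketch of that last extraction step (my own code, under the assumption that the minimum spanning tree is already available as an adjacency list whose vertex 0 is the outside vertex): one DFS propagates the maximum edge weight on the path from the outside, which is exactly the water level of each tile.

#include <algorithm>
#include <limits>
#include <vector>

struct TreeEdge { int to; int weight; };   // weight = max height of the two adjoining tiles

std::vector<int> waterLevelsFromMst(const std::vector<std::vector<TreeEdge>>& mst)
{
    const int n = (int)mst.size();
    std::vector<int> level(n, 0);
    std::vector<char> seen(n, 0);
    std::vector<int> stack{ 0 };                      // vertex 0 = outside
    seen[0] = 1;
    level[0] = std::numeric_limits<int>::min();       // the outside has height "minus infinity"

    while (!stack.empty()) {
        const int v = stack.back();
        stack.pop_back();
        for (const TreeEdge& e : mst[v]) {
            if (seen[e.to]) continue;
            seen[e.to] = 1;
            level[e.to] = std::max(level[v], e.weight);   // path-max from the outside
            stack.push_back(e.to);
        }
    }
    return level;   // trapped water on tile t = max(0, level[t] - height[t])
}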
This problem can be solved using the Priority-Flood algorithm. It's been discovered and published a number of times over the past few decades (and again by other people answering this question), though the specific variant you're looking for is not, to my knowledge, in the literature.
You can find a review paper of the algorithm and its variants here. Since that paper was published an even faster variant has been discovered (link), as well as methods to perform this calculation on datasets of trillions of cells (link). A method for selectively breaching low/narrow divides is discussed here. Contact me if you'd like copies of any of these papers.
I have a repository here with many of the above variants; additional implementations can be found here.
A simple script to calculate volume using the RichDEM library is as follows:
#include "richdem/common/version.hpp"
#include "richdem/common/router.hpp"
#include "richdem/depressions/Lindsay2016.hpp"
#include "richdem/common/Array2D.hpp"
/**
  @brief  Calculates the volume of depressions in a DEM
  @author Richard Barnes (rbarnes#umn.edu)

    Priority-Flood starts on the edges of the DEM and then works its way inwards
    using a priority queue to determine the lowest cell which has a path to the
    edge. The neighbours of this cell are added to the priority queue if they
    are higher. If they are lower, then they are members of a depression and the
    elevation of the flooding minus the elevation of the DEM times the cell area
    is the flooded volume of the cell. The cell is flooded, total volume
    tracked, and the neighbors are then added to a "depressions" queue which is
    used to flood depressions. Cells which are higher than a depression being
    filled are added to the priority queue. In this way, depressions are filled
    without incurring the expense of the priority queue.

  @param[in,out]  &elevations   A grid of cell elevations

  @pre
    1. **elevations** contains the elevations of every cell or a value _NoData_
       for cells not part of the DEM. Note that the _NoData_ value is assumed to
       be a negative number less than any actual data value.

  @return
    Returns the total volume of the flooded depressions.

  @correctness
    The correctness of this command is determined by inspection. (TODO)
*/
template <class elev_t>
double improved_priority_flood_volume(const Array2D<elev_t> &elevations){
GridCellZ_pq<elev_t> open;
std::queue<GridCellZ<elev_t> > pit;
uint64_t processed_cells = 0;
uint64_t pitc = 0;
ProgressBar progress;
std::cerr<<"\nPriority-Flood (Improved) Volume"<<std::endl;
std::cerr<<"\nC Barnes, R., Lehman, C., Mulla, D., 2014. Priority-flood: An optimal depression-filling and watershed-labeling algorithm for digital elevation models. Computers & Geosciences 62, 117–127. doi:10.1016/j.cageo.2013.04.024"<<std::endl;
std::cerr<<"p Setting up boolean flood array matrix..."<<std::endl;
//Used to keep track of which cells have already been considered
Array2D<int8_t> closed(elevations.width(),elevations.height(),false);
std::cerr<<"The priority queue will require approximately "
<<(elevations.width()*2+elevations.height()*2)*((long)sizeof(GridCellZ<elev_t>))/1024/1024
<<"MB of RAM."
<<std::endl;
std::cerr<<"p Adding cells to the priority queue..."<<std::endl;
//Add all cells on the edge of the DEM to the priority queue
for(int x=0;x<elevations.width();x++){
open.emplace(x,0,elevations(x,0) );
open.emplace(x,elevations.height()-1,elevations(x,elevations.height()-1) );
closed(x,0)=true;
closed(x,elevations.height()-1)=true;
}
for(int y=1;y<elevations.height()-1;y++){
open.emplace(0,y,elevations(0,y) );
open.emplace(elevations.width()-1,y,elevations(elevations.width()-1,y) );
closed(0,y)=true;
closed(elevations.width()-1,y)=true;
}
double volume = 0;
std::cerr<<"p Performing the improved Priority-Flood..."<<std::endl;
progress.start( elevations.size() );
while(open.size()>0 || pit.size()>0){
GridCellZ<elev_t> c;
if(pit.size()>0){
c=pit.front();
pit.pop();
} else {
c=open.top();
open.pop();
}
processed_cells++;
for(int n=1;n<=8;n++){
int nx=c.x+dx[n];
int ny=c.y+dy[n];
if(!elevations.inGrid(nx,ny)) continue;
if(closed(nx,ny))
continue;
closed(nx,ny)=true;
if(elevations(nx,ny)<=c.z){
if(elevations(nx,ny)<c.z){
++pitc;
volume += (c.z-elevations(nx,ny))*std::abs(elevations.getCellArea());
}
pit.emplace(nx,ny,c.z);
} else
open.emplace(nx,ny,elevations(nx,ny));
}
progress.update(processed_cells);
}
std::cerr<<"t Succeeded in "<<std::fixed<<std::setprecision(1)<<progress.stop()<<" s"<<std::endl;
std::cerr<<"m Cells processed = "<<processed_cells<<std::endl;
std::cerr<<"m Cells in pits = " <<pitc <<std::endl;
return volume;
}
template<class T>
int PerformAlgorithm(std::string analysis, Array2D<T> elevations){
elevations.loadData();
std::cout<<"Volume: "<<improved_priority_flood_volume(elevations)<<std::endl;
return 0;
}
int main(int argc, char **argv){
std::string analysis = PrintRichdemHeader(argc,argv);
if(argc!=2){
std::cerr<<argv[0]<<" <Input>"<<std::endl;
return -1;
}
return PerformAlgorithm(argv[1],analysis);
}
It should be straightforward to adapt this to whatever 2D array format you are using.
In pseudocode, the following is equivalent to the foregoing:
Let PQ be a priority-queue which always pops the cell of lowest elevation
Let Closed be a boolean array initially set to False
Let Volume = 0

Add all the border cells to PQ.
For each border cell, set the cell's entry in Closed to True.

While PQ is not empty:
    Select the top cell from PQ, call it C.
    Pop the top cell from PQ.
    For each neighbor N of C:
        If Closed(N):
            Continue
        If Elevation(N)<Elevation(C):
            Volume += (Elevation(C)-Elevation(N))*Area
            Add N to PQ, but with Elevation(C)
        Else:
            Add N to PQ with Elevation(N)
        Set Closed(N)=True
This problem is very close to the construction of the morphological watershed of a grayscale image.
One approach is as follows (flooding process):
sort all pixels by increasing elevation.
work incrementally, by increasing elevations, assigning labels to the pixels per catchment basin.
for a new elevation level, you need to label a new set of pixels:
Some have no labeled neighbor; they form a local minimum configuration and begin a new catchment basin.
Some have only neighbors with the same label; they can be labeled similarly (they extend a catchment basin).
Some have neighbors with different labels. They do not belong to a specific catchment basin and they define the watershed lines.
You will need to enhance the standard watershed algorithm to be able to compute the volume of water. You can do that by determining the maximum water level in each basin and deducing the height of water above every pixel. The water level in a basin is given by the elevation of the lowest watershed pixel around it.
You can act every time you discover a watershed pixel: if a neighboring basin has not been assigned a level yet, that basin can stand the current level without leaking.
In order to solve the trapped-water problem in 3D, i.e. to calculate the maximum volume of trapped rain water, you can do something like this:
#include<bits/stdc++.h>
using namespace std;
#define MAX 10
int new2d[MAX][MAX];
int dp[MAX][MAX],visited[MAX][MAX];
int dx[] = {1,0,-1,0};
int dy[] = {0,-1,0,1};
int boundedBy(int i,int j,int k,int in11,int in22)
{
if(i<0 || j<0 || i>=in11 || j>=in22)
return 0;
if(new2d[i][j]>k)
return new2d[i][j];
if(visited[i][j]) return INT_MAX;
visited[i][j] = 1;
int r = INT_MAX;
for(int dir = 0 ; dir<4 ; dir++)
{
int nx = i + dx[dir];
int ny = j + dy[dir];
r = min(r,boundedBy(nx,ny,k,in11,in22));
}
return r;
}
void mark(int i,int j,int k,int in1,int in2)
{
if(i<0 || j<0 || i>=in1 || j>=in2)
return;
if(new2d[i][j]>=k)
return;
if(visited[i][j]) return ;
visited[i][j] = 1;
for(int dir = 0;dir<4;dir++)
{
int nx = i + dx[dir];
int ny = j + dy[dir];
mark(nx,ny,k,in1,in2);
}
dp[i][j] = max(dp[i][j],k);
}
struct node
{
int i,j,key;
node(int x,int y,int k)
{
i = x;
j = y;
key = k;
}
};
bool compare(node a,node b)
{
return a.key>b.key;
}
vector<node> store;
int getData(int input1, int input2, int input3[])
{
int row=input1;
int col=input2;
int temp=0;
int count=0;
for(int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
if(count==(col*row))
break;
new2d[i][j]=input3[count];
count++;
}
}
store.clear();
for(int i = 0;i<input1;i++)
{
for(int j = 0;j<input2;j++)
{
store.push_back(node(i,j,new2d[i][j]));
}
}
memset(dp,0,sizeof(dp));
sort(store.begin(),store.end(),compare);
for(int i = 0;i<store.size();i++)
{
memset(visited,0,sizeof(visited));
int aux = boundedBy(store[i].i,store[i].j,store[i].key,input1,input2);
if(aux>store[i].key)
{
memset(visited,0,sizeof(visited));
mark(store[i].i,store[i].j,aux,input1,input2);
}
}
long long result =0 ;
for(int i = 0;i<input1;i++)
{
for(int j = 0;j<input2;j++)
{
result = result + max(0,dp[i][j]-new2d[i][j]);
}
}
return result;
}
int main()
{
cin.sync_with_stdio(false);
cout.sync_with_stdio(false);
int n,m;
cin>>n>>m;
int inp3[n*m];
store.clear();
for(int j = 0;j<n*m;j++)
{
cin>>inp3[j];
}
int k = getData(n,m,inp3);
cout<<k;
return 0;
}
class Solution(object):
    def trapRainWater(self, heightMap):
        """
        :type heightMap: List[List[int]]
        :rtype: int
        """
        m = len(heightMap)
        if m == 0:
            return 0
        n = len(heightMap[0])
        if n == 0:
            return 0
        visited = [[False for i in range(n)] for j in range(m)]

        from Queue import PriorityQueue
        q = PriorityQueue()
        for i in range(m):
            visited[i][0] = True
            q.put([heightMap[i][0], i, 0])
            visited[i][n-1] = True
            q.put([heightMap[i][n-1], i, n-1])
        for j in range(1, n-1):
            visited[0][j] = True
            q.put([heightMap[0][j], 0, j])
            visited[m-1][j] = True
            q.put([heightMap[m-1][j], m-1, j])

        S = 0
        while not q.empty():
            cell = q.get()
            for (i, j) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
                x = cell[1] + i
                y = cell[2] + j
                if x in range(m) and y in range(n) and not visited[x][y]:
                    S += max(0, cell[0] - heightMap[x][y])  # how much water at the cell
                    q.put([max(heightMap[x][y], cell[0]), x, y])
                    visited[x][y] = True
        return S
Here is simple code for the same (2D) problem:
#include<iostream>
#include<algorithm>
using namespace std;

int main()
{
    int n, count = 0, a[100];
    cin >> n;
    for (int i = 0; i < n; i++)
    {
        cin >> a[i];
    }
    for (int i = 1; i < n - 1; i++)
    {
        ///computing the leftmost largest and rightmost largest element of the array
        int leftmax = 0;
        int rightmax = 0;
        ///leftmost largest
        for (int j = i - 1; j >= 0; j--)
        {
            if (a[j] > leftmax)
            {
                leftmax = a[j];
            }
        }
        ///rightmost largest
        for (int k = i + 1; k <= n - 1; k++)
        {
            if (a[k] > rightmax)
            {
                rightmax = a[k];
            }
        }
        ///computing the height of the water contained
        int x = (min(rightmax, leftmax) - a[i]);
        if (x > 0)
        {
            count = count + x;
        }
    }
    cout << count;
    return 0;
}
I have a triangulated mesh. Assume it looks like a bumpy surface. I want to be able to find all edges that fall on the surrounding border of the mesh. (Forget about inner vertices.)
I know I have to find edges that are only connected to one triangle, and collect all these together and that is the answer. But I want to be sure that the vertices of these edges are ordered clockwise around the shape.
I want to do this because I would like to get a polygon line around the outside of mesh.
I hope this is clear enough to understand. In a sense I am trying to "de-triangulate" the mesh. Ha! If there is such a term.
Boundary edges are only referenced by a single triangle in the mesh, so to find them you need to scan through all triangles in the mesh and take the edges with a single reference count. You can do this efficiently (in O(N)) by making use of a hash table.
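A hedged sketch of that reference-counting step (my own code, with my own Triangle/Edge types): count how many triangles reference each undirected edge in a hash table; edges referenced exactly once are boundary edges.

#include <array>
#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

using Triangle = std::array<int, 3>;        // three vertex indices
using Edge = std::pair<int, int>;

std::vector<Edge> boundary_edges(const std::vector<Triangle>& tris)
{
    // key = the two vertex indices packed (smaller first), value = reference count
    std::unordered_map<std::uint64_t, int> count;
    auto key = [](int a, int b) {
        if (a > b) std::swap(a, b);
        return (std::uint64_t(std::uint32_t(a)) << 32) | std::uint32_t(b);
    };

    for (const Triangle& t : tris)
        for (int i = 0; i < 3; ++i)
            ++count[key(t[i], t[(i + 1) % 3])];

    std::vector<Edge> result;
    for (const Triangle& t : tris)
        for (int i = 0; i < 3; ++i) {
            const int a = t[i], b = t[(i + 1) % 3];
            if (count[key(a, b)] == 1)
                result.push_back({ a, b });   // kept in the triangle's own winding order
        }
    return result;
}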
To convert the edge set to an ordered polygon loop you can use a traversal method:
Pick any unvisited edge segment [v_start,v_next] and add these vertices to the polygon loop.
Find the unvisited edge segment [v_i,v_j] that has either v_i = v_next or v_j = v_next and add the other vertex (the one not equal to v_next) to the polygon loop. Reset v_next as this newly added vertex, mark the edge as visited and continue from 2.
Traversal is done when we get back to v_start.
The traversal will give a polygon loop that could have either clock-wise or counter-clock-wise ordering. A consistent ordering can be established by considering the signed area of the polygon. If the traversal results in the wrong orientation you simply need to reverse the order of the polygon loop vertices.
Well, as the saying goes - get it working - then get it working better. I noticed that my above example assumes all the edges in the edges array do in fact link up in a nice border. This may not be the case in the real world (as I have discovered from the input files I am using!). In fact some of my input files actually have many polygons, and all need their borders detected. I also wanted to make sure the winding order is correct. So I have fixed that up as well. See below. (I feel I am making headway at last!)
private static List<int> OrganizeEdges(List<int> edges, List<Point> positions)
{
var visited = new Dictionary<int, bool>();
var edgeList = new List<int>();
var resultList = new List<int>();
var nextIndex = -1;
while (resultList.Count < edges.Count)
{
if (nextIndex < 0)
{
for (int i = 0; i < edges.Count; i += 2)
{
if (!visited.ContainsKey(i))
{
nextIndex = edges[i];
break;
}
}
}
for (int i = 0; i < edges.Count; i += 2)
{
if (visited.ContainsKey(i))
continue;
int j = i + 1;
int k = -1;
if (edges[i] == nextIndex)
k = j;
else if (edges[j] == nextIndex)
k = i;
if (k >= 0)
{
var edge = edges[k];
visited[i] = true;
edgeList.Add(nextIndex);
edgeList.Add(edge);
nextIndex = edge;
i = 0;
}
}
// calculate winding order - then add to final result.
var borderPoints = new List<Point>();
edgeList.ForEach(ei => borderPoints.Add(positions[ei]));
var winding = CalculateWindingOrder(borderPoints);
if (winding > 0)
edgeList.Reverse();
resultList.AddRange(edgeList);
edgeList = new List<int>();
nextIndex = -1;
}
return resultList;
}
/// <summary>
/// returns 1 for CW, -1 for CCW, 0 for unknown.
/// </summary>
public static int CalculateWindingOrder(IList<Point> points)
{
// the sign of the 'area' of the polygon is all we are interested in.
var area = CalculateSignedArea(points);
if (area < 0.0)
return 1;
else if (area > 0.0)
return - 1;
return 0; // error condition - not even verts to calculate, non-simple poly, etc.
}
public static double CalculateSignedArea(IList<Point> points)
{
double area = 0.0;
for (int i = 0; i < points.Count; i++)
{
int j = (i + 1) % points.Count;
area += points[i].X * points[j].Y;
area -= points[i].Y * points[j].X;
}
area /= 2.0f;
return area;
}
Traversal Code (not efficient - needs to be tidied up, will get to that at some point) Please Note: I store each segment in the chain as 2 indices - rather than 1 as suggested by Darren. This is purely for my own implementation / rendering needs.
// okay now lets sort the segments so that they make a chain.
var sorted = new List<int>();
var visited = new Dictionary<int, bool>();
var startIndex = edges[0];
var nextIndex = edges[1];
sorted.Add(startIndex);
sorted.Add(nextIndex);
visited[0] = true;
visited[1] = true;
while (nextIndex != startIndex)
{
for (int i = 0; i < edges.Count - 1; i += 2)
{
var j = i + 1;
if (visited.ContainsKey(i) || visited.ContainsKey(j))
continue;
var iIndex = edges[i];
var jIndex = edges[j];
if (iIndex == nextIndex)
{
sorted.Add(nextIndex);
sorted.Add(jIndex);
nextIndex = jIndex;
visited[j] = true;
break;
}
else if (jIndex == nextIndex)
{
sorted.Add(nextIndex);
sorted.Add(iIndex);
nextIndex = iIndex;
visited[i] = true;
break;
}
}
}
return sorted;
The answer to your question actually depends on how the triangular mesh is represented in memory. If you use the Half-edge data structure, then the algorithm is extremely simple, since everything was already done during the Half-edge data structure's construction.
Start from any boundary half-edge HE_edge* edge0 (it can be found by linear search over all half-edges as the first edge without valid face). Set the current half-edge HE_edge* edge = edge0.
Output the destination edge->vert of the current edge.
The next edge in clockwise order around the shape (and counter-clockwise order around the surrounding "hole") will be edge->next.
Stop when you reach edge0.
To efficiently enumerate the boundary edges in the opposite (counter-clockwise) order, the data structure needs to have a prev data field, which many existing implementations of the Half-edge data structure do provide in addition to next, e.g. MeshLib.
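A hedged sketch of that walk (my own minimal HE_edge record, not tied to any particular half-edge library): starting from a boundary half-edge, following next until we return to the start visits every boundary vertex exactly once.

#include <vector>

struct HE_vert;
struct HE_face;

struct HE_edge {
    HE_vert* vert;   // vertex this half-edge points to
    HE_edge* next;   // next half-edge around the same face (or around the same hole)
    HE_face* face;   // nullptr for a boundary half-edge
};

// Collect the boundary loop containing edge0 (edge0->face is assumed to be nullptr).
std::vector<HE_vert*> boundary_loop(HE_edge* edge0)
{
    std::vector<HE_vert*> loop;
    HE_edge* edge = edge0;
    do {
        loop.push_back(edge->vert);   // output the destination of the current edge
        edge = edge->next;            // next boundary half-edge around the hole
    } while (edge != edge0);
    return loop;
}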
Does anybody know how to find the local maxima in a grayscale IPL_DEPTH_8U image using OpenCV? HarrisCorner mentions something like that but I'm actually not interested in corners ...
Thanks!
A pixel is considered a local maximum if it is equal to the maximum value in a 'local' neighborhood. The function below captures this property in two lines of code.
To deal with pixels on 'plateaus' (value equal to their neighborhood) one can use the local minimum property, since plateau pixels are equal to their local minimum. The rest of the code filters out those pixels.
void non_maxima_suppression(const cv::Mat& image, cv::Mat& mask, bool remove_plateaus) {
    // find pixels that are equal to the local neighborhood maximum (including 'plateaus')
    cv::dilate(image, mask, cv::Mat());
    cv::compare(image, mask, mask, cv::CMP_GE);

    // optionally filter out pixels that are equal to the local minimum ('plateaus')
    if (remove_plateaus) {
        cv::Mat non_plateau_mask;
        cv::erode(image, non_plateau_mask, cv::Mat());
        cv::compare(image, non_plateau_mask, non_plateau_mask, cv::CMP_GT);
        cv::bitwise_and(mask, non_plateau_mask, mask);
    }
}
Here's a simple trick. The idea is to dilate with a kernel that contains a hole in the center. After the dilate operation, each pixel is replaced with the maximum of its neighbors (using a 5 by 5 neighborhood in this example), excluding the original pixel.
Mat1b kernelLM(Size(5, 5), 1u);
kernelLM.at<uchar>(2, 2) = 0u;
Mat imageLM;
dilate(image, imageLM, kernelLM);
Mat1b localMaxima = (image > imageLM);
Actually, after I posted the code above I wrote a better and much faster one.
The code above struggles even with a 640x480 picture.
I optimized it and now it is very fast even for a 1600x1200 picture.
Here is the code:
void localMaxima(cv::Mat src,cv::Mat &dst,int squareSize)
{
if (squareSize==0)
{
dst = src.clone();
return;
}
Mat m0;
dst = src.clone();
Point maxLoc(0,0);
//1.Be sure to have at least 3x3 for at least looking at 1 pixel close neighbours
// Also the window must be <odd>x<odd>
SANITYCHECK(squareSize,3,1);
int sqrCenter = (squareSize-1)/2;
//2.Create the localWindow mask to get things done faster
// When we find a local maxima we will multiply the subwindow with this MASK
// So that we will not search for those 0 values again and again
Mat localWindowMask = Mat::zeros(Size(squareSize,squareSize),CV_8U);//boolean
localWindowMask.at<unsigned char>(sqrCenter,sqrCenter)=1;
//3.Find the threshold value to threshold the image
//this function here returns the peak of histogram of picture
//the picture is a thresholded picture it will have a lot of zero values in it
//so that the second boolean variable says :
// (boolean) ? "return peak even if it is at 0" : "return peak discarding 0"
int thrshld = maxUsedValInHistogramData(dst,false);
threshold(dst,m0,thrshld,1,THRESH_BINARY);
//4.Now delete all thresholded values from picture
dst = dst.mul(m0);
//put the src in the middle of the big array
for (int row=sqrCenter;row<dst.size().height-sqrCenter;row++)
for (int col=sqrCenter;col<dst.size().width-sqrCenter;col++)
{
//1.if the value is zero it can not be a local maxima
if (dst.at<unsigned char>(row,col)==0)
continue;
//2.the value at (row,col) is not 0 so it can be a local maxima point
m0 = dst.colRange(col-sqrCenter,col+sqrCenter+1).rowRange(row-sqrCenter,row+sqrCenter+1);
minMaxLoc(m0,NULL,NULL,NULL,&maxLoc);
//if the maximum location of this subWindow is at center
//it means we found the local maxima
//so we should delete the surrounding values which lies in the subWindow area
//hence we will not try to find if a point is at localMaxima when already found a neighbour was
if ((maxLoc.x==sqrCenter)&&(maxLoc.y==sqrCenter))
{
m0 = m0.mul(localWindowMask);
//we can skip the values that we already made 0 by the above function
col+=sqrCenter;
}
}
}
The following listing is a function similar to Matlab's "imregionalmax". It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the (Euclidean) distance between points (r,c) and (row,col).
The input is a one-channel CV_32F matrix, and locations is an nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
Mat scratch = input.clone();
int nFoundLocMax = 0;
for (int i = 0; i < nLocMax; i++) {
Point location;
double maxVal;
minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
if (maxVal > threshold) {
nFoundLocMax += 1;
int row = location.y;
int col = location.x;
locations.at<int>(i,0) = row;
locations.at<int>(i,1) = col;
int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
for (int r = r0; r <= r1; r++) {
for (int c = c0; c <= c1; c++) {
if (vdist(Point2DMake(r, c),Point2DMake(row, col)) <= minDistBtwLocMax) {
scratch.at<float>(r,c) = 0.0;
}
}
}
} else {
break;
}
}
return nFoundLocMax;
}
The first question to answer would be what is "local" in your opinion. The answer may well be a square window (say 3x3 or 5x5) or circular window of a certain radius. You can then scan over the entire image with the window centered at each pixel and pick the highest value in the window.
See this for how to access pixel values in OpenCV.
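A hedged brute-force sketch of that window scan (my own code, assuming an 8-bit single-channel image and a square (2*r+1)x(2*r+1) window): a pixel is kept as a local maximum only if no other pixel in its window is larger.

#include <vector>
#include "opencv2/core/core.hpp"

std::vector<cv::Point> window_maxima(const cv::Mat& img, int r)
{
    std::vector<cv::Point> maxima;
    for (int y = r; y < img.rows - r; ++y)
        for (int x = r; x < img.cols - r; ++x) {
            const uchar v = img.at<uchar>(y, x);
            bool is_max = true;
            for (int dy = -r; dy <= r && is_max; ++dy)
                for (int dx = -r; dx <= r && is_max; ++dx)
                    if ((dy != 0 || dx != 0) && img.at<uchar>(y + dy, x + dx) > v)
                        is_max = false;
            if (is_max)
                maxima.push_back(cv::Point(x, y));   // (x, y) is a local maximum
        }
    return maxima;
}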
This is a very fast method. It stores the found maxima in a vector of Points.
vector <Point> GetLocalMaxima(const cv::Mat Src,int MatchingSize, int Threshold, int GaussKernel )
{
vector <Point> vMaxLoc(0);
if ((MatchingSize % 2 == 0) || (GaussKernel % 2 == 0)) // MatchingSize and GaussKernel have to be "odd" and > 0
{
return vMaxLoc;
}
vMaxLoc.reserve(100); // Reserve place for fast access
Mat ProcessImg = Src.clone();
int W = Src.cols;
int H = Src.rows;
int SearchWidth = W - MatchingSize;
int SearchHeight = H - MatchingSize;
int MatchingSquareCenter = MatchingSize/2;
if(GaussKernel > 1) // If You need a smoothing
{
GaussianBlur(ProcessImg,ProcessImg,Size(GaussKernel,GaussKernel),0,0,4);
}
uchar* pProcess = (uchar *) ProcessImg.data; // The pointer to image Data
int Shift = MatchingSquareCenter * ( W + 1);
int k = 0;
for(int y=0; y < SearchHeight; ++y)
{
int m = k + Shift;
for(int x=0;x < SearchWidth ; ++x)
{
if (pProcess[m++] >= Threshold)
{
Point LocMax;
Mat mROI(ProcessImg, Rect(x,y,MatchingSize,MatchingSize));
minMaxLoc(mROI,NULL,NULL,NULL,&LocMax);
if (LocMax.x == MatchingSquareCenter && LocMax.y == MatchingSquareCenter)
{
vMaxLoc.push_back(Point( x+LocMax.x,y + LocMax.y ));
// imshow("W1",mROI);cvWaitKey(0); //For gebug
}
}
}
k += W;
}
return vMaxLoc;
}
Found a simple solution.
In this example we are trying to find 2 results of a matchTemplate function with a minimum distance from each other.
cv::Mat result;
matchTemplate(search, target, result, CV_TM_SQDIFF_NORMED);
float score1;
cv::Point displacement1 = MinMax(result, score1);
cv::circle(result, cv::Point(displacement1.x+result.cols/2 , displacement1.y+result.rows/2), 10, cv::Scalar(0), CV_FILLED, 8, 0);
float score2;
cv::Point displacement2 = MinMax(result, score2);
where
cv::Point MinMax(cv::Mat &result, float &score)
{
    double minVal, maxVal;
    cv::Point minLoc, maxLoc, matchLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
    matchLoc.x = minLoc.x - result.cols/2;
    matchLoc.y = minLoc.y - result.rows/2;
    score = (float)minVal;
    return matchLoc;
}
The process is:
Find global Minimum using minMaxLoc
Draw a filled white circle around global minimum using min distance between minima as radius
Find another minimum
Then the scores can be compared to each other to determine, for example, the certainty of the match.
To find more than just the global minimum and maximum try using this function from skimage:
http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.peak_local_max
You can parameterize the minimum distance between peaks, too. And more. To find minima, use negated values (take care of the array type though, 255-image could do the trick).
You can go over each pixel and test if it is a local maxima. Here is how I would do it.
The input is assumed to be type CV_32FC1
#include <vector>//std::vector
#include <algorithm>//std::sort
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
//structure for maximal values including position
struct SRegionalMaxPoint
{
SRegionalMaxPoint():
values(-FLT_MAX),
row(-1),
col(-1)
{}
float values;
int row;
int col;
//ascending order
bool operator()(const SRegionalMaxPoint& a, const SRegionalMaxPoint& b)
{
return a.values < b.values;
}
};
//checks if pixel is local max
bool isRegionalMax(const float* im_ptr, const int& cols )
{
float center = *im_ptr;
bool is_regional_max = true;
im_ptr -= (cols + 1);
for (int ii = 0; ii < 3; ++ii, im_ptr+= (cols-3))
{
for (int jj = 0; jj < 3; ++jj, im_ptr++)
{
if (ii != 1 || jj != 1)
{
is_regional_max &= (center > *im_ptr);
}
}
}
return is_regional_max;
}
void imregionalmax(
const cv::Mat& input,
std::vector<SRegionalMaxPoint>& buffer)
{
//find local max - top maxima
static const int margin = 1;
const int rows = input.rows;
const int cols = input.cols;
for (int i = margin; i < rows - margin; ++i)
{
const float* im_ptr = input.ptr<float>(i, margin);
for (int j = margin; j < cols - margin; ++j, im_ptr++)
{
//Check if pixel is local maximum
if ( isRegionalMax(im_ptr, cols ) )
{
cv::Rect roi = cv::Rect(j - margin, i - margin, 3, 3);
cv::Mat subMat = input(roi);
float val = *im_ptr;
//replace smallest value in buffer
if ( val > buffer[0].values )
{
buffer[0].values = val;
buffer[0].row = i;
buffer[0].col = j;
std::sort(buffer.begin(), buffer.end(), SRegionalMaxPoint());
}
}
}
}
}
For testing the code you can try this:
cv::Mat temp = cv::Mat::zeros(15, 15, CV_32FC1);
temp.at<float>(7, 7) = 1;
temp.at<float>(3, 5) = 6;
temp.at<float>(8, 10) = 4;
temp.at<float>(11, 13) = 7;
temp.at<float>(10, 3) = 8;
temp.at<float>(7, 13) = 3;
vector<SRegionalMaxPoint> buffer_(5);
imregionalmax(temp, buffer_);
cv::Mat debug;
cv::cvtColor(temp, debug, cv::COLOR_GRAY2BGR);
for (auto it = buffer_.begin(); it != buffer_.end(); ++it)
{
circle(debug, cv::Point(it->col, it->row), 1, cv::Scalar(0, 255, 0));
}
This solution does not take plateaus into account so it is not exactly the same as matlab's imregionalmax()
I think you want to use the
MinMaxLoc(arr, mask=NULL)-> (minVal, maxVal, minLoc, maxLoc)
Finds global minimum and maximum in array or subarray
function on your image
I would like to determine a polygon and implement an algorithm which would check if a point is inside or outside the polygon.
Does anyone know if there is any example available of any similar algorithm?
If I remember correctly, the algorithm is to draw a horizontal line through your test point. Count how many edges of the polygon you intersect to reach your point.
If the answer is odd, you're inside. If the answer is even, you're outside.
Edit: Yeah, what he said (Wikipedia):
C# code
bool IsPointInPolygon(List<Loc> poly, Loc point)
{
int i, j;
bool c = false;
for (i = 0, j = poly.Count - 1; i < poly.Count; j = i++)
{
if ((((poly[i].Lt <= point.Lt) && (point.Lt < poly[j].Lt))
|| ((poly[j].Lt <= point.Lt) && (point.Lt < poly[i].Lt)))
&& (point.Lg < (poly[j].Lg - poly[i].Lg) * (point.Lt - poly[i].Lt)
/ (poly[j].Lt - poly[i].Lt) + poly[i].Lg))
{
c = !c;
}
}
return c;
}
Location class
public class Loc
{
private double lt;
private double lg;
public double Lg
{
get { return lg; }
set { lg = value; }
}
public double Lt
{
get { return lt; }
set { lt = value; }
}
public Loc(double lt, double lg)
{
this.lt = lt;
this.lg = lg;
}
}
After searching the web and trying various implementations and porting them from C++ to C# I finally got my code straight:
public static bool PointInPolygon(LatLong p, List<LatLong> poly)
{
int n = poly.Count();
poly.Add(new LatLong { Lat = poly[0].Lat, Lon = poly[0].Lon });
LatLong[] v = poly.ToArray();
int wn = 0; // the winding number counter
// loop through all edges of the polygon
for (int i = 0; i < n; i++)
{ // edge from V[i] to V[i+1]
if (v[i].Lat <= p.Lat)
{ // start y <= P.y
if (v[i + 1].Lat > p.Lat) // an upward crossing
if (isLeft(v[i], v[i + 1], p) > 0) // P left of edge
++wn; // have a valid up intersect
}
else
{ // start y > P.y (no test needed)
if (v[i + 1].Lat <= p.Lat) // a downward crossing
if (isLeft(v[i], v[i + 1], p) < 0) // P right of edge
--wn; // have a valid down intersect
}
}
if (wn != 0)
return true;
else
return false;
}
private static int isLeft(LatLong P0, LatLong P1, LatLong P2)
{
double calc = ((P1.Lon - P0.Lon) * (P2.Lat - P0.Lat)
- (P2.Lon - P0.Lon) * (P1.Lat - P0.Lat));
if (calc > 0)
return 1;
else if (calc < 0)
return -1;
else
return 0;
}
The isLeft function was giving me rounding problems and I spent hours without realizing that I was doing the conversion wrong, so forgive me for the lame if block at the end of that function.
BTW, this is the original code and article:
http://softsurfer.com/Archive/algorithm_0103/algorithm_0103.htm
By far the best explanation and implementation can be found at
Point In Polygon Winding Number Inclusion
There is even a C++ implementation at the end of the well explained article. This site also contains some great algorithms/solutions for other geometry based problems.
I have modified and used the C++ implementation and also created a C# implementation. You definitely want to use the Winding Number algorithm as it is more accurate than the edge crossing algorithm and it is very fast.
I think there is a simpler and more efficient solution.
Here is the code in C++. It should be simple to convert it to C#.
int pnpoly(int npol, float *xp, float *yp, float x, float y)
{
int i, j, c = 0;
for (i = 0, j = npol-1; i < npol; j = i++) {
if ((((yp[i] <= y) && (y < yp[j])) ||
((yp[j] <= y) && (y < yp[i]))) &&
(x < (xp[j] - xp[i]) * (y - yp[i]) / (yp[j] - yp[i]) + xp[i]))
c = !c;
}
return c;
}
The complete solution in ASP.NET C#. You can see the complete detail here: how to find whether a point (lat, lon) is inside or outside a polygon using the latitude and longitude.
Article Reference Link
private static bool checkPointExistsInGeofencePolygon(string latlnglist, string lat, string lng)
{
List<Loc> objList = new List<Loc>();
// sample string should be like this strlatlng = "39.11495,-76.873259|39.114588,-76.872808|39.112921,-76.870373|";
string[] arr = latlnglist.Split('|');
for (int i = 0; i <= arr.Length - 1; i++)
{
string latlng = arr[i];
string[] arrlatlng = latlng.Split(',');
Loc er = new Loc(Convert.ToDouble(arrlatlng[0]), Convert.ToDouble(arrlatlng[1]));
objList.Add(er);
}
Loc pt = new Loc(Convert.ToDouble(lat), Convert.ToDouble(lng));
if (IsPointInPolygon(objList, pt) == true)
{
return true;
}
else
{
return false;
}
}
private static bool IsPointInPolygon(List<Loc> poly, Loc point)
{
int i, j;
bool c = false;
for (i = 0, j = poly.Count - 1; i < poly.Count; j = i++)
{
if ((((poly[i].Lt <= point.Lt) && (point.Lt < poly[j].Lt)) |
((poly[j].Lt <= point.Lt) && (point.Lt < poly[i].Lt))) &&
(point.Lg < (poly[j].Lg - poly[i].Lg) * (point.Lt - poly[i].Lt) / (poly[j].Lt - poly[i].Lt) + poly[i].Lg))
c = !c;
}
return c;
}
Just a heads up (using an answer as I can't comment): if you want to use point-in-polygon for geo fencing, then you need to change your algorithm to work with spherical coordinates. -180 longitude is the same as 180 longitude, and point-in-polygon will break in such a situation.
Relating to kober's answer, I worked it out with more readable, clean code and changed the longitudes that cross the date line:
public bool IsPointInPolygon(List<PointPosition> polygon, double latitude, double longitude)
{
bool isInIntersection = false;
int actualPointIndex = 0;
int pointIndexBeforeActual = polygon.Count - 1;
var offset = calculateLonOffsetFromDateLine(polygon);
longitude = longitude < 0.0 ? longitude + offset : longitude;
foreach (var actualPointPosition in polygon)
{
var p1Lat = actualPointPosition.Latitude;
var p1Lon = actualPointPosition.Longitude;
var p0Lat = polygon[pointIndexBeforeActual].Latitude;
var p0Lon = polygon[pointIndexBeforeActual].Longitude;
if (p1Lon < 0.0) p1Lon += offset;
if (p0Lon < 0.0) p0Lon += offset;
// Jordan curve theorem - odd even rule algorithm
if (isPointLatitudeBetweenPolyLine(p0Lat, p1Lat, latitude)
&& isPointRightFromPolyLine(p0Lat, p0Lon, p1Lat, p1Lon, latitude, longitude))
{
isInIntersection = !isInIntersection;
}
pointIndexBeforeActual = actualPointIndex;
actualPointIndex++;
}
return isInIntersection;
}
private double calculateLonOffsetFromDateLine(List<PointPosition> polygon)
{
double offset = 0.0;
var maxLonPoly = polygon.Max(x => x.Longitude);
var minLonPoly = polygon.Min(x => x.Longitude);
if (Math.Abs(minLonPoly - maxLonPoly) > 180)
{
offset = 360.0;
}
return offset;
}
private bool isPointLatitudeBetweenPolyLine(double polyLinePoint1Lat, double polyLinePoint2Lat, double poiLat)
{
return polyLinePoint2Lat <= poiLat && poiLat < polyLinePoint1Lat || polyLinePoint1Lat <= poiLat && poiLat < polyLinePoint2Lat;
}
private bool isPointRightFromPolyLine(double polyLinePoint1Lat, double polyLinePoint1Lon, double polyLinePoint2Lat, double polyLinePoint2Lon, double poiLat, double poiLon)
{
// lon <(lon1-lon2)*(latp-lat2)/(lat1-lat2)+lon2
return poiLon < (polyLinePoint1Lon - polyLinePoint2Lon) * (poiLat - polyLinePoint2Lat) / (polyLinePoint1Lat - polyLinePoint2Lat) + polyLinePoint2Lon;
}
I'll add one detail to help people who live in the... south of the Earth!!
If you're in Brazil (that's my case), our GPS coordinates are all negative.
And all these algos give wrong results.
The easiest way is to use the absolute values of the Lat and Long of all points. In that case Jan Kobersky's algo is perfect.
Check if a point is inside a polygon or not -
Consider the polygon which has vertices a1,a2,a3,a4,a5. The following set of steps should help in ascertaining whether point P lies inside the polygon or outside.
Compute the vector area of the triangle formed by edge a1->a2 and the vectors connecting a2 to P and P to a1. Similarly, compute the vector area of each of the possible triangles having one side as a side of the polygon and the other two connecting P to that side.
For a point to be inside a polygon, each of the triangles need to have positive area. Even if one of the triangles have a negative area then the point P stands out of the polygon.
In order to compute the area of a triangle given vectors representing its 3 edges, refer to http://www.jtaylor1142001.net/calcjat/Solutions/VCrossProduct/VCPATriangle.htm
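A hedged sketch of that test (my own code): it assumes the polygon is convex and its vertices are listed in a consistent counter-clockwise order, and checks that the signed (cross-product) area of every triangle formed by an edge and P is non-negative.

#include <vector>

struct P2 { double x, y; };

bool inside_convex(const std::vector<P2>& poly, const P2& p)
{
    const size_t n = poly.size();
    for (size_t i = 0; i < n; ++i)
    {
        const P2& a = poly[i];
        const P2& b = poly[(i + 1) % n];
        // twice the signed area of triangle (a, b, p); a negative value means p is
        // on the outside of edge a->b for a counter-clockwise polygon
        const double cross = (b.x - a.x) * (p.y - a.y) - (b.y - a.y) * (p.x - a.x);
        if (cross < 0.0)
            return false;
    }
    return true;   // p is inside (or on the boundary of) every edge's half-plane
}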
The problem is easier if your polygon is convex. If so, you can do a simple test for each line to see if the point is on the inside or outside of that line (extending to infinity in both directions). Otherwise, for concave polygons, draw an imaginary ray from your point out to infinity (in any direction). Count how many times it crosses a boundary line. Odd means the point is inside, even means the point is outside.
This last algorithm is trickier than it looks. You will have to be very careful about what happens when your imaginary ray exactly hits one of the polygon's vertices.
If your imaginary ray goes in the -x direction, you can choose only to count lines that include at least one point whose y coordinate is strictly less than the y coordinate of your point. This is how you get most of the weird edge cases to work correctly.
If you have a simple polygon (none of the lines cross) and you don't have holes you can also triangulate the polygon, which you are probably going to do anyway in a GIS app to draw a TIN, then test for points in each triangle. If you have a small number of edges to the polygon but a large number of points this is fast.
For an interesting point in triangle see link text
Otherwise definitely use the winding rule rather than edge crossing; edge crossing has real problems with points on edges, which, if your data is generated from a GPS with limited precision, is very likely.
The polygon is defined as a sequential list of point pairs A, B, C .... A.
No side A-B, B-C ... crosses any other side.
Determine the bounding box Xmin, Xmax, Ymin, Ymax.
Case 1: the test point P lies outside the box, so it is outside the polygon.
Case 2: the test point P lies inside the box:
Determine the 'diameter' D of the box {[Xmin,Ymin] - [Xmax, Ymax]} (and add a little extra to avoid possible confusion with D being on a side).
Determine the gradients M of all sides.
Find a gradient Mt most different from all gradients M.
The test line runs from P at gradient Mt a distance D.
Set the count of intersections to zero.
For each of the sides A-B, B-C, test for the intersection of P-D with the side from its start up to but NOT INCLUDING its end. Increment the count of intersections if required. Note that a zero distance from P to the intersection indicates that P is ON a side.
An odd count indicates P is inside the polygon.
I translated the C# method to PHP and added many comments to help understand the code. Description of PolygonHelps:
Check if a point is inside or outside of a polygon. This procedure uses GPS coordinates and it works when the polygon has a small geographic area.
INPUT: $poly: array of Point: polygon vertices list; [{Point}, {Point}, ...]; $point: point to check; Point: {"lat" => "x.xxx", "lng" => "y.yyy"}
When $c is false, the number of intersections with the polygon is even, so the point is outside of the polygon; when $c is true, the number of intersections with the polygon is odd, so the point is inside of the polygon. $n is the number of vertices in the polygon. For each vertex in the polygon, the method calculates the line through the current vertex and the previous vertex and checks if the two lines have an intersection point. $c changes when an intersection point exists.
So, the method returns true if the point is inside the polygon, and false otherwise.
class PolygonHelps {
public static function isPointInPolygon(&$poly, $point){
$c = false;
$n = $j = count($poly);
for ($i = 0, $j = $n - 1; $i < $n; $j = $i++){
if ( ( ( ( $poly[$i]->lat <= $point->lat ) && ( $point->lat < $poly[$j]->lat ) )
|| ( ( $poly[$j]->lat <= $point->lat ) && ( $point->lat < $poly[$i]->lat ) ) )
&& ( $point->lng < ( $poly[$j]->lng - $poly[$i]->lng )
* ( $point->lat - $poly[$i]->lat )
/ ( $poly[$j]->lat - $poly[$i]->lat )
+ $poly[$i]->lng ) ){
$c = !$c;
}
}
return $c;
}
}
Jan's answer is great.
Here is the same code using the GeoCoordinate class instead.
using System.Device.Location;
...
public static bool IsPointInPolygon(List<GeoCoordinate> poly, GeoCoordinate point)
{
int i, j;
bool c = false;
for (i = 0, j = poly.Count - 1; i < poly.Count; j = i++)
{
if ((((poly[i].Latitude <= point.Latitude) && (point.Latitude < poly[j].Latitude))
|| ((poly[j].Latitude <= point.Latitude) && (point.Latitude < poly[i].Latitude)))
&& (point.Longitude < (poly[j].Longitude - poly[i].Longitude) * (point.Latitude - poly[i].Latitude)
/ (poly[j].Latitude - poly[i].Latitude) + poly[i].Longitude))
c = !c;
}
return c;
}
You can try this simple class: https://github.com/xopbatgh/sb-polygon-pointer
It is easy to deal with:
You just insert the polygon coordinates into an array
Then ask the library whether the desired point with lat/lng is inside the polygon
$polygonBox = [
[55.761515, 37.600375],
[55.759428, 37.651156],
[55.737112, 37.649566],
[55.737649, 37.597301],
];
$sbPolygonEngine = new sbPolygonEngine($polygonBox);
$isCrosses = $sbPolygonEngine->isCrossesWith(55.746768, 37.625605);
// $isCrosses is boolean
(This answer was restored after I deleted it, because initially it was formatted wrong.)