I am trying to display geometry in Three.js that is constructed from path commands such as moveTo, lineTo, and bezierCurveTo.
To do this, I create a THREE.ShapePath() and call toShapes(isClockwise).
After this I use THREE.ExtrudeBufferGeometry to create the 3D shape.
Unfortunately, the shapes are sometimes really complex and are not created correctly, meaning they come out distorted.
Using libtess as the triangulation library solves some issues, but I still get distorted geometry.
Now I want to use jsclipper to simplify the shapes prior to triangulation.
I modified three.js as follows: in the method addShape in ExtrudeBufferGeometry I added:
// Convert points from three.js {x, y} form to Clipper's {X, Y} form
$.each(vertices, function(index, item) {
vertices[index]['X'] = vertices[index]['x'];
vertices[index]['Y'] = vertices[index]['y'];
delete vertices[index]['x'];
delete vertices[index]['y'];
});
if (holes[0]) {
for (i = 0; i < holes.length; i++ ) {
$.each(holes[i], function(index, item) {
holes[i][index]['X'] = holes[i][index]['x'];
holes[i][index]['Y'] = holes[i][index]['y'];
delete holes[i][index]['x'];
delete holes[i][index]['y'];
});
}
}
// Clipper works on integer coordinates, so scale up before processing
var scale = 100;
ClipperLib.JS.ScaleUpPaths([vertices], scale);
if (holes[0]) {
ClipperLib.JS.ScaleUpPaths(holes, scale);
}
// Note: SimplifyPolygons returns an array of paths, so from here on
// `vertices` holds a list of contours rather than a single contour
vertices = ClipperLib.Clipper.SimplifyPolygons([vertices], ClipperLib.PolyFillType.pftNonZero);
// or ClipperLib.PolyFillType.pftEvenOdd
if (holes[0]) {
holes = ClipperLib.Clipper.SimplifyPolygons(holes, ClipperLib.PolyFillType.pftNonZero);
// or ClipperLib.PolyFillType.pftEvenOdd
}
// var cleandelta = 0.1; // 0.1 should be the appropriate delta in different cases
// vertices = ClipperLib.Clipper.CleanPolygons([vertices], cleandelta * scale);
// if (holes[0]) {
// holes = ClipperLib.Clipper.CleanPolygons(holes, cleandelta * scale);
// }
ClipperLib.JS.ScaleDownPaths(vertices, scale);
if (holes[0]) {
ClipperLib.JS.ScaleDownPaths(holes, scale);
}
// Convert back from Clipper's {X, Y} form to three.js {x, y}
for (i = 0; i < vertices.length; i++ ) {
$.each(vertices[i], function(index, item) {
vertices[i][index]['x'] = vertices[i][index]['X'];
vertices[i][index]['y'] = vertices[i][index]['Y'];
delete vertices[i][index]['X'];
delete vertices[i][index]['Y'];
});
}
if (holes[0]) {
for (i = 0; i < holes.length; i++ ) {
$.each(holes[i], function(index, item) {
holes[i][index]['x'] = holes[i][index]['X'];
holes[i][index]['y'] = holes[i][index]['Y'];
delete holes[i][index]['X'];
delete holes[i][index]['Y'];
});
}
}
Now I can see that the vertices are "reduced".
But var faces = ShapeUtils.triangulateShape( vertices, holes ); no longer generates faces for some examples.
Can anyone help me figure out how to simplify the shapes correctly?
It is a bit hard to figure out what the problem actually is. Clipper (also when using SimplifyPolygons or SimplifyPolygon) can only produce weakly simple polygons, which means that there can be pseudo-duplicate points: although sequential coordinates are guaranteed not to be identical, some of the later points can share the same coordinate. Also, a coordinate can lie on the line between two points.
After simplifying (or any other boolean operation) you could add a cleaning step using offsetting with a small negative value: https://sourceforge.net/p/jsclipper/wiki/documentation/#clipperlibclipperoffsetexecute.
This possibly removes all of the pseudo-duplicate points.
I have also made a float version of Clipper (http://jsclipper.sourceforge.net/6.4.2.2_fpoint/). It is extensively tested, but no original C# float version exists, because Angus Johnson, the author of the original C# Clipper (from which the JS version is ported), considers that using floats causes robustness problems, although according to my tests there are none. The float version is simpler to use, and there you can try a small negative offset, e.g. -0.001 or -0.01.
You could also give PolyTree or ExPolygons a try (https://sourceforge.net/p/jsclipper/wiki/ExPolygons%20and%20PolyTree%206/). ExPolygons can be used to get holes and contours, and PolyTree can be used to get the full parent-child relationship of holes and contours.
The last resort is a broken-pen-nib function. It detects all pseudo-duplicate points and applies a broken-pen-nib effect to them, so that the result is free of any duplicates. The attached images show what this effect means, using a large nib-effect value to make it clearer. Three.js polygon triangulation fails on pseudo-duplicate points; there is a discussion of this subject at https://github.com/mrdoob/three.js/issues/3386.
// Make polygons simple by applying a "broken pen nib" effect
// to each semi-adjacent (pseudo-duplicate) vertex
// ORIGPOLY can be a contour
// or an exPolygon structure
function BreakPenNibs(ORIGPOLY, dist, scale)
{
if (!dist || dist < 0) return;
var sqrt = Math.sqrt;
var allpoints = {}, point = {};
var key = "";
var currX = 0.0,
currY = 0.0;
var prevX = 0.0,
prevY = 0.0;
var nextX = 0.0,
nextY;
var x = 0.0,
y = 0.0,
length = 0.0,
i = 0,
duplcount = 0,
j = 0,
ilen = 0, // declared here to avoid accidental globals below
len = 0;
var prev_i = 0,
next_i = 0,
last_i;
var extra_vertices = new Array(100),
moved_vertices = new Array(100);
// Get first all duplicates
var duplicates = new Array(100),
indexi = "",
indexstr = "",
arraystr = "",
polys, outer, holes;
if (ORIGPOLY instanceof Array)
{
outer = ORIGPOLY;
}
else if (ORIGPOLY.outer instanceof Array)
{
outer = ORIGPOLY.outer;
}
else return;
if (ORIGPOLY.holes instanceof Array) holes = ORIGPOLY.holes;
else holes = [];
polys = [outer].concat(holes);
var polys_length = polys.length;
// First get the max length of the arrays
var max_index_len = 0;
var arr_len;
i = polys_length;
while (i--)
{
arr_len = polys[i].length;
if (arr_len > max_index_len) max_index_len = arr_len;
}
max_index_len = max_index_len.toString().length;
var max_polys_length = polys_length.toString().length;
var poly;
j = polys_length;
var scaling = scale/10;
while (j--)
{
poly = polys[j];
ilen = poly.length;
i = ilen;
while (i--)
{
point = poly[i];
//key = Math.round(point.X) + ":" + Math.round(point.Y);
key = (Math.round(point.X / scaling) * scaling)
+ ":" + (Math.round(point.Y / scaling) * scaling);
indexi = allpoints[key];
if (typeof (indexi) != "undefined")
{
// first found duplicate
duplicates[duplcount] = indexi;
duplcount++;
arraystr = j.toString();
while (arraystr.length < max_polys_length) arraystr = "0" + arraystr;
indexstr = i.toString();
while (indexstr.length < max_index_len) indexstr = "0" + indexstr;
duplicates[duplcount] = arraystr + "." + indexstr;
duplcount++;
}
arraystr = j.toString();
while (arraystr.length < max_polys_length) arraystr = "0" + arraystr;
indexstr = i.toString();
while (indexstr.length < max_index_len) indexstr = "0" + indexstr;
allpoints[key] = arraystr + "." + indexstr;
}
}
if (!duplcount) return;
duplicates.length = duplcount;
duplicates.sort();
//console.log(JSON.stringify(duplicates));
var splitted, poly_index = 0,
nth_dupl = 0;
var prev_poly_index = -1;
poly_index = 0;
for (j = 0; j < duplcount; j++)
{
splitted = duplicates[j].split(".");
poly_index = parseInt(splitted[0], 10);
if (poly_index != prev_poly_index) nth_dupl = 0;
else nth_dupl++;
i = parseInt(splitted[1], 10);
poly = polys[poly_index];
len = poly.length;
if (poly[0].X === poly[len - 1].X &&
poly[0].Y === poly[len - 1].Y)
{
last_i = len - 2;
}
else
{
last_i = len - 1;
}
point = poly[i];
// Calculate "broken pen tip" effect
// for current point by finding
// a coordinate at a distance dist
// along the edge between current and
// previous point
// This is inlined to maximize speed
currX = point.X;
currY = point.Y;
if (i === 0) prev_i = last_i; // last element in array
else prev_i = i - 1;
prevX = poly[prev_i].X;
prevY = poly[prev_i].Y;
x=0;y=0;
if (!point.Collinear)
{
length = sqrt((-currX + prevX) * (-currX + prevX) + (currY - prevY) * (currY - prevY));
//console.log(length);
x = currX - (dist * (currX - prevX)) / length;
y = currY - (dist * (currY - prevY)) / length;
}
// save the found (calculated) point
moved_vertices[j] = {
X: x,
Y: y,
Collinear:point.Collinear,
index: i,
poly_index: poly_index
};
// "broken nib effect" for next point also
if (i == len - 1) next_i = 0;
else next_i = i + 1;
nextX = poly[next_i].X;
nextY = poly[next_i].Y;
x=0;y=0;
if (!point.Collinear)
{
length = sqrt((-currX + nextX) * (-currX + nextX) + (currY - nextY) * (currY - nextY));
x = currX - (dist * (currX - nextX)) / length;
y = currY - (dist * (currY - nextY)) / length;
}
// save the found (calculated) point
extra_vertices[j] = {
X: x,
Y: y,
Collinear:point.Collinear,
index: i + nth_dupl,
poly_index: poly_index
};
prev_poly_index = poly_index;
}
moved_vertices.length = extra_vertices.length = duplcount;
//console.log("MOVED:" + JSON.stringify(moved_vertices));
//console.log("EXTRA:" + JSON.stringify(extra_vertices));
// Update moved coordinates
i = duplcount;
var point2;
while (i--)
{
point = moved_vertices[i];
x = point.X;
y = point.Y;
// Faster than isNaN: http://jsperf.com/isnan-alternatives
if (x != x || x == Infinity || x == -Infinity) continue;
if (y != y || y == Infinity || y == -Infinity) continue;
point2 = polys[point.poly_index][point.index];
point2.X = point.X;
point2.Y = point.Y;
point2.Collinear = point.Collinear;
}
// Add an extra vertex
// This is needed to retain the angle of the next edge
for (i = 0; i < duplcount; i++)
{
point = extra_vertices[i];
x = point.X;
y = point.Y;
// Faster than isNaN: http://jsperf.com/isnan-alternatives
if (x != x || x == Infinity || x == -Infinity) continue;
if (y != y || y == Infinity || y == -Infinity) continue;
polys[point.poly_index].splice(point.index + 1, 0,
{
X: point.X,
Y: point.Y,
Collinear: point.Collinear
});
}
// Remove collinear points
// and, for some reason, remaining
// sequential duplicates
// TODO: check why sequential duplicates appear
j = polys.length;
var prev_point = null;
while (j--)
{
poly = polys[j];
ilen = poly.length;
i = ilen;
while (i--)
{
point = poly[i];
if(prev_point!=null && point.X == prev_point.X && point.Y == prev_point.Y) poly.splice(i, 1);
else
if(point.Collinear) poly.splice(i, 1);
prev_point = point;
}
}
//console.log(JSON.stringify(polys));
// because original array is modified, no need to return anything
}
var BreakPenNibsOfExPolygons = function (exPolygons, dist, scale)
{
var i = 0,
j = 0,
ilen = exPolygons.length,
jlen = 0;
for (; i < ilen; i++)
{
//if(i!=4) continue;
BreakPenNibs(exPolygons[i], dist, scale);
}
};
Given a matrix of 0s and 1s (0 is free space, 1 is a wall), find the shortest path from one cell to another, passing only through 0s and without touching any 1s.
How can I do this using Lee's algorithm?
class Solution {
public:
int shortestPathBinaryMatrix(vector<vector<int>>& grid) {
// edge case: start or end not accessible
if (grid[0][0] || grid.back().back()) return -1;
// support variables
int res = 2, len = 1, maxX = grid[0].size() - 1, maxY = grid.size() - 1;
queue<pair<int, int>> q;
// edge case: single cell matrix
if (!maxX && !maxY) return 1 - (grid[0][0] << 1);
// adding the starting point
q.push({0, 0});
// marking start as visited
grid[0][0] = -1;
while (len) {
while (len--) {
// reading and popping the coordinates on the front of the queue
auto [cx, cy] = q.front();
q.pop();
for (int x = max(0, cx - 1), lmtX = min(cx + 1, maxX); x <= lmtX; x++) {
for (int y = max(0, cy - 1), lmtY = min(cy + 1, maxY); y <= lmtY; y++) {
// check if we reached the target
if (x == maxX && y == maxY) return res;
// marking it as visited and adding it to the q if it was still a valid cell
if (!grid[y][x]) {
grid[y][x] = -1;
q.push({x, y});
}
}
}
}
// preparing for the next loop
res++;
len = q.size();
}
return -1;
}
};
I am currently developing a random maze generator that stores the maze in a 2-dimensional array called grid. This will then be used later on to generate a real 3D maze that the user can then walk through.
After doing some research, I attempted to create this maze generator using the recursive division algorithm, however due to the nature of the format of the maze, this isn't really working for me.
From what I understand, the recursive division method does not treat walls as cells.
For instance, my grid would look like this:
a b c d e f g h
1 - - - - - - - -
2 | | | | |
3 | | |
4 | - - | - |
5 | | | |
6 | - | - |
7 x |
8 - - - - - - - -
The point that I'm trying to get across here is that the grid I am trying to create will be represented something like this:
w w w w w w w w
w w w w w
w w w
w w w w w w
w w w w
w w w w w
g w
w w w w w w w w
Where 'w' is a wall and 'g' is the entrance/exit. So walls are placed into the grid, e.g. grid[1][2] == 'w'
The problem with the recursive division algorithm is that walls are not treated as members of the cell. All of the 'cells' would essentially contain whitespace and the walls would be placed around them.
So when I tried to implement this algorithm in my situation, I ended up with a result like this: (the black squares are walls, the white squares are empty, and the red square is the entrance)
My JSFiddle is located here.
Essentially the user will start at the red square and have to go through the maze and find keys that will open the door (which is the red square) to escape, so all of the whitespace in the maze would have to be accessible.
Does anyone have any ideas on how I can rewrite this algorithm to make sure that there is always a path from the red square to any other space in the maze? Ideally, the path would never be more than one square wide.
Code:
var grid;
function generate(dimensions, numDoors) {
//numDoors is unused right now
grid = new Array();
for (var i = 0; i < dimensions; i++) {
grid[i] = new Array();
for (var j = 0; j < dimensions; j++) {
grid[i][j] = "";
}
}
addOuterWalls();
var ent = addEntrance();
addInnerWalls(true, 1, grid.length - 2, 1, grid.length - 2, ent);
}
function addOuterWalls() {
for (var i = 0; i < grid.length; i++) {
if (i == 0 || i == (grid.length - 1)) {
for (var j = 0; j < grid.length; j++) {
grid[i][j] = "w";
}
} else {
grid[i][0] = "w";
grid[i][grid.length - 1] = "w";
}
}
}
function addEntrance() {
var x = randomNumber(1, grid.length - 1);
grid[grid.length - 1][x] = "g";
return x;
}
function addInnerWalls(h, minX, maxX, minY, maxY, gate) {
if (h) {
if (maxX - minX < 2) {
return;
}
var y = randomNumber(minY, maxY);
addHWall(minX, maxX, y);
addInnerWalls(!h, minX, maxX, minY, y-1, gate);
addInnerWalls(!h, minX, maxX, y + 1, maxY, gate);
} else {
if (maxY - minY < 2) {
return;
}
var x = randomNumber(minX, maxX);
addVWall(minY, maxY, x);
addInnerWalls(!h, minX, x-1, minY, maxY, gate);
addInnerWalls(!h, x + 1, maxX, minY, maxY, gate);
}
}
function addHWall(minX, maxX, y) {
var hole = randomNumber(minX, maxX);
for (var i = minX; i <= maxX; i++) {
if (i == hole) grid[y][i] = "";
else grid[y][i] = "w";
}
}
function addVWall(minY, maxY, x) {
var hole = randomNumber(minY, maxY);
for (var i = minY; i <= maxY; i++) {
if (i == hole) grid[i][x] = "";
else grid[i][x] = "w";
}
}
function randomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
function display() {
document.getElementById("cnt").innerHTML = "";
for (var i = 0; i < grid.length; i++) {
var output = "<div>";
for (var j = 0; j < grid.length; j++) {
output += "<b " + grid[i][j] + "></b>";
}
output += "</div>";
document.getElementById("cnt").innerHTML += output;
}
}
generate(30, 1, 1);
display();
Put walls only at even indices and holes (doors) only at odd indices, and make "dimensions" odd.
http://jsfiddle.net/tPm3s/1/
Code:
var grid;
function generate(dimensions, numDoors) {
grid = new Array();
for (var i = 0; i < dimensions; i++) {
grid[i] = new Array();
for (var j = 0; j < dimensions; j++) {
grid[i][j] = "";
}
}
addOuterWalls();
var ent = addEntrance();
addInnerWalls(true, 1, grid.length - 2, 1, grid.length - 2, ent);
}
function addOuterWalls() {
for (var i = 0; i < grid.length; i++) {
if (i == 0 || i == (grid.length - 1)) {
for (var j = 0; j < grid.length; j++) {
grid[i][j] = "w";
}
} else {
grid[i][0] = "w";
grid[i][grid.length - 1] = "w";
}
}
}
function addEntrance() {
var x = randomNumber(1, grid.length - 1);
grid[grid.length - 1][x] = "g";
return x;
}
function addInnerWalls(h, minX, maxX, minY, maxY, gate) {
if (h) {
if (maxX - minX < 2) {
return;
}
var y = Math.floor(randomNumber(minY, maxY)/2)*2;
addHWall(minX, maxX, y);
addInnerWalls(!h, minX, maxX, minY, y-1, gate);
addInnerWalls(!h, minX, maxX, y + 1, maxY, gate);
} else {
if (maxY - minY < 2) {
return;
}
var x = Math.floor(randomNumber(minX, maxX)/2)*2;
addVWall(minY, maxY, x);
addInnerWalls(!h, minX, x-1, minY, maxY, gate);
addInnerWalls(!h, x + 1, maxX, minY, maxY, gate);
}
}
function addHWall(minX, maxX, y) {
var hole = Math.floor(randomNumber(minX, maxX)/2)*2+1;
for (var i = minX; i <= maxX; i++) {
if (i == hole) grid[y][i] = "";
else grid[y][i] = "w";
}
}
function addVWall(minY, maxY, x) {
var hole = Math.floor(randomNumber(minY, maxY)/2)*2+1;
for (var i = minY; i <= maxY; i++) {
if (i == hole) grid[i][x] = "";
else grid[i][x] = "w";
}
}
function randomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
function display() {
document.getElementById("cnt").innerHTML = "";
for (var i = 0; i < grid.length; i++) {
var output = "<div>";
for (var j = 0; j < grid.length; j++) {
output += "<b " + grid[i][j] + "></b>";
}
output += "</div>";
document.getElementById("cnt").innerHTML += output;
}
}
generate(31, 1, 1);
display();
It's easy to blend() two images using p5.js, which is great, but I would like to be able to equalize the histogram of the resulting blended image. Something like -equalize in ImageMagick. Does anyone know how to do this in p5.js?
You can do this, sure. The algorithm: for each channel, calculate the cumulative normalized histogram multiplied by the maximum value in that channel; this gives you the new pixel value in that channel for any old value.
I read the algorithm description on the GeeksforGeeks portal and ported it to p5.js, code:
let img
let mod
let hists, newlevels, maxval // declared here to avoid implicit globals
// helpers
function getMappings() {
hists = [[], [], []];
newlevels = [[], [], []];
maxval = [-1, -1, -1];
// RGB histograms & maximum pixel values
for (let i = 0; i < img.width; i++) {
for (let j = 0; j < img.height; j++) {
let c = img.get(i, j);
hists[0][c[0]] = (hists[0][c[0]] || 0) + 1;
hists[1][c[1]] = (hists[1][c[1]] || 0) + 1;
hists[2][c[2]] = (hists[2][c[2]] || 0) + 1;
for (let ch=0; ch < 3; ch++) {
if (c[ch] > maxval[ch]) {
maxval[ch] = c[ch];
}
}
}
}
// New intensity levels based on cumulative, normalized histograms
for (let hi = 0; hi < 3; hi++) {
let acc = 0;
for (let lev=0; lev < 256; lev++) {
acc += hists[hi][lev] || 0; // levels that never occur are undefined in the sparse histogram
newlevels[hi][lev] = Math.round(maxval[hi]*acc/(img.width*img.height));
}
}
return newlevels;
}
function equalizeHistograms() {
let map = getMappings();
for (let i = 0; i < mod.width; i++) {
for (let j = 0; j < mod.height; j++) {
let c = img.get(i, j);
let newcol = color(map[0][c[0]], map[1][c[1]], map[2][c[2]]);
mod.set(i, j, newcol);
}
}
mod.updatePixels();
}
// system functions
function preload() {
img = loadImage('https://i.imgur.com/00HxCYr.jpg');
mod = createImage(200, 140);
}
function setup() {
img.loadPixels();
mod.loadPixels();
createCanvas(250, 400);
equalizeHistograms();
}
function draw() {
image(img,0,0);
image(mod,0,140);
}
DEMO
I have, essentially, a 512x512x512 WebGLTexture object that is 0.0 everywhere except for about 3 voxels, where it is 1.0. I need to get the xyz coordinates of those 3 voxels printed out as fast as possible for a scientific-computing application related to my research, but the best I can do is use a [parallel] for loop after passing the object through a clunky chain of WebGL2 methods. Does anyone know a faster way to get those coordinates? Is there a way to push vec3 primitives to an array from a fragment shader?
I've looked for helpful extensions unsuccessfully.
I am pushing tbl.compressedTable to an array every timestep via:
var tbl = new Abubu.RgbaCompressedDataFromTexture({
target : env.stipt,
threshold : env.fthrsh,
compressionThresholdChannel : 'r',
});
this.timeSeries.push(time) ;
this.lastRecordedTime = time ;
this.samples.push([tbl.compressedTable]) ;
Where the last line is the killer. I'm using the class prototype:
class RgbaCompressedDataFromTexture extends RgbaCompressedData{
constructor( options={} ){
if ( options.target == undefined &&
options.texture == undefined ) return null ;
var texture ;
texture = readOption(options.target, null ) ;
texture = readOption(options.texture, options.target ) ;
var ttbond = new Float32TextureTableBond({ target : texture } ) ;
ttbond.tex2tab() ;
var table = ttbond.table ;
var width = ttbond.width ;
var height = ttbond.height ;
var op = options ;
op.width = width ;
op.height = height ;
super( table, op ) ;
this.ttbond = ttbond ;
this.texture = texture ;
}
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* CONSTRUCTOR ENDS
*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
Extending the class:
class RgbaCompressedData{
constructor( data, options={}){
if (data == undefined){
log( 'You need to provide data source for compression!') ;
return null ;
}
this.data = new Float32Array(data) ;
this.width = readOption( options.width, data.length/4 ) ;
this.height = readOption( options.height, 1 ) ;
if ( (this.width == (data.length/4)) && this.height != 1 ){
this.width = (data.length/this.height)/4 ;
}
this.threshold = readOption( options.threshold, 0 ) ;
this.threshold = readOption( options.compressionThreshold,
this.threshold ) ;
this.compressionThresholdChannel
= readOption( options.channel, 'r' ) ;
switch (this.compressionThresholdChannel){
case 'r' :
this.channel = 0 ;
break ;
case 'g' :
this.channel = 1 ;
break ;
case 'b' :
this.channel = 2 ;
break ;
case 'a' :
this.channel = 3 ;
break ;
default :
this.channel = 0 ;
break ;
}
this.compThresholdData = new Float32Array(this.width*this.height) ;
/*------------------------------------------------------------------------
* count number of pixels above the compression threshold
*------------------------------------------------------------------------
*/
this.noAboveThreshold = 0 ;
for(var j=0 ; j<this.height ; j++){
for (var i=0 ; i <this.width; i++){
var indx = i + j*this.width ;
this.compThresholdData[indx]
= this.data[indx*4 + this.channel] ;
if (this.compThresholdData[indx]>this.threshold){
this.noAboveThreshold++ ;
}
}
}
/*------------------------------------------------------------------------
* allocating memory to data
*------------------------------------------------------------------------
*/
this.compressedSize =
Math.ceil( Math.sqrt( this.noAboveThreshold )) ;
this.compressedTable =
new Float32Array(this.compressedSize*this.compressedSize*4 ) ;
this.decompressionMapTable =
new Float32Array(this.compressedSize*this.compressedSize*4 ) ;
this.compressionMapTable =
new Float32Array(this.width*this.height * 4 ) ;
/*------------------------------------------------------------------------
* compress data
*------------------------------------------------------------------------
*/
var num = 0 ;
for(var j=0 ; j<this.height ; j++){
for (var i=0 ; i <this.width; i++){
var indx = i + j*this.width ;
if (this.compThresholdData[indx]>this.threshold){
var jj = Math.floor( num/this.compressedSize) ;
var ii = num - jj*this.compressedSize ;
var x = ii/this.compressedSize
+ 0.5/this.compressedSize ;
var y = jj/this.compressedSize
+ 0.5/this.compressedSize ;
var nindx = ii + jj*this.compressedSize ;
this.compressionMapTable[indx*4 ] = x ;
this.compressionMapTable[indx*4 + 1 ] = y ;
this.decompressionMapTable[nindx*4 ] =
i/this.width + 0.5/this.width ;
this.decompressionMapTable[nindx*4+1] =
j/this.height+ 0.5/this.height ;
for (var k = 0 ; k<4 ; k++){
this.compressedTable[nindx*4+k]
= this.data[indx*4+k] ;
}
num++ ;
}else{
this.compressionMapTable[indx*4 ]
= 1.-0.5/this.compressedSize ;
this.compressionMapTable[indx*4 + 1 ]
= 1.-0.5/this.compressedSize ;
}
}
}
var ii = this.compressedSize -1 ;
var jj = this.compressedSize -1 ;
var nindx = ii + jj*this.compressedSize ;
for (var k = 0 ; k<4 ; k++){
this.compressedTable[nindx*4+k] = 0. ;
}
/*------------------------------------------------------------------------
* setting compressedData, compressionMap, decompressionMap textures
*------------------------------------------------------------------------
*/
this.full = new TableTexture(
this.data,
this.width,
this.height,
{
minFilter : 'nearest' ,
magFilter : 'nearest'
}
) ;
this.sparse = new TableTexture(
this.compressedTable,
this.compressedSize ,
this.compressedSize ,
{
minFilter : 'nearest' ,
magFilter : 'nearest'
}
) ;
this.compressionMap = new TableTexture(
this.compressionMapTable,
this.width,
this.height ,
{
minFilter : 'nearest' ,
magFilter : 'nearest'
}
) ;
this.decompressionMap = new TableTexture(
this.decompressionMapTable ,
this.compressedSize ,
this.compressedSize ,
{
minFilter : 'nearest' ,
magFilter : 'nearest'
}
) ;
}
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* CONSTRUCTOR ENDS
*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
And making use of the following class:
class Float32TextureTableBond{
/*------------------------------------------------------------------------
* constructor
*------------------------------------------------------------------------
*/
constructor( options={}){
if ( options.target == undefined && options.texture == undefined ){
return null ;
} ;
this.target = readOption( options.target , null ) ;
this.target = readOption( options.texture, this.target ) ;
this.framebuffer = gl.createFramebuffer() ;
gl.bindFramebuffer( gl.READ_FRAMEBUFFER, this.framebuffer) ;
gl.framebufferTexture2D(gl.READ_FRAMEBUFFER, gl.COLOR_ATTACHMENT0,
gl.TEXTURE_2D,
this.target.texture, 0 ) ;
gl.readBuffer( gl.COLOR_ATTACHMENT0 ) ;
this.canRead = (
gl.checkFramebufferStatus(gl.READ_FRAMEBUFFER)
== gl.FRAMEBUFFER_COMPLETE
) ;
gl.bindFramebuffer( gl.READ_FRAMEBUFFER, null) ;
this.width = this.target.width ;
this.height = this.target.height ;
this.table = readOption(options.table,
new Float32Array(this.width*this.height*4 ) ) ;
}
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* CONSTRUCTOR ENDS
*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
No error messages, correct output. When I start recording data, my simulation slows down to the speed of a lethargic turtle.
I haven't really thought it through but here's some code that may give you ideas.
The problem is there is no way to conditionally output data in WebGL2 AFAIK. You can discard in the fragment shader but that does not seem helpful here.
So, in any case, the first thing to think about is that shaders parallelize based on output. If there are 32k pixels to draw, the GPU has 32k things it can parallelize. If there is 1 pixel that inspects 32k things, the GPU has nothing to parallelize.
So, here's one idea: divide the 3D texture into cells NxNxN big and search through each cell for "on" voxels. If a cell is 32x32x32, then for a 512x512x512 input there are 4096 things to parallelize. For each cell, walk the cell and sum the positions of matches:
sum = vec4(0)
for each voxel in cell
if voxel === 1
sum += vec4(positionOfVoxel, 1);
outColor = sum;
The result is that if there is just 1 match in that cell then sum.xyz will contain the position and sum.w will be 1. If there is more than one match sum.w will be > 1
The code below makes a 4096x1 texture and renders a quad to it. It uses gl_FragCoord.x to compute which cell each pixel being rendered corresponds to and sums the results for the corresponding cell.
It then uses readPixels to read the result and goes through and prints them out. Ideally I'd like the GPU itself to figure out the results, but given you can't conditionally output data I didn't have any ideas.
For a cell with only one result, the result is printed directly. For a cell with multiple results, another shader scans that cell. Since we know how many results are in a particular cell, we can render numResults by 1 pixels. The shader then goes over the cell and only outputs the N'th result it finds:
int idOfResultWeWant = int(gl_FragCoord.x)
int resultId = 0
for (z...) {
  for (y...) {
    for (x...) {
      if (voxel) {
        if (resultId === idOfResultWeWant) {
          outColor = position
        }
        ++resultId
      }
    }
  }
}
The code below is lazy and uses 1D result textures which means the most cells it can handle is gl.getParameter(gl.MAX_TEXTURE_SIZE). It would have to change a little for larger sizes.
No idea if this is the fastest way or even a fast way but the concepts of parallel based on what's being rendered is important as well as dividing the problem into smaller parts.
Like maybe using 16x16x16 cells is better, and maybe instead of the second shader we should just use the first shader again, subdividing a cell itself into smaller cells.
function main() {
const gl = document.createElement('canvas').getContext('webgl2');
if (!gl) {
return alert('need webgl2');
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
return alert('need EXT_color_buffer_float');
}
const size = 512;
const cellSize = 32;
const cellsPer = size / cellSize;
const numCells = (size * size * size) / (cellSize * cellSize * cellSize);
const dataTexture = twgl.createTexture(gl, {
target: gl.TEXTURE_3D,
width: size,
height: size,
depth: size,
minMag: gl.NEAREST,
internalFormat: gl.R8,
auto: false,
});
function setData(x, y, z) {
log('set voxel:', x, y, z);
gl.texSubImage3D(
gl.TEXTURE_3D, 0, x, y, z, 1, 1, 1,
gl.RED, gl.UNSIGNED_BYTE, new Uint8Array([255]));
}
for (let i = 0; i < 3; ++i) {
const x = randInt(size);
const y = randInt(size);
const z = randInt(size);
setData(x, y, z);
}
setData(128, 267, 234);
setData(128 + 4, 267, 234);
setData(128 + 9, 267, 234);
const cellVS = `#version 300 es
in vec4 position;
void main() {
gl_Position = position;
}
`;
const cellFS = `#version 300 es
precision highp float;
uniform highp sampler3D data;
uniform int cellSize;
out vec4 outColor;
void main() {
// really should use 2D but I'm lazy
int ndx = int(gl_FragCoord.x);
// assumes equal sides
int size = textureSize(data, 0).x;
int cellsPer = size / cellSize;
int cellZ = ndx / cellsPer / cellsPer;
int cellY = ndx / cellsPer % cellsPer;
int cellX = ndx % cellsPer;
ivec3 cell = ivec3(cellX, cellY, cellZ) * cellSize;
vec4 sum = vec4(0);
for (int z = 0; z < cellSize; ++z) {
for (int y = 0; y < cellSize; ++y) {
for (int x = 0; x < cellSize; ++x) {
ivec3 pos = cell + ivec3(x, y, z);
// assumes data is 0 or 1
float occupied = texelFetch(
data,
pos,
0).r;
sum += vec4(pos, 1) * occupied;
}
}
}
outColor = sum;
}
`;
const cellScanFS = `#version 300 es
precision highp float;
uniform highp sampler3D data;
uniform int cellSize;
uniform ivec3 cell; // offset into cell
out vec4 outColor;
void main() {
// really should use 2D but I'm lazy
int idWeWant = int(gl_FragCoord.x);
// assumes equal sides
int size = textureSize(data, 0).x;
int cellsPer = size / cellSize;
vec4 result = vec4(0);
int id = 0;
for (int z = 0; z < cellSize; ++z) {
for (int y = 0; y < cellSize; ++y) {
for (int x = 0; x < cellSize; ++x) {
ivec3 pos = cell + ivec3(x, y, z);
float occupied = texelFetch(
data,
pos,
0).r;
if (occupied > 0.0) {
if (id == idWeWant) {
result = vec4(pos, 1);
}
++id;
}
}
}
}
outColor = result;
}
`;
const cellProgramInfo = twgl.createProgramInfo(gl, [cellVS, cellFS]);
const cellScanProgramInfo = twgl.createProgramInfo(gl, [cellVS, cellScanFS]);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
// as long as numCells is less than the max
// texture dimensions we can use a
// numCells by 1 result texture.
// If numCells is > max texture dimension
// we'd need to adjust the code to use
// a 2d result texture.
const cellResultWidth = numCells;
const cellResultHeight = 1;
const cellResultFBI = twgl.createFramebufferInfo(gl, [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST }
], cellResultWidth, cellResultHeight);
twgl.bindFramebufferInfo(gl, cellResultFBI);
twgl.setBuffersAndAttributes(gl, cellProgramInfo, quadBufferInfo);
gl.useProgram(cellProgramInfo.program);
twgl.setUniforms(cellProgramInfo, {
cellSize,
data: dataTexture,
});
// draw the quad
twgl.drawBufferInfo(gl, quadBufferInfo);
const data = new Float32Array(numCells * 4);
gl.readPixels(0, 0, numCells, 1, gl.RGBA, gl.FLOAT, data);
gl.useProgram(cellScanProgramInfo.program);
{
for (let i = 0; i < numCells; ++i) {
const off = i * 4;
const numResultsInCell = data[off + 3];
if (numResultsInCell) {
if (numResultsInCell === 1) {
log('result at: ', ...data.slice(off, off + 3));
} else {
getResultsForCell(i, numResultsInCell);
}
}
}
}
function getResultsForCell(i, numResultsInCell) {
const cellZ = (i / cellsPer | 0) / cellsPer | 0;
const cellY = (i / cellsPer | 0) % cellsPer;
const cellX = i % cellsPer;
twgl.setUniforms(cellScanProgramInfo, {
cellSize,
data: dataTexture,
cell: [cellX * cellSize, cellY * cellSize, cellZ * cellSize],
});
twgl.drawBufferInfo(gl, quadBufferInfo);
// note: cellResultsFBI is still bound. It's 4096x1
// so we can only get up to 4096 results without switching to
// a 2D texture
gl.viewport(0, 0, numResultsInCell, 1);
const result = new Float32Array(numResultsInCell * 4);
gl.readPixels(0, 0, numResultsInCell, 1, gl.RGBA, gl.FLOAT, result);
for (let j = 0; j < numResultsInCell; ++j) {
const off = j * 4;
log('result at:', ...result.slice(off, off + 3));
}
}
function randInt(min, max) {
return Math.floor(rand(min, max));
}
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return Math.random() * (max - min) + min;
}
function log(...args) {
const elem = document.createElement('pre');
elem.textContent = [...args].join(' ');
document.body.appendChild(elem);
}
}
main();
pre { margin: 0; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Is there a way to push vec3 primitives to an array from a fragmentShader?
Yes, use a shader storage buffer (note: SSBOs require OpenGL 4.3 / OpenGL ES 3.1 and are not exposed by WebGL2 itself, so this applies to native GL contexts). Something along the lines of:
layout(std430, binding = 0) buffer Output
{
uvec3 out_vals[];
};
That will need to be bound to a buffer large enough to store the returned arguments. (Off the top of my head I think std430 allows for vec3 output types, but I also have this strange feeling the out type might need to be uint, so you may need to write 3 values at a time; can't quite remember, sadly.)
You then need to determine an index for the element in the output array you will write to. For that you can use an atomic counter buffer to determine the counter, e.g.
layout(binding = 0, offset = 0) uniform atomic_uint out_count;
Then later on within your shader, generate your index from gl_GlobalInvocationID (if using a compute shader) or gl_SamplePosition (for fragment shaders), and you should be able to write out the data:
uint index = atomicCounterIncrement(out_count);
out_vals[index] = gl_GlobalInvocationID;
It is possible to use atomic operations on shader storage buffers directly, but most advice I've seen recommends using ACBs (atomic counter buffers) instead.
I have a set of polygons drawn on a Google map. Now I want to implement an algorithm that determines, given a position with a certain lat/long, which of the polygons it lies in.
Note: the polygons are also drawn from lat/long positions using the Google Maps API.
So is there an API for this? Or how can I convert lat/long positions to the x-y plane so that I can check which area a given point lies in using area formulas?
Would google.maps.geometry.poly.containsLocation work?
Use the ray-casting algorithm below; it may help you resolve the problem.
google.maps.Polygon.prototype.Contains = function(point) {
// ray casting algorithm
var crossings = 0,
path = this.getPath();
// for each edge
for (var i = 0; i < path.getLength(); i++) {
var a = path.getAt(i),
j = i + 1;
if (j >= path.getLength()) {
j = 0;
}
var b = path.getAt(j);
if (rayCrossesSegment(point, a, b)) {
crossings++;
}
}
// odd number of crossings?
return (crossings % 2 == 1);
function rayCrossesSegment(point, a, b) {
var px = point.lng(),
py = point.lat(),
ax = a.lng(),
ay = a.lat(),
bx = b.lng(),
by = b.lat();
if (ay > by) {
ax = b.lng();
ay = b.lat();
bx = a.lng();
by = a.lat();
}
// alter longitude to cater for 180 degree crossings
if (px < 0) px += 360;
if (ax < 0) ax += 360;
if (bx < 0) bx += 360;
if (py == ay || py == by) py += 0.00000001;
if ((py > by || py < ay) || (px > Math.max(ax, bx))) return false;
if (px < Math.min(ax, bx)) return true;
var red = (ax != bx) ? ((by - ay) / (bx - ax)) : Infinity;
var blue = (ax != px) ? ((py - ay) / (px - ax)) : Infinity;
return (blue >= red);
}
};