I'm trying to convert/update THREE.Geometry code to THREE.BufferGeometry in a case where the geometry is also constructed with the deprecated THREE.Face3, and I can't find any tidy solution.
Source: https://github.com/juniorxsound/Depthkit.js/blob/master/README.md
DepthKit.geo = new THREE.Geometry();
for (let y = 0; y < VERTS_TALL; y++) {
for (let x = 0; x < VERTS_WIDE; x++) {
DepthKit.geo.vertices.push(new THREE.Vector3(x, y, 0));
}
}
for (let y = 0; y < VERTS_TALL - 1; y++) {
for (let x = 0; x < VERTS_WIDE - 1; x++) {
DepthKit.geo.faces.push(
new THREE.Face3(
x + y * VERTS_WIDE,
x + (y + 1) * VERTS_WIDE,
(x + 1) + y * (VERTS_WIDE)
));
DepthKit.geo.faces.push(
new THREE.Face3(
x + 1 + y * VERTS_WIDE,
x + (y + 1) * VERTS_WIDE,
(x + 1) + (y + 1) * (VERTS_WIDE)
));
}
}
I can get as far as using the guidance here to make an array of Vector3 as 'points' and set the points on the buffer geometry from that, but I can't find any comparable way to write the faces between the points the way THREE.Face3 does above... Any ideas?
I'm not the only person who has been stuck on this for well over a year.
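For what it's worth, a minimal sketch of one way the same grid can be built as a BufferGeometry: each Face3's three vertex indices go straight into an index array passed to setIndex (this is my sketch, not from the DepthKit source).
// Same VERTS_WIDE x VERTS_TALL grid, as a BufferGeometry.
const positions = [];
for (let y = 0; y < VERTS_TALL; y++) {
  for (let x = 0; x < VERTS_WIDE; x++) {
    positions.push(x, y, 0); // flat x/y/z triplets instead of Vector3 objects
  }
}
// Each former Face3(a, b, c) contributes its three indices unchanged.
const indices = [];
for (let y = 0; y < VERTS_TALL - 1; y++) {
  for (let x = 0; x < VERTS_WIDE - 1; x++) {
    indices.push(
      x + y * VERTS_WIDE,
      x + (y + 1) * VERTS_WIDE,
      (x + 1) + y * VERTS_WIDE);
    indices.push(
      (x + 1) + y * VERTS_WIDE,
      x + (y + 1) * VERTS_WIDE,
      (x + 1) + (y + 1) * VERTS_WIDE);
  }
}
DepthKit.geo = new THREE.BufferGeometry();
DepthKit.geo.setAttribute('position', new THREE.Float32BufferAttribute(positions, 3));
DepthKit.geo.setIndex(indices);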
I'm trying to implement Floyd-Steinberg dithering in a P5.js sketch by pre-dithering a bunch of circles in a graphics object (in setup) and then drawing them later.
However, I keep running into the issue where only part of the circle is dithered, and the rest looks normal. Any suggestions are welcome as I'm really stumped as to what is going on.
setup():
let circs;
function setup() {
//...
createCanvas(1000,1000);
let size = 200;
circs = [];
circs.push({
gfx: createGraphics(size, size),
size: size,
color: color(random(255))
});
for (let i = 0; i < circs.length; i++)
dither(circs[i]);
// ...
}
draw():
function draw() {
if (!paused) {
background(bg);
drawShadow(4); // just a call to the drawingContext shadow
for (let i = 0; i < circs.length; i++) {
push();
translate(width / 2, height / 2);
imageMode(CENTER);
image(circs[i].gfx, 0, 0);
pop();
}
}
}
The Floyd-Steinberg dither, based on https://openprocessing.org/sketch/1192123:
function index(x, y, g) {
return (x + y * g.width) * 4;
}
function dither(g) {
g.loadPixels();
for (let y = 0; y < g.height - 1; y++) {
for (let x = 1; x < g.width - 1; x++) {
let oldr = g.pixels[index(x, y, g)];
let oldg = g.pixels[index(x, y, g) + 1];
let oldb = g.pixels[index(x, y, g) + 2];
let factor = 1.0;
let newr = round((factor * oldr) / 255) * (255 / factor);
let newg = round((factor * oldg) / 255) * (255 / factor);
let newb = round((factor * oldb) / 255) * (255 / factor);
g.pixels[index(x, y, g)] = newr;
g.pixels[index(x, y, g) + 1] = newg;
g.pixels[index(x, y, g) + 2] = newb;
g.pixels[index(x + 1, y, g)] += ((oldr - newr) * 7) / 16.0;
g.pixels[index(x + 1, y, g) + 1] += ((oldr - newr) * 7) / 16.0;
g.pixels[index(x + 1, y, g) + 2] += ((oldr - newr) * 7) / 16.0;
g.pixels[index(x - 1, y + 1, g)] += ((oldr - newr) * 3) / 16.0;
g.pixels[index(x - 1, y + 1, g) + 1] += ((oldr - newr) * 3) / 16.0;
g.pixels[index(x - 1, y + 1, g) + 2] += ((oldr - newr) * 3) / 16.0;
g.pixels[index(x, y + 1, g)] += ((oldr - newr) * 5) / 16.0;
g.pixels[index(x, y + 1, g) + 1] += ((oldr - newr) * 5) / 16.0;
g.pixels[index(x, y + 1, g) + 2] += ((oldr - newr) * 5) / 16.0;
g.pixels[index(x + 1, y + 1, g)] += ((oldr - newr) * 1) / 16.0;
g.pixels[index(x + 1, y + 1, g) + 1] += ((oldr - newr) * 1) / 16.0;
g.pixels[index(x + 1, y + 1, g) + 2] += ((oldr - newr) * 1) / 16.0;
}
}
g.updatePixels();
}
I'm not sure what I'm missing: the dither loops cover the graphics object's full width and height and then update the pixels, yet only part of the circle comes out dithered.
p5.Graphics objects have a pixelDensity inherited from the sketch. When the pixel density is > 1, as it is on high-DPI displays, you need to account for it when computing your pixel indices:
function index(x, y, g) {
const d = g.pixelDensity();
return (x + y * g.width * d) * 4;
}
And when you are processing pixels you will need to scale the maximum values for x and y by the density (double them when the density is 2).
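In the sketch above, a density-aware dither loop might look like this (my sketch, assuming the corrected index() helper; it also diffuses each channel's own error, where the original diffused the red error into all three channels):
function dither(g) {
  const d = g.pixelDensity(); // usually 2 on high-DPI displays
  const w = g.width * d;      // the pixel buffer is w x h, not width x height
  const h = g.height * d;
  g.loadPixels();
  for (let y = 0; y < h - 1; y++) {
    for (let x = 1; x < w - 1; x++) {
      for (let c = 0; c < 3; c++) { // r, g, b channels
        const i = index(x, y, g) + c;
        const oldVal = g.pixels[i];
        const newVal = round(oldVal / 255) * 255; // quantize to 0 or 255
        const err = oldVal - newVal;
        g.pixels[i] = newVal;
        // Distribute the quantization error to the neighbours.
        g.pixels[index(x + 1, y, g) + c] += (err * 7) / 16;
        g.pixels[index(x - 1, y + 1, g) + c] += (err * 3) / 16;
        g.pixels[index(x, y + 1, g) + c] += (err * 5) / 16;
        g.pixels[index(x + 1, y + 1, g) + c] += (err * 1) / 16;
      }
    }
  }
  g.updatePixels();
}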
Here's a demonstration of the effects of pixelDensity (and whether or not you handle it):
let g;
function setup() {
createCanvas(400, 400);
g = createGraphics(width, height);
redrawGraphics();
noLoop();
setInterval(
() => {
redrawGraphics(frameCount % 2);
redraw();
},
2000
);
}
function index(x, y, g, d) {
return (x + y * g.width * d) * 4;
}
function redrawGraphics(hdpi) {
const d = hdpi ? pixelDensity() : 1;
g.background(0);
g.loadPixels();
for (let y = 0; y < height * 2; y++) {
for (let x = 0; x < width * 2; x++) {
let ix = index(x, y, g, d);
let r = map(sin((x - y) / width * TWO_PI), -1, 1, 0, 255);
g.pixels[ix] = r;
g.pixels[ix + 1] = 0;
g.pixels[ix + 2] = 0;
g.pixels[ix + 3] = 255;
}
}
g.updatePixels();
}
function draw() {
image(g, 0, 0);
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.4.0/p5.js"></script>
I would like to check if an object is within range in a matrix.
A range of 1 would be the 9 blocks around the player (orange in the figure); a range of 2 would be 25 blocks (blue). The player is the red cross. I tried the following code:
int size = ((range * 2) +1) * ((range * 2) + 1);
int sq = (range * 2) + 1;
int startX = x - range; if (startX < 0) startX = 0;
int startY = y - range; if (startY < 0) startY = 0;
int endX = x + range; if (endX > arrayWitdth) endX = arrayWitdth;
int endY = y + range; if (endY > arrayLenght) endY = arrayLenght;
//printf("Range: %d\n", range);
for (size_t i = startX; i < endX; i++)
{
for (size_t j = startY; j < endY; j++)
{
//printf("Looking at (%d,%d)\n", i, j);
if (map[i][j] == charTocheck) return 1;
}
}
You don't check the last row and column: your loops use < but your end coordinates are inclusive. The correct implementation would be:
int size = ((range * 2) +1) * ((range * 2) + 1);
int sq = (range * 2) + 1;
int startX = x - range; if (startX < 0) startX = 0;
int startY = y - range; if (startY < 0) startY = 0;
int endX = x + range + 1; if (endX > arrayWitdth) endX = arrayWitdth;
int endY = y + range + 1; if (endY > arrayLenght) endY = arrayLenght;
//printf("Range: %d\n", range);
for (size_t i = startX; i < endX; i++)
{
for (size_t j = startY; j < endY; j++)
{
//printf("Looking at (%d,%d)\n", i, j);
if (map[i][j] == charTocheck) return 1;
}
}
Notice that endX and endY have changed slightly: adding 1 makes the bounds exclusive (half-open), so the loops cover the full (2 * range + 1) square around the player.
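To see the corrected bounds in action, here is a self-contained example (the 5x5 map, the in_range helper, and the coordinates are made up for the demo): with the player at (2,2), range 1 scans rows/columns 1..3 and misses a target at (2,4), while range 2 scans 0..4 and finds it.
#include <stdio.h>

enum { W = 5, H = 5 };

/* Half-open bounds: [start, end) covers exactly (2 * range + 1) cells
   per axis unless clipped at the map edge. */
int in_range(char map[H][W], int x, int y, int range, char target)
{
    int startX = x - range; if (startX < 0) startX = 0;
    int startY = y - range; if (startY < 0) startY = 0;
    int endX = x + range + 1; if (endX > W) endX = W;
    int endY = y + range + 1; if (endY > H) endY = H;
    for (int i = startX; i < endX; i++)
        for (int j = startY; j < endY; j++)
            if (map[i][j] == target) return 1;
    return 0;
}

int main(void)
{
    char map[H][W] = {
        {'.','.','.','.','.'},
        {'.','.','.','.','.'},
        {'.','.','.','.','X'}, /* target at row 2, column 4 */
        {'.','.','.','.','.'},
        {'.','.','.','.','.'},
    };
    printf("range 1: %d\n", in_range(map, 2, 2, 1, 'X')); /* prints 0 */
    printf("range 2: %d\n", in_range(map, 2, 2, 2, 'X')); /* prints 1 */
    return 0;
}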
I'm trying to convert an RGB32 image to YUV420P in order to record video.
I have an image
QImage image = QGuiApplication::primaryScreen()->grabWindow(0, rect_x, rect_y, rect_width, rect_height).toImage().convertToFormat(QImage::Format_RGB32);
AVFrame *frame;
and convert
for (y = 0; y < c->height; y++) {
QRgb *rowData = (QRgb*)image.scanLine(y);
for (x = 0; x < c->width; x++) {
QRgb pixelData = rowData[x];
int r = qRed(pixelData);
int g = qGreen(pixelData);
int b = qBlue(pixelData);
int y0 = (int)(0.2126 * (float)(r) + 0.7152 * (float)(g) + 0.0722 * (float)(b));
int u = 128 + (int)(-0.09991 * (float)(r) - 0.33609 * (float)(g) + 0.436 * (float)(b));
int v = 128 + (int)(0.615 * (float)(r) - 0.55861 * (float)(g) - 0.05639 * (float)(b));
frame->data[0][y * frame->linesize[0] + x] = y0;
frame->data[1][y / 2 * frame->linesize[1] + x / 2] = u;
frame->data[2][y / 2 * frame->linesize[2] + x / 2] = v;
}
}
but in the resulting image I see artifacts; the text looks smeared: http://joxi.ru/eAORRX0u4d46a2
Is this a bug in the conversion algorithm, or something else?
Update:
for (y = 0; y < c->height; y++) {
QRgb *rowData = (QRgb*)image.scanLine(y);
for (x = 0; x < c->width; x++) {
QRgb pixelData = rowData[x];
int r = qRed(pixelData);
int g = qGreen(pixelData);
int b = qBlue(pixelData);
int y0 = (int)(0.2126 * (float)(r) + 0.7152 * (float)(g) + 0.0722 * (float)(b));
if (y0 < 0)
y0 = 0;
if (y0 > 255)
y0 = 255;
frame->data[0][y * frame->linesize[0] + x] = y0;
}
}
int x_pos = 0;
int y_pos = 0;
for (y = 1; y < c->height; y+=2) {
QRgb *pRow = (QRgb*)image.scanLine(y - 1);
QRgb *sRow = (QRgb*)image.scanLine(y);
for (x = 1; x < c->width; x+=2) {
QRgb pd1 = pRow[x - 1];
QRgb pd2 = pRow[x];
QRgb pd3 = sRow[x - 1];
QRgb pd4 = sRow[x];
int r = (qRed(pd1) + qRed(pd2) + qRed(pd3) + qRed(pd4)) / 4;
int g = (qGreen(pd1) + qGreen(pd2) + qGreen(pd3) + qGreen(pd4)) / 4;
int b = (qBlue(pd1) + qBlue(pd2) + qBlue(pd3) + qBlue(pd4)) / 4;
int u = 128 + (int)(-0.147 * (float)(r) - 0.289 * (float)(g) + 0.436 * (float)(b));
int v = 128 + (int)(0.615 * (float)(r) - 0.515 * (float)(g) - 0.1 * (float)(b));
if (u < 0)
u = 0;
if (u > 255)
u = 255;
if (v < 0)
v = 0;
if (v > 255)
v = 255;
frame->data[1][y_pos * frame->linesize[1] + x_pos] = u;
frame->data[2][y_pos * frame->linesize[2] + x_pos] = v;
x_pos++;
}
x_pos = 0;
y_pos++;
}
This works for me, but it's very slow: 60-70 ms per frame.
The first problem is that you are letting your YUV values go beyond the allowed range (which is even stricter than 0x00-0xFF, but you don't do any clamping at all). See:
Y' values are conventionally shifted and scaled to the range [16, 235] (referred to as studio swing or "TV levels") rather than using the full range of [0, 255] (referred to as full swing or "PC levels"). This confusing practice derives from the MPEG standards and explains why 16 is added to Y' and why the Y' coefficients in the basic transform sum to 220 instead of 255.[8] U and V values, which may be positive or negative, are summed with 128 to make them always positive, giving a studio range of 16–240 for U and V. (These ranges are important in video editing and production, since using the wrong range will result either in an image with "clipped" blacks and whites, or a low-contrast image.)
The second problem is that 4:2:0 means you end up with one Y value for every pixel, but only one U and one V value for every four pixels. That is, U and V should be averages of the corresponding 2x2 block of pixels, whereas your loop simply overwrites them with the U and V of the fourth input pixel, ignoring the previous three.
You tagged the question ffmpeg, and your previous question is FFmpeg-related too. Note that FFmpeg offers the swscale library, whose sws_scale does the conversion far more efficiently than your loop, even with the optimizations you could add to it. See these related questions on SO:
avcodec YUV to RGB
Video from pipe->YUV with libAV->RGB with sws_scale->Draw with Qt
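For reference, a rough sketch of the swscale path, reusing the question's c, image, and frame (the BGRA pixel format matches QImage::Format_RGB32's in-memory layout on little-endian machines; verify on your platform):
extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
}

// Create the context once and reuse it for every frame;
// recreate it only if the frame size changes.
SwsContext *sws = sws_getContext(
    c->width, c->height, AV_PIX_FMT_BGRA,
    c->width, c->height, AV_PIX_FMT_YUV420P,
    SWS_BILINEAR, nullptr, nullptr, nullptr);

// Per frame: convert the whole QImage in one call.
const uint8_t *srcSlice[1] = { image.constBits() };
int srcStride[1] = { static_cast<int>(image.bytesPerLine()) };
sws_scale(sws, srcSlice, srcStride, 0, c->height,
          frame->data, frame->linesize);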
Using the code below,
1 maxed out mesh draws at 60 FPS,
2 maxed out meshes draw at 33~ FPS,
3 maxed out meshes draw at 28~ FPS,
4 maxed out meshes draw at 20~ FPS.
Am I doing something wrong, or am I reaching some sort of limit? It doesn't seem like I am drawing a lot of polygons but I am still new to programming so I don't know much. Please offer some efficiency advice. Thank you.
class PolygonManager
{
List<List<VertexPositionColor>> vertices;
VertexBuffer vertexBuffer;
List<List<int>> indices;
IndexBuffer indexBuffer;
int meshRef;
int indexRef;
Random random;
public PolygonManager()
{
vertices = new List<List<VertexPositionColor>>();
vertices.Add(new List<VertexPositionColor>());
indices = new List<List<int>>();
indices.Add(new List<int>());
meshRef = -1;
indexRef = 0;
random = new Random();
}
public void CreateMesh(int length, int width, Vector3 position, Color color)
{
meshRef = -1;
indexRef = 0;
for (int i = 0; i < vertices.Count; i++)
{
if (vertices[i].Count <= 65536 - (length * width))
meshRef = i;
}
if (meshRef == -1)
{
vertices.Add(new List<VertexPositionColor>());
indices.Add(new List<int>());
meshRef = vertices.Count - 1;
}
indexRef = vertices[meshRef].Count;
for (int y = 0; y < length; y++)
{
for (int x = 0; x < width; x++)
{
vertices[meshRef].Add(new VertexPositionColor(new Vector3(x, 0, y) + position,
new Color(color.R + (random.Next(-10, 10) / 100), color.G + (random.Next(-10, 10) / 100), color.B + (random.Next(-10, 10) / 100))));
}
}
for (int y = 0; y < length - 1; y++)
{
for (int x = 0; x < width - 1; x++)
{
int topLeft = x + y * width;
int topRight = (x + 1) + y * width;
int lowerLeft = x + (y + 1) * width;
int lowerRight = (x + 1) + (y + 1) * width;
indices[meshRef].Add(topLeft + indexRef);
indices[meshRef].Add(lowerRight + indexRef);
indices[meshRef].Add(lowerLeft + indexRef);
indices[meshRef].Add(topLeft + indexRef);
indices[meshRef].Add(topRight + indexRef);
indices[meshRef].Add(lowerRight + indexRef);
}
}
}
public void Draw(GraphicsDevice graphicsDevice, BasicEffect basicEffect)
{
for (int v = 0; v < vertices.Count; v++)
{
vertexBuffer = new VertexBuffer(graphicsDevice, typeof(VertexPositionColor), vertices[v].Count, BufferUsage.WriteOnly);
vertexBuffer.SetData<VertexPositionColor>(vertices[v].ToArray());
graphicsDevice.SetVertexBuffer(vertexBuffer);
indexBuffer = new IndexBuffer(graphicsDevice, typeof(int), indices[v].Count, BufferUsage.WriteOnly);
indexBuffer.SetData<int>(indices[v].ToArray());
graphicsDevice.Indices = indexBuffer;
foreach (EffectPass effectPass in basicEffect.CurrentTechnique.Passes)
{
effectPass.Apply();
for (int i = 0; i < 6; i++)
{
graphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, vertices[v].Count, 0, indices[v].Count/3);
}
}
}
}
}
Moving the code that initializes the buffers and writes the data out of the Draw method should increase performance significantly.
Creating vertex and index buffers is an expensive operation. For static meshes (where the vertices don't change) you can create the buffers once and reuse them.
If the vertices/indices change often (e.g., once per frame), use a dynamic buffer (DynamicVertexBuffer/DynamicIndexBuffer) instead.
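A rough sketch of what that restructuring might look like (BuildBuffers and the buffer lists are my names, not from the original code): build the buffers once after the meshes are created, then only bind and draw them each frame.
List<VertexBuffer> meshVertexBuffers = new List<VertexBuffer>();
List<IndexBuffer> meshIndexBuffers = new List<IndexBuffer>();

// Call once after all CreateMesh calls (e.g. from LoadContent), not per frame.
public void BuildBuffers(GraphicsDevice graphicsDevice)
{
    for (int v = 0; v < vertices.Count; v++)
    {
        var vb = new VertexBuffer(graphicsDevice, typeof(VertexPositionColor),
                                  vertices[v].Count, BufferUsage.WriteOnly);
        vb.SetData(vertices[v].ToArray());
        meshVertexBuffers.Add(vb);
        var ib = new IndexBuffer(graphicsDevice, typeof(int),
                                 indices[v].Count, BufferUsage.WriteOnly);
        ib.SetData(indices[v].ToArray());
        meshIndexBuffers.Add(ib);
    }
}

public void Draw(GraphicsDevice graphicsDevice, BasicEffect basicEffect)
{
    for (int v = 0; v < meshVertexBuffers.Count; v++)
    {
        graphicsDevice.SetVertexBuffer(meshVertexBuffers[v]);
        graphicsDevice.Indices = meshIndexBuffers[v];
        foreach (EffectPass effectPass in basicEffect.CurrentTechnique.Passes)
        {
            effectPass.Apply();
            // Draw each mesh once; the original drew everything six times.
            graphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList,
                0, 0, vertices[v].Count, 0, indices[v].Count / 3);
        }
    }
}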
For Ludum Dare 22, Notch programmed a game in 48 hours called Minicraft. It's like a 2D Minecraft.
Anyway, the source is available (here: http://www.ludumdare.com/compo/ludum-dare-22/?action=preview&uid=398), and I was taking a look since I'm interested in random generation of terrain and levels. The code contains a block that runs the core generation, and the algorithm seems familiar to me, but I can't put a name to it. I'd like to know exactly what it is so I can read more about it and learn how it works.
Specifically, the code is from levelGen.java:
do {
int halfStep = stepSize / 2;
for (int y = 0; y < w; y += stepSize) {
for (int x = 0; x < w; x += stepSize) {
double a = sample(x, y);
double b = sample(x + stepSize, y);
double c = sample(x, y + stepSize);
double d = sample(x + stepSize, y + stepSize);
double e = (a + b + c + d) / 4.0 + (random.nextFloat() * 2 - 1) * stepSize * scale;
setSample(x + halfStep, y + halfStep, e);
}
}
for (int y = 0; y < w; y += stepSize) {
for (int x = 0; x < w; x += stepSize) {
double a = sample(x, y);
double b = sample(x + stepSize, y);
double c = sample(x, y + stepSize);
double d = sample(x + halfStep, y + halfStep);
double e = sample(x + halfStep, y - halfStep);
double f = sample(x - halfStep, y + halfStep);
double H = (a + b + d + e) / 4.0 + (random.nextFloat() * 2 - 1) * stepSize * scale * 0.5;
double g = (a + c + d + f) / 4.0 + (random.nextFloat() * 2 - 1) * stepSize * scale * 0.5;
setSample(x + halfStep, y, H);
setSample(x, y + halfStep, g);
}
}
stepSize /= 2;
scale *= (scaleMod + 0.8);
scaleMod *= 0.3;
} while (stepSize > 1);
Those two for loops run some kind of sampling algorithm, and I would just like to know whether this is a known, named algorithm, or whether Notch rolled his own.
This looks like the diamond-square algorithm. The first loop is the "diamond" step: the centre of each square is set to the average of its four corners plus a random offset. The second loop is the "square" step: each edge midpoint is set from its neighbouring samples plus a random offset. The step size then halves and the noise scale shrinks, so each pass adds finer detail.
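For comparison, a minimal stand-alone version of diamond-square on a (2^n + 1)-sized grid (my sketch; Notch's version differs in details such as wrap-around sampling and the evolving scale factor):
import java.util.Random;

public class DiamondSquare {
    /** Returns a (2^n + 1) x (2^n + 1) heightmap. */
    public static double[][] generate(int n, double roughness, long seed) {
        int size = (1 << n) + 1;
        double[][] h = new double[size][size];
        Random rnd = new Random(seed);
        double scale = roughness;
        for (int step = size - 1; step > 1; step /= 2, scale /= 2) {
            int half = step / 2;
            // Diamond step: each square's centre gets the average of its
            // four corners plus noise.
            for (int y = half; y < size; y += step)
                for (int x = half; x < size; x += step)
                    h[y][x] = (h[y - half][x - half] + h[y - half][x + half]
                             + h[y + half][x - half] + h[y + half][x + half]) / 4.0
                             + (rnd.nextDouble() * 2 - 1) * scale;
            // Square step: each edge midpoint gets the average of its
            // in-bounds axial neighbours plus noise.
            for (int y = 0; y < size; y += half)
                for (int x = ((y / half) % 2 == 0) ? half : 0; x < size; x += step) {
                    double sum = 0;
                    int cnt = 0;
                    if (y >= half)       { sum += h[y - half][x]; cnt++; }
                    if (y + half < size) { sum += h[y + half][x]; cnt++; }
                    if (x >= half)       { sum += h[y][x - half]; cnt++; }
                    if (x + half < size) { sum += h[y][x + half]; cnt++; }
                    h[y][x] = sum / cnt + (rnd.nextDouble() * 2 - 1) * scale;
                }
        }
        return h;
    }
}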