I need an efficient algorithm to smooth a line renderer (basically, the given Vector3 array which holds the points of the renderer)
something like that
Here is my code, but the fps with it is very low:
public static List<Vector3> MakeSmoothCurve(Vector3[] arrayToCurve, float smoothness)
{
List<Vector3> points;
List<Vector3> curvedPoints;
int pointsLength = 0;
int curvedLength = 0;
if (smoothness < 1.0f) smoothness = 1.0f;
pointsLength = arrayToCurve.Length;
curvedLength = (pointsLength * Mathf.RoundToInt(smoothness)) - 1;
curvedPoints = new List<Vector3>(curvedLength);
float t = 0.0f;
for (int pointInTimeOnCurve = 0; pointInTimeOnCurve < curvedLength + 1; pointInTimeOnCurve++)
{
t = Mathf.InverseLerp(0, curvedLength, pointInTimeOnCurve);
points = new List<Vector3>(arrayToCurve);
for (int j = pointsLength - 1; j > 0; j--)
{
for (int i = 0; i < j; i++)
{
points[i] = (1 - t) * points[i] + t * points[i + 1];
}
}
curvedPoints.Add(points[0]);
}
return (curvedPoints);
}
You can use a CurveField
https://docs.unity3d.com/ScriptReference/EditorGUILayout.CurveField.html
With that you can easily edit/test your curve and retrieve a point at given time.
https://docs.unity3d.com/ScriptReference/AnimationCurve.Evaluate.html
Related
I have the following code in Processing that will produce a grid of randomly selected tiles from loaded files:
// Number of distinct tile images to load.
static int img_count = 6;
// Tile images, loaded once in setup() and reused every frame in draw().
PImage[] img;
// One-time sketch setup: create the canvas and load the tile images
// "./1x/Artboard 1.png" through "./1x/Artboard 6.png".
void setup() {
size(1200, 800);
img = new PImage[img_count];
for (int i = 0; i < img_count; i++) {
img[i] = loadImage("./1x/Artboard " + (i+1) + ".png");
}
}
// Draws a 12x12 grid of 100x100 tiles, each showing a randomly chosen image.
void draw() {
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
int rand_index = int(random(img_count));
image(img[rand_index], 100 * i, 100 * j, 100, 100 );
}
}
}
By itself, it almost does what I want:
But I need that every tile be randomly rotated as well, so I tried this:
// Second attempt: adds a random quarter-turn per tile. Note that rotate()
// multiplies the sketch's single current transformation matrix, so each call
// here accumulates on top of all previous ones (across tiles and frames)
// instead of rotating only the tile about to be drawn — this is the bug
// described in the surrounding text.
void draw() {
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
float r = int(random(4)) * HALF_PI; // I added this
rotate(r); // I added this
int rand_index= int(random(img_count));
image(img[rand_index], 100 * i, 100 * j, 100, 100 );
}
}
}
This second code doesn't act as I intended, as rotate() will rotate the entire image, including tiles that were already rendered. I couldn't find an appropriate way to rotate a tile the way I want, is there any way to rotate the tile before placing it?
You will probably need to translate before rotating.
The order of transformations is important (e.g. translating, then rotating will be a different location than rotation, then translating).
In your case image(img, x, y) makes it easy to miss that behind the scenes it's more like translate(x,y);image(img, 0, 0);.
I recommend:
// Draws a 12x12 grid of tiles, each translated into place and rotated by a
// random multiple of 90 degrees. Each tile's transform is isolated with
// pushMatrix()/popMatrix(); without that pair, every translate()/rotate()
// call would accumulate onto the shared matrix across tiles and frames.
void draw() {
  for (int i = 0; i < 12; i++) {
    for (int j = 0; j < 12; j++) {
      float r = int(random(4)) * HALF_PI; // random quarter turn
      pushMatrix();                       // save the current transform
      translate(100 * i, 100 * j);        // translate first
      rotate(r);                          // then rotate about the tile origin
      int rand_index = int(random(img_count));
      image(img[rand_index], 0, 0, 100, 100);
      popMatrix();                        // restore for the next tile
    }
  }
}
(depending on your setup, you might find imageMode(CENTER); (in setup()) handy to rotate from image centre (as opposed to top left corner (default)))
I am facing trouble with a 2D stitching problem as posted here: link
I want to see the feature matching that happens in the stitching pipeline. I can do it by writing the stitching algorithm on my own like the below code.
However, can anyone please tell me how to get this information from the stitching pipeline? I want to see the matched features in another file.
// --- Find the smallest and largest descriptor distances among all matches ---
double minDist = Double.MAX_VALUE;
double maxDist = 0;
for (int i = 0; i < matchesArr.length; i++) {
    double distance = matchesArr[i].distance;
    // Independent checks: the original's "else if" skipped the max update
    // whenever the min was updated, so the very first match (which is both
    // the current min and max) never contributed to maxDist.
    if (distance < minDist)
        minDist = distance;
    if (distance > maxDist)
        maxDist = distance;
}

// --- Keep only matches whose distance is close to the best one found ---
final double thresholdFactor = 3.5;
List<DMatch> good_matches = new Vector<DMatch>();
for (int i = 0; i < matchesArr.length; i++) {
    if (matchesArr[i].distance <= thresholdFactor * minDist) {
        good_matches.add(matchesArr[i]);
    }
}

// --- Collect the matched keypoint locations from both images ---
LinkedList<Point> listImage1 = new LinkedList<Point>();
LinkedList<Point> listImage2 = new LinkedList<Point>();
List<KeyPoint> keyPointsList1 = keyPoints1.toList();
List<KeyPoint> keyPointsList2 = keyPoints2.toList();
for (int i = 0; i < good_matches.size(); i++) {
    listImage1.addLast(keyPointsList1.get(good_matches.get(i).queryIdx).pt);
    listImage2.addLast(keyPointsList2.get(good_matches.get(i).trainIdx).pt);
}

// --- Draw the good matches side by side and save the visualization ---
MatOfDMatch goodMatches = new MatOfDMatch();
goodMatches.fromList(good_matches);
Features2d.drawMatches(processedImage1, keyPoints1, processedImage2, keyPoints2, goodMatches, imgMatch, new Scalar(254, 0, 0), new Scalar(254, 0, 0), new MatOfByte(), 2);
boolean imageMatched = imgcodecs.imwrite("imageMatched.jpg", imgMatch);
I have two images. One has more green color and another one has better quality (it has the right color). How can I improve the first one to have a similar color to the second one? I used contrast enhancement as
//Contrast enhancement
for (int y = 0; y < rotated.rows; y++)
{
for (int x = 0; x < rotated.cols; x++)
{
for (int c = 0; c < 3; c++)
{
//"* Enter the alpha value [1.0-3.0]: "
//"* Enter the beta value [0-100]: ";
rotated.at<Vec3b>(y, x)[c] =
saturate_cast<uchar>(2.5*(rotated.at<Vec3b>(y, x)[c]) + 30);
}
}
}
It brightens the image, but I would like to have a similar color as the second one. What are the RGB values to change to get the second image's color?
For contrast enhancement you can use the equivalent of Matlab imadjust. You can find an OpenCV implementation here.
Applying imadjust with default parameters on each separate channel you get:
Here the full code:
#include <opencv2\opencv.hpp>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
void imadjust(const Mat1b& src, Mat1b& dst, int tol = 1, Vec2i in = Vec2i(0, 255), Vec2i out = Vec2i(0, 255))
{
// src : input CV_8UC1 image
// dst : output CV_8UC1 imge
// tol : tolerance, from 0 to 100.
// in : src image bounds
// out : dst image buonds
dst = src.clone();
tol = max(0, min(100, tol));
if (tol > 0)
{
// Compute in and out limits
// Histogram
vector<int> hist(256, 0);
for (int r = 0; r < src.rows; ++r) {
for (int c = 0; c < src.cols; ++c) {
hist[src(r, c)]++;
}
}
// Cumulative histogram
vector<int> cum = hist;
for (int i = 1; i < hist.size(); ++i) {
cum[i] = cum[i - 1] + hist[i];
}
// Compute bounds
int total = src.rows * src.cols;
int low_bound = total * tol / 100;
int upp_bound = total * (100 - tol) / 100;
in[0] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), low_bound));
in[1] = distance(cum.begin(), lower_bound(cum.begin(), cum.end(), upp_bound));
}
// Stretching
float scale = float(out[1] - out[0]) / float(in[1] - in[0]);
for (int r = 0; r < dst.rows; ++r)
{
for (int c = 0; c < dst.cols; ++c)
{
int vs = max(src(r, c) - in[0], 0);
int vd = min(int(vs * scale + 0.5f) + out[0], out[1]);
dst(r, c) = saturate_cast<uchar>(vd);
}
}
}
int main()
{
    // Load the source image (3-channel BGR).
    Mat3b img = imread("path_to_image");

    // Adjust each color channel independently, then recombine them.
    vector<Mat1b> planes;
    split(img, planes);
    for (auto& plane : planes)
    {
        imadjust(plane, plane);
    }
    Mat3b result;
    merge(planes, result);

    return 0;
}
I'm not sure if it is possible in Processing, but I would like to be able to zoom in on the fractal without it being extremely laggy and buggy. What I currently have is:
int maxIter = 100; // escape-time iteration cap
float zoom = 1; // view scale factor applied in draw()
// NOTE(review): these field initializers run before setup() calls
// size(500,300), so width/height presumably still hold their pre-size()
// defaults here and x0/y0 may not be the true canvas centre — verify.
float x0 = width/2;
float y0 = height/2;
// One-time setup: size the canvas, then re-centre the view. The field
// initializers for x0/y0 ran before size(500,300), when width/height did
// not yet reflect the real canvas, so they are recomputed here.
void setup(){
  size(500,300);
  x0 = width/2;
  y0 = height/2;
  noStroke();
  smooth();
}
// Renders the Mandelbrot set pixel-by-pixel with the classic escape-time
// algorithm, under the current pan (x0, y0) and zoom transform.
void draw(){
  translate(x0, y0);
  scale(zoom);
  // Hoist the loop-invariant canvas dimensions out of the pixel loops.
  float w = width;
  float h = height;
  for (int Py = 0; Py < height; Py++){
    for (int Px = 0; Px < width; Px++){
      // Scale pixel coordinates to Mandelbrot space: x in [-2.5, 1],
      // y in [-1, 1].
      float xScaled = (Px * (3.5f/w)) - 2.5f;
      float yScaled = (Py * (2.0f/h)) - 1;
      float x = 0;
      float y = 0;
      int iter = 0;
      while (x*x + y*y < 2*2 && iter < maxIter){
        float tempX = x*x - y*y + xScaled;
        y = 2*x*y + yScaled;
        x = tempX;
        iter += 1;
      }
      // Set the fill BEFORE drawing. The original called fill(c) after
      // rect(), so every pixel was painted with the PREVIOUS pixel's color.
      color c = pickColor(iter);
      fill(c);
      rect(Px, Py, 1, 1);
    }
  }
}
// pick color based on time pixel took to escape (number of iterations through loop)
color pickColor(int iters){
color b = color(0,0,0);
if(iters == maxIter) return b;
int l = 1;
color[] colors = new color[maxIter];
for(int i = 0; i < colors.length; i++){
switch(l){
case 1 : colors[i] = color(255,0,0); break;
case 2 : colors[i] = color(0,0,255); break;
case 3 : colors[i] = color(0,255,0); break;
}
if(l == 1 || l == 2) l++;
else if(l == 3) l = 1;
else l--;
}
return colors[iters];
}
// allow zooming in and out
void mouseWheel(MouseEvent event){
float direction = event.getCount();
if(direction < 0) zoom += .02;
if(direction > 0) zoom -= .02;
}
// allow dragging back and forth to change view
// Pans the view by the amount the mouse moved since the previous frame.
void mouseDragged(){
x0+= mouseX-pmouseX;
y0+= mouseY-pmouseY;
}
but it doesn't work very well. It works alright at the size and max iteration I have it set to now (but still not well) and is completely unusable at larger sizes or higher maximum iterations.
The G4P library has an example that does exactly this. Download the library and go to the G4P_MandelBrot example. The example can be found online here.
Hope this helps!
Using the code below,
1 maxed out mesh draws at 60 FPS,
2 maxed out meshes draw at 33~ FPS,
3 maxed out meshes draw at 28~ FPS,
4 maxed out meshes draw at 20~ FPS.
Am I doing something wrong, or am I reaching some sort of limit? It doesn't seem like I am drawing a lot of polygons but I am still new to programming so I don't know much. Please offer some efficiency advice. Thank you.
/// <summary>
/// Batches grid meshes into shared vertex/index lists and draws them.
/// GPU buffers are built lazily and reused between frames: the original
/// created (and never disposed) new VertexBuffer/IndexBuffer objects on
/// every Draw call, which is the main cause of the FPS drop.
/// </summary>
class PolygonManager
{
    List<List<VertexPositionColor>> vertices;
    List<List<int>> indices;
    // Cached GPU-side copies of the batches above; rebuilt only when the
    // CPU-side data changes.
    List<VertexBuffer> vertexBuffers;
    List<IndexBuffer> indexBuffers;
    bool buffersDirty;
    int meshRef;
    int indexRef;
    Random random;

    public PolygonManager()
    {
        vertices = new List<List<VertexPositionColor>>();
        vertices.Add(new List<VertexPositionColor>());
        indices = new List<List<int>>();
        indices.Add(new List<int>());
        vertexBuffers = new List<VertexBuffer>();
        indexBuffers = new List<IndexBuffer>();
        buffersDirty = true;
        meshRef = -1;
        indexRef = 0;
        random = new Random();
    }

    /// <summary>
    /// Appends a length x width grid of quads at the given position, with a
    /// slight random per-vertex variation of the given color.
    /// </summary>
    public void CreateMesh(int length, int width, Vector3 position, Color color)
    {
        meshRef = -1;
        indexRef = 0;
        // Find an existing batch with enough room under the 65536-vertex budget.
        for (int i = 0; i < vertices.Count; i++)
        {
            if (vertices[i].Count <= 65536 - (length * width))
                meshRef = i;
        }
        if (meshRef == -1)
        {
            vertices.Add(new List<VertexPositionColor>());
            indices.Add(new List<int>());
            meshRef = vertices.Count - 1;
        }
        indexRef = vertices[meshRef].Count;
        for (int y = 0; y < length; y++)
        {
            for (int x = 0; x < width; x++)
            {
                // Jitter each channel by up to +/-10 in byte space, clamped.
                // The original computed random.Next(-10, 10) / 100 with
                // INTEGER division, which is always 0 — no jitter was ever
                // applied. (Next's upper bound is exclusive, hence 11.)
                int r = Math.Min(255, Math.Max(0, color.R + random.Next(-10, 11)));
                int g = Math.Min(255, Math.Max(0, color.G + random.Next(-10, 11)));
                int b = Math.Min(255, Math.Max(0, color.B + random.Next(-10, 11)));
                vertices[meshRef].Add(new VertexPositionColor(new Vector3(x, 0, y) + position, new Color(r, g, b)));
            }
        }
        // Two triangles per grid cell, offset by indexRef into the batch.
        for (int y = 0; y < length - 1; y++)
        {
            for (int x = 0; x < width - 1; x++)
            {
                int topLeft = x + y * width;
                int topRight = (x + 1) + y * width;
                int lowerLeft = x + (y + 1) * width;
                int lowerRight = (x + 1) + (y + 1) * width;
                indices[meshRef].Add(topLeft + indexRef);
                indices[meshRef].Add(lowerRight + indexRef);
                indices[meshRef].Add(lowerLeft + indexRef);
                indices[meshRef].Add(topLeft + indexRef);
                indices[meshRef].Add(topRight + indexRef);
                indices[meshRef].Add(lowerRight + indexRef);
            }
        }
        // GPU copies are now stale; rebuild on the next Draw.
        buffersDirty = true;
    }

    /// <summary>
    /// Uploads mesh data to the GPU only when it has changed, then issues
    /// one indexed draw per batch and effect pass.
    /// </summary>
    public void Draw(GraphicsDevice graphicsDevice, BasicEffect basicEffect)
    {
        if (buffersDirty)
        {
            RebuildBuffers(graphicsDevice);
            buffersDirty = false;
        }
        for (int v = 0; v < vertexBuffers.Count; v++)
        {
            graphicsDevice.SetVertexBuffer(vertexBuffers[v]);
            graphicsDevice.Indices = indexBuffers[v];
            foreach (EffectPass effectPass in basicEffect.CurrentTechnique.Passes)
            {
                effectPass.Apply();
                // Single draw call: the original repeated this identical call
                // six times in a loop, multiplying GPU work for no visual gain.
                graphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, vertices[v].Count, 0, indices[v].Count / 3);
            }
        }
    }

    // Disposes any stale buffers and uploads the current batches to the GPU.
    void RebuildBuffers(GraphicsDevice graphicsDevice)
    {
        foreach (VertexBuffer vb in vertexBuffers) vb.Dispose();
        foreach (IndexBuffer ib in indexBuffers) ib.Dispose();
        vertexBuffers.Clear();
        indexBuffers.Clear();
        for (int v = 0; v < vertices.Count; v++)
        {
            VertexBuffer vertexBuffer = new VertexBuffer(graphicsDevice, typeof(VertexPositionColor), vertices[v].Count, BufferUsage.WriteOnly);
            vertexBuffer.SetData<VertexPositionColor>(vertices[v].ToArray());
            vertexBuffers.Add(vertexBuffer);
            IndexBuffer indexBuffer = new IndexBuffer(graphicsDevice, typeof(int), indices[v].Count, BufferUsage.WriteOnly);
            indexBuffer.SetData<int>(indices[v].ToArray());
            indexBuffers.Add(indexBuffer);
        }
    }
}
Moving the code where you initialize the buffers and write the data outside of the draw method should increase performance significantly.
Creating vertex and index buffers is an expensive operation. For static meshes (where the vertices don't change) you can reuse the buffers.
If the vertices/indices change often (once per frame) use a dynamic buffer.