With FLTK (version 1.4.0), is it possible to set a minimum size for an Fl_Group widget? Either explicitly or automatically, so that it wouldn't resize smaller than needed to display all its children? The Fl_Window class has the method size_range, which allows setting the smallest window size; however, Fl_Group has no such method.
If this is not available, is there perhaps some other way to enforce a constraint on how small a non-window group widget can be?
It is possible to partly achieve this by using the size_range method of the top-most window; however, if we use Fl_Tile, then each tile would not be constrained in any way. Yes, an Fl_Box widget inside an Fl_Tile can limit the minimal sizes of the outer tiles, but that's not exactly what I need here.
You can override the resize method of an Fl_Group widget. The resize method is invoked automatically whenever the widget is resized, even if you never call it explicitly.
#include <FL/Enumerations.H>
#include <FL/Fl_Double_Window.H>
#include <FL/Fl_Group.H>
#include <FL/Fl.H>

struct MyGroup : public Fl_Group {
    MyGroup(int x, int y, int w, int h, const char *label = 0) : Fl_Group(x, y, w, h, label) {
        box(FL_ENGRAVED_FRAME);
    }
    virtual void resize(int x, int y, int w, int h) override {
        Fl_Group::resize(x, y, w, h);
        // Enforce a 300x200 minimum: if the requested size is too small,
        // resize again to the clamped size. The recursion terminates because
        // the clamped size passes the check.
        if (w < 300 || h < 200) {
            resize(x, y, w < 300 ? 300 : w, h < 200 ? 200 : h);
        }
    }
};

int main() {
    Fl_Double_Window w(400, 300);
    MyGroup g(20, 20, 360, 260);
    w.end();
    w.resizable(&w);
    w.show();
    return Fl::run();
}
You will notice that the window is resizable, but if you try to decrease its size, the group widget will only shrink down to a certain point.
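A slight variant of the same idea (a sketch, not part of the original answer) clamps the requested size before laying the children out, so the group is resized only once per event:

// Sketch: clamp first, then do a single resize, instead of resizing twice.
void resize(int x, int y, int w, int h) override {
    if (w < 300) w = 300;
    if (h < 200) h = 200;
    Fl_Group::resize(x, y, w, h);
}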
I am trying to implement a smooth zoomable audio waveform but am puzzled about the correct approach to implement zoom. I searched the internet but there is very little or no information.
So here is what I have done:
Read audio samples from the file and compute waveform points with samplesPerPixel = 10, 20, 40, 80, ..., 10240. Store the data points for each scale (11 in total here). The max and min are also stored along with the points for each samplesPerPixel.
When zooming, switch to the closest dataset. So if samplesPerPixel at the current width is 70, then use the dataset corresponding to samplesPerPixel = 80. The correct dataset index is easily found using log2(samplesPerPixel) (see the sketch below).
Use subsampling of the dataset to draw the waveform points. So if samplesPerPixel = 41 and we are using the dataset for zoom 80, then we use the scaling factor 80/41 to subsample:
let scaleFactor = 80.0 / 41.0
x = waveformPointX[Int(Double(i) * scaleFactor)]
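For illustration, here is a minimal sketch of that index lookup in C++ (the 10-samples-per-pixel base and the 0..10 index range are taken from the scales listed above):

#include <algorithm>
#include <cmath>

// Hypothetical lookup for datasets stored at samplesPerPixel = 10 * 2^k, k = 0..10.
// Rounds up to the next coarser scale, e.g. samplesPerPixel = 70 -> k = 3 (i.e. 80).
int datasetIndex(double samplesPerPixel) {
    int k = (int)std::ceil(std::log2(samplesPerPixel / 10.0));
    return std::clamp(k, 0, 10);
}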
I have yet to find a better approach and am not too sure if the above approach of subsampling is correct, but this approach certainly consumes a lot of memory and is also slow to load data at the start. How do audio editors implement zooming in waveforms? Is there an efficient approach?
EDIT: Here is the code for computing the mipmaps.
import Foundation
import CoreGraphics

public class WaveformAudioSample {
    var samplesPerPixel: Int = 0
    var totalSamples: Int = 0
    var samples: [CGFloat] = []
    var sampleMax: CGFloat = 0
}

private func downSample(_ waveformSample: WaveformAudioSample, factor: Int) -> WaveformAudioSample {
    NSLog("Averaging samples")
    let downSampledAudioSamples = WaveformAudioSample()
    downSampledAudioSamples.samples = [CGFloat](repeating: 0, count: waveformSample.samples.count / factor)
    downSampledAudioSamples.samplesPerPixel = waveformSample.samplesPerPixel * factor
    downSampledAudioSamples.totalSamples = waveformSample.totalSamples
    for i in 0..<waveformSample.samples.count / factor {
        var total: CGFloat = 0
        for j in 0..<factor {
            total = total + waveformSample.samples[i * factor + j]
        }
        let averagedSample = total / CGFloat(factor)
        downSampledAudioSamples.samples[i] = averagedSample
    }
    NSLog("Averaged samples")
    // return the computed level so the caller can store it
    return downSampledAudioSamples
}
You should use a power-of-2 size for your data
This will allow you to use just cheap bit shifts and simple resizing, without any costly floating point operations or integer multiplication and division.
You should build half-resolution mipmaps from the previous mipmap
This always creates one sample from 2 samples of the previous mipmap, so there are no nested for loops or costly index computations.
Do not mix floating point and integer computations if you can avoid it
Even if you have an FPU, the conversion between int and float is usually very slow. Ideally, keep your audio data in integer format...
Here is a small C++/VCL example of these ideas:
//$$---- Form CPP ----
//---------------------------------------------------------------------------
#include <vcl.h>
#include <math.h>
#pragma hdrstop
#include "win_main.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TForm1 *Form1;
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
int xs,ys; // screen resolution
Graphics::TBitmap *bmp; // back buffer bitmap for rendering
//---------------------------------------------------------------------------
// input data
const int samples=1024;
int sample[samples];
// mipmaps: max 32 resolutions -> 2^32 samples input
int *mmdat0[32]={NULL},  // min
    *mmdat1[32]={NULL},  // max
     mmsiz[32]={0};      // resolution
//---------------------------------------------------------------------------
void generate_input(int *data,int size)
{
    int i; float a,da;
    da=10.0*M_PI/float(size-1);
    for (a=0.0,i=0;i<size;i++,a+=da)
    {
        data[i]=float(100.0*sin(a))+Random(40)-20;
    }
}
//---------------------------------------------------------------------------
void mipmap_free()
{
    // free allocated mipmaps if needed
    if (mmdat0[0]) delete[] mmdat0[0];
    mmdat0[0]=NULL;
    mmdat1[0]=NULL;
    mmsiz[0]=0;
}
//---------------------------------------------------------------------------
void mipmap_compute(int *data,int size)
{
    int i,j,k,n,N,a,a0,a1;
    mipmap_free();
    for (N=0,n=size;n;N+=n,n>>=1);  // compute size of all mipmaps together
    mmdat0[0]=new int[N+N];         // allocate space for all mipmaps as a single 1D array
    mmdat1[0]=mmdat0[0]+N;          // max will be at the other half
    mmsiz [0]=size;
    for (i=1,n=size;n;n>>=1,i++)    // and just set pointers of sub-mipmaps
    {
        mmdat0[i]=mmdat0[i-1]+n;    // to point at the right place
        mmdat1[i]=mmdat1[i-1]+n;    // to point at the right place
        mmsiz [i]=mmsiz [i-1]>>1;   // and set resolution as half
    }
    // copy first mipmap
    n=size;
    for (i=0;i<mmsiz[0];i++)
    {
        a=data[i];
        mmdat0[0][i]=a;
        mmdat1[0][i]=a;
    }
    // process all resolutions
    for (k=1;mmsiz[k];k++)
    {
        // halve resolution
        for (i=0,j=0;i<mmsiz[k];i++)
        {
            a=mmdat0[k-1][j];      a0=a;
            a=mmdat1[k-1][j]; j++; a1=a;
            a=mmdat0[k-1][j];      if (a0>a) a0=a;
            a=mmdat1[k-1][j]; j++; if (a1<a) a1=a;
            mmdat0[k][i]=a0;
            mmdat1[k][i]=a1;
        }
    }
}
//---------------------------------------------------------------------------
void draw() // just the render of my App
{
    bmp->Canvas->Brush->Color=clWhite;
    bmp->Canvas->FillRect(TRect(0,0,xs,ys));
    int ix,x,y,y0=ys>>1;
    // plot input data
    bmp->Canvas->Pen->Color=clBlack;
    x=0; y=y0-sample[x];
    bmp->Canvas->MoveTo(x,y);
    for (x=1;x<xs;x++)
    {
        y=y0-sample[x];
        bmp->Canvas->LineTo(x,y);
    }
    // plot mipmap[ix] data
    ix=1;
    bmp->Canvas->Pen->Color=clBlue;
    x=0; y=y0-sample[x];
    bmp->Canvas->MoveTo(x,y);
    for (x=0;x<mmsiz[ix];x++)
    {
        y=y0-mmdat0[ix][x];
        bmp->Canvas->LineTo(x,y);
        y=y0-mmdat1[ix][x];
        bmp->Canvas->LineTo(x,y);
    }
    Form1->Canvas->Draw(0,0,bmp);
    // bmp->SaveToFile("out.bmp");
}
//---------------------------------------------------------------------------
__fastcall TForm1::TForm1(TComponent* Owner):TForm(Owner) // init of my App
{
    // init backbuffer
    bmp=new Graphics::TBitmap;
    bmp->HandleType=bmDIB;
    bmp->PixelFormat=pf32bit;
    generate_input(sample,samples);
    mipmap_compute(sample,samples);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormDestroy(TObject *Sender) // not important, just the destructor of my App
{
    mipmap_free();
    delete bmp;
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormResize(TObject *Sender) // not important, just the resize event
{
    xs=ClientWidth;
    ys=ClientHeight;
    bmp->Width=xs;
    bmp->Height=ys;
    draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormPaint(TObject *Sender) // not important, just the repaint event
{
    draw();
}
//---------------------------------------------------------------------------
Ignore the window, VCL and rendering related stuff (I just wanted to give you the whole source so you can see how it is used). The only important part is the function mipmap_compute, which converts your input data into 2 mipmaps: one holding the min values and the other the max values.
The dynamic allocations are not important; the only important code chunk is marked with the comment:
// process all resolutions
where for each mipmap there is only a single for loop without any expensive operations. If your platform does better with branchless code, you can compute the min and max using the built-in branchless functions min and max. Something like:
// process all resolutions
for (k=1;mmsiz[k];k++)
{
    // halve resolution
    for (i=0,j=0;i<mmsiz[k];i++)
    {
        a=mmdat0[k-1][j];      a0=a;
        a=mmdat1[k-1][j]; j++; a1=a;
        a=mmdat0[k-1][j];      a0=min(a0,a);
        a=mmdat1[k-1][j]; j++; a1=max(a1,a);
        mmdat0[k][i]=a0;
        mmdat1[k][i]=a1;
    }
}
This can be further optimized simply by using pointers to the currently selected mipmaps, which gets rid of the [k] and [k-1] indexing and saves one memory access per element access.
// process all resolutions
for (k=1;mmsiz[k];k++)
{
    // halve resolution
    int *p0=mmdat0[k-1];
    int *p1=mmdat1[k-1];
    int *q0=mmdat0[k];
    int *q1=mmdat1[k];
    for (i=0,j=0;i<mmsiz[k];i++)
    {
        a=p0[j];      a0=a;
        a=p1[j]; j++; a1=a;
        a=p0[j];      a0=min(a0,a);
        a=p1[j]; j++; a1=max(a1,a);
        q0[i]=a0;
        q1[i]=a1;
    }
}
Now all you need is to bilinearly interpolate between 2 mipmaps to achieve your target resolution. Here is a small example:
// actually rescaled output
int out0[samples]; // min
int out1[samples]; // max
int outs=0;        // size

void resize(int n) // compute out0[n],out1[n] from mipmaps
{
    int i,*p0,*p1,*q0,*q1,pn,qn;
    int pc,qc,pd,qd,pi,qi;
    int a,a0,a1,b0,b1,bm,bd;
    for (i=0;mmsiz[i]>=n;i++);   // find smaller resolution
    pn=mmsiz[i];
    p0=mmdat0[i];
    p1=mmdat1[i]; i--;
    qn=mmsiz[i];                 // bigger or equal resolution
    q0=mmdat0[i];
    q1=mmdat1[i]; outs=n;
    pc=0; pi=0;
    qc=0; qi=0;
    bm=n-pn; bd=qn-pn;
    for (i=0;i<n-1;i++)
    {
        // bilinear interpolation (3x linear)
        a0=q0[qi];
        a1=q0[qi+1];
        b1=a0+(((a1-a0)*qc)/n);
        a0=p0[pi];
        a1=p0[pi+1];
        b0=a0+(((a1-a0)*pc)/n);
        out0[i]=b0+(((b1-b0)*bm)/bd); // /bd might be bitshift right by log2(bd)
        // bilinear interpolation (3x linear)
        a0=q1[qi];
        a1=q1[qi+1];
        b1=a0+(((a1-a0)*qc)/n);
        a0=p1[pi];
        a1=p1[pi+1];
        b0=a0+(((a1-a0)*pc)/n);
        out1[i]=b0+(((b1-b0)*bm)/bd); // /bd might be bitshift right by log2(bd)
        // DDA increment indexes
        pc+=pn; while (pc>=n){ pi++; pc-=n; } // pi = (i*pn)/n
        qc+=qn; while (qc>=n){ qi++; qc-=n; } // qi = (i*qn)/n
    }
    out0[n-1]=q0[pn-1];
    out1[n-1]=q1[pn-1];
}
Beware: the target size n must be less than or equal to the highest mipmap resolution...
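For example, a minimal guard before calling it (a sketch; view_width is a hypothetical name for the widget's pixel width):

// Clamp the requested output size to the finest mipmap resolution.
int n = view_width;
if (n > mmsiz[0]) n = mmsiz[0];
resize(n); // fills out0[0..n-1] and out1[0..n-1]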
This is how it looks (when I change the resolution manually with the mouse wheel):
The choppiness is caused by the GIF grabber... the scaling is fast and seamless in reality.
I had a similar problem, with 1,800,000 points of a waveform to draw on an 800-point screen. The zoom factor was 2000. If anyone is interested, here's how I got awesome results:
Divide the very long list into 400 smaller lists
For each smaller list, calculate the difference between the smallest and largest value in that list.
Plot 2 points per list: one at (offset + delta / 2) and one at (offset - delta / 2).
Results: from 453932 points to 800 points.
Python code:
numberOfSmallerList = 400
small_list_len = int(len(big_list) / numberOfSmallerList)
finalPointsToPlot = []
for i in range(0, len(big_list), small_list_len):
    biggestDiff = max(big_list[i:i+small_list_len]) - min(big_list[i:i+small_list_len])
    finalPointsToPlot.append(biggestDiff/2 + 100)
    finalPointsToPlot.append(100 - biggestDiff/2)

import matplotlib.pyplot as plt
plt.plot(finalPointsToPlot)
plt.show()
I'm trying to create a grid of an image (in the way one would tile a background). Here's what I've been using:
PImage bgtile;
PGraphics bg;
int tilesize = 50;

void setup() {
  int t = millis();
  fullScreen(P2D);
  background(0);
  bgtile = loadImage("bgtile.png");
  int bgw = ceil(((float) width) / tilesize) + 1;
  int bgh = ceil(((float) height) / tilesize) + 1;
  bg = createGraphics(bgw*tilesize, bgh*tilesize);
  bg.beginDraw();
  for (int i = 0; i < bgw; i++) {
    for (int j = 0; j < bgh; j++) {
      bg.image(bgtile, i*tilesize, j*tilesize, tilesize, tilesize);
    }
  }
  bg.endDraw();
  print(millis() - t);
}
The timing code says that this takes about a quarter of a second, but by my count there's a full second after the window opens before anything shows up on screen (which should happen as soon as draw first runs). Is there a faster way to get this same effect? (I want to avoid rendering bgtile hundreds of times in the draw loop, for obvious reasons.)
One way could be to make use of the GPU and let OpenGL repeat a texture for you.
Processing makes it fairly easy to repeat a texture via textureWrap(REPEAT)
Instead of drawing an image, you'd make your own quad shape, and instead of calling vertex(x, y), for example, you'd call vertex(x, y, u, v), passing texture coordinates (there is more low-level info on this at the OpenGL link above). The simple idea is that x, y control the geometry on screen and u, v control how the texture is applied to the geometry.
Another thing you can control is textureMode(), which lets you choose how you specify the texture coordinates (U, V):
IMAGE mode is the default: you use pixel coordinates (based on the dimensions of the texture)
NORMAL mode uses values between 0.0 and 1.0 (also known as normalised values), where 1.0 means the maximum the texture can go (e.g. image width for U or image height for V), so you don't need to know the texture image dimensions
Here's a basic example based on the textureMode() example above:
PImage img;

void setup() {
  fullScreen(P2D);
  noStroke();
  img = loadImage("https://processing.org/examples/moonwalk.jpg");
  // texture mode can be IMAGE (pixel dimensions) or NORMAL (0.0 to 1.0)
  // NORMAL means 1.0 is full width (for U) or height (for V) without having to know the image resolution
  textureMode(NORMAL);
  // this is what will handle tiling for you
  textureWrap(REPEAT);
}

void draw() {
  // drag the mouse on the X axis to change tiling
  int tileRepeats = (int)map(constrain(mouseX, 0, width), 0, width, 1, 100);
  // draw a textured quad
  beginShape(QUAD);
  // set the texture
  texture(img);
  //     x    , y     , U          , V
  vertex(0    , 0     , 0          , 0);
  vertex(width, 0     , tileRepeats, 0);
  vertex(width, height, tileRepeats, tileRepeats);
  vertex(0    , height, 0          , tileRepeats);
  endShape();
  text((int)frameRate + "fps", 15, 15);
}
Drag the mouse on the X axis to control the number of repetitions.
In this simple example both vertex coordinates and texture coordinates are going clockwise (top left, top right, bottom right, bottom left order).
There are probably other ways to achieve the same result: using a PShader comes to mind.
Your approach of caching the tiles in setup() is OK.
Even flattening your nested loop into a single loop would at best shave off a few milliseconds, nothing substantial.
If you cached my snippet above, it would make a minimal difference.
In this particular case, because of the back and forth between Java/OpenGL (via JOGL), as far as I can tell using VisualVM, it looks like there's not a lot of room for improvement, since simply swapping buffers takes so long (e.g. bg.image()).
An easy way to do this would be to use Processing's built-in get(), which returns a PImage of the region you pass it; for example, PImage pic = get(0, 0, width, height); will capture a "screenshot" of your entire window. So you can create the image like you already are, then take a screenshot and display that screenshot.
PImage bgtile;
PGraphics bg;
PImage screenGrab;
int tilesize = 50;

void setup() {
  fullScreen(P2D);
  background(0);
  bgtile = loadImage("bgtile.png");
  int bgw = ceil(((float) width) / tilesize) + 1;
  int bgh = ceil(((float) height) / tilesize) + 1;
  bg = createGraphics(bgw * tilesize, bgh * tilesize);
  bg.beginDraw();
  for (int i = 0; i < bgw; i++) {
    for (int j = 0; j < bgh; j++) {
      bg.image(bgtile, i * tilesize, j * tilesize, tilesize, tilesize);
    }
  }
  bg.endDraw();
  image(bg, 0, 0); // draw the tiles once so the screenshot actually contains them
  screenGrab = get(0, 0, width, height);
}

void draw() {
  image(screenGrab, 0, 0);
}
This will still take a little bit to generate the image, but once it does, there is no need to use the for loops again unless you change the tilesize.
@George Profenza's answer looks more efficient than my solution, but mine may require a little less modification to the code you already have.
In Processing, I keep getting 0 when I multiply width or height by a number. For example:
int x = width*2;
I get x = 0. Why?
When you define a size in Processing, width and height are automatically assigned; for example, after size(500, 500), width and height are both 500.
Note that width and height only have values after you call the size() function. (Or if you don't call the size() function, after setup() is called.)
That can be a problem if you do stuff before calling the size() function, such as at the beginning of your sketch:
//this happens before the size() function is called!
int x = width*2;

void setup() {
  size(500, 500);
}
The solution is to declare sketch-level variables at the top of your sketch, but only initialize them after calling the size() function:
int x;

void setup() {
  size(500, 500);
  x = width*2;
}
This can also be caused by defining your own width and height variables:
//don't do this!
int width;
int height;

void setup() {
  size(500, 500);
  println(width);
}
Don't do this! Just use the width and height variables that Processing has already declared for you.
More generally, as you've seen, questions tend to be closed pretty quickly if people can't reproduce your problem. Although you did include the fact that you're using Processing, you'll have better luck in the future if you include an MCVE that we can run to see the problem ourselves. The code I've posted in this answer is a good example of the type of thing you should include in your questions in the future, that way they don't get closed.
I'd like to write an image from a kernel to a specified place in device memory, which I define by an IntPtr.
Although it's not directly related to this problem, it's the RenderTexture from Unity that I want to change from inside the kernel, in order to pass it to a shader that will visualize my algorithm on the GPU.
So far I tried this:
CLkernel.SetArgument (0, (IntPtr)(width*height*4*sizeof(float)), pointerToRT);
Which threw an InvalidArgumentSize exception, as I cannot specify it as an image. And this:
renderTexture = new ComputeImage2D (CLcontext, ComputeMemoryFlags.WriteOnly | ComputeMemoryFlags.UseHostPointer,
                                    new ComputeImageFormat (ComputeImageChannelOrder.Rgba, ComputeImageChannelType.Float),
                                    width, height, 0, pointerToRT);
CLkernel.SetMemoryArgument (0, renderTexture);
which resulted in an InvalidHostPointer exception, as the pointer points to a place already in device memory.
This is the Kernel code:
kernel void WriteToRenderTexture (
    write_only image2d_t bdsRT,
    global float* terrainHeight)
{
    int width = get_global_size (0);
    int x = get_global_id (0);
    int y = get_global_id (1);
    int pixel = x + y * width;
    int2 coords = { x, y };
    float4 value = { 0, terrainHeight [pixel * 3], 0.5, 0 };
    write_imagef (bdsRT, coords, value);
}
Any ideas how I'm able to do so with Cloo?
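For what it's worth, both errors are consistent with the OpenCL rules: a write_only image2d_t argument must be backed by an OpenCL image object rather than a raw pointer, and UseHostPointer expects host memory, not device memory. The usual route for writing into an existing GL-backed texture is CL/GL interop; here is a rough sketch with the plain C API (a sharing-enabled context, the GL headers, and the texture id glTexId are all assumptions):

#include <CL/cl.h>
#include <CL/cl_gl.h>

// Sketch: wrap an existing OpenGL texture as a CL image. Requires a cl_context
// created with GL sharing enabled; glTexId is the GL name of the render texture.
cl_int err;
cl_mem clImage = clCreateFromGLTexture2D(context, CL_MEM_WRITE_ONLY,
                                         GL_TEXTURE_2D, 0, glTexId, &err);

// CL must acquire the shared object before the kernel writes to it, and release it after.
clEnqueueAcquireGLObjects(queue, 1, &clImage, 0, NULL, NULL);
clSetKernelArg(kernel, 0, sizeof(cl_mem), &clImage);
// ... enqueue the kernel ...
clEnqueueReleaseGLObjects(queue, 1, &clImage, 0, NULL, NULL);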
I'm trying to use the Qt framework (4.7.4) to demonstrate a sliding display in which new pixel data is added to the first row of the screen and the previous pixels are scrolled one pixel down on every refresh.
It is refreshed 20 times per second, and on every refresh random green points (pixels) are drawn on a black background.
The problem is that there is highly noticeable flicker on every refresh. I have researched the web and optimized my code as much as possible. I tried raster rendering with both QPainter (on a QWidget) and QGraphicsScene (on a QGraphicsView), and I even tried OpenGL rendering on a QGLWidget. However, in the end I still have the same flicker problem.
What may cause this flickering? I have begun to suspect that my LCD monitor cannot refresh the display fast enough for black-to-green transitions. I have also noticed that if I select a gray background instead of black, there is no flicker.
The effect you're seeing is purely psychovisual. It's a human defect, not a software defect. I'm serious. You can verify this by fixing the value of x: you'll still be repainting the entire pixmap on the window, and there won't be any flicker, because there is no flicker per se.
The psychovisual flicker occurs when the scroll rate is not tied to the passage of real time. When the time between updates occasionally varies due to CPU load or system timer inaccuracies, our visual system integrates the two images, and it appears as if the overall brightness has changed.
You've correctly noticed that the perceived flicker is reduced as you reduce the contrast ratio of the image by setting the background to grey. This is an additional clue that the effect is psychovisual.
Below is a way of preventing this effect. Notice how the scroll distance is tied to the time (here: 1ms = 1pixel).
#include <QElapsedTimer>
#include <QPaintEvent>
#include <QBasicTimer>
#include <QApplication>
#include <QPainter>
#include <QPixmap>
#include <QWidget>
#include <QDebug>

static inline int rand(int range) { return (double(qrand()) * range) / RAND_MAX; }

class Widget : public QWidget
{
    float fps;
    qint64 lastTime;
    QPixmap pixmap;
    QBasicTimer timer;
    QElapsedTimer elapsed;
    void timerEvent(QTimerEvent * ev) {
        if (ev->timerId() == timer.timerId()) update();
    }
    void paintEvent(QPaintEvent * ev) {
        qint64 time = elapsed.elapsed();
        qint64 delta = time - lastTime;
        lastTime = time;
        if (delta > 0) {
            const float weight(0.05);
            fps = (1.0-weight)*fps + weight*(1E3/delta);
            if (pixmap.size() != size()) {
                pixmap = QPixmap(size());
                pixmap.fill(Qt::black);
            }
            int dy = qMin((int)delta, pixmap.height());
            pixmap.scroll(0, dy, pixmap.rect());
            QPainter pp(&pixmap);
            pp.fillRect(0, 0, pixmap.width(), dy, Qt::black);
            for (int i = 0; i < 30; ++i) {
                int x = rand(pixmap.width());
                pp.fillRect(x, 0, 3, dy, Qt::green);
            }
        }
        QPainter p(this);
        p.drawPixmap(ev->rect(), pixmap, ev->rect());
        p.setPen(Qt::yellow);
        p.fillRect(0, 0, 100, 50, Qt::black);
        p.drawText(rect(), QString("FPS: %1").arg(fps, 0, 'f', 0));
    }
public:
    explicit Widget(QWidget *parent = 0) : QWidget(parent), fps(0), lastTime(0), pixmap(size())
    {
        timer.start(1000/60, this);
        elapsed.start();
        setAttribute(Qt::WA_OpaquePaintEvent);
    }
};

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    Widget w;
    w.show();
    return a.exec();
}
I'd recommend that you do not scroll the pixmap in place, but instead create a second pixmap and use drawPixmap() to copy everything but one line from pixmap 1 to pixmap 2 (with the scroll offset). Then continue painting on pixmap 2. After the frame, exchange the references to the two pixmaps and start over.
The rationale is that copying from one memory area to a different one can be optimized more easily than modifying a memory area in place.
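A minimal sketch of that two-pixmap scheme (front, back and the per-frame dy are placeholder names, assuming the same black background as above):

// Per frame: copy 'front' into 'back' shifted down by dy, paint the fresh
// top rows into 'back', then swap so 'back' becomes the frame to display.
QPixmap front(400, 300), back(400, 300);
int dy = 1; // scroll offset for this frame

QPainter pb(&back);
pb.drawPixmap(0, dy, front, 0, 0, front.width(), front.height() - dy);
pb.fillRect(0, 0, back.width(), dy, Qt::black); // new empty rows at the top
// ... draw the new green points into rows [0, dy) here ...
pb.end();
qSwap(front, back); // 'front' now holds the finished frame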