I am trying to compile this C++ application:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat frame;
    Mat back;
    Mat fore;
    VideoCapture cap1;
    cap1.open(0); /* to capture from camera */
    cv::Ptr<BackgroundSubtractorMOG> pMOG = createBackgroundSubtractorMOG();
    cv::Ptr<BackgroundSubtractorMOG2> pMOG2 = createBackgroundSubtractorMOG2();
    pMOG2->setNMixtures(10);
    vector<vector<Point> > contours;
    namedWindow("Frame");
    int i = 0;
    for (;;)
    {
        cap1 >> frame;
        pMOG2->operator()(frame, fore);
        pMOG2->getBackgroundImage(back);
        erode(fore, fore, cv::Mat());
        erode(fore, fore, cv::Mat());
        dilate(fore, fore, cv::Mat());
        dilate(fore, fore, cv::Mat());
        dilate(fore, fore, cv::Mat());
        findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        drawContours(frame, contours, -1, Scalar(255, 255, 255), 1);
        Scalar color = Scalar(200, 200, 200);
        int a = 0;
        vector<Rect> boundRect(contours.size());
        for (int i = 0; i < contours.size(); i++)
        {
            boundRect[i] = boundingRect(contours[i]);
        }
        for (i = 0; i < contours.size(); i++)
        {
            if (boundRect[i].width >= 40 || boundRect[i].height >= 40) // eliminates small boxes
            {
                a = a + (boundRect[i].height) * (boundRect[i].width);
            }
            // cout << "Net contour area is " << a << "\n";
            if (a >= int(frame.rows) * int(frame.cols) / 2) // change denominator as per convenience
            {
                putText(frame, "Tampering", Point(5, 30), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 255), 2);
                cout << "\a";
            }
        }
        imshow("Frame", frame);
        waitKey(10);
    }
    return 0;
}
I am getting the following error while compiling:
Tampering.cpp:27:21: error: 'class cv::BackgroundSubtractorMOG2' has no member named 'operator()'
    pMOG2->operator()(frame, fore);
My OpenCV version is 3.3.0.
Thanks in advance.
Firstly, thanks for subscribing to my repository: https://github.com/SaranshKejriwal/Tampering-Detection/blob/master/Tampering%20main/Tampering.cpp
In my case I had used OpenCV 2.4.5, which has the operator() method on the BackgroundSubtractorMOG2 class.
Since you are using OpenCV 3.3.0, I encourage you to have a look at the documentation to verify whether this method still exists.
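For reference, in the OpenCV 3.x API the per-frame call was renamed from operator() to apply(), and the plain MOG subtractor moved into the opencv_contrib bgsegm module. A rough sketch of what the 3.3.0-style calls look like (please verify against your own build; this is not the original poster's code):
// OpenCV 3.x: apply() replaces operator(); the learning-rate argument is optional
cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG2 = cv::createBackgroundSubtractorMOG2();
pMOG2->setNMixtures(10);
pMOG2->apply(frame, fore);            // was: pMOG2->operator()(frame, fore);
pMOG2->getBackgroundImage(back);
// The non-MOG2 variant now lives in opencv_contrib:
// cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> pMOG = cv::bgsegm::createBackgroundSubtractorMOG();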
I used DrawIconEx (via GDI/D3D11 interoperability and CopyResource) to generate an ID3D11Texture2D in which many pixels have an alpha channel value of 0. I verified this texture by copying it to a D3D11_USAGE_STAGING resource and Mapping it to inspect the pixel values, and by saving it to a PNG with ScreenGrab (the relevant code needs a small change: for DXGI_FORMAT_B8G8R8A8_UNORM, use GUID_WICPixelFormat32bppBGRA instead of GUID_WICPixelFormat24bppBGR).
When I render the texture using the approach from Tutorial 5: Texturing, the pixels with an alpha value of 0 are rendered as black, which is not what I want; I would like those pixels to come out transparent. What do I need to do to achieve this? Here is my relevant code:
HRESULT CGraphRender::Init()
{
...
// Create an alpha enabled blend state description.
_blend_state = nullptr;
D3D11_BLEND_DESC blendDesc;
ZeroMemory(&blendDesc, sizeof(D3D11_BLEND_DESC));
blendDesc.RenderTarget[0].BlendEnable = TRUE;
blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
hr = _d3d_device->CreateBlendState(&blendDesc, &_blend_state);
RETURN_ON_FAIL(hr);
....
}
HRESULT CGraphRender::Clear_3D(float color[])
{
ID3D11RenderTargetView* rtv[] = { _back_rendertarget_view };
_immediate_context->OMSetRenderTargets(_countof(rtv), rtv, nullptr);
_immediate_context->ClearRenderTargetView(_back_rendertarget_view, color);
float blendFactor[4] = { 1.f, 1.f, 1.f, 1.f };
_immediate_context->OMSetBlendState(_blend_state, blendFactor, 0xffffffff);
return S_OK;
}
The problem has been solved: save the current blend state with OMGetBlendState, set _blend_state with OMSetBlendState before rendering the "alpha" texture, and restore the previous blend state after rendering:
HRESULT CGraphRender::DrawTexture(const std::shared_ptr<CDrawTextureShader>& texture, const RECT& dst_rect, const BOOL& is_blend_alpha)
{
CComPtr<ID3D11DeviceContext> immediate_context;
_d3d_device->GetImmediateContext(&immediate_context);
if (!immediate_context)
{
return E_UNEXPECTED;
}
if (is_blend_alpha)
{
CComPtr<ID3D11BlendState> old_blend_state;
FLOAT old_blend_factor[4] = { 0.f };
UINT old_sample_mask = 0;
immediate_context->OMGetBlendState(&old_blend_state, old_blend_factor, &old_sample_mask);
float blend_factor[4] = { 1.f, 1.f, 1.f, 1.f };
immediate_context->OMSetBlendState(_blend_state, blend_factor, 0xffffffff);
HRESULT hr = texture->Render(immediate_context, dst_rect);
immediate_context->OMSetBlendState(old_blend_state, old_blend_factor, old_sample_mask);
return hr;
}
else
{
return texture->Render(immediate_context, dst_rect);
}
}
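One thing worth double-checking with this setup: SrcBlend = D3D11_BLEND_SRC_ALPHA / DestBlend = D3D11_BLEND_INV_SRC_ALPHA assumes the texture holds straight (non-premultiplied) alpha. If the pixels coming out of the GDI interop path turn out to be premultiplied, the usual colour blend would instead be the following (an alternative sketch, not part of the original post):
// alternative colour blend for premultiplied-alpha content
blendDesc.RenderTarget[0].SrcBlend  = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;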
I can fill a polygon with a solid color using this code:
void FillColorPolygon(POINT pts[],int ilnum,long fillColor)
{
COLORREF fillcol;
fillcol = Gc_disp::ColorSet(fillColor);
HBRUSH hBrushNew = CreateSolidBrush(fillcol);
HBRUSH hBrushOld = (HBRUSH)SelectObject(m_hDC, hBrushNew);
SetPolyFillMode(m_hDC, WINDING);
Polygon(m_hDC, pts, (short)ilnum);
SelectObject(m_hDC, hBrushOld);
DeleteObject(hBrushNew);
}
The ColorSet function returns a 100% opaque color:
COLORREF Gc_disp::ColorSet(long col)
{
COLORREF rcol = RGB(0, 0, 0);
if( col >= 0 && col <= GRIP_MAXCOLORS + 1 )
rcol = g_tblColor[ col ];
return( rcol );
}
But I don't know how to fill with a color at 50% opacity.
Edit:
Following Jonathan's advice, I tried to use the AlphaBlend function.
First I tried drawing a bitmap with transparency, and that seems to work:
void FillColorPolygonAlpha(POINT pts[],int ilnum,long fillColor)
{
BLENDFUNCTION m_bf;
m_bf.BlendOp = AC_SRC_OVER;
m_bf.BlendFlags = 0;
m_bf.SourceConstantAlpha = 0xC8;
m_bf.AlphaFormat = 0;
CBitmap m_bitmap;
CImage image;
image.Load(_T("C:\\Blas_grande.png"));
CBitmap bitmap;
m_bitmap.Attach(image.Detach());
int m_nWidth, m_nHeight;
BITMAP aBmp;
m_bitmap.GetBitmap(&aBmp);
m_nWidth = aBmp.bmWidth ;
m_nHeight = aBmp.bmHeight;
CDC* pDC = CDC::FromHandle( GetDC() );
CDC dcMem;
dcMem.CreateCompatibleDC(pDC);
CBitmap *pOldBitmap = dcMem.SelectObject(&m_bitmap);
AlphaBlend(m_hDC, 0,0, m_nWidth, m_nHeight, dcMem, 0,0,m_nWidth, m_nHeight,m_bf);
dcMem.SelectObject(pOldBitmap);
}
The image was drawn with the expected transparency.
But it doesn't work when I try to fill a polygon.
I modified the FillColorPolygonAlpha() function to fill a polygon instead:
void FillColorPolygonAlpha(POINT pts[],int ilnum,long fillColor)
{
BLENDFUNCTION m_bf;
m_bf.BlendOp = AC_SRC_OVER;
m_bf.BlendFlags = 0;
m_bf.SourceConstantAlpha = 0xC8;
m_bf.AlphaFormat = 0;
CDC* pDC = CDC::FromHandle( GetDC() );
CDC dcMem;
dcMem.CreateCompatibleDC(pDC);
COLORREF fillcol;
fillcol = Gc_disp::ColorSet(fillColor);
HBRUSH hBrushNew = CreateSolidBrush(fillcol);
Polygon(dcMem, pts, (short)ilnum);
AlphaBlend(m_hDC, 0,0, m_nWidth, m_nHeight, dcMem, 0,0,m_nWidth, m_nHeight,m_bf);
}
It doesn't draw any polygon at all.
I solved this problem by using GDI+, because a GDI+ SolidBrush can take an alpha value for transparency.
void FillColorPolygonAlpha(POINT pts[],int ilnum,long fillColor,int alpha)
{
COLORREF rcol;
rcol = Gc_disp::ColorSet(fillColor);
Gdiplus::Graphics gr(m_hDC);
Gdiplus::SolidBrush semiTransBrush(Gdiplus::Color(alpha, GetRValue(rcol), GetGValue(rcol), GetBValue(rcol) ));
CArray<Gdiplus::Point,Gdiplus::Point> arrPoints;
for(int i = 0; i < ilnum; i++) // iterate over the actual point count (sizeof(pts) only yields the pointer size)
{
Gdiplus::Point pt(pts[i].x , pts[i].y);
arrPoints.Add(pt);
}
Gdiplus::Point* pPoints = arrPoints.GetData();
gr.FillPolygon(&semiTransBrush,pPoints,ilnum);
}
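One caveat with the GDI+ route: GDI+ has to be initialised once per process before any Gdiplus::Graphics object is created. If the application does not already do this elsewhere, the required setup looks roughly like this (the placement is an assumption, not part of the original code):
// initialise GDI+ once at application startup, shut it down at exit
ULONG_PTR gdiplusToken;
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
Gdiplus::GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
// ... calls to FillColorPolygonAlpha() and other GDI+ drawing go here ...
Gdiplus::GdiplusShutdown(gdiplusToken);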
I am trying to generate a binary image from the depthMap() function in OpenNI, which provides an array of type int. With that image I want to do blob tracking.
The problem is that I am not able to generate a clean binary image from the depthMap. My understanding is that the depth image is bright for pixels close to the sensor and gets darker the farther away they are. So for every pixel in the (one-dimensional) array I check whether it is above my min threshold and below my max threshold, to select only the range I want to get data from.
Here is my code:
// import library
import SimpleOpenNI.*;
import processing.opengl.*; // opengl
import blobDetection.*; // blobs
// declare SimpleOpenNI object
SimpleOpenNI context;
BlobDetection theBlobDetection;
BlobBall blobBalls;
PrintWriter output;
// threshold for binaryImage
int minThreshold, maxThreshold;
// Size of the kinect Image
int kinectWidth = 640;
int kinectHeight = 480;
//
float globalX, globalY;
// Colors
color bgColor = color(0, 0, 123);
color white = color(255,255,255);
color black = color(0,0,0);
// PImage to hold incoming imagery
int[] distanceArray;
PImage cam, forBlobDetect;
void setup() {
output = createWriter("positions.txt");
// init threshold
minThreshold = 960;
maxThreshold = 2500;
// same as Kinect dimensions
size(kinectWidth, kinectHeight);
background(bgColor);
// initialize SimpleOpenNI object
context = new SimpleOpenNI(this);
if (context.isInit() == false) {
println("Can't init SimpleOpenNI, maybe the camera is not connected!");
exit();
}
else {
// mirror the image to be more intuitive
context.setMirror(true);
context.enableDepth();
// context.enableScene();
distanceArray = context.depthMap();
forBlobDetect = new PImage(width, height);
theBlobDetection = new BlobDetection(forBlobDetect.width, forBlobDetect.height);
theBlobDetection.setThreshold(0.2);
}
}
void draw() {
noStroke();
// update the SimpleOpenNI object
context.update();
// put the image into a PImage
cam = context.depthImage();
// copy the image into the smaller blob image
// forBlobDetect.copy(cam, 0, 0, cam.width, cam.height, 0, 0, forBlobDetect.width, forBlobDetect.height);
// blur the blob image
forBlobDetect.filter(BLUR, 2);
//
int pos = 0;
int currentDepthValue = 0;
distanceArray = context.depthMap();
for(int x = 0; x < cam.width; x++) {
for(int y = 0; y < cam.height; y++) {
pos = y*cam.width+x;
currentDepthValue = distanceArray[pos];
// println(currentDepthValue);
if((currentDepthValue > minThreshold) && (currentDepthValue < maxThreshold)) {
forBlobDetect.pixels[pos] = black;
} else {
forBlobDetect.pixels[pos] = white;
}
}
}
// for(int i=0; i < distanceArray.length; i++) {
// currentDepthValue = distanceArray[i];
// // println(currentDepthValue);
// if(currentDepthValue > minThreshold) /*&& (currentDepthValue < maxThreshold)*/) {
// forBlobDetect.pixels[pos] = white;
// } else {
// forBlobDetect.pixels[pos] = black;
// }
// }
// detect the blobs
theBlobDetection.computeBlobs(forBlobDetect.pixels);
// display the image
image(cam, 0, 0);
image(forBlobDetect, 0, 0, width/2, height/2);
// image(context.sceneImage(), context.depthWidth(), 0);
}
Really stupid mistake on my part; I had misunderstood the 11-bit depth array.
Thanks to the "Making Things See" examples I solved it:
https://github.com/atduskgreg/Making-Things-See-Examples/tree/master/ax02_depth_range_limit
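For anyone running into the same thing, the gist of the depth-range-limit approach is sketched below. It assumes depthMap() returns raw depth values in millimetres and reuses the names from the question; treat it as a sketch of the idea rather than the exact code from the linked example:
// build a binary mask from the raw depth values, then hand it to the blob detector
int[] depthValues = context.depthMap();
forBlobDetect.loadPixels();
for (int i = 0; i < depthValues.length; i++) {
  int d = depthValues[i];
  // white where the depth falls inside the wanted range, black everywhere else
  forBlobDetect.pixels[i] = (d > minThreshold && d < maxThreshold) ? color(255) : color(0);
}
forBlobDetect.updatePixels();
theBlobDetection.computeBlobs(forBlobDetect.pixels);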
I'm having a ton of trouble making a simple video delay in Processing. I looked around on the internet and I keep finding the same bit of code, and I can't get it to work at all. When I first tried it, it did nothing (at all). Here's my modified version, which at least seems to load frames into the buffer. I really have no idea why it doesn't work, and I'm getting really tired of pulling out my hair. Please, for the love of god, somebody point out the stupid mistake I'm making here.
And now, without further delay (hah, get it?), the code:
import processing.video.*;
VideoBuffer vb;
Movie myMovie;
Capture cam;
float seconds = 1;
void setup() {
size(320,240, P3D);
frameRate(30);
String[] cameras = Capture.list();
if (cameras.length == 0) {
println("There are no cameras available for capture.");
exit();
} else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
cam = new Capture(this, cameras[3]);
cam.start();
}
vb = new VideoBuffer(90, width, height);
}
void draw() {
if (cam.available() == true) {
cam.read();
vb.addFrame(cam);
}
image(cam, 0, 0);
image( vb.getFrame(), 150, 0 );
}
class VideoBuffer
{
PImage[] buffer;
int inputFrame = 0;
int outputFrame = 0;
int frameWidth = 0;
int frameHeight = 0;
VideoBuffer( int frames, int vWidth, int vHeight )
{
buffer = new PImage[frames];
for(int i = 0; i < frames; i++)
{
this.buffer[i] = new PImage(vWidth, vHeight);
}
this.inputFrame = 0;
this.outputFrame = 1;
this.frameWidth = vWidth;
this.frameHeight = vHeight;
}
// return the current "playback" frame.
PImage getFrame()
{
return this.buffer[this.outputFrame];
}
// Add a new frame to the buffer.
void addFrame( PImage frame )
{
// copy the new frame into the buffer.
this.buffer[this.inputFrame] = frame;
// advance the input and output indexes
this.inputFrame++;
this.outputFrame++;
println(this.inputFrame + " " + this.outputFrame);
// wrap the values..
if(this.inputFrame >= this.buffer.length)
{
this.inputFrame = 0;
}
if(this.outputFrame >= this.buffer.length)
{
this.outputFrame = 0;
}
}
}
This works in Processing 2.0.1.
import processing.video.*;
Capture cam;
PImage[] buffer;
int w = 640;
int h = 360;
int nFrames = 60;
int iWrite = 0, iRead = 1;
void setup(){
size(w, h);
cam = new Capture(this, w, h);
cam.start();
buffer = new PImage[nFrames];
}
void draw() {
if(cam.available()) {
cam.read();
buffer[iWrite] = cam.get();
if(buffer[iRead] != null){
image(buffer[iRead], 0, 0);
}
iWrite++;
iRead++;
if(iRead >= nFrames-1){
iRead = 0;
}
if(iWrite >= nFrames-1){
iWrite = 0;
}
}
}
There is a problem inside your addFrame method: you store only a reference to the PImage object, whose pixels get overwritten all the time. You have to use buffer[inputFrame] = frame.get() instead of buffer[inputFrame] = frame; the get() method returns a copy of the image.
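In other words, the only change needed in the VideoBuffer class above is inside addFrame(); a sketch of the corrected method:
// Add a new frame to the buffer (corrected): store a copy, not a reference.
void addFrame( PImage frame )
{
  this.buffer[this.inputFrame] = frame.get(); // get() returns a copy of the image
  // advance the input and output indexes and wrap them
  this.inputFrame++;
  this.outputFrame++;
  if (this.inputFrame >= this.buffer.length) this.inputFrame = 0;
  if (this.outputFrame >= this.buffer.length) this.outputFrame = 0;
}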
I can't get around a peculiar problem with SimpleOpenNI for Processing, so I'm asking for your help.
I'd like to store snapshots of pixel depth data (returned by the depthMapRealWorld() method as PVector arrays) at discrete time intervals, and then process them further for a presentation. I tried adding them to an ArrayList, but it seems that depthMapRealWorld() returns only a reference to the current depth data, not a separate array. I tried the following, in this order:
Just getting the data and adding it to an ArrayList. On every call of the update() method the whole ArrayList contained the same PVector array, even when the array at position zero had been added many iterations earlier!
Then I made the PVector array, along with its creation time, part of a class. I rewrote the sketch a little, but it didn't help: all of the arrays in the ArrayList were still the same.
Finally, in the constructor of the class, I "manually" copied the xyz coordinates of every vector from the PVector array into a plain float array. That seemed to solve the problem - the arrays in the ArrayList are now different from each other. But this solution introduced serious performance problems.
The question is: is there a more efficient way of storing these PVector arrays and retaining their value?
code:
import processing.opengl.*;
import SimpleOpenNI.*;
SimpleOpenNI kinect;
float rotation = 0;
int time = 0;
ArrayList dissolver;
ArrayList<Integer> timer;
int pSize = 10;
Past past;
void setup() {
dissolver = new ArrayList();
timer = new ArrayList();
size(1024, 768, OPENGL);
kinect = new SimpleOpenNI(this);
kinect.enableDepth();
translate(width/2, height/2, -100);
rotateX(radians(180));
stroke(255);
}
void draw() {
background(0);
translate(width/2, height/2, 500);
rotateX(radians(180));
kinect.update();
stroke (255, 255, 255);
past = new Past (kinect.depthMapRealWorld(), time);
if (dissolver.size() == pSize) { //remove the oldest arraylist element if when list gets full
dissolver.remove(0); //
}
if (time % 20 == 0) {
dissolver.add (past);
Past p1 = (Past) dissolver.get (0);
float [][] o2 = p1.getVector();
println ("x coord of a random point at arraylist position 0: " + o2[50000][0]); //for testing
}
if (dissolver.size() == pSize-1) {
//dissolve ();
}
time ++;
}
void dissolve () { //from the previous nonworking version; ignore
for (int offset = 0; offset < pSize-1; offset ++) {
PVector[] offPoints = (PVector[]) dissolver.get (offset);
int offTime = timer.get(offset);
for (int i = 0; i < offPoints.length; i+=10) {
int col = (time-offTime)*2; //why??
stroke (255, 0, col);
PVector currentPoint = offPoints[i];
if (currentPoint.z <1500) {
point(currentPoint.x, currentPoint.y, currentPoint.z); // - 2*(time-offTime) + random(0, 100)
}
}
}
}
class Past {
private PVector [] depth; //should contain this, not int
private float [][] depth1;
private int time;
Past (PVector [] now, int t) {
//should be like this: depth = now;
//clumsy and performancewise catastrophic solution below
depth1 = new float [now.length][3];
for (int i = 0; i< now.length; i+=10) {
PVector temp = now[i];
depth1 [i][0] = temp.x;
depth1 [i][1] = temp.y;
depth1 [i][2] = temp.z;
}
//arrayCopy(now, depth); this didn't work either
time = t;
}
float [][] getVector () {
return depth1;
}
int getTime () {
return time;
}
}
If I understood correctly, you want to store the 3D positions (an ArrayList of PVectors) for each frame, right?
If so, you should be able to simply store PVectors and reference them later.
Here's a basic sketch to illustrate this:
import processing.opengl.*;
import SimpleOpenNI.*;
SimpleOpenNI kinect;
ArrayList<ArrayList<PVector>> frames = new ArrayList<ArrayList<PVector>>();
ArrayList<PVector> frame;
boolean isRecording = true;
boolean isRecFrame;
void setup() {
size(1024, 768, OPENGL);
kinect = new SimpleOpenNI(this);
kinect.enableDepth();
stroke(255);
}
void draw() {
background(0);
translate(width/2, height/2, 500);
rotateX(PI);
translate(0,0,-1000);
kinect.update();
if(isRecording){
isRecFrame = (frameCount % 20 == 0);//record every 20 frames
int[] depthMap = kinect.depthMap();
int steps = 5; // to speed up the drawing, draw every N point
int index;
PVector realWorldPoint;
if(isRecFrame) frame = new ArrayList<PVector>();
for(int y=0;y < kinect.depthHeight();y+=steps)
{
for(int x=0;x < kinect.depthWidth();x+=steps)
{
index = x + y * kinect.depthWidth();
if(depthMap[index] > 0)
{
realWorldPoint = kinect.depthMapRealWorld()[index];
point(realWorldPoint.x,realWorldPoint.y,realWorldPoint.z);
if(isRecFrame) frame.add(realWorldPoint.get());
}
}
}
if(isRecFrame) frames.add(frame);
}else{//playback
ArrayList<PVector> currentFrame = frames.get(frameCount%frames.size());//playback is faster than recording now for testing purposes - add a decent frame counter here at some point
for(PVector p : currentFrame) point(p.x,p.y,p.z);
}
}
void keyPressed(){
if(key == ' ') isRecording = !isRecording;
}
Use the SPACE key to toggle between recording and playback.
The main thing to note is that I'm storing a copy of the real-world position for each depth pixel (frame.add(realWorldPoint.get());). Another thing to keep in mind is that you're currently holding all of these coordinates in memory, which will fill up at some point. If you only store a limited number of frames that should be fine; if not, you might want to save the points to disk. That way you can reuse recordings with other sketches. A basic way would be to store them in a CSV file:
void saveCSV(ArrayList<PVector> pts){
String csv = "x,y,z\n";
for(PVector p : pts) csv += p.x + "," + p.y + "," + p.z + "\n";
saveStrings("frame_"+frameCount+".csv",csv.split("\n"));
}
Another option would be to use a format better suited to point clouds, like PLY.
Saving an ASCII PLY is fairly straightforward:
void savePLY(ArrayList<PVector> pts){
String ply = "ply\n";
ply += "format ascii 1.0\n";
ply += "element vertex " + pts.size() + "\n";
ply += "property float x\n";
ply += "property float y\n";
ply += "property float z\n";
ply += "end_header\n";
for(PVector p : pts)ply += p.x + " " + p.y + " " + p.z + "\n";
saveStrings("frame_"+frameCount+".ply",ply.split("\n"));
}
You can later open/explore/process these files with tools like MeshLab.
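As a usage sketch, the keyPressed() handler from the recording sketch above could be extended to dump the most recently recorded frame to disk (the 's' key binding is just an assumption for illustration):
void keyPressed(){
  if(key == ' ') isRecording = !isRecording;
  // hypothetical extra binding: press 's' to save the last recorded frame
  if(key == 's' && frames.size() > 0){
    ArrayList<PVector> latest = frames.get(frames.size()-1);
    saveCSV(latest);   // or savePLY(latest) for a point-cloud format
  }
}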