How to create a masked image in OpenCV

Here is my code (working after taking input from zindarod):
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"

using namespace cv;

static void help()
{
    printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
           "Using the SIFT descriptor:\n"
           "\n"
           "Usage:\n matcher_simple <image1> <image2>\n");
}

int main(int argc, char** argv)
{
    if (argc != 3)
    {
        help();
        return -1;
    }

    Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
    if (img1.empty() || img2.empty())
    {
        printf("Can't read one of the images\n");
        return -1;
    }

    // Regions of interest in each image (regiontwo is not used yet)
    Rect regionone(151, 115, 42, 27);
    Rect regiontwo(141, 105, 52, 37);

    // Build a mask that is zero everywhere except the region of interest,
    // then copy only that region of img1 into dst
    Mat dst, mask = Mat::zeros(img1.size(), CV_8UC1);
    mask(regionone) = 255; // any non-zero value marks pixels to copy
    img1.copyTo(dst, mask);

    // detecting keypoints
    SiftFeatureDetector detector(400);
    vector<KeyPoint> keypoints1, keypoints2;
    detector.detect(dst, keypoints1);
    detector.detect(img2, keypoints2);

    // computing descriptors
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(dst, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);

    // matching descriptors
    BFMatcher matcher(NORM_L2);
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    // drawing the results
    namedWindow("matches", 1);
    Mat img_matches;
    drawMatches(dst, keypoints1, img2, keypoints2, matches, img_matches);
    imshow("masked image", dst);
    //imshow("matches", img_matches);
    waitKey(0);
    return 0;
}
My aim is to compare two different parts of two different images.
You can run the above code with:
g++ above_code.cpp -o bincode -I /usr/include/ `pkg-config --libs --cflags opencv`
./bincode image1.png image2.png
It seems that I was passing a cropped rectangular region to the keypoint detector; as a result, keypoints1 were saved with coordinates relative to (151, 115).
So I should pass a masked image to the keypoint detector instead.
How can I create a matrix filled with zeros (or 255), but with the rectangular region at (151, 115) copied from img1?
Thanks.

The following copies the source image to the destination image based on a mask:
Mat src = imread("source.jpg", -1), dst, mask;
Rect rect(151, 115, 42, 27);
mask = Mat::zeros(src.size(), CV_8UC1);
rectangle(mask, Point(rect.x, rect.y), Point(rect.x + rect.width, rect.y + rect.height), Scalar(255), -1);
src.copyTo(dst, mask);
Although there's a better way for your problem, you can translate your keypoints back to the coordinates of the original image.
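For example, a minimal sketch of that translation (assuming the keypoints were detected on a crop that starts at rect.x, rect.y):
for (size_t i = 0; i < keypoints1.size(); i++)
{
    // shift each keypoint from crop coordinates back to full-image coordinates
    keypoints1[i].pt.x += rect.x;
    keypoints1[i].pt.y += rect.y;
}
Note also that FeatureDetector::detect() accepts an optional mask argument, so with a mask like the one above you can skip the copy entirely: detector.detect(img1, keypoints1, mask);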

Related

How to deblur an image using the Fourier transform in OpenCV or Emgu CV?

I saw this video about deblurring images using the Fourier transform in MATLAB:
video
and I want to convert the code to Emgu CV.
My code in Emgu CV:
string path = Environment.GetFolderPath(Environment.SpecialFolder.Desktop);
Image<Bgr, byte> img = new Image<Bgr, byte>(@"lal.png");
//blur the image
Image<Gray, byte> gray = img.Convert<Gray, byte>().SmoothBlur(31, 31);
//convert image to float and get the fourier transform
Mat g_fl = gray.Convert<Gray, float>().Mat;
Matrix<float> dft_image = new Matrix<float>(g_fl.Size);
CvInvoke.Dft(g_fl, dft_image, Emgu.CV.CvEnum.DxtType.Forward, 0);
//here i make an image of the kernel with the size of the original
Image<Gray, float> ker = new Image<Gray, float>(img.Size);
ker.SetZero();
for (int x = 0; x < 31; x++)
{
    for (int y = 0; y < 31; y++)
    {
        //31 * 31 = 961
        ker[y, x] = new Gray(1/961);
    }
}
//get the fourier of the kernel
Matrix<float> dft_blur = new Matrix<float>(g_fl.Size);
CvInvoke.Dft(ker, dft_blur, Emgu.CV.CvEnum.DxtType.Forward, 0);
// fourier image / fourier blur
Matrix<float> res = new Matrix<float>(g_fl.Size);
for (int x = 0; x < g_fl.Cols; x++)
{
    for (int y = 0; y < g_fl.Rows; y++)
    {
        res[y, x] = dft_image[y, x] / dft_blur[y, x];
    }
}
//get the inverse of fourier
Image<Gray, float> ready = new Image<Gray, float>(g_fl.Size);
CvInvoke.Dft(res, ready, Emgu.CV.CvEnum.DxtType.Inverse, 0);
CvInvoke.Imshow("deblur", ready.Convert<Gray, byte>());
CvInvoke.Imshow("original", gray);
CvInvoke.WaitKey(0);
But the result is black and not working. Where is the mistake in my code?
If you have code in OpenCV Python, you can post it :)
Thanks :)
My old implementation of the Wiener filter is below.
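For reference, up to the small stabilizing offsets, the inner loop computes the standard Wiener deconvolution estimate

$$\hat{F}(u,v) = \frac{H^*(u,v)}{|H(u,v)|^2 + k}\,G(u,v)$$

where $G$ is the spectrum of the blurred image, $H$ is the spectrum of the blur kernel, and $k$ is the regularization constant passed to wienerFilter: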
#include "stdafx.h"
#pragma once
#pragma comment(lib, "opencv_legacy220.lib")
#pragma comment(lib, "opencv_core220.lib")
#pragma comment(lib, "opencv_highgui220.lib")
#pragma comment(lib, "opencv_imgproc220.lib")
#include "c:\Users\Andrey\Documents\opencv\include\opencv\cv.h"
#include "c:\Users\Andrey\Documents\opencv\include\opencv\cxcore.h"
#include "c:\Users\Andrey\Documents\opencv\include\opencv\highgui.h"
#include <string>
#include <iostream>
#include <complex>
using namespace std;
using namespace cv;
//----------------------------------------------------------
// Compute real and imaginary parts of the FFT for a given image
//----------------------------------------------------------
void ForwardFFT(Mat &Src, Mat *FImg)
{
    int M = getOptimalDFTSize(Src.rows);
    int N = getOptimalDFTSize(Src.cols);
    Mat padded;
    copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));
    // Create a complex representation of our image:
    // planes[0] real part, planes[1] imaginary part (zeros)
    Mat planes[] = {Mat_<double>(padded), Mat::zeros(padded.size(), CV_64F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    // As a result, we again have Re and Im planes
    split(complexImg, planes);
    // Crop the spectrum if it has an odd number of rows or columns
    planes[0] = planes[0](Rect(0, 0, planes[0].cols & -2, planes[0].rows & -2));
    planes[1] = planes[1](Rect(0, 0, planes[1].cols & -2, planes[1].rows & -2));
    FImg[0] = planes[0].clone();
    FImg[1] = planes[1].clone();
}
//----------------------------------------------------------
// Restore the image from its spectrum
//----------------------------------------------------------
void InverseFFT(Mat *FImg, Mat &Dst)
{
    Mat complexImg;
    merge(FImg, 2, complexImg);
    // Apply inverse FFT
    idft(complexImg, complexImg);
    split(complexImg, FImg);
    Dst = FImg[0];
}
//----------------------------------------------------------
// Wiener filter
//----------------------------------------------------------
void wienerFilter(Mat &src, Mat &dst, Mat &_h, double k)
{
    //---------------------------------------------------
    // small number for numeric stability
    //---------------------------------------------------
    const double eps = 1E-8;
    //---------------------------------------------------
    int ImgW = src.size().width;
    int ImgH = src.size().height;
    //--------------------------------------------------
    // Spectrum of the blurred image
    Mat Yf[2];
    ForwardFFT(src, Yf);
    //--------------------------------------------------
    // Pad the kernel to the image size and take its spectrum
    Mat h;
    h.create(ImgH, ImgW, CV_64F);
    h = 0;
    _h.copyTo(h(Rect(0, 0, _h.size().width, _h.size().height)));
    Mat Hf[2];
    ForwardFFT(h, Hf);
    //--------------------------------------------------
    Mat Fu[2];
    Fu[0].create(ImgH, ImgW, CV_64F);
    Fu[1].create(ImgH, ImgW, CV_64F);
    complex<double> a;
    complex<double> b;
    complex<double> c;
    double Hf_Re;
    double Hf_Im;
    double Phf; // power spectrum of the kernel
    double hfz;
    double hz;
    double A;
    for (int i = 0; i < Hf[0].size().height; i++)
    {
        for (int j = 0; j < Hf[0].size().width; j++)
        {
            Hf_Re = Hf[0].at<double>(i, j);
            Hf_Im = Hf[1].at<double>(i, j);
            Phf = Hf_Re * Hf_Re + Hf_Im * Hf_Im;
            hfz = (Phf < eps) * eps;
            hz = (h.at<double>(i, j) > 0);
            A = Phf / (Phf + hz + k);
            a = complex<double>(Yf[0].at<double>(i, j), Yf[1].at<double>(i, j));
            b = complex<double>(Hf_Re + hfz, Hf_Im + hfz);
            c = a / b; // deconvolution
            // the factor A and the hfz offset are there to avoid division by zero
            Fu[0].at<double>(i, j) = (c.real() * A);
            Fu[1].at<double>(i, j) = (c.imag() * A);
        }
    }
    //--------------------------------------------------
    Fu[0] /= (ImgW * ImgH);
    Fu[1] /= (ImgW * ImgH);
    //--------------------------------------------------
    InverseFFT(Fu, dst);
    // clamp out-of-range values
    for (int i = 0; i < Hf[0].size().height; i++)
    {
        for (int j = 0; j < Hf[0].size().width; j++)
        {
            if (dst.at<double>(i, j) > 215) { dst.at<double>(i, j) = 215; }
            if (dst.at<double>(i, j) < -40) { dst.at<double>(i, j) = -40; }
        }
    }
}
//----------------------------------------------------------
// Main
//----------------------------------------------------------
int _tmain(int argc, _TCHAR* argv[])
{
    // Input image
    Mat img;
    // Load it from disk (as grayscale)
    img = imread("data/motion_fuzzy_lena.bmp", 0);
    //---------------------------------------------
    imshow("Src image", img);
    // Image size
    int ImgW = img.size().width;
    int ImgH = img.size().height;
    // Deconvolution kernel (the coefficients must sum to 1);
    // the image was blurred using the same kernel
    Mat h;
    h.create(1, 10, CV_64F);
    h = 1 / double(h.size().width * h.size().height);
    // Apply the filter
    wienerFilter(img, img, h, 0.05);
    normalize(img, img, 0, 1, CV_MINMAX);
    imshow("Result image", img);
    cvWaitKey(0);
    return 0;
}
The result:

Putting an image into a window in X11

I have a QR code in .JPG format, which I load using OpenCV 3.4.4. I create a new X11 window using XCreateSimpleWindow(), then resize the QR image to the size of that window.
Next, I want to put this resized QR code into the window. I tried using XPutImage(), but without any success, probably because I don't understand its usage.
For using XPutImage(), I first took the image of the X11 window using XGetImage(), then obtained the pixel values of the QR image and assigned them to the pixel values of the image obtained through XGetImage().
Once I had this XImage, I tried putting it to the window using XPutImage(). But it is still showing a black window.
There is no error in the terminal, but the result is not as desired.
Any solution to this problem? That is, how do I change the background of an X11 window to a sample image using XPutImage()?
The code goes like this...
// Written by Ch. Tronche (http://tronche.lri.fr:8000/)
// Copyright by the author. This is unmaintained, no-warranty free software.
// Please use freely. It is appreciated (but by no means mandatory) to
// acknowledge the author's contribution. Thank you.
// Started on Thu Jun 26 23:29:03 1997
//
// Xlib tutorial: 2nd program
// Make a window appear on the screen and draw a line inside.
// If you don't understand this program, go to
// http://tronche.lri.fr:8000/gui/x/xlib-tutorial/2nd-program-anatomy.html
//
// compilation:
// g++ -o go qrinX11.cpp `pkg-config --cflags --libs opencv` -lX11
//
#include <opencv2/opencv.hpp> // OpenCV (pulls in core, imgproc, highgui, videoio, ...)
#include <bits/stdc++.h>
#include <X11/Xlib.h> // Every Xlib program must include this
#include <X11/Xutil.h>
#include <X11/keysym.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xcomposite.h>
#include <X11/extensions/Xfixes.h>
#include <X11/extensions/shape.h>
#include <assert.h> // I include this to test return values the lazy way
#include <unistd.h> // So we got the profile for 10 seconds

#define NIL (0) // A name for the void pointer

using namespace cv;
using namespace std;
int main()
{
    Display *dpy = XOpenDisplay(NIL);
    //assert(dpy);
    // Get some colors
    int blackColor = BlackPixel(dpy, DefaultScreen(dpy));
    int whiteColor = WhitePixel(dpy, DefaultScreen(dpy));
    // Create the window
    Window w = XCreateSimpleWindow(dpy, DefaultRootWindow(dpy), 0, 0,
                                   200, 100, 0, whiteColor, blackColor);
    // We want to get MapNotify events
    XSelectInput(dpy, w, StructureNotifyMask);
    XMapWindow(dpy, w);
    // Wait for the MapNotify event
    for (;;) {
        XEvent e;
        XNextEvent(dpy, &e);
        if (e.type == MapNotify)
            break;
    }
    XWindowAttributes gwa;
    XGetWindowAttributes(dpy, w, &gwa);
    int wd1 = gwa.width;
    int ht1 = gwa.height;
    XImage *image = XGetImage(dpy, w, 0, 0, wd1, ht1, AllPlanes, ZPixmap);
    unsigned long rm = image->red_mask;
    unsigned long gm = image->green_mask;
    unsigned long bm = image->blue_mask;
    Mat img(ht1, wd1, CV_8UC3); // OpenCV Mat object is initialized
    Mat scrap = imread("qr.jpg");
    resize(scrap, img, img.size(), 0, 0, INTER_AREA);
    for (int x = 0; x < wd1; x++)
        for (int y = 0; y < ht1; y++)
        {
            unsigned long pixel = XGetPixel(image, x, y);
            // Apply the red/green/blue masks to obtain the individual channel values
            unsigned char blue = pixel & bm;
            unsigned char green = (pixel & gm) >> 8;
            unsigned char red = (pixel & rm) >> 16;
            Vec3b color = img.at<Vec3b>(Point(x, y)); // RGB values from the OpenCV image
            //color[0] = blue;
            //color[1] = green;
            //color[2] = red;
            //img.at<Vec3b>(Point(x,y)) = color;
            pixel = color[0]; // only modifies the local copy; the XImage is never updated
        }
    namedWindow("QR", CV_WINDOW_NORMAL);
    imshow("QR", img);
    GC gc = XCreateGC(dpy, w, 0, NIL);
    // Note: the arguments are (src_x, src_y, dest_x, dest_y, width, height),
    // so wd1, ht1 as dest_x, dest_y places the image past the bottom-right corner
    XPutImage(dpy, w, gc, image, 0, 0, wd1, ht1, wd1, ht1);
    waitKey(0);
    //sleep(3);
    return 0;
}
Alright, solved it on my own. There was a silly mistake: I was changing the pixel value but never writing it back into the XImage before putting it to the background of the window.
First use XPutPixel(), then use XPutImage().
Here is the final, correct method:
// compilation:
// g++ -o go qrinX11.cpp `pkg-config --cflags --libs opencv` -lX11
//
#include <opencv2/opencv.hpp> // OpenCV (pulls in core, imgproc, highgui, videoio, ...)
#include <bits/stdc++.h>
#include <X11/Xlib.h> // Every Xlib program must include this
#include <X11/Xutil.h>
#include <X11/keysym.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xcomposite.h>
#include <X11/extensions/Xfixes.h>
#include <X11/extensions/shape.h>
#include <assert.h>
#include <unistd.h>

#define NIL (0) // A name for the void pointer

using namespace cv;
using namespace std;
int main()
{
    Display *dpy = XOpenDisplay(NIL);
    // Get some colors
    int blackColor = BlackPixel(dpy, DefaultScreen(dpy));
    int whiteColor = WhitePixel(dpy, DefaultScreen(dpy));
    // Create the window
    Window w = XCreateSimpleWindow(dpy, DefaultRootWindow(dpy), 0, 0,
                                   200, 100, 0, whiteColor, blackColor);
    // We want to get MapNotify events
    XSelectInput(dpy, w, StructureNotifyMask);
    XMapWindow(dpy, w);
    // Wait for the MapNotify event
    for (;;) {
        XEvent e;
        XNextEvent(dpy, &e);
        if (e.type == MapNotify)
            break;
    }
    XWindowAttributes gwa;
    XGetWindowAttributes(dpy, w, &gwa);
    int wd1 = gwa.width;
    int ht1 = gwa.height;
    XImage *image = XGetImage(dpy, w, 0, 0, wd1, ht1, AllPlanes, ZPixmap);
    Mat img(ht1, wd1, CV_8UC3); // OpenCV Mat object is initialized
    Mat scrap = imread("qr.jpg");
    resize(scrap, img, img.size(), 0, 0, INTER_AREA);
    for (int x = 0; x < wd1; x++)
        for (int y = 0; y < ht1; y++)
        {
            unsigned long pixel = XGetPixel(image, x, y);
            Vec3b color = img.at<Vec3b>(Point(x, y));
            // Pack the BGR channels into the X11 pixel layout and write it back
            pixel = 65536 * color[2] + 256 * color[1] + color[0];
            XPutPixel(image, x, y, pixel);
        }
    namedWindow("QR", CV_WINDOW_NORMAL);
    imshow("QR", img);
    GC gc = XCreateGC(dpy, w, 0, NIL);
    // This time the image is placed at the window's origin (0, 0)
    XPutImage(dpy, w, gc, image, 0, 0, 0, 0, wd1, ht1);
    waitKey(0);
    return 0;
}
Simplicity is key and, in this case, improves performance:
//..
// Mat img(ht1, wd1, CV_8UC3); // OpenCV Mat object is initialized
cv::Mat img(ht1, wd1, CV_8UC4, image->data); // initialize with the existing XImage memory
Mat scrap = imread("qr.jpg");
cv::cvtColor(scrap, scrap, cv::COLOR_BGR2BGRA); // match the 4-channel pixel layout
resize(scrap, img, img.size(), 0, 0, cv::INTER_AREA);
// .. and we can skip the for loops
namedWindow("QR", CV_WINDOW_NORMAL);
imshow("QR", img);
// .. etc

Using a Gabor filter on an image

I'm looking to write some code in OpenCV/MATLAB that applies a Gabor filter to images in order to spot interesting image regions. I've read quite a lot of literature and seen some of the previous MATLAB/OpenCV code, but I'd like to attempt it all myself to make sure I fully understand.
I have the equation for the Gabor function and an image. I am unsure of the steps I should take in my algorithm. The general idea I got was to take the discrete Fourier transform of the image, multiply (convolve?) it with the Gabor function, then take the inverse Fourier transform of the result. Any pointers appreciated. Thanks!
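For reference, the kernel built in the code below is the real-valued Gabor function

$$g(x, y) = \exp\!\left(-\frac{x'^2 + y'^2}{2\sigma^2}\right)\cos\!\left(2\pi\frac{x'}{\lambda} + \psi\right),\qquad x' = x\cos\theta + y\sin\theta,\quad y' = -x\sin\theta + y\cos\theta$$

with orientation $\theta$, phase offset $\psi$, wavelength $\lambda$ (lm in the code), and envelope width $\sigma$.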
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <math.h>

using namespace cv;

int main(int argc, char** argv)
{
    int ks = 47;                  // kernel size
    int hks = (ks - 1) / 2;       // half kernel size
    int kernel_size = 21;
    double sig = 7;               // sigma
    double th = 200;              // theta, in degrees
    double ps = 90;               // psi, in degrees
    double lm = 0.5 + ps / 100.0; // lambda (wavelength)
    double theta = th * CV_PI / 180;
    double psi = ps * CV_PI / 180;
    double del = 2.0 / (ks - 1);
    double sigma = sig / ks;
    double x_theta;
    double y_theta;
    Mat image = imread("C:\\users\\michael\\desktop\\tile1.tif", 1), dest, src, src_f;
    if (image.empty())
    {
        return -1;
    }
    imshow("Src", image);
    cvtColor(image, src, CV_BGR2GRAY);
    src.convertTo(src_f, CV_32F, 1.0 / 255, 0);
    if (ks % 2 == 0) // the kernel size must be odd
    {
        ks += 1;
    }
    Mat kernel(ks, ks, CV_32F);
    for (int y = -hks; y <= hks; y++)
    {
        for (int x = -hks; x <= hks; x++)
        {
            // rotate the coordinates by theta, then evaluate the Gabor function
            x_theta = x * del * cos(theta) + y * del * sin(theta);
            y_theta = -x * del * sin(theta) + y * del * cos(theta);
            kernel.at<float>(hks + y, hks + x) =
                (float)exp(-0.5 * (pow(x_theta, 2) + pow(y_theta, 2)) / pow(sigma, 2))
                * cos(2 * CV_PI * x_theta / lm + psi);
        }
    }
    filter2D(src_f, dest, CV_32F, kernel);
    imshow("Gabor", dest);
    // Scale the kernel up for visualization
    Mat Lkernel(kernel_size * 20, kernel_size * 20, CV_32F);
    resize(kernel, Lkernel, Lkernel.size());
    Lkernel /= 2.;
    Lkernel += 0.5;
    imshow("Kernel", Lkernel);
    Mat mag;
    pow(dest, 2.0, mag);
    imshow("Mag", mag);
    waitKey(0);
    return 0;
}
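If you want to try the frequency-domain route described in the question, here is a minimal sketch (adapted from OpenCV's DFT-based convolution example, reusing src_f and kernel from the code above). Multiplying the two spectra pointwise and inverse-transforming is equivalent to the spatial convolution that filter2D performs, up to border handling:
Mat paddedImg, paddedKer, A, B, spec, result;
// pad both arrays to a common, DFT-friendly size
int M = getOptimalDFTSize(src_f.rows + kernel.rows - 1);
int N = getOptimalDFTSize(src_f.cols + kernel.cols - 1);
copyMakeBorder(src_f, paddedImg, 0, M - src_f.rows, 0, N - src_f.cols, BORDER_CONSTANT, Scalar::all(0));
copyMakeBorder(kernel, paddedKer, 0, M - kernel.rows, 0, N - kernel.cols, BORDER_CONSTANT, Scalar::all(0));
dft(paddedImg, A, 0, src_f.rows);   // forward DFT of the image
dft(paddedKer, B, 0, kernel.rows);  // forward DFT of the kernel
mulSpectrums(A, B, spec, 0);        // pointwise spectrum product
dft(spec, result, DFT_INVERSE + DFT_SCALE, src_f.rows + kernel.rows - 1);
// crop to the original size; note the result is shifted by the kernel's
// half-size relative to filter2D, which centers the kernel
result = result(Rect(0, 0, src_f.cols, src_f.rows));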

accumulateWeighted throws a cv::Exception error

I am new to OpenCV and trying to find contours and draw rectangles around them. Here's my code, but it throws a cv::Exception when it comes to accumulateWeighted().
I tried converting both src (the original image) and dst (the background) to CV_32FC3 and then finding the average using accumulateWeighted().
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
    cout << "\nThis is a Example to implement CAMSHIFT to detect multiple motion objects.\n";
}

Rect rect;
VideoCapture capture;
Mat currentFrame, currentFrame_grey, differenceImg, oldFrame_grey, background;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
bool first = true;

int main(int argc, char* argv[])
{
    //Create a new movie capture object.
    capture.open(0);
    if (!capture.isOpened())
    {
        //error in opening the video input
        cerr << "Unable to open video file: " /*<< videoFilename*/ << endl;
        exit(EXIT_FAILURE);
    }
    //capture current frame from webcam
    capture >> currentFrame;
    //Size of the image.
    CvSize imgSize;
    imgSize.width = currentFrame.size().width;
    imgSize.height = currentFrame.size().height;
    //Images to use in the program.
    currentFrame_grey.create(imgSize, IPL_DEPTH_8U);
    while (1)
    {
        capture >> currentFrame;
        //Convert the image to grayscale.
        cvtColor(currentFrame, currentFrame_grey, CV_RGB2GRAY);
        currentFrame.convertTo(currentFrame, CV_32FC3);
        background = Mat::zeros(currentFrame.size(), CV_32FC3);
        accumulateWeighted(currentFrame, background, 1.0, NULL); // <-- this call throws the cv::Exception
        imshow("Background", background);
        if (first) //Capturing Background for the first time
        {
            differenceImg = currentFrame_grey.clone();
            oldFrame_grey = currentFrame_grey.clone();
            convertScaleAbs(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
            first = false;
            continue;
        }
        //Subtract the current frame from the moving average.
        absdiff(oldFrame_grey, currentFrame_grey, differenceImg);
        //blurring the difference image
        blur(differenceImg, differenceImg, imgSize);
        //apply threshold to discard small unwanted movements
        threshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
        //find contours
        findContours(differenceImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        //draw bounding box around each contour
        for (int i = 0; i < contours.size(); i++)
        {
            rect = boundingRect(contours[i]); //extract bounding box for current contour
            //drawing rectangle
            rectangle(currentFrame, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), cvScalar(0, 0, 255, 0), 2, 8, 0);
        }
        //New Background
        convertScaleAbs(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
        //display colour image with bounding box
        imshow("Output Image", currentFrame);
        //display threshold image
        imshow("Difference image", differenceImg);
        //clear contours
        contours.clear();
        //press Esc to exit
        char c = cvWaitKey(33);
        if (c == 27) break;
    }
    // Destroy All Windows.
    destroyAllWindows();
    return 0;
}
Please help me solve this.

You might want to RTFM before asking here. You missed the alpha param as well as the dst Mat in your call to addWeighted:
Mat dst;
addWeighted(currentFrame, 0.5, background, 0.5, 0, dst);
Also, no idea what the whole thing should achieve; accumulating the current frame before diffing it does not make any sense to me.
If you planned to do background separation, throw it all away and use one of the built-in background subtractors instead.
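For example, a minimal sketch of that suggestion with the OpenCV 2.x API (assuming a webcam at index 0; BackgroundSubtractorMOG2 is the built-in Gaussian-mixture subtractor):
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main()
{
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;
    BackgroundSubtractorMOG2 bg; // learns the background model online
    Mat frame, fgmask;
    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;
        bg(frame, fgmask); // update the model and get the foreground mask
        imshow("foreground mask", fgmask);
        if (waitKey(30) == 27) break; // Esc to exit
    }
    return 0;
}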

Creating a simple black image with OpenCV using cvCreateImage

Very basic question coming from a newbie in OpenCV. I just want to create an image with every pixel set to 0 (black). I have used the following code in the main() function:
IplImage* imgScribble = cvCreateImage(cvSize(320, 240), 8, 3);
But what I get is a solid gray image instead of a black one.
Thanks in advance!
What version of OpenCV are you using?
For Mat:
#include <opencv2/opencv.hpp>
cv::Mat image(320, 240, CV_8UC3, cv::Scalar(0, 0, 0)); // note: this constructor takes (rows, cols)
I can suggest two more alternatives:
IplImage* imgScribble = cvCreateImage(cvSize(320, 240), 8, 3);
// Here you can set any color
cvSet(imgScribble, cvScalar(0, 0, 0));
// Here only black
cvZero(imgScribble);
The call to
cvCreateImage(cvSize(320, 240), 8, 3);
creates the image in memory, but I don't think it initializes the data.
You should try this to initialize it:
int step = imgScribble->widthStep;
uchar *data = (uchar *)imgScribble->imageData;
for (int i = 0; i < imgScribble->height; i++)
    for (int j = 0; j < imgScribble->width; j++)
        for (int k = 0; k < 3; k++)
            data[i * step + j * 3 + k] = 0;
(Inspired by this (Example C Program))
For Python:
import numpy as np
X_DIMENSION = 288
Y_DIMENSION = 382
black_image = np.zeros((X_DIMENSION, Y_DIMENSION))
This generates a NumPy array, which is what OpenCV expects for images, and fills it with zeros, which is the color black. This code is for grayscale images; if you want an RGB black image, just add 3 at the end of the tuple to create the third dimension: np.zeros((X_DIMENSION, Y_DIMENSION, 3)).
A black-and-white image means a single-channel image. You can simply create it as follows:
Mat img(500, 1000, CV_8UC1, Scalar(a));
with "a" between 0 and 255.
You can see more examples and details on the following page:
https://progtpoint.blogspot.com/2017/01/tutorial-3-create-image.html
Here is my contribution:
cv::Mat output = cv::Mat::zeros(cv::Size(320, 240), CV_8UC3);
#include "stdafx.h"
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
#include <iostream>
using namespace cv;
using namespace std;
#define LOAD_OPTION CV_LOAD_IMAGE_COLOR
int main( int argc, char** argv )
{
IplImage *image;
image = cvLoadImage("picture.jpg",0); // 0 : BLACK AND WHITE , Without 0 -> Color Picture
cvNamedWindow("Image",CV_WINDOW_AUTOSIZE);
cvShowImage("Image", image);
waitKey(-1);
return 0;
}
