I need to use cvInvert, but I have this problem:
OpenCV Error: Assertion failed (src.type() == dst.type() && src.rows == dst.cols && src.cols == dst.rows) in cvInvert, file /opt/local/var/macports/build/_opt_mports_dports_graphics_opencv/opencv/work/OpenCV-2.4.3/modules/core/src/lapack.cpp, line 1738
libc++abi.dylib: terminate called throwing an exception
This is the code:
#include <iostream>
#include <opencv/cv.h>
#include <stdio.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main(int argc, const char * argv[])
{
    CvCapture* capture = cvCreateCameraCapture(0);
    IplImage* originalImg;
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);
    cvNamedWindow("Imagen");
    while (true) {
        originalImg = cvQueryFrame(capture);
        cvFlip(originalImg, originalImg, 3);
        IplImage* Gray = cvCreateImage(cvGetSize(originalImg), IPL_DEPTH_8U, 1);
        cvCvtColor(originalImg, Gray, CV_RGB2GRAY);
        CvMat* Mat_tipo = cvCreateMat(originalImg->height, originalImg->width, CV_32F);
        CvMat* Mat_img = cvGetMat(Gray, Mat_tipo);
        CvMat* Matinvenrt = cvCreateMat(Mat_img->rows, Mat_img->cols, CV_32F);
        cvInvert(Mat_img, Matinvenrt, CV_LU);
        cvShowImage("Imagen", Mat_img);
        // imshow("imagen", img);
        cvReleaseMat(&Mat_img);
        int id = cvWaitKey(27);
        if (id == 27) break;
    }
}
What is happening? Does cvInvert have a bug?
Thank you.
Assertion failed (src.type() == dst.type())
It is clearly visible from your code that the Mat types are not the same for Mat_img and Matinvenrt: cvGetMat returns a header over the 8-bit Gray image, so Mat_img is CV_8UC1, while Matinvenrt was created as CV_32F.
Are you sure you are inverting a square matrix?
I had a similar error, and it was because I was trying to invert an M×N (non-square) matrix.
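For reference, here is a minimal sketch (not the code from the question) of a cvInvert call that satisfies the assertion: src and dst share the same floating-point type, and the matrix is square.
#include <opencv2/core/core_c.h>
#include <stdio.h>
int main()
{
    // cvInvert requires src and dst to have the same type, and the check
    // src.rows == dst.cols && src.cols == dst.rows only holds for square
    // matrices -- a 640x480 camera frame cannot be inverted as-is.
    CvMat* src = cvCreateMat(3, 3, CV_32F);
    CvMat* dst = cvCreateMat(3, 3, CV_32F);
    cvSetIdentity(src, cvRealScalar(2.0));  // src = 2*I, trivially invertible
    double det = cvInvert(src, dst, CV_LU); // with CV_LU, returns the determinant of src
    printf("det = %f, dst(0,0) = %f\n", det, cvmGet(dst, 0, 0));
    cvReleaseMat(&src);
    cvReleaseMat(&dst);
    return 0;
}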
I am trying to segment a depth image so that depth values between two limits (low and high) remain the same, and values outside the limits are set to 0.
To accomplish this I am trying to use the forEach method in OpenCV 3, to speed up the operation using all the available CPU cores.
Implemented this way, the function works:
void Filter_Image(cv::Mat &img, int low, int high)
{
    for (uint8_t &p : cv::Mat_<uint8_t>(img))
    {
        if (((p > low) && (p < high)) == false)
            p = 0;
    }
}
However, when I try to use the lambda expression, I only get correct results in one vertical third of the image (if you split the image into three columns, only the first, leftmost column is segmented well). The code is as follows:
void Filter_Image(cv::Mat &img, int low, int high)
{
    img.forEach<uint8_t>([&](uint8_t &p, const int * position) -> void {
        if (((p > low) && (p < high)) == false)
            p = 0;
    });
}
The functions are called from this piece of code (simplified for testing):
#include "opencv/cv.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "config_parser.h"
#include "background_substractor.h"
#include "object_tracker.h"
#include "roi_processing.h"
#include "filtering_functions.h"
using namespace cv;
int main(int argc, char **argv)
{
    Mat opencv_frame;
    namedWindow("Input Video");

    // parse config
    VIDEO_CONFIG videoConfig;
    BACK_SUBS_CONFIG backSubsConfig;
    TRACKER_CONFIG trackerConfig;
    ROI_CONFIG roiConfig;
    FILTERING_DATA filteringData;

    Parse_Config("../Config/ConfigDepthImage.json", videoConfig, backSubsConfig, trackerConfig, filteringData, roiConfig);
    Display_Config(videoConfig, backSubsConfig, trackerConfig, filteringData, roiConfig);

    VideoCapture videoInput(videoConfig.path.c_str());
    if (!videoInput.isOpened())
    {
        std::cout << "Could not open reference video" << std::endl;
        return -1;
    }

    while (1)
    {
        videoInput >> opencv_frame;
        if (opencv_frame.empty())
        {
            std::cout << "Empty frame" << std::endl;
            destroyWindow("Input Video");
            destroyWindow("Filtered Video");
            break;
        }
        Filter_Image(opencv_frame, filteringData.min, filteringData.max);

        // show video
        imshow("Input Video", opencv_frame);
        waitKey((1.0 / videoConfig.fps) * 1000);
    }
    return 0;
}
The difference in results can be observed in the displayed images: the plain loop produces the correctly segmented result, while forEach, under the same conditions, produces the bad one.
I cannot see the error or the difference between the two functions. The type of the image is CV_8UC1.
Could anyone provide a clue?
Thank you all very much in advance.
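For reference, here is a minimal, self-contained sketch (not the code from the question) where forEach runs on a Mat that is explicitly constructed as CV_8UC1, so the functor's uint8_t pixel type matches the element size; printing img.type() == CV_8UC1 is also a cheap way to double-check the assumption about the input frames.
#include <opencv2/core/core.hpp>
#include <iostream>
int main()
{
    // Explicitly single-channel 8-bit image, filled with 150.
    cv::Mat img(480, 640, CV_8UC1, cv::Scalar(150));
    int low = 100, high = 200;

    // Same filtering rule as in the question: keep values strictly
    // inside (low, high), zero everything else.
    img.forEach<uint8_t>([&](uint8_t &p, const int * /*position*/) -> void {
        if (p <= low || p >= high)
            p = 0;
    });

    // Sanity checks: the type really is CV_8UC1 and every pixel was visited.
    std::cout << "CV_8UC1: " << (img.type() == CV_8UC1)
              << ", nonzero pixels: " << cv::countNonZero(img) << std::endl;
    return 0;
}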
I'd like to build a rule that takes in a few parameters from a parsed line and then sets a few others to constants. Is that possible? An (invalid) example to illustrate what I'm trying to do is below. I think I'm using _r1 incorrectly here, but I'm not sure how to get at the right thing. Assume I don't want to just modify r before sending it into the parser.
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/qi_plus.hpp>
#include <boost/spirit/include/qi_sequence.hpp>
#include <boost/spirit/include/qi_string.hpp>
#include <boost/spirit/include/phoenix_core.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_object.hpp>
#include <boost/fusion/include/adapt_struct.hpp>
#include <boost/fusion/adapted/struct/adapt_struct.hpp>
#include <boost/phoenix/bind/bind_function.hpp>
#include <string>
using namespace boost::spirit::qi;
struct Sample
{
    int a;
    int b;
};

BOOST_FUSION_ADAPT_STRUCT(Sample, a, b)

const rule<std::string::const_iterator, Sample()> AnythingAndOne = int_ >> eps[_r1.b = 1] >> eoi;

int main()
{
    std::string z("3");
    Sample r;
    parse(z.begin(), z.end(), AnythingAndOne, r);
    return 0;
}
Again, with reference to Boost Spirit: "Semantic actions are evil"? I'd avoid the semantic action.
You can directly synthesize a particular attribute value by using qi::attr:
Live On Coliru
#include <boost/spirit/include/qi.hpp>
#include <boost/fusion/include/adapt_struct.hpp>
#include <boost/fusion/include/as_vector.hpp>
#include <boost/fusion/include/io.hpp>
#include <iostream>
#include <string>

struct Sample {
    int a;
    int b;
};

BOOST_FUSION_ADAPT_STRUCT(Sample, a, b)

namespace qi = boost::spirit::qi;

int main()
{
    std::string const z("3");
    Sample r;

    qi::rule<std::string::const_iterator, Sample()> AnythingAndOne
        = qi::int_ >> qi::attr(1) >> qi::eoi;

    if (parse(z.begin(), z.end(), AnythingAndOne, r))
        std::cout << "Parsed: " << boost::fusion::as_vector(r) << "\n";
    else
        std::cout << "Parse failed\n";
}
Prints
Parsed: (3 1)
I found a problem with undistort() in OpenCV 2.4.8 in VS2010. It just shows me a single-color image in the "undist" window, while the same code works fine in Qt Creator.
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
int main()
{
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) // check if we succeeded
        return -1;

    while (1)
    {
        Mat frame;
        cap >> frame;
        imshow("video", frame);

        double cM[3][3] = {{610.12376, 0, 319.5}, {0, 610.12376, 239.5}, {0, 0, 1}};
        Mat cameraMatrix = Mat(3, 3, CV_64F, cM);
        double dM[5] = {0.0681495, -0.128756, 0, 0, 0.5857514};
        Mat distCoeffs = Mat(8, 1, CV_64F, dM);

        Mat undi = frame.clone();
        undistort(frame, undi, cameraMatrix, distCoeffs);
        imshow("undist", undi);

        if ((cvWaitKey(30) & 255) == 's')
        {
            imwrite("test.jpg", undi);
        }
        else if ((cvWaitKey(30) & 255) == 27) break;
    }
    cvWaitKey(0);
    return 0;
}
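One detail worth noting (an observation about the snippet, not a confirmed explanation of the VS2010-only symptom): distCoeffs is declared as an 8×1 CV_64F Mat on top of a 5-element array, so the header reads three doubles past the end of dM. That is undefined behavior and may well differ between compilers. Below is a minimal sketch, with a stand-in frame, where the Mat header matches the backing array:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int main()
{
    double cM[3][3] = {{610.12376, 0, 319.5}, {0, 610.12376, 239.5}, {0, 0, 1}};
    Mat cameraMatrix(3, 3, CV_64F, cM);

    // Five coefficients (k1, k2, p1, p2, k3): the header is 5x1,
    // exactly the size of the backing array.
    double dM[5] = {0.0681495, -0.128756, 0, 0, 0.5857514};
    Mat distCoeffs(5, 1, CV_64F, dM);

    Mat frame(480, 640, CV_8UC3, Scalar(90, 120, 150)); // stand-in for a camera frame
    Mat undi;
    undistort(frame, undi, cameraMatrix, distCoeffs);
    return 0;
}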
I am trying to cluster video frames as abnormal and normal. I have divided the video into frames and labeled them as normal or abnormal. I have two problems: I am not sure whether my approach is correct, and I got an unexpected error.
Please help me.
The error occurs at this line: bowTrainer.add(features1);
My full code is below:
// Bow.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <windows.h>
#include "opencv2/ml/ml.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <sys/stat.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <limits>
#include <cstdio>
#include <iostream>
#include <fstream>
using namespace std;
using namespace cv;
using std::vector;
using std::iostream;
int main()
{
    initModule_nonfree();

    Ptr<FeatureDetector> features = FeatureDetector::create("SIFT");
    Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create("SIFT");
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");

    // defining terms for bowkmeans trainer
    TermCriteria tc(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.001);
    int dictionarySize = 100;
    int retries = 1;
    int flags = KMEANS_PP_CENTERS;
    BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
    BOWImgDescriptorExtractor bowDE(descriptor, matcher);

    //**creating dictionary**//
    Mat trainme(0, dictionarySize, CV_32FC1);
    Mat labels(0, 1, CV_32FC1); // 1d matrix with 32fc1 is a requirement of the NormalBayesClassifier class

    int i = 0;
    while (i < 10)
    {
        char filename[255];
        string n;
        n = sprintf(filename, "C:\\Users\\Desktop\\New folder\\View_001\\frame_000%d.jpg", i);
        Mat img = imread(filename, 0);
        Mat features1;
        vector<KeyPoint> keypoints;
        descriptor->compute(img, keypoints, features1);
        bowTrainer.add(features1);
        Mat dictionary = bowTrainer.cluster();
        bowDE.setVocabulary(dictionary);
        Mat bowDescriptor;
        bowDE.compute(img, keypoints, bowDescriptor);
        trainme.push_back(bowDescriptor);
        float label = 1.0;
        labels.push_back(label);
        i++;
    }

    int j = 11;
    while (j < 21)
    {
        char filename2[255];
        string n;
        n = sprintf(filename2, "C:\\Users\\Desktop\\New folder\\View_001\\frame_000%d.jpg", j);
        cout << filename2;
        Mat img2 = imread(filename2, 0);
        Mat features2;
        vector<KeyPoint> keypoints2;
        descriptor->compute(img2, keypoints2, features2);
        bowTrainer.add(features2);
        Mat bowDescriptor2;
        bowDE.compute(img2, keypoints2, bowDescriptor2);
        trainme.push_back(bowDescriptor2);
        float label = 2.0;
        labels.push_back(label);
        j++;
    }

    NormalBayesClassifier classifier;
    classifier.train(trainme, labels);
    //**classifier trained**//

    //**now trying to predict using the same trained classifier, it should return 1.0**//
    Mat tryme(0, dictionarySize, CV_32FC1);
    Mat tryDescriptor;
    Mat img3 = imread("C:\\Users\\Desktop\\New folder\\View_001\\frame_0121.jpg", 0);
    vector<KeyPoint> keypoints3;
    features->detect(img3, keypoints3);
    bowDE.compute(img3, keypoints3, tryDescriptor);
    tryme.push_back(tryDescriptor);
    cout << classifier.predict(tryme) << endl;

    waitKey(0);
    system("PAUSE");
    return 0;
}
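A possible cause of the failure at bowTrainer.add(features1) (an assumption based on the snippet, not a verified diagnosis): compute() is called with an empty keypoints vector and no prior detect() step, so features1 comes back empty, and BOWTrainer::add asserts on empty descriptor matrices. Below is a minimal sketch of the detect-then-compute order, with a hypothetical frame path:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <vector>
using namespace cv;
using std::vector;
int main()
{
    initModule_nonfree(); // register SIFT before create()
    Ptr<FeatureDetector> features = FeatureDetector::create("SIFT");
    Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create("SIFT");
    TermCriteria tc(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.001);
    BOWKMeansTrainer bowTrainer(100, tc, 1, KMEANS_PP_CENTERS);

    Mat img = imread("frame_0000.jpg", 0); // hypothetical path
    if (img.empty())
        return -1; // guard against a bad path or missing frame

    vector<KeyPoint> keypoints;
    features->detect(img, keypoints);               // fill keypoints first
    Mat features1;
    descriptor->compute(img, keypoints, features1); // then compute descriptors

    if (!features1.empty())   // add() asserts on empty descriptors
        bowTrainer.add(features1);
    return 0;
}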
I am updating some older OpenCV code that was written in (I guess) an OpenCV 1.1 manner (i.e. using IplImages).
What I want to accomplish right now is to simply load a series of images (passed as command line arguments) as Mats. This is part of a larger task. The first code sample below is the old code's image loading method. It loads 5 images from the command line and displays them in sequence, pausing for a key hit after each, then exits.
The second code sample is my updated version using Mat. It works fine so far, but is this the best way to do this? I've used an array of Mats. Should I use an array of pointers to Mats instead? And is there a way to do this such that the number of images is determined at run time from argc and does not need to be set ahead of time with IMAGE_NUM?
Basically, I'd like to be able to pass any number (within reason) of images as command line arguments, and have them loaded into some convenient array or other similar storage for later reference.
Thanks.
Old code:
#include <iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
using namespace cv;
// the number of input images
#define IMAGE_NUM 5
int main(int argc, char **argv)
{
    uchar **imgdata;
    IplImage **img;
    int index = 0;
    char *img_file[IMAGE_NUM];

    cout << "Loading files" << endl;
    while (++index < argc)
        if (index <= IMAGE_NUM)
            img_file[index-1] = argv[index];

    // malloc memory for images
    img = (IplImage **)malloc(IMAGE_NUM * sizeof(IplImage *)); // allocates memory to store just an IplImage pointer for each image loaded
    imgdata = (uchar **)malloc(IMAGE_NUM * sizeof(uchar *));

    // load images. Note: cvLoadImage actually allocates the memory for the images
    for (index = 0; index < IMAGE_NUM; index++) {
        img[index] = cvLoadImage(img_file[index], 1);
        if (!img[index]->imageData){
            cout << "Image data not loaded properly" << endl;
            return -1;
        }
        imgdata[index] = (uchar *)img[index]->imageData;
    }

    for (index = 0; index < IMAGE_NUM; index++){
        imshow("myWin", img[index]);
        waitKey(0);
    }
    cvDestroyWindow("myWin");
    cvReleaseImage(img);
    return 0;
}
New code:
#include <iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <time.h>
using namespace std;
using namespace cv;
// the number of input images
#define IMAGE_NUM 5
int main(int argc, char **argv)
{
    Mat img[IMAGE_NUM];
    int index = 0;

    for (index = 0; index < IMAGE_NUM; index++) {
        img[index] = imread(argv[index+1]);
        if (!img[index].data){
            cout << "Image data not loaded properly" << endl;
            cin.get();
            return -1;
        }
    }

    for (index = 0; index < IMAGE_NUM; index++) {
        imshow("myWin", img[index]);
        waitKey(0);
    }
    cvDestroyWindow("myWin");
    return 0;
}
You can use a vector instead of an array, for example:
#include <iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <time.h>
#include <vector>
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
    vector<Mat> img;
    //Mat img[IMAGE_NUM];
    int index = 0;

    for (index = 0; index < argc - 1; index++) { // image count now comes from argc at run time
        //img[index] = imread(argv[index+1]);
        img.push_back(imread(argv[index+1]));
        if (!img[index].data){
            cout << "Image data not loaded properly" << endl;
            cin.get();
            return -1;
        }
    }

    vector<Mat>::iterator it;
    for (it = img.begin(); it != img.end(); it++) {
        imshow("myWin", (*it));
        waitKey(0);
    }
    cvDestroyWindow("myWin");
    return 0;
}
It took me a while to get back around to this, but what I've ended up doing is as follows, which is probably functionally the same as Gootik's suggestion. This has worked well for me. Notice that for functions that take Mat& (i.e. a single cv::Mat), you can just de-ref the array of Mats and pass that, which is a notation I'm more comfortable with after doing a lot of image processing work in Matlab.
#include <iostream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
    if (argc == 1){
        cout << "No images to load!" << endl;
        cin.get();
        return 0;
    }

    int index = 0;
    int image_num = argc - 1;
    Mat *img = new Mat[image_num]; // allocates the table on the heap instead of the stack

    // Load the images from the command line:
    for (index = 0; index < image_num; index++) {
        img[index] = imread(argv[index+1]);
        if (!img[index].data){
            cout << "Image data not loaded properly" << endl;
            cin.get();
            return -1;
        }
    }

    for (index = 0; index < image_num; index++) {
        imshow("myWin", img[index]);
        waitKey(0);
    }
    cvDestroyWindow("myWin");
    delete [] img; // notice the [] when deleting an array
    return 0;
}
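As a small usage note on the point above about passing a single element to a function that takes a Mat&: the sketch below uses a hypothetical ToGray helper (not from the original post) to show both the indexing and the pointer de-referencing notation.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;

// Hypothetical helper that takes a single cv::Mat by reference.
void ToGray(Mat &m)
{
    cvtColor(m, m, CV_BGR2GRAY); // in-place color conversion
}

int main()
{
    Mat *img = new Mat[2];
    img[0] = Mat(480, 640, CV_8UC3, Scalar(10, 20, 30));
    img[1] = Mat(480, 640, CV_8UC3, Scalar(40, 50, 60));

    ToGray(img[0]);     // indexing the heap array yields a Mat& for one image
    ToGray(*(img + 1)); // equivalent de-reference notation

    delete [] img;
    return 0;
}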