invalid null pointer exception - debugging

I am getting an error in this piece of code:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
using namespace std;
int main( int argc, char** argv )
{
//load color img specified by first argument
//IplImage *img = cvLoadImage( argv[1]);
IplImage *img = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR );
IplImage *red = cvCreateImage(cvSize(img->width, img->height ),img->depth,img->nChannels);
IplImage *green = cvCreateImage(cvSize(img->width, img->height ),img->depth,img>nChannels);
IplImage *blue = cvCreateImage(cvSize(img->width, img->height ),img->depth,img->nChannels);
// setup the pointer to access img data
uchar *pImg = ( uchar* )img->imageData;
// setup pointer to write data
uchar *pRed = ( uchar* )red->imageData;
uchar *pGreen = ( uchar* )green->imageData;
uchar *pBlue = ( uchar* )blue->imageData;
int i, j, rED, gREEN, bLUE, byte;
for( i = 0 ; i < img->height ; i++ )
{
for( j = 0 ; j < img->width ; j++ )
{
rED = pImg[i*img->widthStep + j*img->nChannels + 2];
gREEN = pImg[i*img->widthStep + j*img->nChannels + 1];
bLUE = pImg[i*img->widthStep + j*img->nChannels + 0];
// RED
pRed[i*img->widthStep + j*img->nChannels + 2] = rED;
// GREEN
pGreen[i*img->widthStep + j*img->nChannels + 1] = gREEN;
// BLUE
pBlue[i*img->widthStep + j*img->nChannels + 0] = bLUE;
}
}
// save images
cvSaveImage( argv[2], red );
cvSaveImage( argv[3], green );
cvSaveImage( argv[4], blue );
return 0;
}
The error is: Debug assertion failed.
Expression: invalid null pointer
This is the piece of code where the breakpoint is hit:
#ifdef _DEBUG
_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Debug_message(const wchar_t *message,
const wchar_t *file, unsigned int line)
{ // report error and die
if(::_CrtDbgReportW(_CRT_ASSERT, file, line, NULL, message)==1)
{
::_CrtDbgBreak();
}
}
The yellow arrow points to ::_CrtDbgBreak().

Perhaps not the only problem in that snippet, but there's a typo on this line:
IplImage *green = cvCreateImage(cvSize(img->width, img->height ),img->depth,img>nChannels);
You're passing img>nChannels, not img->nChannels
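Beyond the typo, the "invalid null pointer" assertion is consistent with running the program without enough command-line arguments (argv[1] is then NULL) or with cvLoadImage failing and returning NULL. A minimal sketch of a guard for both cases (the usage message is my own):
/* Sketch: validate arguments and the loaded image before dereferencing. */
if (argc < 5) {
    fprintf(stderr, "usage: %s input.png red.png green.png blue.png\n", argv[0]);
    return 1;
}
IplImage *img = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);
if (!img) {
    fprintf(stderr, "could not load image: %s\n", argv[1]);
    return 1;
}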

Related

Simple OpenCL program not working

This is a simple parallel program that adds the elements of two vectors.
The program compiled successfully with no errors, but the results are not right:
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <array>
#include <fstream>
#include <sstream>
#include <string>
#include <algorithm>
#include <iterator>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#include <time.h>
#endif
#define MAX_SOURCE_SIZE (0x100000)
// number of points in both the A and B files (number of rows)
const int number_of_points = 11;
// number of axes per point in both the A and B files (number of columns)
const int number_of_axis = 3;
using namespace std;
int main(int argc, char *argv[]) {
clock_t tStart = clock();
// Create the two input vectors
// working variables
int i;
ifstream input_fileA, input_fileB; // input files
string line; // transfer row from file to array
float x; // transfer word from file to array
int row = 0; // number of rows of file A,B (= array)
int col = 0; // number of columns of file A,B (= array)
// working arrays
// array contains file A data
float arrayA[number_of_points][number_of_axis]={{0}};
// array contains file B data
float arrayB[number_of_points][number_of_axis]={{0}};
// float X1[number_of_points]; // X values of file A points
float Y1[number_of_points]; // Y values of file A points
// float X2[number_of_points]; // X values of file B points
float Y2[number_of_points]; // Y values of file B points
float *X1 = (float*)malloc(sizeof(float)*number_of_points);
float *X2 = (float*)malloc(sizeof(float)*number_of_points);
// import input files
input_fileA.open(argv[1]);
input_fileB.open(argv[2]);
// transfer input files data to array
// input file A to arrayA
row = 0;
while (getline(input_fileA, line))
{
istringstream streamA(line);
col = 0;
while(streamA >> x){
arrayA[row][col] = x;
col++;
}
row++;
}
// input file B to arrayB
row = 0;
while (getline(input_fileB, line))
{
istringstream streamB(line);
col = 0;
while(streamB >> x){
arrayB[row][col] = x;
col++;
}
row++;
}
// put Xs of points in X vectors and Ys of points in Y vectors
// input file A
for (int i = 0; i<number_of_points; i++){
X1[i] = arrayA[i][1];
Y1[i] = arrayA[i][2];
}
// input file B
for (int i = 0; i<number_of_points; i++){
X2[i] = arrayB[i][1];
Y2[i] = arrayB[i][2];
}
// int i;
// const int LIST_SIZE = 50;
// int *A = (int*)malloc(sizeof(int)*number_of_points);
// int *B = (int*)malloc(sizeof(int)*number_of_points);
// for(i = 0; i < number_of_points; i++) {
// A[i] = X1[i];
// B[i] = X2[i];
// }
// Load the kernel source code into the array source_str
FILE *fp;
char *source_str;
size_t source_size;
fp = fopen("vector_add_kernel.cl", "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_ALL, 1,
&device_id, &ret_num_devices);
// Create an OpenCL context
cl_context context =
clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
// Create a command queue
cl_command_queue command_queue =
clCreateCommandQueue(context, device_id, 0, &ret);
// Create memory buffers on the device for each vector
cl_mem x1_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
number_of_points * sizeof(float), NULL, &ret);
cl_mem x2_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
number_of_points * sizeof(float), NULL, &ret);
cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
number_of_points * sizeof(float), NULL, &ret);
// Copy the lists A and B to their respective memory buffers
ret = clEnqueueWriteBuffer(command_queue, x1_mem_obj, CL_TRUE, 0,
number_of_points * sizeof(float), X1, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, x2_mem_obj, CL_TRUE, 0,
number_of_points * sizeof(float), X2, 0, NULL, NULL);
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, (const size_t *)&source_size, &ret);
// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&x1_mem_obj);
ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&x2_mem_obj);
ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&c_mem_obj);
// Execute the OpenCL kernel on the list
size_t global_item_size = number_of_points; // Process the entire lists
size_t local_item_size = 64; // Process in groups of 64
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
&global_item_size, &local_item_size, 0, NULL, NULL);
// Read the memory buffer C on the device to the local variable C
// int *C = (int*)malloc(sizeof(int)*number_of_points);
float *C = (float*)malloc(sizeof(float)*number_of_points);
ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0,
number_of_points * sizeof(float), C, 0, NULL, NULL);
// Display the result to the screen
for(i = 0; i < number_of_points; i++)
printf("%f + %f = %f\n", X1[i], X2[i], C[i]);
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(x1_mem_obj);
ret = clReleaseMemObject(x2_mem_obj);
ret = clReleaseMemObject(c_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(X1);
free(X2);
free(C);
printf("ALL Time taken: %.2fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
return 0;
}
and the kernel file
__kernel void vector_add(__global float *X1,
__global float *X2,
__global float *C) {
// Get the index of the current element
int i = get_global_id(0);
// Do the operation
C[i] = X1[i] + X2[i];
}
The result was
0.000000 + 0.000000 = 0.000000
1.000000 + 1.000000 = 0.000000
2.000000 + 2.000000 = 0.000000
3.000000 + 3.000000 = 0.000000
4.000000 + 4.000000 = 0.000000
5.000000 + 5.000000 = 0.000000
6.000000 + 6.000000 = 0.000000
7.000000 + 7.000000 = 0.000000
8.000000 + 8.000000 = 0.000000
9.000000 + 9.000000 = 0.000000
1.000000 + 1.000000 = 0.000000
ALL Time taken: 0.07s
You've committed one of the cardinal sins of OpenCL programming, in that you are not checking the error codes from any of your OpenCL API calls! You should always check the return code from every single OpenCL API call. If you did this, it would point you towards the problem very quickly.
The problem is in your kernel enqueue call. If you check the error code, you'll see that you are getting -54 back, which corresponds to CL_INVALID_WORK_GROUP_SIZE. Specifically, kernel invocations have the requirement that the work-group size (local size) exactly divides the global size. You are asking for a work-group size of 64 and a global size of 11, which does not fulfil this requirement.
You can also pass NULL as the work-group size parameter, and the OpenCL implementation will pick a work-group size that will definitely work on your behalf.
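For illustration, a minimal error-checking pattern might look like this (a sketch; the CHECK macro is my own name, not part of OpenCL):
/* Sketch: fail loudly on any non-CL_SUCCESS return code. */
#define CHECK(err) \
    do { \
        if ((err) != CL_SUCCESS) { \
            fprintf(stderr, "OpenCL error %d at %s:%d\n", \
                    (int)(err), __FILE__, __LINE__); \
            exit(1); \
        } \
    } while (0)

/* Passing NULL as the local size lets the implementation pick a
   work-group size that divides the global size. */
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
                             &global_item_size, NULL, 0, NULL, NULL);
CHECK(ret);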

How to write a video?

I'm using OpenCV with Visual Studio 2010 on Windows 7 (32-bit). While running the People Detection sample, the output video plays in a window, but I'm unable to open the output video stored at the specified location. Kindly help me. Thank you.
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat img; char _filename[1024];
HOGDescriptor hog;
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
namedWindow("people detector", 1);
CvCapture *cap=cvCaptureFromFile("E:/Phase_I_output/2.walk.avi");
img=cvQueryFrame(cap);
for(;;)
{
img=cvQueryFrame(cap);
if(img.empty())
break;
fflush(stdout);
vector<Rect> found, found_filtered;
double t = (double)getTickCount();
int can = img.channels();
hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
t = (double)getTickCount() - t;
printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
size_t i, j;
for( i = 0; i < found.size(); i++ )
{
Rect r = found[i];
for( j = 0; j < found.size(); j++ )
if( j != i && (r & found[j]) == r)
break;
if( j == found.size() ) found_filtered.push_back(r);
}
for( i = 0; i < found_filtered.size(); i++ )
{
Rect r = found_filtered[i];
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.8);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.8);
rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
}
Size size2 = Size(640,480);
int codec = CV_FOURCC('M', 'J', 'P', 'G');
VideoWriter writer2("E:/Phase_I_output/video_.avi",codec,50.0,size2,true);
writer2.open("E:/Phase_I_output/video_.avi",codec,15.0,size2,true);
writer2.write(img);
imshow("people detector", img);
if(waitKey(1) == 27)
break;
}
std::cout << "Completed" << std::endl ;
waitKey();
return 0;
}
You should initialize the VideoWriter before the infinite loop, and release the VideoWriter (not necessary with the C++ API) and the VideoCapture once there are no more frames to grab:
Size size2 = Size(640,480);
int codec = CV_FOURCC('M', 'J', 'P', 'G');
VideoWriter writer2("E:/Phase_I_output/video_.avi",codec,50.0,size2,true);
for(;;){
//do your stuff
//write the current frame
writer2.write(img);
}
writer2.release(); // optional: the C++ writer also releases itself in its destructor
cvReleaseCapture( &cap ); // the question's capture uses the C API
You should also use the C++ API of OpenCV. I believe every function beginning with 'cv' belongs to the C API (which is no longer supported). Check the OpenCV documentation to find the corresponding C++ function.
For example :
img=cvQueryFrame(cap);
Will become :
cap >> img;
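The same pattern applies to the capture itself, as the corrected code below shows:
CvCapture *cap = cvCaptureFromFile(path);  becomes  VideoCapture cap(path);
cvReleaseCapture( &cap );                  becomes  cap.release();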
Edit
I corrected your code to use the C++ API of OpenCV, and it's working fine (the people detection seems to give false positives, though). Here is the code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/core/core.hpp>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat img;
string _filename;
_filename = "path/to/video.avi";
HOGDescriptor hog;
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
namedWindow("people detector", WND_PROP_AUTOSIZE);
VideoCapture cap = VideoCapture(_filename);
if(!cap.isOpened()){
cout<<"error opening : "<<_filename<<endl;
return 1;
}
Size size2 = Size(640,480);
int codec = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
double fps = cap.get(CV_CAP_PROP_FPS);
VideoWriter writer2("../outputVideo_.avi",codec,fps,size2,true);
for(;;)
{
cap >> img;
if(img.empty()){
cout<<"frame n° "<<cap.get(CV_CAP_PROP_FRAME_COUNT)<<endl;
break;
}
fflush(stdout);
vector<Rect> found, found_filtered;
double t = (double)getTickCount();
int can = img.channels();
hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
t = (double)getTickCount() - t;
printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
size_t i, j;
for( i = 0; i < found.size(); i++ )
{
Rect r = found[i];
for( j = 0; j < found.size(); j++ )
if( j != i && (r & found[j]) == r)
break;
if( j == found.size() ) found_filtered.push_back(r);
}
for( i = 0; i < found_filtered.size(); i++ )
{
Rect r = found_filtered[i];
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.8);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.8);
rectangle(img, r.tl(), r.br(), Scalar(0,255,0), 3);
}
//writer2.write(img);
writer2 << img;
imshow("people detector", img);
if(waitKey(1) == 27)
{
break;
}
}
writer2.release();
cap.release();
cout << "Completed" << endl ;
waitKey();
destroyAllWindows();
return 0;
}

Sobel filter in CUDA (can't show full image)

I have a classic problem with the output of a Sobel filter using CUDA.
This is the main file (main.cpp):
/* main.cpp */
#include <cv.h>
#include <highgui.h>
/* forward declaration of the CUDA wrapper defined in kernel_gpu.cu */
extern "C" void sobel_parallel(unsigned char* h_in, unsigned char* h_out,
int rows, int cols, int widthStep);
int main(int argc, char** argv)
{
IplImage* image_source = cvLoadImage("test.jpg",
CV_LOAD_IMAGE_GRAYSCALE);
IplImage* image_input = cvCreateImage(cvGetSize(image_source),
IPL_DEPTH_8U,image_source->nChannels);
IplImage* image_output = cvCreateImage(cvGetSize(image_source),
IPL_DEPTH_8U,image_source->nChannels);
/* Copy the source image into the 8-bit input image */
cvConvert(image_source,image_input);
unsigned char *h_out = (unsigned char*)image_output->imageData;
unsigned char *h_in = (unsigned char*)image_input->imageData;
int width = image_input->width;
int height = image_input->height;
int widthStep = image_input->widthStep;
sobel_parallel(h_in, h_out, width, height, widthStep);
cvShowImage( "CPU", image_output );
cvReleaseImage( &image_output );
cvWaitKey(0);
return 0;
}
And this is the CUDA file (kernel_gpu.cu)
__global__ void kernel ( unsigned char *d_in , unsigned char *d_out , int width ,
int height, int widthStep ) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dx[3][3] = {{-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1}};
int dy[3][3] = {{ 1, 2, 1},
{ 0, 0, 0},
{-1, -2, -1}};
int s;
if( col < width && row < height)
{
int i = row;
int j = col;
// apply kernel in X direction
int sum_x=0;
for(int m=-1; m<=1; m++)
for(int n=-1; n<=1; n++)
{
s=d_in[(i+m)*widthStep+j+n]; // get the (i,j) pixel value
sum_x+=s*dx[m+1][n+1];
}
// apply kernel in Y direction
int sum_y=0;
for(int m=-1; m<=1; m++)
for(int n=-1; n<=1; n++)
{
s=d_in[(i+m)*widthStep+j+n]; // get the (i,j) pixel value
sum_y+=s*dy[m+1][n+1];
}
int sum=abs(sum_x)+abs(sum_y);
if (sum>255)
sum=255;
d_out[i*widthStep+j]=sum; // set the (i,j) pixel value
}
}
// Kernel Calling Function
extern "C" void sobel_parallel( unsigned char* h_in, unsigned char* h_out,
int rows, int cols, int widthStep){
unsigned char* d_in;
unsigned char* d_out;
cudaMalloc((void**) &d_in, rows*cols);
cudaMalloc((void**) &d_out, rows*cols);
cudaMemcpy(d_in, h_in, rows*cols*sizeof( unsigned char), cudaMemcpyHostToDevice);
dim3 block (16,16);
dim3 grid ((rows * cols) / 256.0);
kernel<<<grid,block>>>(d_in, d_out, rows, cols, widthStep);
cudaMemcpy(h_out, d_out, rows*cols*sizeof( unsigned char), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
Error:
The result image does not appear in its entirety; only part of the image is filtered.
Why is the GPU result like this? (I tried the CPU computation using the same function and there was no problem.)
You are creating a 1-dimensional grid while using 2D indexing inside the kernel, so only the x direction is covered and only the top 16 rows of the image are filtered (because the block height is 16).
dim3 grid ((rows * cols) / 256.0); //This is incorrect in current case
Consider creating 2 dimensional grid, so that it spans all the rows of the image.
dim3 grid ((cols + 15)/16, (rows + 15)/16);
Check the width and widthStep variables to see whether they are actually equal, because in your sobel_parallel function you are implicitly assuming they are (which might not be true, since your data may be aligned). If they are not equal, the code
cudaMalloc((void**) &d_in, rows*cols);
will actually allocate less memory than necessary and hence you will only process part of your image. It would be better to use
cudaMalloc((void**) &d_in, rows*widthStep);
And of course adjust the rest of your code as necessary.
You are also calling
void sobel_parallel( unsigned char* h_in, unsigned char* h_out,
int rows, int cols, int widthStep)
with
sobel_parallel(h_in, h_out, width, height, widthStep);
which exchanges rows with cols; they are exchanged again when you call your kernel. This will cause a problem once you apply the suggestion above. A combined sketch follows.
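Putting the three fixes together, a corrected wrapper might look like the sketch below (it keeps the kernel signature from the question and, like the original, still ignores the one-pixel border, where the kernel reads out of bounds):
// Sketch: pitch-sized allocations, a 2D grid, consistent naming.
extern "C" void sobel_parallel(unsigned char* h_in, unsigned char* h_out,
                               int width, int height, int widthStep) {
    unsigned char *d_in, *d_out;
    size_t bytes = (size_t)height * widthStep; // whole padded image
    cudaMalloc((void**)&d_in, bytes);
    cudaMalloc((void**)&d_out, bytes);
    cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y); // spans all rows and columns
    kernel<<<grid, block>>>(d_in, d_out, width, height, widthStep);
    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}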

Estimating an Affine Transform between Two Images

I have a sample image:
I apply the affine transform with the following warp matrix:
[[ 1.25 0. -128 ]
[ 0. 2. -192 ]]
and crop a 128x128 part from the result to get an output image:
Now, I want to estimate the warp matrix and crop size/location from just comparing the sample and output image. I detect feature points using SURF, and match them by brute force:
There are many matches, of which I'm keeping the best three (by distance), since that is the number required to estimate the affine transform. I then use those 3 keypoints to estimate the affine transform using getAffineTransform. However, the transform it returns is completely wrong:
-0.00 1.87 -6959230028596648489132997794229911552.00
0.00 -1.76 -0.00
What am I doing wrong? Source code is below.
Perform affine transform (Python):
"""Apply an affine transform to an image."""
import cv
import sys
import numpy as np
if len(sys.argv) != 10:
print "usage: %s in.png out.png x1 y1 width height sx sy flip" % __file__
sys.exit(-1)
source = cv.LoadImage(sys.argv[1])
x1, y1, width, height, sx, sy, flip = map(float, sys.argv[3:])
X, Y = cv.GetSize(source)
Xn, Yn = int(sx*(X-1)), int(sy*(Y-1))
if flip:
arr = np.array([[-sx, 0, sx*(X-1)-x1], [0, sy, -y1]])
else:
arr = np.array([[sx, 0, -x1], [0, sy, -y1]])
print arr
warp = cv.fromarray(arr)
cv.ShowImage("source", source)
dest = cv.CreateImage((Xn, Yn), source.depth, source.nChannels)
cv.WarpAffine(source, dest, warp)
cv.SetImageROI(dest, (0, 0, int(width), int(height)))
cv.ShowImage("dest", dest)
cv.SaveImage(sys.argv[2], dest)
cv.WaitKey(0)
Estimate affine transform from two images (C++):
#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
using namespace cv;
void readme();
bool cmpfun(DMatch a, DMatch b) { return a.distance < b.distance; }
/** @function main */
int main( int argc, char** argv )
{
if( argc != 3 )
{
return -1;
}
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L2, false);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
sort(matches.begin(), matches.end(), cmpfun);
std::vector< DMatch > good_matches;
vector<Point2f> match1, match2;
for (int i = 0; i < 3; ++i)
{
good_matches.push_back( matches[i]);
Point2f pt1 = keypoints_1[matches[i].queryIdx].pt;
Point2f pt2 = keypoints_2[matches[i].trainIdx].pt;
match1.push_back(pt1);
match2.push_back(pt2);
printf("%3d pt1: (%.2f, %.2f) pt2: (%.2f, %.2f)\n", i, pt1.x, pt1.y, pt2.x, pt2.y);
}
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, good_matches, img_matches,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Matches", img_matches );
imwrite("matches.png", img_matches);
waitKey(0);
Mat fun = getAffineTransform(match1, match2);
for (int i = 0; i < fun.rows; ++i)
{
for (int j = 0; j < fun.cols; j++)
{
printf("%.2f ", fun.at<float>(i,j));
}
printf("\n");
}
return 0;
}
/** @function readme */
void readme()
{
std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
The cv::Mat that getAffineTransform returns is made of doubles, not floats. The matrix you get is probably fine; you just have to change the printf call in your loops to
printf("%.2f ", fun.at<double>(i,j));
or even easier: Replace this manual output with
std::cout << fun << std::endl;
It's shorter and you don't have to care about data types yourself.
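If in doubt, the element type can also be verified at run time; a small sketch:
// Sketch: getAffineTransform returns a CV_64F (double) matrix,
// so at<double> is the right accessor.
CV_Assert(fun.type() == CV_64F);
std::cout << "estimated affine transform:\n" << fun << std::endl;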

Insert an empty space at the end of a string

I was wondering how I can insert an empty space at the end of the string text (defined by
char *text = argv[1];)
For example, if I write:
./mar "Hello how are you"
I would like to see
Hello how are you Hello how are you Hello how are you Hello how are you
and not
Hello how are youHello how are youHello how are youHello how are youHello how are you
scrolling horizontally in the CLI.
The code is:
/*mar.c*/
#include <curses.h>
#include <unistd.h> // For sleep()
#include <string.h> // For strlen()
#include <stdlib.h> // For malloc()
#include <sys/select.h>
int kbhit(void); // forward declaration; defined below main()
int main(int argc, char* argv[])
{
char *text = argv[1];
char *scroll;
int text_length;
int i,p, max_x, max_y;
// Get text length
text_length = strlen(text);
// Initialize screen for ncurses
initscr();
// Don't show cursor
curs_set(0);
// Clear the screen
clear();
// Get terminal dimensions
getmaxyx(stdscr, max_y, max_x);
scroll = malloc(2 * max_x + 1);
for (i=0; i< 2*max_x; i++) {
getmaxyx(stdscr, max_y, max_x);
scroll[i] = text[i % text_length];
}
scroll[2*max_x]='\0'; // terminate after the last copied character
// Scroll text back across the screen
p=0;
do{
getmaxyx(stdscr, max_y, max_x);
mvaddnstr(0,0,&scroll[p%max_x], max_x);
refresh();
usleep(40000);
p=p+1;
// Loop until a key is pressed
}while (!kbhit());
free(scroll);
endwin();
return 0;
}
int kbhit(void)
{
struct timeval tv;
fd_set read_fd;
tv.tv_sec=0;
tv.tv_usec=0;
FD_ZERO(&read_fd);
FD_SET(0,&read_fd);
if(select(1, &read_fd, NULL, NULL, &tv) == -1)
return 0;
if(FD_ISSET(0,&read_fd))
return 1;
return 0;
}
You need to allocate a new char array whose length is 2 bytes greater than the length of the argument string (so that there's room both for the space and for the null terminator). Then, call strcpy to copy the argument string into the new array, overwrite the second-to-last index with a space and put a null terminator into the last index.
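A minimal sketch of that suggestion, reusing the variable names from the question (the malloc failure check is my addition):
/* Sketch: build a copy of argv[1] with one trailing space. */
int text_length = strlen(argv[1]);
char *text = malloc(text_length + 2); /* +1 for the space, +1 for '\0' */
if (!text)
    return 1;
strcpy(text, argv[1]);
text[text_length] = ' ';      /* overwrite the old '\0' with a space */
text[text_length + 1] = '\0'; /* re-terminate the string */
/* ... use text as before; free(text) when done. */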
