How to get the first frame of a video using opencv? - visual-studio-2010

I have to find the cross-correlation of every frame of a video with the first frame of that video.
double crossCorrelation( IplImage* img1, IplImage* img2 ) {
    double corr;
    int M = img1->width;
    int N = img1->height;
    BwImage img_1( img1 );
    BwImage img_2( img2 );
    CvScalar img1_avg = cvAvg( img1, NULL );
    CvScalar img2_avg = cvAvg( img2, NULL );
    double sum_img1_img2 = 0;
    double sum_img1_2 = 0;
    double sum_img2_2 = 0;
    for( int m=0; m<M; ++m ) {       // m runs over columns (width)
        for( int n=0; n<N; ++n ) {   // n runs over rows (height)
            // BwImage is indexed [row][col], so the row index n must come first
            sum_img1_img2 = sum_img1_img2 + (img_1[n][m]-img1_avg.val[0])*(img_2[n][m]-img2_avg.val[0]);
            sum_img1_2 = sum_img1_2 + (img_1[n][m]-img1_avg.val[0])*(img_1[n][m]-img1_avg.val[0]);
            sum_img2_2 = sum_img2_2 + (img_2[n][m]-img2_avg.val[0])*(img_2[n][m]-img2_avg.val[0]);
        }
    }
    corr = sum_img1_img2/sqrt(sum_img1_2*sum_img2_2);
    return corr;
}
This is the code for finding the correlation. For img1 I need frame 1, and the rest of the frames will be img2 in a loop. How should I do that?

Try this code, it works:
CvCapture *video = cvCaptureFromFile("C:\\path_to_video.avi");
// clone the first frame: cvQueryFrame() reuses an internal buffer,
// so the returned pointer is overwritten by the next call
IplImage *firstFrame = cvCloneImage(cvQueryFrame(video));
IplImage *nextFrame = cvQueryFrame(video);
while (nextFrame != NULL)
{
    double crossCorrValue = crossCorrelation(firstFrame, nextFrame);
    nextFrame = cvQueryFrame(video);
}
cvReleaseImage(&firstFrame);
cvReleaseCapture(&video);

You should probably use the more modern cv::VideoCapture / cv::Mat API (VideoCapture::read or retrieve), but essentially rotating_image's answer is correct:
grab the first frame into a separate image and then keep reusing another image for the subsequent frames.
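For reference, here is a minimal sketch of that approach with the C++ API (untested; it assumes OpenCV 2.4 or later and uses cv::matchTemplate with TM_CCOEFF_NORMED, which for two equal-sized grayscale images yields the same normalized cross-correlation coefficient as the crossCorrelation() function above):
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap("C:\\path_to_video.avi");   // example path
    if (!cap.isOpened()) return -1;

    cv::Mat frame, firstGray, gray;
    cap >> frame;                                    // first frame
    if (frame.empty()) return -1;
    cv::cvtColor(frame, firstGray, cv::COLOR_BGR2GRAY);  // firstGray is its own copy of frame 1

    while (cap.read(frame))                          // read() returns false at the end of the video
    {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        // For two images of equal size, matchTemplate produces a 1x1 result:
        // a single normalized cross-correlation coefficient in [-1, 1].
        cv::Mat result;
        cv::matchTemplate(gray, firstGray, result, cv::TM_CCOEFF_NORMED);
        std::cout << result.at<float>(0, 0) << std::endl;
    }
    return 0;
}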

Related

In Flutter, how to manipulate pixel by pixel of an ui.Image without using the Image package?

In Flutter, what's the correct way to change the pixel data of an image of type ui.Image without using the Image package? I was able to update the pixels using the Image package, but I don't want to convert the image multiple times, so I'm trying to explore the possibility of updating the Uint8List data directly. Below is the code snippet. However, I get "Exception: Invalid image data" when I try to update the image with the manipulated Uint8List. I wonder what I did wrong? I'd appreciate any feedback.
int clamp(int x, int a, int b) {
  return (x < a) ? a : (x > b) ? b : x;
}

int getColorY(int a, int r, int g, int b) =>
    (clamp(a, 0, 255) << 24) |
    (clamp(r, 0, 255) << 16) |
    (clamp(g, 0, 255) << 8) |
    (clamp(b, 0, 255));

Future<ui.Image> setImageData(ui.Image uiX) async {
  int w = uiX.width;
  int h = uiX.height;
  // get byteData
  final rgbaImageData =
      await uiX.toByteData(format: ui.ImageByteFormat.png);
  // convert to Uint32
  Uint32List words = Uint32List.view(
      rgbaImageData.buffer,
      rgbaImageData.offsetInBytes,
      rgbaImageData.lengthInBytes ~/ Uint32List.bytesPerElement);
  int a = 0;
  int r = 0;
  int g = 0;
  int b = 0;
  for (int idx = 0; idx < words.length; idx++) {
    Color color = Color(words[idx]);
    if (color.red > 128) {
      a = 0;
    } else {
      r = 128;
      g = 135;
      b = 110;
    }
    words[idx] = getColorY(a, r, g, b);
  }
  // convert Uint32List to Uint8List
  Uint8List bytes = words.buffer.asUint8List();
  final Completer<ui.Image> imageCompleter = new Completer();
  ui.decodeImageFromList(bytes, (ui.Image img) {
    imageCompleter.complete(img);
  });
  return imageCompleter.future;
}
You can convert a ui.Image to raw byte data, manipulate it, then convert it back to a ui.Image using ui.decodeImageFromPixels.
final ui.Image image = ...;
// Convert to raw rgba
final ByteData bytes = await image.toByteData(
  format: ui.ImageByteFormat.rawRgba,
);
// Set the first pixel of the image to red.
bytes.setUint32(0, 0xFF0000FF);
// Set pixel at (x, y) to green.
final x = 10;
final y = 10;
bytes.setUint32((y * image.width + x) * 4, 0x00FF00FF);
ui.decodeImageFromPixels(
  bytes.buffer.asUint8List(),
  image.width,
  image.height,
  ui.PixelFormat.rgba8888,
  (ui.Image result) {
    // use your result image
  },
);
The wrong part in your code is:
final rgbaImageData = await uiX.toByteData(format: ui.ImageByteFormat.png);
The format: argument specifies the encoding of the returned bytes, so you have to pass ui.ImageByteFormat.rawRgba to get raw pixel data you can index.
Also, Color(int) expects ARGB order, whereas the raw buffer is RGBA.
You can safely convert package:image/image.dart to package:flutter/src/widgets/image.dart and vice versa (convert the Image class to a UI Image widget); just remember to add the header information if it's missing.
See a more detailed answer here:
https://stackoverflow.com/a/72871105/706387

Ways to improve ESC/POS Thermal_Printer image printing speed?

I have been working on image printing with a portable thermal printer for weeks, and this is the code I have for printing an image.
public static byte[] GetByteImage(Bitmap bm, int BitmapWidth)
{
BitmapData data = GetGreyScaledBitmapData(bm, BitmapWidth);
BitArray dots = data.Dots;
string t = data.Width.ToString();
byte[] width = BitConverter.GetBytes(data.Width);
int offset = 0;
MemoryStream stream = new MemoryStream();
BinaryWriter bw = new BinaryWriter(stream);
//Line spacing
bw.Write((char)0x1B);
bw.Write('3');
bw.Write((byte)0);
while (offset < data.Height)
{
//Declare printer to print image mode
bw.Write((char)0x1B);
bw.Write('*');
bw.Write((byte)33);
bw.Write(width[0]);
bw.Write(width[1]);
for (int x = 0; x < data.Width; ++x)
{
for (int k = 0; k < 3; ++k)
{
byte slice = 0;
for (int b = 0; b < 8; ++b)
{
int y = (((offset / 8) + k) * 8) + b;
int i = (y * data.Width) + x;
bool v = false;
if (i < dots.Length)
{
v = dots[i];
}
slice |= (byte)((v ? 1 : 0) << (7 - b));
}
bw.Write(slice);
}
}
offset += 24;
bw.Write((char)0x0A);
}
bw.Write((char)0x1B);
bw.Write('3');
bw.Write((byte)0);
bw.Flush();
byte[] bytes = stream.ToArray();
return bytes;
}
public static BitmapData GetGreyScaledBitmapData(Bitmap bmpFileName, double imgsize)
{
using (var bitmap = (Bitmap)(bmpFileName))
{
var threshold = 127;
var index = 0;
double multiplier = imgsize;
double scale = (double)(multiplier / (double)bitmap.Width);
int xheight = (int)(bitmap.Height * scale);
int xwidth = (int)(bitmap.Width * scale);
var dimensions = xwidth * xheight;
var dots = new BitArray(dimensions);
for (var y = 0; y < xheight; y++)
{
for (var x = 0; x < xwidth; x++)
{
var _x = (int)(x / scale);
var _y = (int)(y / scale);
Android.Graphics.Color color = new Android.Graphics.Color(bitmap.GetPixel(_x, _y));
var luminance = (int)(color.R * 0.3 + color.G * 0.59 + color.B * 0.11);
dots[index] = (luminance < threshold);
index++;
}
}
return new BitmapData()
{
Dots = dots,
Height = (int)(bitmap.Height * scale),
Width = (int)(bitmap.Width * scale)
};
}
}
public class BitmapData
{
public BitArray Dots
{
get;
set;
}
public int Height
{
get;
set;
}
public int Width
{
get;
set;
}
}
The problem is that it prints very slowly and makes a jerking sound while printing.
Another problem is that the grayscale conversion method is a bit slow.
When I tested other apps, I found that they print with no jerking sound and the image comes out almost instantly after the print button is clicked.
Is there a way to improve the above code so it prints smoothly?
This is the app I tested: Printer Lab - Thermal printer manager
The thermal printer I used: RPP300 72mm Mobile Printer
The ESC * command you are using prints the image in strips of 24 dots in height.
That is why, as you have noticed, printing is jerky and slow.
Use a combination of the GS * and GS / commands to improve it.
Details of their specifications are described on pages 24 to 26 of the Thermal Mobile Printer Command Set Manual.
In addition:
By the way, I was overlooking another command that makes it easier to create the data to send. However, smooth printing still depends on the printer's performance and the communication line speed.
That command is GS v 0. It is described on pages 32 and 33 of the manual.
The program in the following article is a bit-image data conversion process for the FS q and GS ( L / GS 8 L commands, but it can also be used for the GS * command. Please try it:
Convert raster byte[] image data to column Format in C#
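For what it's worth, here is a rough sketch (written in C++ rather than C#, and not printer-tested) of how a whole monochrome bitmap can be framed as one GS v 0 raster command instead of many ESC * strips; the byte layout is the same whichever language you build it in:
#include <cstdint>
#include <vector>

// Frame a monochrome bitmap as a single GS v 0 raster command.
// dots[y * widthDots + x] is assumed to be true for a black dot.
std::vector<uint8_t> buildGsV0(const std::vector<bool>& dots, int widthDots, int heightDots)
{
    int widthBytes = (widthDots + 7) / 8;
    std::vector<uint8_t> out = {
        0x1D, 0x76, 0x30, 0x00,                                    // GS v 0, mode 0 (normal)
        (uint8_t)(widthBytes & 0xFF), (uint8_t)(widthBytes >> 8),  // xL xH: width in bytes
        (uint8_t)(heightDots & 0xFF), (uint8_t)(heightDots >> 8)   // yL yH: height in dots
    };
    for (int y = 0; y < heightDots; ++y)
        for (int b = 0; b < widthBytes; ++b)
        {
            uint8_t slice = 0;
            for (int bit = 0; bit < 8; ++bit)
            {
                int x = b * 8 + bit;
                if (x < widthDots && dots[y * widthDots + x])
                    slice |= (uint8_t)(1 << (7 - bit));            // MSB is the leftmost dot
            }
            out.push_back(slice);
        }
    return out;
}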
I finally got a solution: just ask your printer's manufacturer for an SDK, or find an SDK from another printer manufacturer.

Issue in plotting resultant bit map of two bit maps difference

I want to compare one bitmap with another bitmap (a reference bitmap) and draw all the differences between them in a resultant bitmap.
Using the code below I am able to draw only the difference area, but not with its exact colors.
Here is my code:
Bitmap ResultantBitMap = new Bitmap(bitMap1.Width, bitMap1.Height);
BitmapData bitMap1Data = bitMap1.LockBits(new Rectangle(0, 0, bitMap1.Width, bitMap1.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
BitmapData bitMap2Data = bitMap2.LockBits(new Rectangle(0, 0, bitMap2.Width, bitMap2.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
BitmapData bitMapResultantData = ResultantBitMap.LockBits(new Rectangle(0, 0, ResultantBitMap.Width, ResultantBitMap.Height), System.Drawing.Imaging.ImageLockMode.ReadWrite, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
IntPtr scan0 = bitMap1Data.Scan0;
IntPtr scan02 = bitMap2Data.Scan0;
IntPtr scan0ResImg1 = bitMapResultantData.Scan0;
int bitMap1Stride = bitMap1Data.Stride;
int bitMap2Stride = bitMap2Data.Stride;
int ResultantImageStride = bitMapResultantData.Stride;
for (int y = 0; y < bitMap1.Height; y++)
{
    //define the pointers inside the first loop for parallelizing
    byte* p = (byte*)scan0.ToPointer();
    p += y * bitMap1Stride;
    byte* p2 = (byte*)scan02.ToPointer();
    p2 += y * bitMap2Stride;
    byte* pResImg1 = (byte*)scan0ResImg1.ToPointer();
    pResImg1 += y * ResultantImageStride;
    for (int x = 0; x < bitMap1.Width; x++)
    {
        //always get the complete pixel when differences are found
        if (Math.Abs(p[0] - p2[0]) >= 20 || Math.Abs(p[1] - p2[1]) >= 20 || Math.Abs(p[2] - p2[2]) >= 20)
        {
            pResImg1[0] = p2[0];// B
            pResImg1[1] = p2[1];//R
            pResImg1[2] = p2[2];//G
            pResImg1[3] = p2[3];//A (Opacity)
        }
        p += 4;
        p2 += 4;
        pResImg1 += 4;
    }
}
bitMap1.UnlockBits(bitMap1Data);
bitMap2.UnlockBits(bitMap2Data);
ResultantBitMap.UnlockBits(bitMapResultantData);
ResultantBitMap.Save(@"c:\abcd\abcd.jpeg");
What I want is the difference image with the exact colors of the reference image.
It's hard to tell what's going on without knowing what all those library calls and the "+= 4" pointer steps do, but are you sure p and p2 correspond to the first and second images of your diagram?
Also, for Format32bppArgb the in-memory byte order of each pixel is B, G, R, A, so index [1] is green and [2] is red; the comments in your inner loop have them swapped. Maybe there's a problem with that, too.

Why is this while loop breaking while reading an avi file in opencv?

#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstdlib>
#include <fstream>
#include <iostream>
using namespace std;

int main(int argc, char* argv[]) {
    ofstream file;
    file.open("Motion.dat");
    int frame_number = 0;
    CvCapture* capture = cvCreateFileCapture("Cricketc1.avi");
    CvCapture* capture1 = cvCreateFileCapture("Cricketc1.avi");
    IplImage* imgsize = NULL;
    IplImage *img1 = NULL;
    IplImage *img2 = NULL;
    IplImage *vidFrame = NULL;
    IplImage *imggray1 = NULL;
    IplImage *imggray2 = NULL;
    IplImage *imggray3 = NULL;
    cvNamedWindow("Video", 0);
    cvNamedWindow("Video1", 0);
    imgsize = cvQueryFrame(capture1);
    assert(imgsize);
    CvSize sz = cvGetSize(imgsize);
    cvReleaseCapture(&capture1);
    imggray1 = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    imggray2 = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    imggray3 = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    while (true) {
        frame_number++;
        img1 = cvQueryFrame(capture);
        if (img1->imageData == NULL)
            break;
        cvCvtColor(img1, imggray1, CV_RGB2GRAY);
        img2 = cvQueryFrame(capture);
        if (img2->imageData == NULL)
            break;
        cvCvtColor(img1, imggray1, CV_RGB2GRAY);
        cvAbsDiff(imggray1, imggray2, imggray3);
        CvScalar sumDiff = cvSum(imggray3);
        cout << sumDiff.val[0] << sumDiff.val[1] << sumDiff.val[2] << endl;
        cvWaitKey(40);
    }
    cvReleaseCapture(&capture);
    cvDestroyAllWindows();
    file.close();
    system("Pause");
    return 0;
}
There are a total of 1251 frames in the video at 25 fps.
But the loop breaks at frame_number equal to 625, at the line if(img2->imageData == NULL).
This error mainly comes up if I do any computation inside the while loop. A simple cvShowImage() works just fine, but any other manipulation around it causes this error to show up.
This is the error that comes up:
Unhandled exception at 0x00221de7 in getMotion2.exe: 0xC0000005: Access violation reading location 0x00000044.
What is the problem?
You are reading two frames from the same CvCapture* in every loop iteration: img1 = cvQueryFrame(capture); and img2 = cvQueryFrame(capture);. That advances the video by two frames per iteration, which is why it stops around frame 625 of 1251. If you change the second line to read from a second capture opened on the same file (e.g. img2 = cvQueryFrame(capture2);) it should work fine. Also check the pointer returned by cvQueryFrame() for NULL before dereferencing it: at the end of the video cvQueryFrame() returns NULL, and reading img2->imageData through that NULL pointer is exactly what produces the access violation at offset 0x44.
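For completeness, a minimal sketch (untested, same legacy C API as the question) of the loop using a single capture, a saved previous frame, and NULL checks on the pointer itself; it assumes capture is already open as in the code above:
IplImage* frame = cvQueryFrame(capture);              // first frame
if (!frame) return -1;
IplImage* prevGray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
IplImage* currGray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
IplImage* diff     = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, prevGray, CV_RGB2GRAY);
while ((frame = cvQueryFrame(capture)) != NULL)       // NULL marks the end of the video
{
    cvCvtColor(frame, currGray, CV_RGB2GRAY);
    cvAbsDiff(prevGray, currGray, diff);
    CvScalar sumDiff = cvSum(diff);
    cout << sumDiff.val[0] << endl;
    cvCopy(currGray, prevGray);                       // current frame becomes the previous one
    cvWaitKey(40);
}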

Find local maxima in grayscale image using OpenCV

Does anybody know how to find the local maxima in a grayscale IPL_DEPTH_8U image using OpenCV? HarrisCorner mentions something like that but I'm actually not interested in corners ...
Thanks!
A pixel is considered a local maximum if it is equal to the maximum value in a 'local' neighborhood. The function below captures this property in two lines of code.
To deal with pixels on 'plateaus' (value equal to their neighborhood) one can use the local minimum property, since plateau pixels are equal to their local minimum. The rest of the code filters out those pixels.
void non_maxima_suppression(const cv::Mat& image, cv::Mat& mask, bool remove_plateaus) {
    // find pixels that are equal to the local neighborhood maximum (including 'plateaus')
    cv::dilate(image, mask, cv::Mat());
    cv::compare(image, mask, mask, cv::CMP_GE);
    // optionally filter out pixels that are equal to the local minimum ('plateaus')
    if (remove_plateaus) {
        cv::Mat non_plateau_mask;
        cv::erode(image, non_plateau_mask, cv::Mat());
        cv::compare(image, non_plateau_mask, non_plateau_mask, cv::CMP_GT);
        cv::bitwise_and(mask, non_plateau_mask, mask);
    }
}
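A possible usage sketch (my own example, assuming an 8-bit single-channel input; cv::compare writes 255 where the condition holds, so mask ends up as a 255/0 map of the local maxima):
cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat peaks;
non_maxima_suppression(gray, peaks, true);   // peaks(y, x) == 255 at local maxima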
Here's a simple trick. The idea is to dilate with a kernel that contains a hole in the center. After the dilate operation, each pixel is replaced with the maximum of its neighbors (using a 5 by 5 neighborhood in this example), excluding the original pixel.
Mat1b kernelLM(Size(5, 5), 1u);
kernelLM.at<uchar>(2, 2) = 0u;
Mat imageLM;
dilate(image, imageLM, kernelLM);
Mat1b localMaxima = (image > imageLM);
Actually, after I posted the code above I wrote a better and much faster version.
The code above struggles even with a 640x480 picture.
I optimized it, and now it is very fast even for a 1600x1200 picture.
Here is the code:
void localMaxima(cv::Mat src,cv::Mat &dst,int squareSize)
{
if (squareSize==0)
{
dst = src.clone();
return;
}
Mat m0;
dst = src.clone();
Point maxLoc(0,0);
//1.Be sure to have at least 3x3 for at least looking at 1 pixel close neighbours
// Also the window must be <odd>x<odd>
SANITYCHECK(squareSize,3,1);
int sqrCenter = (squareSize-1)/2;
//2.Create the localWindow mask to get things done faster
// When we find a local maxima we will multiply the subwindow with this MASK
// So that we will not search for those 0 values again and again
Mat localWindowMask = Mat::zeros(Size(squareSize,squareSize),CV_8U);//boolean
localWindowMask.at<unsigned char>(sqrCenter,sqrCenter)=1;
//3.Find the threshold value to threshold the image
//this function here returns the peak of histogram of picture
//the picture is a thresholded picture it will have a lot of zero values in it
//so that the second boolean variable says :
// (boolean) ? "return peak even if it is at 0" : "return peak discarding 0"
int thrshld = maxUsedValInHistogramData(dst,false);
threshold(dst,m0,thrshld,1,THRESH_BINARY);
//4.Now delete all thresholded values from picture
dst = dst.mul(m0);
//put the src in the middle of the big array
for (int row=sqrCenter;row<dst.size().height-sqrCenter;row++)
for (int col=sqrCenter;col<dst.size().width-sqrCenter;col++)
{
//1.if the value is zero it can not be a local maxima
if (dst.at<unsigned char>(row,col)==0)
continue;
//2.the value at (row,col) is not 0 so it can be a local maxima point
m0 = dst.colRange(col-sqrCenter,col+sqrCenter+1).rowRange(row-sqrCenter,row+sqrCenter+1);
minMaxLoc(m0,NULL,NULL,NULL,&maxLoc);
//if the maximum location of this subWindow is at center
//it means we found the local maxima
//so we should delete the surrounding values which lies in the subWindow area
//hence we will not try to find if a point is at localMaxima when already found a neighbour was
if ((maxLoc.x==sqrCenter)&&(maxLoc.y==sqrCenter))
{
m0 = m0.mul(localWindowMask);
//we can skip the values that we already made 0 by the above function
col+=sqrCenter;
}
}
}
The following listing is a function similar to Matlab's "imregionalmax". It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the (Euclidean) distance between the points (r,c) and (row,col).
The input is a one-channel CV_32F matrix, and locations is an nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
Mat scratch = input.clone();
int nFoundLocMax = 0;
for (int i = 0; i < nLocMax; i++) {
Point location;
double maxVal;
minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
if (maxVal > threshold) {
nFoundLocMax += 1;
int row = location.y;
int col = location.x;
locations.at<int>(i,0) = row;
locations.at<int>(i,1) = col;
int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
for (int r = r0; r <= r1; r++) {
for (int c = c0; c <= c1; c++) {
if (vdist(Point2DMake(r, c),Point2DMake(row, col)) <= minDistBtwLocMax) {
scratch.at<float>(r,c) = 0.0;
}
}
}
} else {
break;
}
}
return nFoundLocMax;
}
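A hypothetical call, just to show the expected shapes (the variable names here are mine, not from the answer):
cv::Mat response;                        // CV_32FC1 score map from some detector
cv::Mat locations(10, 2, CV_32SC1);      // room for up to 10 (row, col) pairs
int found = imregionalmax(response, 10, 0.5f, 20.0f, locations);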
The first question to answer is what "local" means in your case. The answer may well be a square window (say 3x3 or 5x5) or a circular window of a certain radius. You can then scan over the entire image with the window centered at each pixel and pick the highest value in the window.
See this for how to access pixel values in OpenCV.
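As a rough sketch of that idea (my own code, 3x3 window, strict maxima only, 8-bit single-channel input assumed):
#include <opencv2/opencv.hpp>
#include <vector>

// Collect strict 3x3 local maxima of a CV_8UC1 image that are at least minVal.
std::vector<cv::Point> localMaxima3x3(const cv::Mat& gray, uchar minVal)
{
    std::vector<cv::Point> maxima;
    for (int y = 1; y < gray.rows - 1; ++y)
    {
        for (int x = 1; x < gray.cols - 1; ++x)
        {
            uchar c = gray.at<uchar>(y, x);
            if (c < minVal) continue;
            bool isMax = true;
            for (int dy = -1; dy <= 1 && isMax; ++dy)
                for (int dx = -1; dx <= 1 && isMax; ++dx)
                    if ((dy != 0 || dx != 0) && gray.at<uchar>(y + dy, x + dx) >= c)
                        isMax = false;   // a neighbor is as large or larger
            if (isMax) maxima.push_back(cv::Point(x, y));
        }
    }
    return maxima;
}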
This is a very fast method. It stores the found maxima in a vector of Points.
vector <Point> GetLocalMaxima(const cv::Mat Src,int MatchingSize, int Threshold, int GaussKernel )
{
vector <Point> vMaxLoc(0);
if ((MatchingSize % 2 == 0) || (GaussKernel % 2 == 0)) // MatchingSize and GaussKernel have to be "odd" and > 0
{
return vMaxLoc;
}
vMaxLoc.reserve(100); // Reserve place for fast access
Mat ProcessImg = Src.clone();
int W = Src.cols;
int H = Src.rows;
int SearchWidth = W - MatchingSize;
int SearchHeight = H - MatchingSize;
int MatchingSquareCenter = MatchingSize/2;
if(GaussKernel > 1) // If You need a smoothing
{
GaussianBlur(ProcessImg,ProcessImg,Size(GaussKernel,GaussKernel),0,0,4);
}
uchar* pProcess = (uchar *) ProcessImg.data; // The pointer to image Data
int Shift = MatchingSquareCenter * ( W + 1);
int k = 0;
for(int y=0; y < SearchHeight; ++y)
{
int m = k + Shift;
for(int x=0;x < SearchWidth ; ++x)
{
if (pProcess[m++] >= Threshold)
{
Point LocMax;
Mat mROI(ProcessImg, Rect(x,y,MatchingSize,MatchingSize));
minMaxLoc(mROI,NULL,NULL,NULL,&LocMax);
if (LocMax.x == MatchingSquareCenter && LocMax.y == MatchingSquareCenter)
{
vMaxLoc.push_back(Point( x+LocMax.x,y + LocMax.y ));
// imshow("W1",mROI);cvWaitKey(0); //For debug
}
}
}
k += W;
}
return vMaxLoc;
}
I found a simple solution.
In this example we find the two best results of a matchTemplate call that are at least a minimum distance apart.
cv::Mat result;
matchTemplate(search, target, result, CV_TM_SQDIFF_NORMED);
float score1;
cv::Point displacement1 = MinMax(result, score1);
cv::circle(result, cv::Point(displacement1.x+result.cols/2 , displacement1.y+result.rows/2), 10, cv::Scalar(0), CV_FILLED, 8, 0);
float score2;
cv::Point displacement2 = MinMax(result, score2);
where
cv::Point MinMax(cv::Mat &result, float &score)
{
    double minVal, maxVal;
    cv::Point minLoc, maxLoc, matchLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
    score = (float)minVal;
    matchLoc.x = minLoc.x - result.cols/2;
    matchLoc.y = minLoc.y - result.rows/2;
    return matchLoc;
}
The process is:
Find the global minimum using minMaxLoc.
Draw a filled circle around that minimum (to mask it out), using the minimum distance between minima as the radius.
Find another minimum.
The scores can then be compared to each other to determine, for example, the certainty of the match.
To find more than just the global minimum and maximum, try using the peak_local_max function from skimage:
http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.peak_local_max
You can parameterize the minimum distance between peaks, too, and more. To find minima, use negated values (take care of the array type though; 255-image could do the trick).
You can go over each pixel and test whether it is a local maximum. Here is how I would do it.
The input is assumed to be of type CV_32FC1.
#include <vector>    //std::vector
#include <algorithm> //std::sort
#include <cfloat>    //FLT_MAX
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
//structure for maximal values including position
struct SRegionalMaxPoint
{
SRegionalMaxPoint():
values(-FLT_MAX),
row(-1),
col(-1)
{}
float values;
int row;
int col;
//ascending order
bool operator()(const SRegionalMaxPoint& a, const SRegionalMaxPoint& b)
{
return a.values < b.values;
}
};
//checks if pixel is local max
bool isRegionalMax(const float* im_ptr, const int& cols )
{
float center = *im_ptr;
bool is_regional_max = true;
im_ptr -= (cols + 1);
for (int ii = 0; ii < 3; ++ii, im_ptr+= (cols-3))
{
for (int jj = 0; jj < 3; ++jj, im_ptr++)
{
if (ii != 1 || jj != 1)
{
is_regional_max &= (center > *im_ptr);
}
}
}
return is_regional_max;
}
void imregionalmax(
const cv::Mat& input,
std::vector<SRegionalMaxPoint>& buffer)
{
//find local max - top maxima
static const int margin = 1;
const int rows = input.rows;
const int cols = input.cols;
for (int i = margin; i < rows - margin; ++i)
{
const float* im_ptr = input.ptr<float>(i, margin);
for (int j = margin; j < cols - margin; ++j, im_ptr++)
{
//Check if pixel is local maximum
if ( isRegionalMax(im_ptr, cols ) )
{
cv::Rect roi = cv::Rect(j - margin, i - margin, 3, 3);
cv::Mat subMat = input(roi);
float val = *im_ptr;
//replace smallest value in buffer
if ( val > buffer[0].values )
{
buffer[0].values = val;
buffer[0].row = i;
buffer[0].col = j;
std::sort(buffer.begin(), buffer.end(), SRegionalMaxPoint());
}
}
}
}
}
For testing the code you can try this:
cv::Mat temp = cv::Mat::zeros(15, 15, CV_32FC1);
temp.at<float>(7, 7) = 1;
temp.at<float>(3, 5) = 6;
temp.at<float>(8, 10) = 4;
temp.at<float>(11, 13) = 7;
temp.at<float>(10, 3) = 8;
temp.at<float>(7, 13) = 3;
vector<SRegionalMaxPoint> buffer_(5);
imregionalmax(temp, buffer_);
cv::Mat debug;
cv::cvtColor(temp, debug, cv::COLOR_GRAY2BGR);
for (auto it = buffer_.begin(); it != buffer_.end(); ++it)
{
circle(debug, cv::Point(it->col, it->row), 1, cv::Scalar(0, 255, 0));
}
This solution does not take plateaus into account, so it is not exactly the same as Matlab's imregionalmax().
I think you want to use the MinMaxLoc(arr, mask=NULL) -> (minVal, maxVal, minLoc, maxLoc) function on your image. It finds the global minimum and maximum in an array or subarray.
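A minimal usage sketch with the C++ API (note this only returns the single global extrema, not every local maximum):
double minVal, maxVal;
cv::Point minLoc, maxLoc;
cv::minMaxLoc(image, &minVal, &maxVal, &minLoc, &maxLoc);   // 'image' must be single-channel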
