I'm stuck at the solution of a problem.
Problem =>
You are given a square grid with some cells open (.) and some blocked (X). Your playing piece can move along any row or column until it reaches the edge of the grid or a blocked cell. Given a grid, a start and a goal, determine the minimum number of moves to get to the goal.
Example =>
...
.X.
...
The starting position is (0,0), so we start in the top left corner. The goal is (1,2). The path is (0,0)->(0,2)->(1,2). It takes 2 moves to reach the goal.
Output = 2
Solution=>
BFS using Queue.
But how can BFS find the minimum path? For example, if more than one path exists between the starting and ending points, how does BFS find the minimum one?
Here is my solution for the above problem. But it doesn't work.
class Pair{
int x,y;
Pair(int a,int b){x=a;y=b;}
}
class Result {
public static int minimumMoves(List<String> grid, int startX, int startY, int goalX, int goalY)
{
int n=grid.get(0).length();
ArrayDeque<Pair> q=new ArrayDeque<Pair>();
Pair location[][]=new Pair[n][n];
char color[][]=new char[n][n];
//default color 'a' means it is neither in the queue nor explored
//yet, 'b' means it is in the queue, 'c' means it is already explored
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
color[i][j]='a';
}
}
q.addLast(new Pair(startX,startY));
int tempx,tempy,tempi,tempj;
while(!q.isEmpty()){
tempx=q.peekFirst().x;
tempy=q.peekFirst().y;
q.removeFirst();
color[tempx][tempy]='c';
tempj=tempy-1;
tempi=tempx;
//checking unvisited node around -X axis
while(tempj>=0){
if(color[tempi][tempj]!='a' || grid.get(tempi).charAt(tempj)!='.'){
break;
}
q.addLast(new Pair(tempi,tempj));
color[tempi][tempj]='b';
location[tempi][tempj]=new Pair(tempx,tempy);
tempj--;
}
//checking unvisited node around +X axis
tempi=tempx;
tempj=tempy+1;
while(tempj<n){
if(color[tempi][tempj]!='a' || grid.get(tempi).charAt(tempj)!='.'){
break;
}
q.addLast(new Pair(tempi,tempj));
color[tempi][tempj]='b';
location[tempi][tempj]=new Pair(tempx,tempy);
tempj++;
}
//checking unvisited node around +Y axis
tempi=tempx-1;
tempj=tempy;
while(tempi>=0){
if(color[tempi][tempj]!='a' || grid.get(tempi).charAt(tempj)!='.'){
break;
}
q.addLast(new Pair(tempi,tempj));
color[tempi][tempj]='b';
location[tempi][tempj]=new Pair(tempx,tempy);
tempi--;
}
//checking unvisited node around -Y axis
tempi=tempx+1;
tempj=tempy;
while(tempi<n){
if(color[tempi][tempj]!='a' || grid.get(tempi).charAt(tempj)!='.'){
break;
}
q.addLast(new Pair(tempi,tempj));
color[tempi][tempj]='b';
location[tempi][tempj]=new Pair(tempx,tempy);
tempi++;
}
}//end of main while
//for track the path
Stack<Pair> stack=new Stack<Pair>();
//If path doesn't exist
if(location[goalX][goalY]==null){
return -1;
}
boolean move=true;
stack.push(new Pair(goalX,goalY));
while(move){
tempi=stack.peek().x;
tempj=stack.peek().y;
stack.push(location[tempi][tempj]);
if(tempi==startX && tempj==startY){
move=false;
}
}
System.out.println(stack);
return stack.size()-2;
}
}
Here my algorithm only finds a path, not the minimum path. Can anyone suggest how BFS finds the minimum path here and what I should change in my code?
BFS finds the minimal path by moving outward in concentric rounds: everything reached in round 1 is 1 move away from the start, all squares added from those are then 2 moves away from the start, and so on. This means the basic idea of using BFS to find the path is good; unfortunately the implementation is a bit difficult and slow.
Another way of viewing it is to think about the grid as a graph, with all squares connected to all other squares up, down, left and right until they hit the edge or an obstacle.
A third way of thinking of it is as a flood fill: in the first round only the start is filled, in the next round everything that can be reached from it is filled, and so on.
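To make the flood-fill view concrete, here is a minimal sketch of a generation-counting BFS (in C++ rather than the poster's Java, with names of my own choosing); each BFS layer corresponds to one extra move, so the first time the goal is dequeued its layer number is the answer:
#include <bits/stdc++.h>
using namespace std;
// Sketch only: level-counting BFS for this grid problem.
// Returns the minimum number of moves from (sr,sc) to (gr,gc), or -1 if unreachable.
int minimumMoves(const vector<string>& grid, int sr, int sc, int gr, int gc) {
    int n = grid.size();
    vector<vector<int>> dist(n, vector<int>(n, -1));   // -1 = not discovered yet
    queue<pair<int,int>> q;
    dist[sr][sc] = 0;
    q.push({sr, sc});
    int dr[] = {-1, 1, 0, 0}, dc[] = {0, 0, -1, 1};
    while (!q.empty()) {
        auto [r, c] = q.front();
        q.pop();
        if (r == gr && c == gc) return dist[r][c];     // first time reached = fewest moves
        for (int k = 0; k < 4; ++k) {
            int nr = r + dr[k], nc = c + dc[k];
            // slide along the row/column; keep sliding past already-discovered cells
            while (nr >= 0 && nr < n && nc >= 0 && nc < n && grid[nr][nc] == '.') {
                if (dist[nr][nc] == -1) {
                    dist[nr][nc] = dist[r][c] + 1;
                    q.push({nr, nc});
                }
                nr += dr[k];
                nc += dc[k];
            }
        }
    }
    return -1;
}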
The major problem is that you break when you see a 'b'.
aabbaaaaaa
aabbbaaaaa
babbbaaaaa
babbbaaaaa
babbbaaaaa
babbbaaaaa
bbbbbaaaaa
bbbbbaaaaa
bCbbbAAAAA
cccccaaaaa
When processing the capital C it stops because it is surrounded by b's and c's, and therefore you never examine the A's.
I have hacked the code a bit; note I'm not a Java programmer ... my main problem when trying to solve it was timeouts. I believe this can be solved without the location array by recording how many generations of BFS we run; that should save a lot of memory and time.
class Pair{
int x,y;
Pair(int a,int b){x=a;y=b;}
public String toString() {
return "[" + x + "," + y + "]";
}
}
class Result {
/*
* Complete the 'minimumMoves' function below.
*
* The function is expected to return an INTEGER.
* The function accepts following parameters:
* 1. STRING_ARRAY grid
* 2. INTEGER startX
* 3. INTEGER startY
* 4. INTEGER goalX
* 5. INTEGER goalY
*/
public static int minimumMoves(List<String> grid, int startX, int startY, int goalX, int goalY) {
if (startX==goalX&&startY==goalY)
return 0;
startX += 1;
startY += 1;
goalX += 1;
goalY += 1;
int n=grid.get(0).length();
Pair dirs[] = {new Pair(-1,0), new Pair(+1,0), new Pair(0,-1), new Pair(0,+1)};
ArrayDeque<Pair> q=new ArrayDeque<Pair>();
Pair location[][]=new Pair[n+2][n+2];
char color[][]=new char[n+2][n+2];
//default color 'a' means it is neither in the queue nor explored
//yet, 'b' means it is in the queue, 'c' means it is already explored
for(int i=0;i<n+2;i++){
for(int j=0;j<n+2;j++){
if (i == 0 || i == n+1 || j == 0 || j == n+1 || // border
grid.get(i-1).charAt(j-1)!='.')
color[i][j]='x';
else
color[i][j]='a';
}
}
q.addLast(new Pair(startX,startY));
int tempx,tempy,tempi,tempj;
while(!q.isEmpty()){
tempx=q.peekFirst().x;
tempy=q.peekFirst().y;
q.removeFirst();
if(location[goalX][goalY]!=null){
System.out.println("Goal reached");
break;
}
color[tempx][tempy]='c';
for (Pair dir : dirs ) {
tempi=tempx;
tempj=tempy;
while(true){
tempi+=dir.x;
tempj+=dir.y;
if (color[tempi][tempj]=='x') { // includes border
break;
}
if (color[tempi][tempj]>='b') {
continue;
}
q.addLast(new Pair(tempi,tempj));
color[tempi][tempj]='b';
location[tempi][tempj]=new Pair(tempx,tempy);
}
}
// System.out.println(location[goalX][goalY]);
// for(int i = 1; i < n+1; i++) {
// for(int j = 1; j < n+1; j++) {
// System.out.printf("%c", color[i][j]);
// }
// System.out.println();
// }
}//end of main while
//for track the path
Stack<Pair> stack=new Stack<Pair>();
//If path doesn't exist
if(location[goalX][goalY]==null){
System.out.printf("Gaol not reached %d %d", goalX, goalY);
System.out.println();
for(int i = 1; i < n+1; i++) {
for(int j = 1; j < n+1; j++) {
System.out.printf("%s", location[i][j]);
}
System.out.println();
}
return -1;
}
boolean move=true;
int moves = 0;
tempi = goalX;
tempj = goalY;
while(move){
System.out.println(String.valueOf(tempi)+" "+ String.valueOf(tempj));
moves = moves +1;
Pair cur = location[tempi][tempj];
tempi=cur.x;
tempj=cur.y;
if(tempi==startX && tempj==startY){
move=false;
}
}
System.out.println(moves);
return moves;
}
}
I am studying this problem and I recognise it as a variant of the gas station problem. As a result, I used a greedy algorithm to solve it. I would like to ask if anyone can help me point out whether my algorithm is correct or not, thanks.
My algorithm
var x = input.distance, cost = input.cost, c = input.travelDistance, price = [Number.POSITIVE_INFINITY];
var result = [];
var lastFill = 0, tempMinIndex = 0, totalCost = 0;
for(var i=1; i<x.length; i++) {
var d = x[i] - x[lastFill];
if(d > c){ //car can not travel to this shop, has to decide which shop to refill in the previous possible shops
result.push(tempMinIndex);
lastFill = tempMinIndex;
totalCost += price[tempMinIndex];
tempMinIndex = i;
}
//calculate price
price[i] = d/c * cost[i];
if(price[i] <= price[tempMinIndex])
tempMinIndex = i;
}
//add last station to the list and the total cost
if(lastFill != x.length - 1){
result.push(x.length - 1);
totalCost += price[price.length-1];
}
You can try out the algorithm at this link
https://drive.google.com/file/d/0B4sd8MQwTpVnMXdCRU0xZFlVRlk/view?usp=sharing
First, regarding your solution.
There is a bug that ruins it even on the most simple inputs. When you decide that the distance has become too far and you should refill at some point before, you don't update the distance, so the gas station charges you more than it should. The fix is simple:
if(d > c){
//car can not travel to this shop, has to decide which shop to refill
//in the previous possible shops
result.push(tempMinIndex);
lastFill = tempMinIndex;
totalCost += price[tempMinIndex];
tempMinIndex = i;
// Fix: update distance
d = x[i] - x[lastFill];
}
Even with this fix, your algorithm fails on some input data, like this:
0 10 20 30
0 20 30 50
30
It should refill at every gas station to minimize cost, but it simply fills at the last one.
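To see the numbers under the cost model used in the code above (price paid at a shop = cost at that shop × distance travelled since the last fill / capacity): refilling at every station costs 20·10/30 + 30·10/30 + 50·10/30 ≈ 33.3, whereas the greedy solution fills only at the last station and pays 50·30/30 = 50.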
After some research, I came up with a solution. I'll try to explain it as simply as possible to make it language independent.
Idea
For every gas station G we will compute the cheapest way of filling. We'll do that recursively: for each gas station G, find all gas stations i from which we can reach G. For every such i take its cheapest filling and add the cost of the filling at G given the gasoline left. For the start gas station the cost is 0. More formally:
BestCost(G) = min over all stations i with Position(G) - Position(i) <= Capacity of ( BestCost(i) + Cost(i, G) ), where Cost(i, G) = CostOfFilling(G) * (Position(G) - Position(i)) / Capacity. CostOfFilling(x), Capacity and Position(x) can be retrieved from the input data.
So, the answer for the problem is simply BestCost(LastGasStation)
Code
Now, the solution in JavaScript to make things clearer.
function calculate(input)
{
// Array for keeping calculated values of cheapest filling at each station
best = [];
var x = input.distance;
var cost = input.cost;
var capacity = input.travelDistance;
// Array initialization
best.push(0);
for (var i = 0; i < x.length - 1; i++)
{
best.push(-1);
}
var answer = findBest(x, cost, capacity, x.length - 1);
return answer;
}
// Implementation of BestCost function
var findBest = function(distances, costs, capacity, distanceIndex)
{
// Return value if it's already have been calculated
if (best[distanceIndex] != -1)
{
return best[distanceIndex];
}
// Find cheapest way to fill by iterating on every available gas station
var minDistanceIndex = findMinDistance(capacity, distances, distanceIndex);
var answer = findBest(distances, costs, capacity, minDistanceIndex) +
calculateCost(distances, costs, capacity, minDistanceIndex, distanceIndex);
for (var i = minDistanceIndex + 1; i < distanceIndex; i++)
{
var newAnswer = findBest(distances, costs, capacity, i) +
calculateCost(distances, costs, capacity, i, distanceIndex);
if (newAnswer < answer)
{
answer = newAnswer;
}
}
// Save best result
best[distanceIndex] = answer;
return answer;
}
// Implementation of MinGasStation function
function findMinDistance(capacity, distances, distanceIndex)
{
for (var i = 0; i < distances.length; i++)
{
if (distances[distanceIndex] - distances[i] <= capacity)
{
return i;
}
}
}
// Implementation of Cost function
function calculateCost(distances, costs, capacity, a, b)
{
var distance = distances[b] - distances[a];
return costs[b] * (distance / capacity);
}
A full working HTML page with the code is available here.
A classic algorithm question, in its 2D version, is typically described as:
Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
For example, Given the input
[0,1,0,2,1,0,1,3,2,1,2,1]
the return value would be
6
The algorithm that I used to solve the above 2D problem is
int trapWaterVolume2D(vector<int> A) {
int n = A.size();
vector<int> leftmost(n, 0), rightmost(n, 0);
//left exclusive scan, O(n), the highest bar to the left of each point
int leftMaxSoFar = 0;
for (int i = 0; i < n; i++){
leftmost[i] = leftMaxSoFar;
if (A[i] > leftMaxSoFar) leftMaxSoFar = A[i];
}
//right exclusive scan, O(n), the highest bar to the right of each point
int rightMaxSoFar = 0;
for (int i = n - 1; i >= 0; i--){
rightmost[i] = rightMaxSoFar;
if (A[i] > rightMaxSoFar) rightMaxSoFar = A[i];
}
// Summation, O(n)
int vol = 0;
for (int i = 0; i < n; i++){
vol += max(0, min(leftmost[i], rightmost[i]) - A[i]);
}
return vol;
}
My question is how to extend the above algorithm to the 3D version of the problem, to compute the maximum volume of water trapped in real-world 3D terrain, i.e. to implement
int trapWaterVolume3D(vector<vector<int> > A);
Sample graph:
We know the elevation at each (x, y) point and the goal is to compute the maximum volume of water that can be trapped in the shape. Any thoughts and references are welcome.
For each point on the terrain consider all paths from that point to the border of the terrain. The level of water would be the minimum of the maximum heights of the points of those paths. To find it we need to perform a slightly modified Dijkstra's algorithm, filling the water level matrix starting from the border.
For every point on the border set the water level to the point height
For every point not on the border set the water level to infinity
Put every point on the border into the set of active points
While the set of active points is not empty:
Select the active point P with minimum level
Remove P from the set of active points
For every point Q adjacent to P:
Level(Q) = max(Height(Q), min(Level(Q), Level(P)))
If Level(Q) was changed:
Add Q to the set of active points
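A minimal sketch of that procedure (in C++, with names of my own choosing; the terrain is a rectangular height matrix and the function returns the total trapped volume rather than just the level matrix):
#include <bits/stdc++.h>
using namespace std;
// Sketch of the border-inward flooding described above (my own names).
// level[r][c] ends up as the water level at each cell; the trapped volume is
// the sum of (level - height) over all cells.
long long trappedVolume(const vector<vector<int>>& h) {
    int R = h.size(), C = h[0].size();
    vector<vector<int>> level(R, vector<int>(C, INT_MAX));
    // min-heap of (level, row, col): always process the active point with minimum level
    priority_queue<tuple<int,int,int>, vector<tuple<int,int,int>>, greater<>> pq;
    for (int r = 0; r < R; ++r)
        for (int c = 0; c < C; ++c)
            if (r == 0 || r == R - 1 || c == 0 || c == C - 1) {
                level[r][c] = h[r][c];                 // border: water level = height
                pq.push({level[r][c], r, c});
            }
    int dr[] = {-1, 1, 0, 0}, dc[] = {0, 0, -1, 1};
    while (!pq.empty()) {
        auto [lv, r, c] = pq.top();
        pq.pop();
        if (lv > level[r][c]) continue;                // stale entry, already improved
        for (int k = 0; k < 4; ++k) {
            int nr = r + dr[k], nc = c + dc[k];
            if (nr < 0 || nr >= R || nc < 0 || nc >= C) continue;
            int cand = max(h[nr][nc], min(level[nr][nc], lv));
            if (cand < level[nr][nc]) {                // Level(Q) was changed: re-activate Q
                level[nr][nc] = cand;
                pq.push({cand, nr, nc});
            }
        }
    }
    long long vol = 0;
    for (int r = 0; r < R; ++r)
        for (int c = 0; c < C; ++c)
            vol += level[r][c] - h[r][c];
    return vol;
}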
user3290797's "slightly modified Dijkstra algorithm" is closer to Prim's algorithm than Dijkstra's. In minimum spanning tree terms, we prepare a graph with one vertex per tile, one vertex for the outside, and edges with weights equal to the maximum height of their two adjoining tiles (the outside has height "minus infinity").
Given a path in this graph to the outside vertex, the maximum weight of an edge in the path is the height that the water has to reach in order to escape along that path. The relevant property of a minimum spanning tree is that, for every pair of vertices, the maximum weight of an edge in the path in the spanning tree is the minimum possible among all paths between those vertices. The minimum spanning tree thus describes the most economical escape paths for water, and the water heights can be extracted in linear time with one traversal.
As a bonus, since the graph is planar, there's a linear-time algorithm for computing the minimum spanning tree, consisting of alternating Boruvka passes and simplifications. This improves on the O(n log n) running time of Prim.
This problem can be solved using the Priority-Flood algorithm. It's been discovered and published a number of times over the past few decades (and again by other people answering this question), though the specific variant you're looking for is not, to my knowledge, in the literature.
You can find a review paper of the algorithm and its variants here. Since that paper was published an even faster variant has been discovered (link), as well as methods to perform this calculation on datasets of trillions of cells (link). A method for selectively breaching low/narrow divides is discussed here. Contact me if you'd like copies of any of these papers.
I have a repository here with many of the above variants; additional implementations can be found here.
A simple script to calculate volume using the RichDEM library is as follows:
#include "richdem/common/version.hpp"
#include "richdem/common/router.hpp"
#include "richdem/depressions/Lindsay2016.hpp"
#include "richdem/common/Array2D.hpp"
/**
@brief Calculates the volume of depressions in a DEM
@author Richard Barnes (rbarnes@umn.edu)
Priority-Flood starts on the edges of the DEM and then works its way inwards
using a priority queue to determine the lowest cell which has a path to the
edge. The neighbours of this cell are added to the priority queue if they
are higher. If they are lower, then they are members of a depression and the
elevation of the flooding minus the elevation of the DEM times the cell area
is the flooded volume of the cell. The cell is flooded, total volume
tracked, and the neighbors are then added to a "depressions" queue which is
used to flood depressions. Cells which are higher than a depression being
filled are added to the priority queue. In this way, depressions are filled
without incurring the expense of the priority queue.
@param[in,out] &elevations A grid of cell elevations
@pre
1. **elevations** contains the elevations of every cell or a value _NoData_
for cells not part of the DEM. Note that the _NoData_ value is assumed to
be a negative number less than any actual data value.
@return
Returns the total volume of the flooded depressions.
@correctness
The correctness of this command is determined by inspection. (TODO)
*/
template <class elev_t>
double improved_priority_flood_volume(const Array2D<elev_t> &elevations){
GridCellZ_pq<elev_t> open;
std::queue<GridCellZ<elev_t> > pit;
uint64_t processed_cells = 0;
uint64_t pitc = 0;
ProgressBar progress;
std::cerr<<"\nPriority-Flood (Improved) Volume"<<std::endl;
std::cerr<<"\nC Barnes, R., Lehman, C., Mulla, D., 2014. Priority-flood: An optimal depression-filling and watershed-labeling algorithm for digital elevation models. Computers & Geosciences 62, 117–127. doi:10.1016/j.cageo.2013.04.024"<<std::endl;
std::cerr<<"p Setting up boolean flood array matrix..."<<std::endl;
//Used to keep track of which cells have already been considered
Array2D<int8_t> closed(elevations.width(),elevations.height(),false);
std::cerr<<"The priority queue will require approximately "
<<(elevations.width()*2+elevations.height()*2)*((long)sizeof(GridCellZ<elev_t>))/1024/1024
<<"MB of RAM."
<<std::endl;
std::cerr<<"p Adding cells to the priority queue..."<<std::endl;
//Add all cells on the edge of the DEM to the priority queue
for(int x=0;x<elevations.width();x++){
open.emplace(x,0,elevations(x,0) );
open.emplace(x,elevations.height()-1,elevations(x,elevations.height()-1) );
closed(x,0)=true;
closed(x,elevations.height()-1)=true;
}
for(int y=1;y<elevations.height()-1;y++){
open.emplace(0,y,elevations(0,y) );
open.emplace(elevations.width()-1,y,elevations(elevations.width()-1,y) );
closed(0,y)=true;
closed(elevations.width()-1,y)=true;
}
double volume = 0;
std::cerr<<"p Performing the improved Priority-Flood..."<<std::endl;
progress.start( elevations.size() );
while(open.size()>0 || pit.size()>0){
GridCellZ<elev_t> c;
if(pit.size()>0){
c=pit.front();
pit.pop();
} else {
c=open.top();
open.pop();
}
processed_cells++;
for(int n=1;n<=8;n++){
int nx=c.x+dx[n];
int ny=c.y+dy[n];
if(!elevations.inGrid(nx,ny)) continue;
if(closed(nx,ny))
continue;
closed(nx,ny)=true;
if(elevations(nx,ny)<=c.z){
if(elevations(nx,ny)<c.z){
++pitc;
volume += (c.z-elevations(nx,ny))*std::abs(elevations.getCellArea());
}
pit.emplace(nx,ny,c.z);
} else
open.emplace(nx,ny,elevations(nx,ny));
}
progress.update(processed_cells);
}
std::cerr<<"t Succeeded in "<<std::fixed<<std::setprecision(1)<<progress.stop()<<" s"<<std::endl;
std::cerr<<"m Cells processed = "<<processed_cells<<std::endl;
std::cerr<<"m Cells in pits = " <<pitc <<std::endl;
return volume;
}
template<class T>
int PerformAlgorithm(std::string analysis, Array2D<T> elevations){
elevations.loadData();
std::cout<<"Volume: "<<improved_priority_flood_volume(elevations)<<std::endl;
return 0;
}
int main(int argc, char **argv){
std::string analysis = PrintRichdemHeader(argc,argv);
if(argc!=2){
std::cerr<<argv[0]<<" <Input>"<<std::endl;
return -1;
}
return PerformAlgorithm(argv[1],analysis);
}
It should be straightforward to adapt this to whatever 2D array format you are using.
In pseudocode, the following is equivalent to the foregoing:
Let PQ be a priority-queue which always pops the cell of lowest elevation
Let Closed be a boolean array initially set to False
Let Volume = 0
Add all the border cells to PQ.
For each border cell, set the cell's entry in Closed to True.
While PQ is not empty:
Select the top cell from PQ, call it C.
Pop the top cell from PQ.
For each neighbor N of C:
If Closed(N):
Continue
If Elevation(N)<Elevation(C):
Volume += (Elevation(C)-Elevation(N))*Area
Add N to PQ, but with Elevation(C)
Else:
Add N to PQ with Elevation(N)
Set Closed(N)=True
This problem is very close to the construction of the morphological watershed of a grayscale image.
One approach is as follows (flooding process):
sort all pixels by increasing elevation.
work incrementally, by increasing elevations, assigning labels to the pixels per catchment basin.
for a new elevation level, you need to label a new set of pixels:
Some have no labeled neighbor; they form a local minimum configuration and begin a new catchment basin.
Some have only neighbors with the same label, they can be labeled similarly (they extend a catchment basin).
Some have neighbors with different labels. They do not belong to a specific catchment basin and they define the watershed lines.
You will need to enhance the standard watershed algorithm to be able to compute the volume of water. You can do that by determining the maximum water level in each basin and deduce the ground height on every pixel. The water level in a basin is given by the elevation of the lowest watershed pixel around it.
You can act every time you discover a watershed pixel: if a neighboring basin has not been assigned a level yet, that basin can stand the current level without leaking.
In order to solve the trapping water problem in 3D, i.e., to calculate the maximum volume of trapped rain water, you can do something like this:
#include<bits/stdc++.h>
using namespace std;
#define MAX 10
int new2d[MAX][MAX];
int dp[MAX][MAX],visited[MAX][MAX];
int dx[] = {1,0,-1,0};
int dy[] = {0,-1,0,1};
int boundedBy(int i,int j,int k,int in11,int in22)
{
if(i<0 || j<0 || i>=in11 || j>=in22)
return 0;
if(new2d[i][j]>k)
return new2d[i][j];
if(visited[i][j]) return INT_MAX;
visited[i][j] = 1;
int r = INT_MAX;
for(int dir = 0 ; dir<4 ; dir++)
{
int nx = i + dx[dir];
int ny = j + dy[dir];
r = min(r,boundedBy(nx,ny,k,in11,in22));
}
return r;
}
void mark(int i,int j,int k,int in1,int in2)
{
if(i<0 || j<0 || i>=in1 || j>=in2)
return;
if(new2d[i][j]>=k)
return;
if(visited[i][j]) return ;
visited[i][j] = 1;
for(int dir = 0;dir<4;dir++)
{
int nx = i + dx[dir];
int ny = j + dy[dir];
mark(nx,ny,k,in1,in2);
}
dp[i][j] = max(dp[i][j],k);
}
struct node
{
int i,j,key;
node(int x,int y,int k)
{
i = x;
j = y;
key = k;
}
};
bool compare(node a,node b)
{
return a.key>b.key;
}
vector<node> store;
int getData(int input1, int input2, int input3[])
{
int row=input1;
int col=input2;
int temp=0;
int count=0;
for(int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
if(count==(col*row))
break;
new2d[i][j]=input3[count];
count++;
}
}
store.clear();
for(int i = 0;i<input1;i++)
{
for(int j = 0;j<input2;j++)
{
store.push_back(node(i,j,new2d[i][j]));
}
}
memset(dp,0,sizeof(dp));
sort(store.begin(),store.end(),compare);
for(int i = 0;i<store.size();i++)
{
memset(visited,0,sizeof(visited));
int aux = boundedBy(store[i].i,store[i].j,store[i].key,input1,input2);
if(aux>store[i].key)
{
memset(visited,0,sizeof(visited));
mark(store[i].i,store[i].j,aux,input1,input2);
}
}
long long result =0 ;
for(int i = 0;i<input1;i++)
{
for(int j = 0;j<input2;j++)
{
result = result + max(0,dp[i][j]-new2d[i][j]);
}
}
return result;
}
int main()
{
cin.sync_with_stdio(false);
cout.sync_with_stdio(false);
int n,m;
cin>>n>>m;
int inp3[n*m];
store.clear();
for(int j = 0;j<n*m;j++)
{
cin>>inp3[j];
}
int k = getData(n,m,inp3);
cout<<k;
return 0;
}
class Solution(object):
def trapRainWater(self, heightMap):
"""
:type heightMap: List[List[int]]
:rtype: int
"""
m = len(heightMap)
if m == 0:
return 0
n = len(heightMap[0])
if n == 0:
return 0
visited = [[False for i in range(n)] for j in range(m)]
from Queue import PriorityQueue
q = PriorityQueue()
for i in range(m):
visited[i][0] = True
q.put([heightMap[i][0],i,0])
visited[i][n-1] = True
q.put([heightMap[i][n-1],i,n-1])
for j in range(1, n-1):
visited[0][j] = True
q.put([heightMap[0][j],0,j])
visited[m-1][j] = True
q.put([heightMap[m-1][j],m-1,j])
S = 0
while not q.empty():
cell = q.get()
for (i, j) in [(1,0), (-1,0), (0,1), (0,-1)]:
x = cell[1] + i
y = cell[2] + j
if x in range(m) and y in range(n) and not visited[x][y]:
S += max(0, cell[0] - heightMap[x][y]) # how much water at the cell
q.put([max(heightMap[x][y],cell[0]),x,y])
visited[x][y] = True
return S
Here is the simple code for the same (the 2D version):
#include<iostream>
using namespace std;
int main()
{
int n,count=0,a[100];
cin>>n;
for(int i=0;i<n;i++)
{
cin>>a[i];
}
for(int i=1;i<n-1;i++)
{
///computing the leftmost largest and rightmost largest elements of the array
int leftmax=0;
int rightmax=0;
///left most largest
for(int j=i-1;j>=0;j--)
{
if(a[j]>leftmax)
{
leftmax=a[j];
}
}
///rightmost largest
for(int k=i+1;k<=n-1;k++)
{
if(a[k]>rightmax)
{
rightmax=a[k];
}
}
///computing height of the water contained
int x=(min(rightmax,leftmax)-a[i]);
if(x>0)
{
count=count+x;
}
}
cout<<count;
return 0;
}
I have recently completed the following interview exercise:
'A robot can be programmed to run "a", "b", "c"... "n" kilometers and it takes ta, tb, tc... tn minutes, respectively. Once it runs to programmed kilometers, it must be turned off for "m" minutes.
After "m" minutes it can again be programmed to run for a further "a", "b", "c"... "n" kilometers.
How would you program this robot to go an exact number of kilometers in the minimum amount of time?'
I thought it was a variation of the unbounded knapsack problem, in which the size would be the number of kilometers and the value, the time needed to complete each stretch. The main difference is that we need to minimise, rather than maximise, the value. So I used the equivalent of the following solution: http://en.wikipedia.org/wiki/Knapsack_problem#Unbounded_knapsack_problem
in which I select the minimum.
Finally, because we need an exact solution (if there is one), I iterated over the map constructed by the algorithm for all the different distances, and through each of the robot's programmed distances, to find the exact distance and the minimum time among those.
I think the pause the robot takes between runs is a bit of a red herring and you just need to include it in your calculations, but it does not affect the approach taken.
I am probably wrong, because I failed the test. I don't have any other feedback as to the expected solution.
Edit: maybe I wasn't wrong after all and I failed for different reasons. I just wanted to validate my approach to this problem.
import static com.google.common.collect.Sets.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.log4j.Logger;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
public final class Robot {
static final Logger logger = Logger.getLogger (Robot.class);
private Set<ProgrammedRun> programmedRuns;
private int pause;
private int totalDistance;
private Robot () {
//don't expose default constructor & prevent subclassing
}
private Robot (int[] programmedDistances, int[] timesPerDistance, int pause, int totalDistance) {
this.programmedRuns = newHashSet ();
for (int i = 0; i < programmedDistances.length; i++) {
this.programmedRuns.add (new ProgrammedRun (programmedDistances [i], timesPerDistance [i] ) );
}
this.pause = pause;
this.totalDistance = totalDistance;
}
public static Robot create (int[] programmedDistances, int[] timesPerDistance, int pause, int totalDistance) {
Preconditions.checkArgument (programmedDistances.length == timesPerDistance.length);
Preconditions.checkArgument (pause >= 0);
Preconditions.checkArgument (totalDistance >= 0);
return new Robot (programmedDistances, timesPerDistance, pause, totalDistance);
}
/**
* @return null if no strategy was found. An empty map if distance is zero. A
* map with the programmed runs as keys and number of time they need to be run
* as value.
*
*/
Map<ProgrammedRun, Integer> calculateOptimalStrategy () {
//for efficiency, consider this case first
if (this.totalDistance == 0) {
return Maps.newHashMap ();
}
//list of solutions for different distances. Element "i" of the list is the best set of runs that cover at least "i" kilometers
List <Map<ProgrammedRun, Integer>> runsForDistances = Lists.newArrayList();
//special case i = 0 -> empty map (no runs needed)
runsForDistances.add (new HashMap<ProgrammedRun, Integer> () );
for (int i = 1; i <= totalDistance; i++) {
Map<ProgrammedRun, Integer> map = new HashMap<ProgrammedRun, Integer> ();
int minimumTime = -1;
for (ProgrammedRun pr : programmedRuns) {
int distance = Math.max (0, i - pr.getDistance ());
int time = getTotalTime (runsForDistances.get (distance) ) + pause + pr.getTime();
if (minimumTime < 0 || time < minimumTime) {
minimumTime = time;
//new minimum found
map = new HashMap<ProgrammedRun, Integer> ();
map.putAll(runsForDistances.get (distance) );
//increase count
Integer num = map.get (pr);
if (num == null) num = Integer.valueOf (1);
else num++;
//update map
map.put (pr, num);
}
}
runsForDistances.add (map );
}
//last step: calculate the combination with exact distance
int minimumTime2 = -1;
int bestIndex = -1;
for (int i = 0; i <= totalDistance; i++) {
if (getTotalDistance (runsForDistances.get (i) ) == this.totalDistance ) {
int time = getTotalTime (runsForDistances.get (i) );
if (time > 0) time -= pause;
if (minimumTime2 < 0 || time < minimumTime2 ) {
minimumTime2 = time;
bestIndex = i;
}
}
}
//if solution found
if (bestIndex != -1) {
return runsForDistances.get (bestIndex);
}
//try all combinations, since none of the existing maps run for the exact distance
List <Map<ProgrammedRun, Integer>> exactRuns = Lists.newArrayList();
for (int i = 0; i <= totalDistance; i++) {
int distance = getTotalDistance (runsForDistances.get (i) );
for (ProgrammedRun pr : programmedRuns) {
//solution found
if (distance + pr.getDistance() == this.totalDistance ) {
Map<ProgrammedRun, Integer> map = new HashMap<ProgrammedRun, Integer> ();
map.putAll (runsForDistances.get (i));
//increase count
Integer num = map.get (pr);
if (num == null) num = Integer.valueOf (1);
else num++;
//update map
map.put (pr, num);
exactRuns.add (map);
}
}
}
if (exactRuns.isEmpty()) return null;
//finally return the map with the best time
minimumTime2 = -1;
Map<ProgrammedRun, Integer> bestMap = null;
for (Map<ProgrammedRun, Integer> m : exactRuns) {
int time = getTotalTime (m);
if (time > 0) time -= pause; //remove last pause
if (minimumTime2 < 0 || time < minimumTime2 ) {
minimumTime2 = time;
bestMap = m;
}
}
return bestMap;
}
private int getTotalTime (Map<ProgrammedRun, Integer> runs) {
int time = 0;
for (Map.Entry<ProgrammedRun, Integer> runEntry : runs.entrySet()) {
time += runEntry.getValue () * runEntry.getKey().getTime ();
//add pauses
time += this.pause * runEntry.getValue ();
}
return time;
}
private int getTotalDistance (Map<ProgrammedRun, Integer> runs) {
int distance = 0;
for (Map.Entry<ProgrammedRun, Integer> runEntry : runs.entrySet()) {
distance += runEntry.getValue() * runEntry.getKey().getDistance ();
}
return distance;
}
class ProgrammedRun {
private int distance;
private int time;
private transient float speed;
ProgrammedRun (int distance, int time) {
this.distance = distance;
this.time = time;
this.speed = (float) distance / time;
}
@Override public String toString () {
return "(distance =" + distance + "; time=" + time + ")";
}
@Override public boolean equals (Object other) {
return other instanceof ProgrammedRun
&& this.distance == ((ProgrammedRun)other).distance
&& this.time == ((ProgrammedRun)other).time;
}
@Override public int hashCode () {
return Objects.hashCode (Integer.valueOf (this.distance), Integer.valueOf (this.time));
}
int getDistance() {
return distance;
}
int getTime() {
return time;
}
float getSpeed() {
return speed;
}
}
}
public class Main {
/* Input variables for the robot */
private static int [] programmedDistances = {1, 2, 3, 5, 10}; //in kilometers
private static int [] timesPerDistance = {10, 5, 3, 2, 1}; //in minutes
private static int pause = 2; //in minutes
private static int totalDistance = 41; //in kilometers
/**
* @param args
*/
public static void main(String[] args) {
Robot r = Robot.create (programmedDistances, timesPerDistance, pause, totalDistance);
Map<ProgrammedRun, Integer> strategy = r.calculateOptimalStrategy ();
if (strategy == null) {
System.out.println ("No strategy that matches the conditions was found");
} else if (strategy.isEmpty ()) {
System.out.println ("No need to run; distance is zero");
} else {
System.out.println ("Strategy found:");
System.out.println (strategy);
}
}
}
Simplifying slightly, let ti be the time (including downtime) that it takes the robot to run distance di. Assume that t1/d1 ≤ … ≤ tn/dn. If t1/d1 is significantly smaller than t2/d2 and d1 and the total distance D to be run are large, then branch and bound likely outperforms dynamic programming. Branch and bound solves the integer programming formulation
minimize ∑i ti xi
subject to
∑i di xi = D
∀i xi ∈ N
by using the value of the relaxation where xi can be any nonnegative real as a guide. The latter is easily verified to be at most (t1/d1)D, by setting x1 to D/d1 and ∀i ≠ 1 xi = 0, and at least (t1/d1)D, by setting the sole variable of the dual program to t1/d1. Solving the relaxation is the bound step; every integer solution is a fractional solution, so the best integer solution requires time at least (t1/d1)D.
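As a small made-up numerical example of the bound (the numbers are mine, not from the question): with two moves d = (3, 5) km taking t = (2, 4) minutes and D = 11 km, the relaxation gives (t1/d1)D = (2/3)·11 ≈ 7.33 minutes, so no integer solution can take less than 8 minutes; the exact optimum, 3+3+5 = 11 km in 2+2+4 = 8 minutes, meets that bound.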
The branch step takes one integer program and splits it in two whose solutions, taken together, cover the entire solution space of the original. In this case, one piece could have the extra constraint x1 = 0 and the other could have the extra constraint x1 ≥ 1. It might look as though this would create subproblems with side constraints, but in fact, we can just delete the first move, or decrease D by d1 and add the constant t1 to the objective. Another option for branching is to add either the constraint xi = ⌊D/di⌋ or xi ≤ ⌊D/di⌋ - 1, which requires generalizing to upper bounds on the number of repetitions of each move.
The main loop of branch and bound selects one of a collection of subproblems, branches, computes bounds for the two subproblems, and puts them back into the collection. The efficiency over brute force comes from the fact that, when we have a solution with a particular value, every subproblem whose relaxed value is at least that much can be thrown away. Once the collection is emptied this way, we have the optimal solution.
Hybrids of branch and bound and dynamic programming are possible, for example, computing optimal solutions for small D via DP and using those values instead of branching on subproblems that have been solved.
Create an array a of size D+1 (where D is your distance) and for i from 0 to D do:
a[i] = infinity;
a[0] = 0;
a[i] = min{ min{ a[i-j] + t_j + m for all j in the robot's possible kilometers with j ≠ i }, t_i if i itself is one of the possible kilometers } (m is the pause between runs)
a[D] is the lowest possible time. You can also keep an array b to record which j was selected for each a[i]. If a[D] == infinity, it means an exact solution is not possible.
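A minimal sketch of this recurrence (written in C++ with names of my own choosing; d[k] and t[k] are the programmable distances and their times, pause is the m minutes of downtime, and D is the exact distance):
#include <bits/stdc++.h>
using namespace std;
// Sketch of the recurrence above (names are mine): a[i] = minimal time to cover exactly i km.
int minTimeExact(const vector<int>& d, const vector<int>& t, int pause, int D) {
    const long long INF = LLONG_MAX / 4;
    vector<long long> a(D + 1, INF);
    a[0] = 0;
    for (int i = 1; i <= D; ++i)
        for (size_t k = 0; k < d.size(); ++k)
            if (d[k] <= i && a[i - d[k]] != INF) {
                // no pause before the very first run, i.e. when i == d[k]
                long long cand = a[i - d[k]] + t[k] + (i == d[k] ? 0 : pause);
                a[i] = min(a[i], cand);
            }
    return a[D] == INF ? -1 : (int)a[D];   // -1 means the exact distance is unreachable
}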
Edit: we can solve it in another way by creating a digraph. Again the graph depends on the length D of the path: it has nodes labeled {0..D}. Start from node 0 and connect it to all possible nodes; that is, if i is one of the available kilometers, connect node 0 to node v_i with weight t_i (no pause for the first run). For all other nodes, connect node i to node j with weight t_{j-i} + m for j > i whenever j-i is available in the input kilometers. Now find the shortest path from v_0 to v_D. But this algorithm is still O(nD).
Let G be the desired distance run.
Let n be the longest possible distance run without pause.
Let L = G / n (Integer arithmetic, discard fraction part)
Let R = G mod n (ie. The remainder from the above division)
Make the robot run its longest distance (i.e. n) L times, and then whichever distance (a, b, c, etc.) is greater than R by the least amount (i.e. the smallest available distance that is equal to or greater than R).
Either I understood the problem wrong, or you're all overthinking it.
I am a big believer in showing instead of telling. Here is a program that may be doing what you are looking for. Let me know if it satisfies your question. Simply copy, paste, and run the program. You should of course test with your own data set.
import java.util.Arrays;
public class Speed {
/***
*
* @param distance
* @param sprints ={{A,Ta},{B,Tb},{C,Tc}, ..., {N,Tn}}
*/
public static int getFastestTime(int distance, int[][] sprints){
long[] minTime = new long[distance+1];//distance from 0 to distance
Arrays.fill(minTime,Integer.MAX_VALUE);
minTime[0]=0;//key=distance; value=time
for(int[] speed: sprints)
for(int d=1; d<minTime.length; d++)
if(d>=speed[0] && minTime[d] > minTime[d-speed[0]]+speed[1])
minTime[d]=minTime[d-speed[0]]+speed[1];
return (int)minTime[distance];
}//
public static void main(String... args){
//sprints ={{A,Ta},{B,Tb},{C,Tc}, ..., {N,Tn}}
int[][] sprints={{3,2},{5,3},{7,5}};
int distance = 21;
System.out.println(getFastestTime(distance,sprints));
}
}
Does anybody know how to find the local maxima in a grayscale IPL_DEPTH_8U image using OpenCV? HarrisCorner mentions something like that but I'm actually not interested in corners ...
Thanks!
A pixel is considered a local maximum if it is equal to the maximum value in a 'local' neighborhood. The function below captures this property in two lines of code.
To deal with pixels on 'plateaus' (value equal to their neighborhood), one can use the local minimum property, since plateau pixels are equal to their local minimum. The rest of the code filters out those pixels.
void non_maxima_suppression(const cv::Mat& image, cv::Mat& mask, bool remove_plateaus) {
// find pixels that are equal to the local neighborhood maximum (including 'plateaus')
cv::dilate(image, mask, cv::Mat());
cv::compare(image, mask, mask, cv::CMP_GE);
// optionally filter out pixels that are equal to the local minimum ('plateaus')
if (remove_plateaus) {
cv::Mat non_plateau_mask;
cv::erode(image, non_plateau_mask, cv::Mat());
cv::compare(image, non_plateau_mask, non_plateau_mask, cv::CMP_GT);
cv::bitwise_and(mask, non_plateau_mask, mask);
}
}
Here's a simple trick. The idea is to dilate with a kernel that contains a hole in the center. After the dilate operation, each pixel is replaced with the maximum of its neighbors (using a 5 by 5 neighborhood in this example), excluding the original pixel.
Mat1b kernelLM(Size(5, 5), 1u);
kernelLM.at<uchar>(2, 2) = 0u;
Mat imageLM;
dilate(image, imageLM, kernelLM);
Mat1b localMaxima = (image > imageLM);
Actually, after I posted the code above I wrote a better and much faster one.
The code above struggles even with a 640x480 picture.
I optimized it and now it is very fast even for a 1600x1200 picture.
Here is the code :
void localMaxima(cv::Mat src,cv::Mat &dst,int squareSize)
{
if (squareSize==0)
{
dst = src.clone();
return;
}
Mat m0;
dst = src.clone();
Point maxLoc(0,0);
//1.Be sure to have at least 3x3 for at least looking at 1 pixel close neighbours
// Also the window must be <odd>x<odd>
SANITYCHECK(squareSize,3,1);
int sqrCenter = (squareSize-1)/2;
//2.Create the localWindow mask to get things done faster
// When we find a local maxima we will multiply the subwindow with this MASK
// So that we will not search for those 0 values again and again
Mat localWindowMask = Mat::zeros(Size(squareSize,squareSize),CV_8U);//boolean
localWindowMask.at<unsigned char>(sqrCenter,sqrCenter)=1;
//3.Find the threshold value to threshold the image
//this function here returns the peak of histogram of picture
//the picture is a thresholded picture it will have a lot of zero values in it
//so that the second boolean variable says :
// (boolean) ? "return peak even if it is at 0" : "return peak discarding 0"
int thrshld = maxUsedValInHistogramData(dst,false);
threshold(dst,m0,thrshld,1,THRESH_BINARY);
//4.Now delete all thresholded values from picture
dst = dst.mul(m0);
//put the src in the middle of the big array
for (int row=sqrCenter;row<dst.size().height-sqrCenter;row++)
for (int col=sqrCenter;col<dst.size().width-sqrCenter;col++)
{
//1.if the value is zero it can not be a local maxima
if (dst.at<unsigned char>(row,col)==0)
continue;
//2.the value at (row,col) is not 0 so it can be a local maxima point
m0 = dst.colRange(col-sqrCenter,col+sqrCenter+1).rowRange(row-sqrCenter,row+sqrCenter+1);
minMaxLoc(m0,NULL,NULL,NULL,&maxLoc);
//if the maximum location of this subWindow is at center
//it means we found the local maxima
//so we should delete the surrounding values which lies in the subWindow area
//hence we will not try to find if a point is at localMaxima when already found a neighbour was
if ((maxLoc.x==sqrCenter)&&(maxLoc.y==sqrCenter))
{
m0 = m0.mul(localWindowMask);
//we can skip the values that we already made 0 by the above function
col+=sqrCenter;
}
}
}
The following listing is a function similar to Matlab's "imregionalmax". It looks for at most nLocMax local maxima above threshold, where the found local maxima are at least minDistBtwLocMax pixels apart. It returns the actual number of local maxima found. Notice that it uses OpenCV's minMaxLoc to find global maxima. It is "opencv-self-contained" except for the (easy to implement) function vdist, which computes the (euclidean) distance between points (r,c) and (row,col).
input is one-channel CV_32F matrix, and locations is nLocMax (rows) by 2 (columns) CV_32S matrix.
int imregionalmax(Mat input, int nLocMax, float threshold, float minDistBtwLocMax, Mat locations)
{
Mat scratch = input.clone();
int nFoundLocMax = 0;
for (int i = 0; i < nLocMax; i++) {
Point location;
double maxVal;
minMaxLoc(scratch, NULL, &maxVal, NULL, &location);
if (maxVal > threshold) {
nFoundLocMax += 1;
int row = location.y;
int col = location.x;
locations.at<int>(i,0) = row;
locations.at<int>(i,1) = col;
int r0 = (row-minDistBtwLocMax > -1 ? row-minDistBtwLocMax : 0);
int r1 = (row+minDistBtwLocMax < scratch.rows ? row+minDistBtwLocMax : scratch.rows-1);
int c0 = (col-minDistBtwLocMax > -1 ? col-minDistBtwLocMax : 0);
int c1 = (col+minDistBtwLocMax < scratch.cols ? col+minDistBtwLocMax : scratch.cols-1);
for (int r = r0; r <= r1; r++) {
for (int c = c0; c <= c1; c++) {
if (vdist(Point2DMake(r, c),Point2DMake(row, col)) <= minDistBtwLocMax) {
scratch.at<float>(r,c) = 0.0;
}
}
}
} else {
break;
}
}
return nFoundLocMax;
}
The first question to answer would be what is "local" in your opinion. The answer may well be a square window (say 3x3 or 5x5) or circular window of a certain radius. You can then scan over the entire image with the window centered at each pixel and pick the highest value in the window.
See this for how to access pixel values in OpenCV.
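As a rough sketch of that scan (in C++, assuming a single-channel 8-bit image, a square window of odd size, and strict maxima only; the names are my own):
#include "opencv2/core/core.hpp"
#include <vector>
// Sketch of the window scan described above: a pixel is kept if it is strictly
// greater than every other pixel inside the window centered on it.
std::vector<cv::Point> localMaxima(const cv::Mat& img, int winSize)
{
    std::vector<cv::Point> maxima;
    int r = winSize / 2;
    for (int y = r; y < img.rows - r; ++y) {
        for (int x = r; x < img.cols - r; ++x) {
            uchar center = img.at<uchar>(y, x);
            bool isMax = true;
            // compare the center against every other pixel in the window
            for (int dy = -r; dy <= r && isMax; ++dy)
                for (int dx = -r; dx <= r && isMax; ++dx) {
                    if (dx == 0 && dy == 0) continue;
                    if (img.at<uchar>(y + dy, x + dx) >= center) isMax = false;
                }
            if (isMax) maxima.push_back(cv::Point(x, y));
        }
    }
    return maxima;
}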
This is a very fast method. It stores the found maxima in a vector of Points.
vector <Point> GetLocalMaxima(const cv::Mat Src,int MatchingSize, int Threshold, int GaussKernel )
{
vector <Point> vMaxLoc(0);
if ((MatchingSize % 2 == 0) || (GaussKernel % 2 == 0)) // MatchingSize and GaussKernel have to be "odd" and > 0
{
return vMaxLoc;
}
vMaxLoc.reserve(100); // Reserve place for fast access
Mat ProcessImg = Src.clone();
int W = Src.cols;
int H = Src.rows;
int SearchWidth = W - MatchingSize;
int SearchHeight = H - MatchingSize;
int MatchingSquareCenter = MatchingSize/2;
if(GaussKernel > 1) // If You need a smoothing
{
GaussianBlur(ProcessImg,ProcessImg,Size(GaussKernel,GaussKernel),0,0,4);
}
uchar* pProcess = (uchar *) ProcessImg.data; // The pointer to image Data
int Shift = MatchingSquareCenter * ( W + 1);
int k = 0;
for(int y=0; y < SearchHeight; ++y)
{
int m = k + Shift;
for(int x=0;x < SearchWidth ; ++x)
{
if (pProcess[m++] >= Threshold)
{
Point LocMax;
Mat mROI(ProcessImg, Rect(x,y,MatchingSize,MatchingSize));
minMaxLoc(mROI,NULL,NULL,NULL,&LocMax);
if (LocMax.x == MatchingSquareCenter && LocMax.y == MatchingSquareCenter)
{
vMaxLoc.push_back(Point( x+LocMax.x,y + LocMax.y ));
// imshow("W1",mROI);cvWaitKey(0); //For gebug
}
}
}
k += W;
}
return vMaxLoc;
}
Found a simple solution.
In this example, we try to find 2 results of a matchTemplate function with a minimum distance from each other.
cv::Mat result;
matchTemplate(search, target, result, CV_TM_SQDIFF_NORMED);
float score1;
cv::Point displacement1 = MinMax(result, score1);
cv::circle(result, cv::Point(displacement1.x+result.cols/2 , displacement1.y+result.rows/2), 10, cv::Scalar(0), CV_FILLED, 8, 0);
float score2;
cv::Point displacement2 = MinMax(result, score2);
where
cv::Point MinMax(cv::Mat &result, float &score)
{
double minVal, maxVal;
cv::Point minLoc, maxLoc, matchLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
score = (float)minVal; // report the best (lowest) SQDIFF score
matchLoc.x = minLoc.x - result.cols/2;
matchLoc.y = minLoc.y - result.rows/2;
return matchLoc;
}
The process is:
Find global Minimum using minMaxLoc
Draw a filled white circle around global minimum using min distance between minima as radius
Find another minimum
The scores can then be compared to each other to determine, for example, the certainty of the match.
To find more than just the global minimum and maximum try using this function from skimage:
http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.peak_local_max
You can parameterize the minimum distance between peaks, too. And more. To find minima, use negated values (take care of the array type though, 255-image could do the trick).
You can go over each pixel and test whether it is a local maximum. Here is how I would do it.
The input is assumed to be type CV_32FC1
#include <vector>//std::vector
#include <algorithm>//std::sort
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
//structure for maximal values including position
struct SRegionalMaxPoint
{
SRegionalMaxPoint():
values(-FLT_MAX),
row(-1),
col(-1)
{}
float values;
int row;
int col;
//ascending order
bool operator()(const SRegionalMaxPoint& a, const SRegionalMaxPoint& b)
{
return a.values < b.values;
}
};
//checks if pixel is local max
bool isRegionalMax(const float* im_ptr, const int& cols )
{
float center = *im_ptr;
bool is_regional_max = true;
im_ptr -= (cols + 1);
for (int ii = 0; ii < 3; ++ii, im_ptr+= (cols-3))
{
for (int jj = 0; jj < 3; ++jj, im_ptr++)
{
if (ii != 1 || jj != 1)
{
is_regional_max &= (center > *im_ptr);
}
}
}
return is_regional_max;
}
void imregionalmax(
const cv::Mat& input,
std::vector<SRegionalMaxPoint>& buffer)
{
//find local max - top maxima
static const int margin = 1;
const int rows = input.rows;
const int cols = input.cols;
for (int i = margin; i < rows - margin; ++i)
{
const float* im_ptr = input.ptr<float>(i, margin);
for (int j = margin; j < cols - margin; ++j, im_ptr++)
{
//Check if pixel is local maximum
if ( isRegionalMax(im_ptr, cols ) )
{
cv::Rect roi = cv::Rect(j - margin, i - margin, 3, 3);
cv::Mat subMat = input(roi);
float val = *im_ptr;
//replace smallest value in buffer
if ( val > buffer[0].values )
{
buffer[0].values = val;
buffer[0].row = i;
buffer[0].col = j;
std::sort(buffer.begin(), buffer.end(), SRegionalMaxPoint());
}
}
}
}
}
For testing the code you can try this:
cv::Mat temp = cv::Mat::zeros(15, 15, CV_32FC1);
temp.at<float>(7, 7) = 1;
temp.at<float>(3, 5) = 6;
temp.at<float>(8, 10) = 4;
temp.at<float>(11, 13) = 7;
temp.at<float>(10, 3) = 8;
temp.at<float>(7, 13) = 3;
vector<SRegionalMaxPoint> buffer_(5);
imregionalmax(temp, buffer_);
cv::Mat debug;
cv::cvtColor(temp, debug, cv::COLOR_GRAY2BGR);
for (auto it = buffer_.begin(); it != buffer_.end(); ++it)
{
circle(debug, cv::Point(it->col, it->row), 1, cv::Scalar(0, 255, 0));
}
This solution does not take plateaus into account so it is not exactly the same as matlab's imregionalmax()
I think you want to use the
MinMaxLoc(arr, mask=NULL)-> (minVal, maxVal, minLoc, maxLoc)
Finds global minimum and maximum in array or subarray
function on your image