Fast algorithm for the move-to-front transform

I'm trying to find the fastest algorithm for the move-to-front transformation, the one used, for example, in conjunction with the Burrows-Wheeler transform.
The best I've managed so far does about 15 MB/s on a 2.1 GHz Core i3, but I'm sure it's not optimal. Here's my best effort so far. Is there anything faster?
#include <cstring>   // for memmove

// Move-to-front over a 256-symbol alphabet.  The encoder keeps the rank of every
// symbol packed as 9-bit fields inside 37 64-bit words (7 fields per word),
// so one pass over 37 words updates all ranks branch-free.
class mtf256_x {
    typedef unsigned char u8;
    typedef unsigned long long L;
public:
    L enc[37];     // 9-bit rank per symbol, 7 per 64-bit word
    u8 dec[256];   // decoder side: symbol currently at each position

    mtf256_x() {
        unsigned i;
        for (i = 0; i < 37; i++) {
            enc[i] = 0;
        }
        for (i = 0; i < 256; i++) {
            dec[i] = i;
            set(i, i);
        }
    }

    u8 decode(u8 in) {
        u8 r = dec[in];
        if (in) {
            memmove(dec + 1, dec, in);  // shift positions 0..in-1 up by one
            dec[0] = r;
        }
        return r;
    }

    void set(unsigned x, u8 y) {        // store rank y for symbol x
        unsigned xl = (x % 7) * 9;
        unsigned xh = (x / 7);
        enc[xh] &= ~(0x1FFLLU << xl);
        enc[xh] |= ((L)y) << xl;
    }

    u8 get(unsigned x) {                // current rank of symbol x (always fits in 8 bits)
        return enc[x / 7] >> (x % 7) * 9;
    }

    u8 encode(u8 in) {
        u8 r = get(in);
        unsigned i;
        L m2 = 0x0040201008040201LLU;   // 0x001 in each 9-bit field
        L m1 = 0x3FDFEFF7FBFDFEFFLLU;   // 0x0FF in each 9-bit field
        L Q = (0x100 + r) * m2;
        L a;
        L *l = enc;
        for (i = 0; i < 37; i++) {
            a = l[i];
            a += ((Q - a) >> 8) & m2;   // add 1 to every rank <= r (the symbol itself is reset below)
            a &= m1;
            l[i] = a;
        }
        set(in, 0);                     // the encoded symbol moves to the front
        return r;
    }
};
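For reference, a minimal round-trip check for the class above (just a test-harness sketch; mtf256_x keeps its encode and decode tables in sync, so one instance can drive both sides):
#include <cassert>
#include <cstdio>

int main() {
    mtf256_x m;
    const unsigned char msg[] = "abracadabra";
    for (unsigned i = 0; msg[i]; i++) {
        unsigned char rank = m.encode(msg[i]);   // rank of the symbol before it moves to the front
        assert(m.decode(rank) == msg[i]);        // decoding the rank must give the original byte back
        printf("%u ", rank);
    }
    printf("\n");
    return 0;
}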

Maybe you can try this (from http://kuc406.moxo.cz/projects.php):
int b[ uint8Max + 2 ], treshold = 0, pivot = -1;
long inFileOffst = 0, pOffset = 0, t[ uint8Max + 1 ];
int rank, c, i, p0, p1;

// initialise list
for( i = 0; i <= uint8Max; t[ i++ ] = 0 );
for( i = 1; i <= uint8Max; b[ i - 1 ] = i++ );
b[ uint8Max ] = b[ uint8Max + 1 ] = 0;

// read data
// input = c; output = rank
inFileOffst++;
rank = 0;
if( ( p1 = b[ uint8Max + 1 ] ) != ( c = data[input] ) )
{
    if( t[ c ] < pOffset )
    {
        rank += treshold++;
        t[ c ] = inFileOffst;
        p1 = pivot;
    }
    else if( t[ c ] == pOffset )
    {
        pivot = c;
        t[ c ] = pOffset = inFileOffst;
        treshold = 0;
    }
    while( true ) // walk the list, four links at a time
    {
        if( ( p0 = b[ p1 ] ) == c )
        {
            rank++;
            b[ p1 ] = b[ c ];
            break;
        }
        if( ( p1 = b[ p0 ] ) == c )
        {
            rank += 2;
            b[ p0 ] = b[ c ];
            break;
        }
        if( ( p0 = b[ p1 ] ) == c )
        {
            rank += 3;
            b[ p1 ] = b[ c ];
            break;
        }
        if( ( p1 = b[ p0 ] ) == c )
        {
            rank += 4;
            b[ p0 ] = b[ c ];
            break;
        }
        rank += 4;
    }
    b[ c ] = b[ uint8Max + 1 ];
    b[ uint8Max + 1 ] = c;
}
But this is geared towards a small alphabet (e.g. bytes); for a bigger alphabet I'd suggest trying nburns's version, or looking around the http://encode.ru forum.
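For reference, here is the textbook list-based MTF encoder that the bit-packed versions above are competing against; a correctness-baseline sketch in C++, not a speed recommendation:
#include <cstdint>
#include <vector>

// Reference move-to-front encoder: keep the 256 symbols ordered by recency,
// emit each byte's current position, then move that symbol to the front.
std::vector<uint8_t> mtf_encode_naive(const std::vector<uint8_t>& in) {
    uint8_t order[256];
    for (int i = 0; i < 256; ++i) order[i] = (uint8_t)i;   // start with the identity order

    std::vector<uint8_t> out;
    out.reserve(in.size());
    for (uint8_t b : in) {
        int r = 0;
        while (order[r] != b) ++r;                          // current rank of b
        out.push_back((uint8_t)r);
        for (; r > 0; --r) order[r] = order[r - 1];         // shift the more recent symbols down
        order[0] = b;                                       // and put b at the front
    }
    return out;
}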

Related

Codility GenomicRangeQuery algorithm speed comparison: Java vs Swift

I rewrote the code that solves the GenomicRangeQuery task from Java to Swift. The Java code gets a 100/100 score, but the Swift code fails all performance tests. I'm trying to understand why, because the logic is the same. Why does the Swift code take so long to execute? Am I using some very slow constructs in my Swift code that I'm not aware of? Please take a look at this Java code copied from here.
class Solution {
public int[] solveGenomicRange(String S, int[] P, int[] Q) {
//used jagged array to hold the prefix sums of each A, C and G genoms
//we don't need to get prefix sums of T, you will see why.
int[][] genoms = new int[3][S.length()+1];
//if the char is found in the index i, then we set it to be 1 else they are 0
// 3 short values are needed for this reason
short a, c, g;
for (int i=0; i<S.length(); i++) {
a = 0; c = 0; g = 0;
if ('A' == (S.charAt(i))) {
a=1;
}
if ('C' == (S.charAt(i))) {
c=1;
}
if ('G' == (S.charAt(i))) {
g=1;
}
//here we calculate prefix sums. To learn what's prefix sums look at here https://codility.com/media/train/3-PrefixSums.pdf
genoms[0][i+1] = genoms[0][i] + a;
genoms[1][i+1] = genoms[1][i] + c;
genoms[2][i+1] = genoms[2][i] + g;
}
int[] result = new int[P.length];
//here we go through the provided P[] and Q[] arrays as intervals
for (int i=0; i<P.length; i++) {
int fromIndex = P[i];
//we need to add 1 to Q[i],
//because our genoms[0][0], genoms[1][0] and genoms[2][0]
//have 0 values by default, look above genoms[0][i+1] = genoms[0][i] + a;
int toIndex = Q[i]+1;
if (genoms[0][toIndex] - genoms[0][fromIndex] > 0) {
result[i] = 1;
} else if (genoms[1][toIndex] - genoms[1][fromIndex] > 0) {
result[i] = 2;
} else if (genoms[2][toIndex] - genoms[2][fromIndex] > 0) {
result[i] = 3;
} else {
result[i] = 4;
}
}
return result;
}
}
And here is the same code rewritten in Swift 2.1:
public func solution(inout S:String, inout _ P:[Int], inout _ Q:[Int]) -> [Int] {
let len = S.characters.count
//used jagged array to hold the prefix sums of each A, C and G genoms
//we don't need to get prefix sums of T, you will see why.
var genoms = [[Int]](count: 3, repeatedValue: [Int](count: len+1, repeatedValue: 0))
//if the char is found in the index i, then we set it to be 1 else they are 0
// 3 short values are needed for this reason
var a,c,g:Int
for i in 0..<len {
a=0; c=0; g=0
let char = S[S.startIndex.advancedBy(i)]
switch char {
case "A": a=1;
case "C": c=1;
case "G": g=1;
default: ()
}
//here we calculate prefix sums. To learn what's prefix sums look at here https://codility.com/media/train/3-PrefixSums.pdf
genoms[0][i+1] = genoms[0][i] + a
genoms[1][i+1] = genoms[1][i] + c
genoms[2][i+1] = genoms[2][i] + g
}
var result: [Int] = [Int](count: P.count, repeatedValue: 0)
//here we go through the provided P[] and Q[] arrays as intervals
for i in 0..<P.count {
let fromIndex = P[i]
//we need to add 1 to Q[i],
//because our genoms[0][0], genoms[1][0] and genoms[2][0]
//have 0 values by default, look above genoms[0][i+1] = genoms[0][i] + a;
let toIndex = Q[i] + 1
if (genoms[0][toIndex] - genoms[0][fromIndex] > 0) {
result[i] = 1;
} else if (genoms[1][toIndex] - genoms[1][fromIndex] > 0) {
result[i] = 2;
} else if (genoms[2][toIndex] - genoms[2][fromIndex] > 0) {
result[i] = 3;
} else {
result[i] = 4;
}
}
return result
}
Does anybody know why this Swift code fails all the performance tests when the Java code passes them all? I suppose I'm hitting some sensitive bottleneck in Swift, but I'm not aware of where.
If someone is not aware of Codility, this is the link to the task.

This Java code for the GenomicRangeQuery problem scored 100% at Codility.
It uses 4 simple arrays to do the prefix sums.
I post it here as an alternative approach.
Time complexity is O(n+m).
public int[] solution4(String S, int[] P, int[] Q){
char[]chars=S.toCharArray();
int n=chars.length;
int[]contaA=new int[n+1];
int[]contaC=new int[n+1];
int[]contaG=new int[n+1];
int[]contaT=new int[n+1];
for (int i=1;i<n+1;i++){
contaA[i]=contaA[i-1];
contaC[i]=contaC[i-1];
contaG[i]=contaG[i-1];
contaT[i]=contaT[i-1];
if (chars[i-1]=='A')contaA[i]+=1;
if (chars[i-1]=='C')contaC[i]+=1;
if (chars[i-1]=='G')contaG[i]+=1;
if (chars[i-1]=='T')contaT[i]+=1;
}
int[] arrayContadores=new int[P.length];
for (int i=0;i<P.length;i++){
int primeiro=P[i];
int ultimo=Q[i];
int A=contaFatia(contaA,primeiro,ultimo);
int C=contaFatia(contaC,primeiro,ultimo);
int G=contaFatia(contaG,primeiro,ultimo);
int T=contaFatia(contaT,primeiro,ultimo);
if (A>0){arrayContadores[i]=1;
}else if (C>0) {
arrayContadores[i] = 2;
}else if(G>0){
arrayContadores[i]=3;
}else if (T>0){
arrayContadores[i]=4;
}
}
return arrayContadores;
}
public int contaFatia(int[]P,int x,int y){
return P[y+1]-P[x];
}
public func solution(_ S : inout String, _ P : inout [Int], _ Q : inout [Int]) -> [Int] {
var retArr = [Int]()
var chrArr = [Character]()
for chr in S {
chrArr.append(chr)
}
for i in 0..<P.count {
var minFactor = 4
if P[i] - Q[i] == 0 {
if chrArr[P[i]] == "A"{
minFactor = 1
}else if chrArr[P[i]] == "C"{
minFactor = 2
}else if chrArr[P[i]] == "G"{
minFactor = 3
}
}else {
for j in P[i]...Q[i] {
if chrArr[j] == "A"{
minFactor = 1
break
}else if chrArr[j] == "C"{
minFactor = 2
}else if chrArr[j] == "G"{
if minFactor > 2 {
minFactor = 3
}
}
}
}
retArr.append(minFactor)
}
return retArr
}
I have been playing with things in Swift for a while trying to come up with the right solution. This is the closest I have come.
public func solution(_ S : inout String, _ P : inout [Int], _ Q : inout [Int]) -> [Int] {
let N = S.count + 1
var outerImpacts: ContiguousArray<ContiguousArray<Int>> = []
outerImpacts.reserveCapacity(N)
for i in 0..<N {
if i > 0 {
var innerImpacts = outerImpacts[i - 1]
switch S[S.index(S.startIndex, offsetBy: i - 1)] {
case "A":
innerImpacts[0] += 1
case "C":
innerImpacts[1] += 1
case "G":
innerImpacts[2] += 1
case "T":
innerImpacts[3] += 1
default:
break
}
outerImpacts.append(innerImpacts)
} else {
outerImpacts.append(ContiguousArray<Int>(repeating: 0, count: 4))
}
}
let M: Int = P.count
var minimalImpacts: [Int] = []
minimalImpacts.reserveCapacity(M)
for i in 0..<M {
for j in 0..<4 where (outerImpacts[Q[i] + 1][j] - outerImpacts[P[i]][j]) > 0 {
minimalImpacts.append(j + 1)
break
}
}
return minimalImpacts
}

How do I keep track of the path in TSP?

I found a simple solution to a TSP problem using a bitmask here.
int n, src;
vector< vector< int > > graph, dp;

// initial status
void init() {
    for ( int i = 0; i < n; ++i )
        dp[ 1 << i ][ i ] = graph[ src ][ i ];
}

// TSP recursive
int TSP( int status, int x ) {
    if ( dp[ status ][ x ] != -1 )
        return dp[ status ][ x ];
    int mask = 1 << x;
    dp[ status ][ x ] = 1e9;
    for ( int i = 0; i < n; ++i )
        if ( i != x && ( status & ( 1 << i ) ) )
            dp[ status ][ x ] = min( dp[ status ][ x ], TSP( status - mask, i ) + graph[ i ][ x ] );
    return dp[ status ][ x ];
}

int main() {
    scanf( "%d %d", &n, &src );
    graph = vector< vector< int > >( n, vector< int >( n ) );
    dp = vector< vector< int > >( 1 << n, vector< int >( n, -1 ) );
    for ( int i = 0; i < n; ++i )
        for ( int j = 0; j < n; ++j ) {
            int x;
            scanf( "%d", &x );
            graph[ i ][ j ] = x;
        }
    init();
    printf( "%d\n", TSP( ( 1 << n ) - 1, src ) );
    return 0;
}
What if I want to know the path of the solution? How do I implement that?
What comes to my mind is to add a variable like track[status][list of path], but I don't think that's a good solution. What is the best way to keep track of the path?
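One standard way, sketched on top of the code above (the names nxt and printPath are mine, and nxt would need to be allocated alongside dp in main): instead of storing whole paths, remember for each (status, x) which city the minimum came from, then walk those choices backwards from the full bitmask.
// nxt[ status ][ x ] = city visited just before x on the best path for this subset
vector< vector< int > > nxt;   // size it like dp: ( 1 << n ) rows of n entries, filled with -1

int TSP( int status, int x ) {
    if ( dp[ status ][ x ] != -1 )
        return dp[ status ][ x ];
    int mask = 1 << x;
    dp[ status ][ x ] = 1e9;
    for ( int i = 0; i < n; ++i )
        if ( i != x && ( status & ( 1 << i ) ) ) {
            int cand = TSP( status - mask, i ) + graph[ i ][ x ];
            if ( cand < dp[ status ][ x ] ) {
                dp[ status ][ x ] = cand;
                nxt[ status ][ x ] = i;          // remember where the minimum came from
            }
        }
    return dp[ status ][ x ];
}

// call after TSP( ( 1 << n ) - 1, src ) has been computed
void printPath() {
    int status = ( 1 << n ) - 1, x = src;
    vector< int > rev;                           // tour collected from its end backwards
    while ( status != ( 1 << x ) ) {
        rev.push_back( x );
        int prev = nxt[ status ][ x ];
        status -= 1 << x;                        // drop x from the subset
        x = prev;
    }
    rev.push_back( x );                          // the first city visited after src
    rev.push_back( src );                        // the tour starts (and ends) at src
    for ( int i = (int)rev.size() - 1; i >= 0; --i )
        printf( "%d ", rev[ i ] );
    printf( "\n" );
}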

OpenCV Stereo matching and disparity map

I am using the code sample for stereo matching that comes with the OpenCV source code, here:
/*
* stereo_match.cpp
* calibration
*
* Created by Victor Eruhimov on 1/18/10.
* Copyright 2010 Argus Corp. All rights reserved.
*
*/
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <stdio.h>
using namespace cv;
static void print_help()
{
printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
"[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
"[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n");
}
static void saveXYZ(const char* filename, const Mat& mat)
{
const double max_z = 1.0e4;
FILE* fp = fopen(filename, "wt");
for(int y = 0; y < mat.rows; y++)
{
for(int x = 0; x < mat.cols; x++)
{
Vec3f point = mat.at<Vec3f>(y, x);
if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
}
}
fclose(fp);
}
int main(int argc, char** argv)
{
const char* algorithm_opt = "--algorithm=";
const char* maxdisp_opt = "--max-disparity=";
const char* blocksize_opt = "--blocksize=";
const char* nodisplay_opt = "--no-display=";
const char* scale_opt = "--scale=";
if(argc < 3)
{
print_help();
return 0;
}
const char* img1_filename = 0;
const char* img2_filename = 0;
const char* intrinsic_filename = 0;
const char* extrinsic_filename = 0;
const char* disparity_filename = 0;
const char* point_cloud_filename = 0;
enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
int alg = STEREO_SGBM;
int SADWindowSize = 0, numberOfDisparities = 0;
bool no_display = false;
float scale = 1.f;
StereoBM bm;
StereoSGBM sgbm;
StereoVar var;
for( int i = 1; i < argc; i++ )
{
if( argv[i][0] != '-' )
{
if( !img1_filename )
img1_filename = argv[i];
else
img2_filename = argv[i];
}
else if( strncmp(argv[i], algorithm_opt, strlen(algorithm_opt)) == 0 )
{
char* _alg = argv[i] + strlen(algorithm_opt);
alg = strcmp(_alg, "bm") == 0 ? STEREO_BM :
strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM :
strcmp(_alg, "hh") == 0 ? STEREO_HH :
strcmp(_alg, "var") == 0 ? STEREO_VAR : -1;
if( alg < 0 )
{
printf("Command-line parameter error: Unknown stereo algorithm\n\n");
print_help();
return -1;
}
}
else if( strncmp(argv[i], maxdisp_opt, strlen(maxdisp_opt)) == 0 )
{
if( sscanf( argv[i] + strlen(maxdisp_opt), "%d", &numberOfDisparities ) != 1 ||
numberOfDisparities < 1 || numberOfDisparities % 16 != 0 )
{
printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
print_help();
return -1;
}
}
else if( strncmp(argv[i], blocksize_opt, strlen(blocksize_opt)) == 0 )
{
if( sscanf( argv[i] + strlen(blocksize_opt), "%d", &SADWindowSize ) != 1 ||
SADWindowSize < 1 || SADWindowSize % 2 != 1 )
{
printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
return -1;
}
}
else if( strncmp(argv[i], scale_opt, strlen(scale_opt)) == 0 )
{
if( sscanf( argv[i] + strlen(scale_opt), "%f", &scale ) != 1 || scale < 0 )
{
printf("Command-line parameter error: The scale factor (--scale=<...>) must be a positive floating-point number\n");
return -1;
}
}
else if( strcmp(argv[i], nodisplay_opt) == 0 )
no_display = true;
else if( strcmp(argv[i], "-i" ) == 0 )
intrinsic_filename = argv[++i];
else if( strcmp(argv[i], "-e" ) == 0 )
extrinsic_filename = argv[++i];
else if( strcmp(argv[i], "-o" ) == 0 )
disparity_filename = argv[++i];
else if( strcmp(argv[i], "-p" ) == 0 )
point_cloud_filename = argv[++i];
else
{
printf("Command-line parameter error: unknown option %s\n", argv[i]);
return -1;
}
}
if( !img1_filename || !img2_filename )
{
printf("Command-line parameter error: both left and right images must be specified\n");
return -1;
}
if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
{
printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
return -1;
}
if( extrinsic_filename == 0 && point_cloud_filename )
{
printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
return -1;
}
int color_mode = alg == STEREO_BM ? 0 : -1;
Mat img1 = imread(img1_filename, color_mode);
Mat img2 = imread(img2_filename, color_mode);
if( scale != 1.f )
{
Mat temp1, temp2;
int method = scale < 1 ? INTER_AREA : INTER_CUBIC;
resize(img1, temp1, Size(), scale, scale, method);
img1 = temp1;
resize(img2, temp2, Size(), scale, scale, method);
img2 = temp2;
}
Size img_size = img1.size();
Rect roi1, roi2;
Mat Q;
if( intrinsic_filename )
{
// reading intrinsic parameters
FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", intrinsic_filename);
return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
M1 *= scale;
M2 *= scale;
fs.open(extrinsic_filename, CV_STORAGE_READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", extrinsic_filename);
return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
Mat img1r, img2r;
remap(img1, img1r, map11, map12, INTER_LINEAR);
remap(img2, img2r, map21, map22, INTER_LINEAR);
img1 = img1r;
img2 = img2r;
}
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;
bm.state->roi1 = roi1;
bm.state->roi2 = roi2;
bm.state->preFilterCap = 31;
bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
bm.state->minDisparity = 0;
bm.state->numberOfDisparities = numberOfDisparities;
bm.state->textureThreshold = 10;
bm.state->uniquenessRatio = 15;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
bm.state->disp12MaxDiff = 1;
sgbm.preFilterCap = 63;
sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
int cn = img1.channels();
sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.minDisparity = 0;
sgbm.numberOfDisparities = numberOfDisparities;
sgbm.uniquenessRatio = 10;
sgbm.speckleWindowSize = bm.state->speckleWindowSize;
sgbm.speckleRange = bm.state->speckleRange;
sgbm.disp12MaxDiff = 1;
sgbm.fullDP = alg == STEREO_HH;
var.levels = 3; // ignored with USE_AUTO_PARAMS
var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
var.nIt = 25;
var.minDisp = -numberOfDisparities;
var.maxDisp = 0;
var.poly_n = 3;
var.poly_sigma = 0.0;
var.fi = 15.0f;
var.lambda = 0.03f;
var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;
Mat disp, disp8;
//Mat img1p, img2p, dispp;
//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
int64 t = getTickCount();
if( alg == STEREO_BM )
bm(img1, img2, disp);
else if( alg == STEREO_VAR ) {
var(img1, img2, disp);
}
else if( alg == STEREO_SGBM || alg == STEREO_HH )
sgbm(img1, img2, disp);
t = getTickCount() - t;
printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
//disp = dispp.colRange(numberOfDisparities, img1p.cols);
if( alg != STEREO_VAR )
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
else
disp.convertTo(disp8, CV_8U);
if( !no_display )
{
namedWindow("left", 1);
imshow("left", img1);
namedWindow("right", 1);
imshow("right", img2);
namedWindow("disparity", 0);
imshow("disparity", disp8);
printf("press any key to continue...");
fflush(stdout);
waitKey();
printf("\n");
}
if(disparity_filename)
imwrite(disparity_filename, disp8);
if(point_cloud_filename)
{
printf("storing the point cloud...");
fflush(stdout);
Mat xyz;
reprojectImageTo3D(disp, xyz, Q, true);
saveXYZ(point_cloud_filename, xyz);
printf("\n");
}
return 0;
}
And I have tried all of the algorithms on the following set of images:
http://imageshack.com/a/img607/4641/utam.jpg (left)
http://imageshack.com/a/img62/5939/2hkc.jpg (right)
Here is my result:
http://imageshack.com/a/img856/4274/1n50.jpg
And here is the desired result:
http://i.stack.imgur.com/W9PBr.jpg
What could be the problem?
Change the parameters! For example, with bm, my experience is that SADWindowSize and minDisparity really matter a lot, so you can add trackbars for each parameter and play with them until you get the desired result. I've tried the same pair of images and it works fine. So all you need to do is try, try, and try the parameters again.
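To illustrate the trackbar idea, here is a rough, self-contained sketch using the same OpenCV 2.4-era StereoBM API as the sample above (the onTrackbar callback and the g_* globals are names I made up, and the slider ranges are just starting points):
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <algorithm>
#include <cstdio>

static cv::Mat g_left, g_right;      // rectified 8-bit grayscale pair of equal size
static int g_sadSlider = 9;          // forced to an odd value >= 5 below
static int g_dispSlider = 4;         // multiplied by 16 below

static void onTrackbar(int, void*)
{
    cv::StereoBM bm;
    bm.state->SADWindowSize = std::max(5, g_sadSlider | 1);           // must be odd
    bm.state->numberOfDisparities = std::max(1, g_dispSlider) * 16;   // must be a multiple of 16
    bm.state->preFilterCap = 31;
    bm.state->minDisparity = 0;
    bm.state->textureThreshold = 10;
    bm.state->uniquenessRatio = 15;

    cv::Mat disp, disp8;
    bm(g_left, g_right, disp);
    disp.convertTo(disp8, CV_8U, 255 / (bm.state->numberOfDisparities * 16.));
    cv::imshow("disparity", disp8);
}

int main(int argc, char** argv)
{
    if (argc < 3) { printf("usage: tune_bm <left> <right>\n"); return 1; }
    g_left = cv::imread(argv[1], 0);
    g_right = cv::imread(argv[2], 0);
    if (g_left.empty() || g_right.empty()) { printf("could not load images\n"); return 1; }

    cv::namedWindow("disparity");
    cv::createTrackbar("SAD window", "disparity", &g_sadSlider, 41, onTrackbar);
    cv::createTrackbar("disparities/16", "disparity", &g_dispSlider, 16, onTrackbar);
    onTrackbar(0, 0);     // compute once with the initial values
    cv::waitKey();
    return 0;
}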
I know it sounds crazy, but I had similar output when I was accidentally passing the same image as both the left and right input to the StereoSGBM algorithm. Just taking a shot in the dark...

MS VS 2010 srand() not compiling?

I have a piece of code (I'm a student) that "should" work in theory, but Microsoft Visual Studio 2010 seems to have a problem with srand, because it's not being highlighted like other reserved names.
If I remove srand from Auto_Complete_Matrix, then the code compiles with no problem.
The error points at the Auto_Complete_Matrix function:
Error 2 error C2143: syntax error : missing ';' before 'type' c:\users\jorgee!\desktop\uade\program. 1\proyectos\tp3-matrices\ejercicio 2\main.c 46 1 Ejercicio 2
I've included the stdlib and time headers.
Thanks a lot for the help.
/* 2. Write a function that determines whether a square matrix of dimension N
   is symmetric with respect to its main diagonal. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define FIL 5
#define COL 5

int True_False( char *message );
int Auto_Complete_Matrix( int matrix[ FIL ][ COL ] );
int Manual_Complete_Matrix( int matrix[ FIL ][ COL ] );
void Print_Matrix( int matrix[ FIL ][ COL ] );

int main () {
    int matrix[ FIL ][ COL ];
    if( True_False( "Desea autocompletar la Matriz con números al azar? <Si / No> \n\n" ) ) {
        Auto_Complete_Matrix( matrix );
    }
    else {
        Manual_Complete_Matrix( matrix );
    }
    Print_Matrix( matrix );
    system( "pause" );
    return 0;
}

int True_False( char *message ) {
    char Answer[3];
    fputs( message, stdout );
    fgets( Answer, 3, stdin );
    if ( Answer[0] == 1 ) {
        return 1;
    }
    if ( strncmp( Answer, "Si", 2 ) == 0 || strncmp( Answer, "si", 2 ) == 0 ) {
        return 1;
    }
    return 0;
}

int Auto_Complete_Matrix ( int matrix[ FIL ][ COL ] ) {
    srand(time(0));   /* calling srand before the declaration below is what triggers C2143 here */
    int i, j;
    for ( i = 0; i < FIL; i ++ ) {
        for ( j = 0; j < COL; j ++ ) {
            matrix[i][j] = rand() % (100 - 0 + 1) + 0;
        }
    }
    return 0;
}

int Manual_Complete_Matrix( int matrix[ FIL ][ COL ] ) {
    int i, j;
    for ( i = 0; i < FIL; i++ ) {
        for ( j = 0; j < COL; j++ ) {
            while( fscanf( stdin, "%d", &matrix[i][j] ) != 1 ) {
                fflush( stdin );
                continue;
            }
        }
    }
    return 0;
}

void Print_Matrix( int matrix[ FIL ][ COL ] ) {
    int i, j;
    for( i = 0; i < FIL; i++) {
        for( j = 0; j < COL; j++ ) {
            printf( "%5d", matrix[i][j] );
        }
        puts("\n");
    }
}
In a file compiled as C (instead of C++), Visual Studio's compiler only supports C89/C90, which requires all variables to be declared at the top of the enclosing scope (i.e. in this case the surrounding curly braces) before any statements. Here, you're calling a function (srand) before the 'int i, j' declaration, which is what produces the C2143 error.
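In other words, the function compiles once the declaration is hoisted above the call; this is just the question's function with the two lines swapped:
int Auto_Complete_Matrix ( int matrix[ FIL ][ COL ] ) {
    int i, j;          /* declarations first, as C89/C90 requires */
    srand(time(0));
    for ( i = 0; i < FIL; i ++ ) {
        for ( j = 0; j < COL; j ++ ) {
            matrix[i][j] = rand() % (100 - 0 + 1) + 0;   /* 0..100 */
        }
    }
    return 0;
}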

Mergesort: what's up with the last step in the merge operation?

The following mergesort is from Data Structures and Algorithm Analysis (Weiss). What I'm wondering about is the last for loop in the merge step. I understand that we have to copy tmpArray back into a, but I don't understand why we do it from rightEnd, and why we don't just have i go from 0 to tmpArray.length. Can someone explain, please?
public static void mergeSort( Comparable [ ] a )
{
    Comparable [ ] tmpArray = new Comparable[ a.length ];
    mergesort( a, tmpArray, 0, a.length - 1 );
}

public static void mergesort( Comparable [ ] a, Comparable [ ] tmpArray,
                              int left, int right )
{
    if( left < right ) {
        int center = ( left + right ) / 2;
        mergesort( a, tmpArray, left, center );
        mergesort( a, tmpArray, center + 1, right );
        merge( a, tmpArray, left, center + 1, right );
    }
}

public static void merge( Comparable [ ] a, Comparable [ ] tmpArray,
                          int leftPos, int rightPos, int rightEnd )
{
    int leftEnd = rightPos - 1;
    int tmpPos = leftPos;
    int numElements = rightEnd - leftPos + 1;
    while( leftPos <= leftEnd && rightPos <= rightEnd )
        if( a[ leftPos ].compareTo( a[ rightPos ] ) <= 0 )
            tmpArray[ tmpPos++ ] = a[ leftPos++ ];
        else
            tmpArray[ tmpPos++ ] = a[ rightPos++ ];
    while( leftPos <= leftEnd ) // Copy rest of first half
        tmpArray[ tmpPos++ ] = a[ leftPos++ ];
    while( rightPos <= rightEnd ) // Copy rest of right half
        tmpArray[ tmpPos++ ] = a[ rightPos++ ];
    for( int i = 0; i < numElements; i++, rightEnd-- )
        a[ rightEnd ] = tmpArray[ rightEnd ]; // Copy tmpArray back
}
I think the main reason to do things this way is that leftPos, the spot where the merged range starts, has been destructively modified by the merge loops (note the use of leftPos++ and rightPos++). Consequently, in order to copy the values back properly, the last loop counts backwards from rightEnd, which the merge loops never touch, for exactly numElements steps; that covers the same leftPos..rightEnd range without needing the original leftPos. Going from 0 to tmpArray.length instead wouldn't work, because only this call's leftPos..rightEnd slice of tmpArray holds freshly merged data; the rest is stale (or still null), and copying the whole array on every merge would also cost O(n) regardless of how small the subrange is.
That said, I don't see any reason to do it this way. It would be easier to save the starting index up front (e.g. int start = leftPos;) and copy forward from start, instead of destroying the inputs and doing extra gymnastics to recover the original range.
Hope this helps!
