set of sets in C++ - c++11

How to create a set of sets in C++?
set<char> inner1;
set<char> inner2;
set< set<char> > outer;
set< set<char> >::iterator new_it;
set<char>::iterator it;

for (int i = 65; i < 70; ++i)
    inner1.insert(i);
for (int i = 70; i < 80; ++i)
    inner2.insert(i);

outer.insert(inner1);
outer.insert(inner2);

for (new_it = outer.begin(); new_it != outer.end(); new_it++)
{
    for (it = *(new_it); it != (new_it); )
    {
    }
}

You can't assign new_it (or *new_it, which is a set, not an iterator) to it in the inner loop; the inner iterator has to come from the inner set's own begin()/end(). Do it this way:
for (it = new_it->begin(); it != new_it->end(); ++it)
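For completeness, here is a minimal self-contained version of the corrected loops, using the same data as the question and printing each inner set on its own line:

#include <iostream>
#include <set>
using namespace std;

int main() {
    set<char> inner1, inner2;
    set< set<char> > outer;

    for (int i = 65; i < 70; ++i)   // 'A'..'E'
        inner1.insert(i);
    for (int i = 70; i < 80; ++i)   // 'F'..'O'
        inner2.insert(i);

    outer.insert(inner1);
    outer.insert(inner2);

    // The outer iterator walks the sets; the inner iterator must come
    // from the inner set that new_it points to, not from new_it itself.
    for (auto new_it = outer.begin(); new_it != outer.end(); ++new_it) {
        for (auto it = new_it->begin(); it != new_it->end(); ++it)
            cout << *it;
        cout << '\n';
    }
    return 0;
}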

Related

Simplex solver - issues with getting it working

I'm trying to write a simple simplex solver for linear optimization problems, but I'm having trouble getting it to work. Every time I run it I get a "vector subscript out of range" error (which is easy enough to reproduce), but I suspect the real problem is a core issue somewhere else in my implementation.
Here is my simplex solver implementation:
bool pivot(vector<vector<double>>& tableau, int row, int col) {
    int n = tableau.size();
    int m = tableau[0].size();
    double pivot_element = tableau[row][col];
    if (pivot_element == 0) return false;
    for (int j = 0; j < m; j++) {
        tableau[row][j] /= pivot_element;
    }
    for (int i = 0; i < n; i++) {
        if (i != row) {
            double ratio = tableau[i][col];
            for (int j = 0; j < m; j++) {
                tableau[i][j] -= ratio * tableau[row][j];
            }
        }
    }
    return true;
}
int simplex(vector<vector<double>>& tableau, vector<double>& basic, vector<double>& non_basic) {
    int n = tableau.size() - 1;
    int m = tableau[0].size() - 1;
    while (true) {
        int col = -1;
        for (int j = 0; j < m; j++) {
            if (tableau[n][j] > 0) {
                col = j;
                break;
            }
        }
        if (col == -1) break;
        int row = -1;
        double min_ratio = numeric_limits<double>::infinity();
        for (int i = 0; i < n; i++) {
            if (tableau[i][col] > 0) {
                double ratio = tableau[i][m] / tableau[i][col];
                if (ratio < min_ratio) {
                    row = i;
                    min_ratio = ratio;
                }
            }
        }
        if (row == -1) return -1;
        if (!pivot(tableau, row, col)) return -1;
        double temp = basic[row];
        basic[row] = non_basic[col];
        non_basic[col] = temp;
    }
    return 1;
}
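A minimal driver helps pin down what the solver expects. The sketch below is my guess at the intended tableau layout (constraint rows first, objective row last, right-hand side in the last column); one likely source of the out-of-range error is basic or non_basic being shorter than the row/column counts this layout implies, since non_basic is indexed by any pivot column. It assumes pivot() and simplex() from above are compiled together with it:

#include <iostream>
#include <limits>
#include <vector>
using namespace std;

// pivot() and simplex() as defined in the question go here.

int main() {
    // Maximize x + y subject to x <= 2, y <= 3 (slack variables s1, s2).
    // Assumed layout: one row per constraint, objective row last,
    // right-hand side in the last column.
    vector<vector<double>> tableau = {
        {1, 0, 1, 0, 2},   // x + s1 = 2
        {0, 1, 0, 1, 3},   // y + s2 = 3
        {1, 1, 0, 0, 0}    // objective coefficients (maximize x + y)
    };
    // basic[i] = variable currently basic in row i; non_basic is indexed
    // by column, so it needs one entry per variable column.
    vector<double> basic = {2, 3};            // slack variable indices
    vector<double> non_basic = {0, 1, 2, 3};  // one entry per column

    if (simplex(tableau, basic, non_basic) == 1)
        cout << "optimum = " << -tableau[2][4] << '\n';  // prints 5
    else
        cout << "unbounded problem or degenerate pivot\n";
    return 0;
}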

Access violation writing location 0x00000000 in middle of a for loop

I'm trying to use the MNIST dataset for neural networks, but I'm getting an access violation writing location 0x00000000.
The code is:
for (int i = 0; i < length; i++) {
    innerarray = (int8_t*)malloc(width * height);
    for (int j = 0; j < width * height; j++) {
        int8_t value = 0;
        innerarray[j] = value;
    }
    temparray[i] = innerarray;
}
for (int i = 0; i < length; i++) {
    for (int j = 0; j < width * height; j++) {
        int8_t grayscale;
        rf.read((char*)&grayscale, 1);
        temparray[i][j] = grayscale; //error happens here
    }
}
variable values:
int length = 10000;
int width = 28;
int height = 28;
The weird thing is that it only happens when i >= 2512. Replacing grayscale with 0 doesn't help either. I can, however, set temparray[2512][0] to 0 before the last nested for loop, like this:
for (int i = 0; i < length; i++) {
    innerarray = (int8_t*)malloc(width * height);
    for (int j = 0; j < width * height; j++) {
        int8_t value = 0;
        innerarray[j] = value;
    }
    temparray[i] = innerarray;
}
temparray[2512][0] = 0; //works
for (int i = 0; i < length; i++) {
    for (int j = 0; j < width * height; j++) {
        int8_t grayscale;
        rf.read((char*)&grayscale, 1);
        temparray[i][j] = 0; //error still happens here
    }
}
The full code is:
#include<iostream>
#include<fstream>
#include<cstdint>
#include<cstdlib>
#include<array>
using namespace std;

struct images {
    int32_t height = 0;
    int32_t width = 0;
    int32_t magicnumber = 0;
    int32_t numberofimages = 0;
    int8_t** images[];
    void setimages(int8_t** newimages) {
        delete[] this->images;
        int8_t** images = (int8_t**)malloc(numberofimages);
        int8_t* innerarray;
        for (int i = 0; i < numberofimages; i++) {
            innerarray = (int8_t*)malloc(width * height);
            images[i] = innerarray;
        }
        for (int i = 0; i < numberofimages; i++) {
            for (int j = 0; j < width * height; j++) {
                images[i][j] = newimages[i][j];
            }
        }
    };
};

struct labels {
    int32_t magicnumber = 0;
    int32_t numberoflabels = 0;
    int8_t labels[];
};

int32_t litleendiantobig(int32_t litle) { //reverse works as well
    int32_t big = ((4278190080 & litle) >> 24) + ((255 & litle) << 24) + ((16711680 & litle) >> 8) + ((65280 & litle) << 8);
    return big;
}

images loadimages(string filename, int32_t magicalnumber) {
    ifstream rf(filename, ios::out | ios::binary);
    if (!rf) {
        cout << "Cannot open file! " << filename << endl;
        exit(1);
    }
    int32_t magicnumberoffile;
    rf.read((char*)&magicnumberoffile, 4);
    magicnumberoffile = litleendiantobig(magicnumberoffile);
    if (magicalnumber != magicnumberoffile) {
        cout << "Wrong magic number!" << endl;
        cout << "expected:" << magicalnumber << endl;
        cout << "got:" << magicnumberoffile << endl;
        exit(1);
    }
    images img;
    int32_t length;
    rf.read((char*)&length, 4);
    length = litleendiantobig(length);
    img.numberofimages = length;
    int32_t width;
    rf.read((char*)&width, 4);
    width = litleendiantobig(width);
    img.width = width;
    int32_t height;
    rf.read((char*)&height, 4);
    height = litleendiantobig(height);
    img.height = height;
    int8_t** temparray = (int8_t**)malloc(length);
    int8_t* innerarray;
    for (int i = 0; i < length; i++) {
        innerarray = (int8_t*)malloc(width * height);
        for (int j = 0; j < width * height; j++) {
            int8_t value = 0;
            innerarray[j] = value;
        }
        temparray[i] = innerarray;
    }
    for (int i = 0; i < length; i++) {
        for (int j = 0; j < width * height; j++) {
            int8_t grayscale;
            rf.read((char*)&grayscale, 1);
            temparray[i][j] = grayscale; //error happens here
        }
    }
    img.setimages(temparray);
    rf.close();
    return img;
}

int main() {
    images testimages;
    loadimages("t10k-images.bin", 2051);
    cout << testimages.images;
    return 0;
}
I don't know how to solve the problem and couldn't find it anywhere else. Thanks for helping me out.
Your use of malloc has done you in.
int* array = (int*)malloc(width * height); // allocates width * height BYTES
array[i] = x;                              // sets the i-th _integer_ of array to x,
                                           // but you only allocated space for byte-sized elements
The correct way to allocate integers using malloc:
int* array = (int*)malloc(width * height * sizeof(int)); // allocates width * height ints
Either that, or your original intent was to allocate 8-bit pixels. In that case, your pointers should be declared as unsigned char*.
In either case, when coding in C++, types are important, and using operator new to allocate your arrays would have saved you from these troubles.
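To make the point concrete: the same mismatch also hits the pointer array itself. malloc(length) reserves length bytes, not length pointers, so on a 32-bit build it only holds about 10000 / 4 = 2500 rows, which presumably is why writes start failing near i == 2512. Below is a minimal sketch of correctly sized allocation (or, more idiomatically, std::vector), using the question's length, width and height:

#include <cstdint>
#include <cstdlib>
#include <vector>

int main() {
    const int length = 10000, width = 28, height = 28;

    // C-style: size every allocation explicitly.
    int8_t** temparray = (int8_t**)std::malloc(length * sizeof(int8_t*));    // length POINTERS
    for (int i = 0; i < length; i++)
        temparray[i] = (int8_t*)std::malloc(width * height * sizeof(int8_t)); // one image

    // ... read the image data into temparray, then free everything ...
    for (int i = 0; i < length; i++)
        std::free(temparray[i]);
    std::free(temparray);

    // C++-style: let the container do the sizing and the cleanup.
    std::vector<std::vector<int8_t>> images(length, std::vector<int8_t>(width * height, 0));
    return 0;
}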

The C++ standard doesn't provide a hash for this type.

I'm trying to implement the A* algorithm for grids with unordered_map; I have my own priority queue, which works fine. The problem is that when I run the program I get this error: "The C++ standard doesn't provide a hash for this type."
Can A* be implemented with other structures?
Or how can I fix this?
int main()
{
    std::ifstream file("Labirint.txt");
    char **labirint;
    int nr_linii, nr_coloane;
    file >> nr_linii >> nr_coloane;
    locatie soricel;
    locatie branza;
    file >> soricel.x >> soricel.y;
    file >> branza.x >> branza.y;
    int deplasare_linie[] = { 0,0,-1,1 };
    int deplasare_coloana[] = { -1,1,0,0 };
    labirint = new char*[nr_linii];
    for (int i = 0; i < nr_linii; ++i)
        labirint[i] = new char[nr_coloane];
    for (int i = 0; i < nr_linii; i++)
        for (int j = 0; j < nr_coloane; ++j)
            file >> labirint[i][j];
    square_nod start, goal;
    start.pozitie = soricel;
    start.prioritate = 0;
    goal.pozitie = branza;
    PriorityQueue frontier;
    frontier.Insert(start);
    std::unordered_map<square_nod, int> came_from;
    std::unordered_map<square_nod, int> cost_so_far;
    cost_so_far.insert(std::make_pair(start, 0));
    while (!frontier.isEmpty())
    {
        square_nod current = frontier.minElement();
        frontier.extractMin();
        if (current == goal)
        {
            break;
        }
        for (int i = 0; i < 4; i++)
        {
            square_nod next;
            next.pozitie.x = current.pozitie.x + deplasare_linie[i];
            next.pozitie.y = current.pozitie.y + deplasare_coloana[i];
            int new_cost = cost_so_far[current] + 1;
            auto gasit = cost_so_far.find(next);
            if (gasit == cost_so_far.end() || new_cost < cost_so_far[next])
            {
                cost_so_far[next] = new_cost;
                int priority = new_cost + city_block_distance(goal, next);
                next.prioritate = priority;
                frontier.Insert(next);
                came_from[next] = current.prioritate;
            }
        }
    }
}
I assume you mean the type square_nod, which you haven't listed the definition for. You need to provide a std::hash specialization for your class.
Borrowing from this question, which might be a duplicate:
namespace std {
    template <> struct hash<Foo>
    {
        size_t operator()(const Foo & x) const
        {
            /* your code here, e.g. "return hash<int>()(x.value);" */
        }
    };
}
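Applied to this question, a sketch might look like the following. The definition of square_nod isn't shown, so I'm assuming (from the usage in main) that pozitie.x and pozitie.y identify a node; unordered_map also needs operator== on the key type, so that is included as well:

#include <cstddef>
#include <functional>
#include <unordered_map>

// Assumed layout, reconstructed from the question's usage.
struct locatie { int x = 0, y = 0; };
struct square_nod {
    locatie pozitie;
    int prioritate = 0;
    bool operator==(const square_nod& other) const {
        return pozitie.x == other.pozitie.x && pozitie.y == other.pozitie.y;
    }
};

namespace std {
    template <> struct hash<square_nod> {
        size_t operator()(const square_nod& s) const {
            // Combine the two coordinates; any reasonable mix works.
            return hash<int>()(s.pozitie.x) ^ (hash<int>()(s.pozitie.y) << 16);
        }
    };
}

int main() {
    std::unordered_map<square_nod, int> cost_so_far; // now compiles
    square_nod start;
    cost_so_far[start] = 0;
    return 0;
}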

How to sort the boundary elements of a matrix in ascending order?

I've worked on this, but when I enter the matrix, all of the elements get sorted! I want to sort only the boundary elements in ascending order. Can somebody please tell me my mistake?
int k, temp = 0, sum = 0;
k = n;
boolean b = true;
do
{
    for (i = 0; i < m; i++)
    {
        for (j = 0; j < k - 1; j++)
        {
            if (i != 0 || j != 0)
            {
                if (A[i][j] > A[i][j+1])
                {
                    temp = A[i][j];
                    A[i][j] = A[i][j+1];
                    A[i][j+1] = temp;
                }
            }
        }
    }
    k -= 1;
    if (k < 0)
        b = false;
} while (b);
k = m;
do
{
    for (i = 0; i < k - 1; i++)
    {
        for (j = 0; j < n; j++)
        {
            if (i != 0 || j != 0)
            {
                if (A[j][i] > A[j][i+1])
                {
                    temp = A[j][i];
                    A[j][i] = A[j][i+1];
                    A[j][i+1] = temp;
                }
            }
        }
    }
    k -= 1;
    if (k < 0)
        b = false;
} while (b);
System.out.println("REARRANGED MATRIX:");
for (i = 0; i < m; i++)
{
    for (j = 0; j < n; j++)
    {
        System.out.print(A[i][j] + " ");
    }
    System.out.println();
}
Instead of using the condition if(i!=0||j!=0), use if(i==0||i==2||j==0||j==2) (that is for a 3×3 matrix; in general the check is i==0, i==m-1, j==0, or j==n-1). That should resolve the problem you are having. Your mistake was treating every element whose row and column indices are both non-zero as interior; the boundary elements are exactly those whose row index is 0 or m-1, or whose column index is 0 or n-1.
I have one solution for this, using selection sort: first I sort the whole matrix, then I display only the boundary of the sorted array.
import java.io.*;

class Boundary_Sorting
{
    public static void main(String args[]) throws IOException
    {
        InputStreamReader isr = new InputStreamReader(System.in);
        BufferedReader br = new BufferedReader(isr);
        System.out.println("Enter the rows of the matrix=");
        int m = Integer.parseInt(br.readLine());
        System.out.println("Enter the column of the matrix=");
        int n = Integer.parseInt(br.readLine());
        int a[][] = new int[m][n];
        int i, j;
        System.out.println("Enter the elements of the matrix: ");
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                a[i][j] = Integer.parseInt(br.readLine());
            }
        }
        System.out.println("**********************");
        System.out.println("The original matrix is");
        System.out.println("**********************");
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                System.out.print(a[i][j] + "\t");
            }
            System.out.println();
        }
        int B[] = new int[m*n]; //creating a 1D array of size 'r*c'
        int x = 0;
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                B[x] = a[i][j];
                x++;
            }
        }
        /* Sorting the 1D array in ascending order */
        int t = 0;
        for (i = 0; i < (m * n) - 1; i++)
        {
            for (j = i + 1; j < (m * n); j++)
            {
                if (B[i] > B[j])
                {
                    t = B[i];
                    B[i] = B[j];
                    B[j] = t;
                }
            }
        }
        /* Saving the sorted 1D array back into the 2D array */
        x = 0;
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                a[i][j] = B[x];
                x++;
            }
        }
        /* Printing the sorted 2D array */
        System.out.println("**********************");
        System.out.println("The Sorted Array:");
        System.out.println("**********************");
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                System.out.print(a[i][j] + "\t");
            }
            System.out.println();
        }
        System.out.println("**********************");
        System.out.println("The boundary elements of the matrix is=");
        System.out.println("**********************");
        for (i = 0; i < m; i++)
        {
            for (j = 0; j < n; j++)
            {
                if (i == 0 || j == 0 || i == m-1 || j == n-1) //condition for accessing boundary elements
                    System.out.print(a[i][j] + "\t");
                else
                    System.out.print(" \t");
            }
            System.out.println();
        }
    }
}

Fully Associative Cache implementation

I don't understand why my code for the fully associative cache doesn't match the trace files that I'm given.
The parameters are: each cache line is 32 bytes and the total cache size is 16 KB.
My implementations for set-associative caches of 2, 4, 8, and 16 ways all work perfectly (using a least-recently-used replacement policy). But the fully associative one, which could also just be described as 32-way set associative, is VERY close to the trace file but not quite right. Frankly, I don't know how to debug this one, since there is a vast number of steps (at least the way I did it).
Here are the relevant parts of my code (excuse the inefficiency):
//Fully Associative
int **fullyAssoc;
fullyAssoc = new int*[64]; //where fullyAssoc[0][index] is way 0, fullyAssoc[2][index] is way 1 etc..
int **LRU32;
LRU32 = new int*[32];
for (int i = 0; i < 64; ++i){ //Initialize all entries in fullyAssoc to 0
    fullyAssoc[i] = new int[16 * CACHE_LINE / 32];
}
for (int i = 0; i < 16; i++){ //Initialize LRU array
    LRU32[0][i] = 0;
    LRU32[1][i] = 1;
    LRU32[2][i] = 2;
    LRU32[3][i] = 3;
    LRU32[4][i] = 4;
    LRU32[5][i] = 5;
    LRU32[6][i] = 6;
    LRU32[7][i] = 7;
    LRU32[8][i] = 8;
    LRU32[9][i] = 9;
    LRU32[10][i] = 10;
    LRU32[11][i] = 11;
    LRU32[12][i] = 12;
    LRU32[13][i] = 13;
    LRU32[14][i] = 14;
    LRU32[15][i] = 15;
    LRU32[16][i] = 16;
    LRU32[17][i] = 17;
    LRU32[18][i] = 18;
    LRU32[19][i] = 19;
    LRU32[20][i] = 20;
    LRU32[21][i] = 21;
    LRU32[22][i] = 22;
    LRU32[23][i] = 23;
    LRU32[24][i] = 24;
    LRU32[25][i] = 25;
    LRU32[26][i] = 26;
    LRU32[27][i] = 27;
    LRU32[28][i] = 28;
    LRU32[29][i] = 29;
    LRU32[30][i] = 30;
    LRU32[31][i] = 31;
}
int fullyAssocLRU = 0;
int memCount = 0;
while (getline(fileIn, line)){
    stringstream s(line);
    s >> instruction >> hex >> address;
    int indexFull;
    int tagFull;
    unsigned long long address, addressFull;
    address = address >> 5; //Byte offset
    addressFull = address;
    indexFull = addressFull % 16;
    tagFull = addressFull >> 4;
    if (assocCache(fullyAssoc, indexFull, 32, tagFull, LRU32) == 1){
        fullyAssocLRU++;
    }
}

void LRU_update(int **lru, int index, int way, int ways){
    int temp = 0;
    int temp2[ways];
    int temp_index = 0;
    int i = 0;
    while (i < ways){
        if (lru[i][index] == way/2){
            temp = lru[i][index];
            i++;
            continue;
        }
        else{
            temp2[temp_index] = lru[i][index];
            temp_index++;
        }
        i++;
    }
    for (int j = 0; j < ways - 1; j++){
        lru[j][index] = temp2[j];
    }
    lru[ways - 1][index] = temp;
}

bool assocCache(int **block, int index, int ways, int tag, int **lru){
    bool retVal = false;
    for (int i = 0; i < 2*ways; i = i + 2){
        if (block[i][index] == 0){
            block[i][index] = 1;
            block[i+1][index] = tag;
            LRU_update(lru, index, i, ways);
            return retVal;
        }
        else{
            if (block[i+1][index] == tag){
                retVal = true;
                LRU_update(lru, index, i, ways);
                return retVal;
            }
            else{
                continue;
            }
        }
    }
    int head = 2 * lru[0][index];
    block[head][index] = 1;
    block[head+1][index] = tag;
    LRU_update(lru, index, head, ways);
    return retVal;
}
The trace file is supposed to be:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102; //For direct mapped
993999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102; //For set associative
1000500,1122102; //For fully associative (LRU)
My output is:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102;
939999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102;
1000228,1122102;
As you can see, the fully associative result is only 272 off from the correct output. Why would it be off when switching from 16 ways to 32 ways?
Ah, I mistakenly thought a fully associative cache with 32-byte lines and a 16 KB total size would be 32 ways, when it's actually 512 ways (16 KB / 32 B = 512 lines, and in a fully associative cache every line belongs to the same set).
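A quick sanity check of that arithmetic, using the parameters from the question (16 KB cache, 32-byte lines):

#include <iostream>

int main() {
    const int cacheSizeBytes = 16 * 1024; // 16 KB total
    const int lineSizeBytes  = 32;        // 32-byte cache lines

    // A fully associative cache has a single set containing every line,
    // so its associativity equals the total number of lines.
    const int numLines = cacheSizeBytes / lineSizeBytes;
    std::cout << "fully associative ways = " << numLines << '\n';   // prints 512

    // For comparison, a 32-way set-associative cache of the same size
    // has numLines / 32 = 16 sets, which is what the original
    // indexFull = addressFull % 16 computation was modelling.
    std::cout << "sets at 32-way = " << numLines / 32 << '\n';      // prints 16
    return 0;
}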
