Replace pointer to pointer with initializer_list - C++11

#include <initializer_list>
#include <iostream>
#include <vector>
// this API is counter-intuitive
void original(int const **data)
{
    for(size_t i = 0; i != 3; ++i){
        int const *ptr = *data;
        //std::cout<<*ptr++<<", "<<*ptr<<std::endl; //this line may cause undefined behavior
        std::cout<<ptr[0]<<", "<<ptr[1]<<std::endl;
        ++data;
    }
}

// my eyes prefer this API to the original-style one
void replace_original(std::initializer_list<std::initializer_list<int>> list)
{
    std::vector<int const*> results(list.size());
    for(auto data : list){
        results.push_back(std::begin(data)); //#1
    }
    original(&results[0]);
}

int main()
{
    int first[]  = {0, 1};
    int second[] = {2, 3};
    int third[]  = {4, 5};
    int const *array[] = {first, second, third};

    original(array);
    replace_original({ {0, 1}, {2, 3}, {4, 5} });

    return 0;
}
The output I get is
0, 1
2, 3
4, 5
but the expected output is
0, 1
2, 3
4, 5
0, 1
2, 3
4, 5
I want to encapsulate the API of original (an old, C-style API) behind an API like replace_original, but I can't figure out why #1 doesn't work.

Ah, stupid mistake, I should change the loop to
size_t const size = list.size();
std::vector<int const*> results(size);
for(size_t i = 0; i != size; ++i){
    results[i] = std::begin( *(std::begin(list) + i) );
}
Do you have a better solution for encapsulating this kind of API?
After googling, I found out that in C++14 the size() of initializer_list becomes constexpr, so we should be able to use std::array instead of std::vector.
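For what it's worth, here is a minimal sketch (my own naming, same C++11 constraints) of how I would build the pointer table so that the mistake at #1 cannot happen: start from an empty vector, reserve, and push_back only the pointers that are actually needed.
// Sketch only: 'original' is the C-style API defined above.
void replace_original_v2(std::initializer_list<std::initializer_list<int>> list)
{
    std::vector<int const*> results;
    results.reserve(list.size());          // no default-constructed null entries up front
    for (auto const &data : list)
        results.push_back(data.begin());   // the inner backing arrays outlive this call
    original(results.data());              // same as &results[0], but reads more clearly
}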

Related

Cannot understand how to recursively merge sort

Currently self-learning C++ with Daniel Liang's Introduction to C++.
On the topic of the merge sort, I cannot seem to understand how his code is recursively calling itself.
I understand the general concept of the merge sort, but I am having trouble understanding this code specifically.
In this example, we first pass the list 1, 7, 3, 4, 9, 3, 3, 1, 2, and its size (9) to the mergeSort function.
From there, we divide the list into two until the array size reaches 1. In this case, we would get: 1,7,3,4 -> 1,7 -> 1. We then move on to merge sorting the second half. The second-half array would be [7] in this case. We merge the two arrays [1] and [7] and proceed to delete the two arrays that were dynamically allocated, to prevent any memory leak.
The part I don't understand is how this code runs from here, after delete[] firstHalf and delete[] secondHalf. From my understanding, shouldn't there be another mergeSort function call in order to merge sort the new firstHalf and secondHalf?
#include <iostream>
using namespace std;

// Function prototype
void arraycopy(int source[], int sourceStartIndex,
               int target[], int targetStartIndex, int length);
void merge(int list1[], int list1Size,
           int list2[], int list2Size, int temp[]);

// The function for sorting the numbers
void mergeSort(int list[], int arraySize)
{
    if (arraySize > 1)
    {
        // Merge sort the first half
        int* firstHalf = new int[arraySize / 2];
        arraycopy(list, 0, firstHalf, 0, arraySize / 2);
        mergeSort(firstHalf, arraySize / 2);

        // Merge sort the second half
        int secondHalfLength = arraySize - arraySize / 2;
        int* secondHalf = new int[secondHalfLength];
        arraycopy(list, arraySize / 2, secondHalf, 0, secondHalfLength);
        mergeSort(secondHalf, secondHalfLength);

        // Merge firstHalf with secondHalf
        merge(firstHalf, arraySize / 2, secondHalf, secondHalfLength, list);

        delete [] firstHalf;
        delete [] secondHalf;
    }
}

void merge(int list1[], int list1Size,
           int list2[], int list2Size, int temp[])
{
    int current1 = 0; // Current index in list1
    int current2 = 0; // Current index in list2
    int current3 = 0; // Current index in temp

    while (current1 < list1Size && current2 < list2Size)
    {
        if (list1[current1] < list2[current2])
            temp[current3++] = list1[current1++];
        else
            temp[current3++] = list2[current2++];
    }

    while (current1 < list1Size)
        temp[current3++] = list1[current1++];

    while (current2 < list2Size)
        temp[current3++] = list2[current2++];
}

void arraycopy(int source[], int sourceStartIndex,
               int target[], int targetStartIndex, int length)
{
    for (int i = 0; i < length; i++)
    {
        target[i + targetStartIndex] = source[i + sourceStartIndex];
    }
}

int main()
{
    const int SIZE = 9;
    int list[] = {1, 7, 3, 4, 9, 3, 3, 1, 2};

    mergeSort(list, SIZE);

    for (int i = 0; i < SIZE; i++)
        cout << list[i] << " ";

    return 0;
}
From my understanding, shouldn't there be another mergeSort function
call in order to merge sort the new firstHalf and secondHalf?
It is happening implicitly during the recursive call. When you reach these two lines:
delete [] firstHalf;
delete [] secondHalf;
it means that one call to mergeSort has completed. If that call was sorting a first half, then execution continues from the line just after it, i.e. these lines:
// Merge sort the second half
int secondHalfLength = arraySize - arraySize / 2;
...
But if that call was sorting a second half, then control goes back to the line just after that call, i.e. these lines:
// Merge firstHalf with secondHalf
merge(firstHalf, arraySize / 2, secondHalf, secondHalfLength, list);
And everything proceeds as planned.
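If it helps to see this, below is a small instrumented sketch of my own (not from Liang's book) that mirrors the control flow of mergeSort without touching any data. Running it for a 9-element array prints the order in which halves are sorted and merged, which makes the "second call" and the resumption after each delete[] pair explicit.
#include <iostream>

// Prints an indented message so the nesting of recursive calls is visible.
void trace(int depth, const char *what, int size)
{
    for (int i = 0; i < depth; i++)
        std::cout << "  ";
    std::cout << what << " " << size << " element(s)" << std::endl;
}

// Mirrors the structure of the book's mergeSort, minus the actual data movement.
void mergeSortTrace(int size, int depth)
{
    if (size > 1)
    {
        trace(depth, "merge sort first half:", size / 2);
        mergeSortTrace(size / 2, depth + 1);

        trace(depth, "merge sort second half:", size - size / 2);
        mergeSortTrace(size - size / 2, depth + 1);

        trace(depth, "merge halves back into", size);
    }
}

int main()
{
    mergeSortTrace(9, 0); // same size as the example list
    return 0;
}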

Processes read data from the same file

I have a mesh file and I partitioned it with METIS (into 4 parts/processes). METIS gave me the partitioning file of the mesh (a file listing, for each element of the mesh, the number of the process it belongs to). My job now is to feed this information into my parallel code. I tried to do it by letting each process access the same mesh file and read the data it needs based on the partitioning file.
#include <iostream>
#include <fstream>
#include <sstream>
#include "mpi.h"
using namespace std;

// each process stores the partitioning
int* PartitionFile(){
    ifstream file ("epart.txt");
    int NE=14;
    int part,i=0;
    int *partition=new int[14];
    while(file>>part){
        partition[i]=part;
        i++;
    }
    file.close();
    return partition;
}

int FindSizeOfLocalElements(int *epart,int rank){
    int size=0;
    for (int i=0;i<14;i++){
        if(epart[i]==rank){
            size+=1;
        }
    }
    return size;
}

// stores the elements of each process
int * LocalPartition(int* epart,int size,int rank){
    int *localPart=new int[size];
    int j=0;
    for(int i=0;i<14;i++){
        if (epart[i]==rank){
            localPart[j]=i+1; // +1 because elements start from 1 (global numbering)
            j+=1;
        }
    }
    return localPart;
}

int **ElementConnectivityMeshFile(int* localPart,int size){
    ifstream file ("mesh.txt");
    int node1,node2,node3;
    int elem=1;
    int i=0;
    int **elemConn=new int*[size];
    for(int j=0;j<size;j++){
        elemConn[j]=new int[3]; // each element has 3 nodes. Elements here use local numbering; global numbering is stored in localPart
    }
    while(file>>node1>>node2>>node3){
        if (i<size && elem==localPart[i]){ // the i<size guard avoids reading past the end of localPart once all local elements are found
            elemConn[i][0]=node1;
            elemConn[i][1]=node2;
            elemConn[i][2]=node3;
            i+=1;
        }
        elem+=1;
        if(elem>14){break;}
    }
    file.close();
    return elemConn;
}

int main(){
    MPI_Init(NULL, NULL);

    int numProc;
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int *epart=PartitionFile();
    int size=FindSizeOfLocalElements(epart,rank);
    int *elem=LocalPartition(epart,size,rank);
    int **elemConn=ElementConnectivityMeshFile(elem,size);

    MPI_Finalize();
    return 0;
}
This part of the code gives me the desired results; however, I want to know how efficient it is to let MPI processes read the same file using C++ standard functions, and whether that can affect scalability and performance. For this demonstration I used a mesh of 14 elements and 4 processes.
mesh file
1 3 2
2 3 4
3 5 4
4 5 6
5 7 6
8 7 5
3 8 5
9 7 8
9 8 3
1 9 3
10 9 1
11 10 1
11 1 12
12 1 2
epart file
2
2
0
0
0
1
0
1
1
3
3
3
2
2
I think this program illustrates the basic approach using MPI-IO with binary files:
#include <stdio.h>
#include <mpi.h>

#define NELEM 14
#define NVERT 3

int meshfile[NELEM][NVERT] =
  { { 1,  3,  2},
    { 2,  3,  4},
    { 3,  5,  4},
    { 4,  5,  6},
    { 5,  7,  6},
    { 8,  7,  5},
    { 3,  8,  5},
    { 9,  7,  8},
    { 9,  8,  3},
    { 1,  9,  3},
    {10,  9,  1},
    {11, 10,  1},
    {11,  1, 12},
    {12,  1,  2} };

int partfile[NELEM] = {2, 2, 0, 0, 0, 1, 0, 1, 1, 3, 3, 3, 2, 2};

int main(void)
{
    int i;
    int part[NELEM];
    int mesh[NELEM][NVERT];
    /* Should really malloc smaller mesh based on local size */

    FILE *fp;
    int rank, size;
    MPI_Comm comm;
    MPI_Status status;
    MPI_File fh;
    MPI_Datatype filetype;
    int disp[NELEM];
    int nelemlocal;
    /* Should really malloc smaller displ based on nelemlocal */

    comm = MPI_COMM_WORLD;

    MPI_Init(NULL, NULL);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    if (rank == 0)
    {
        printf("Running on %d processes\n", size);

        // data files should already exist but create them here so we
        // have a self-contained program
        fp = fopen("mesh.dat", "w");
        fwrite(meshfile, sizeof(int), NELEM*NVERT, fp);
        fclose(fp);

        fp = fopen("part.dat", "w");
        fwrite(partfile, sizeof(int), NELEM, fp);
        fclose(fp);
    }

    // could read on rank 0 and broadcast, but using MPI-IO then
    // "readall" should take an efficient collective approach
    // every rank read the whole partition file
    MPI_File_open(comm, "part.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_read_all(fh, part, NELEM, MPI_INT, &status);
    MPI_File_close(&fh);

    nelemlocal = 0;

    // pick out local elements and record displacements
    for (i=0; i < NELEM; i++)
    {
        if (part[i] == rank)
        {
            disp[nelemlocal] = i*NVERT;
            nelemlocal += 1;
        }
    }

    printf("on rank %d, nelemlocal = %d\n", rank, nelemlocal);

    // create the MPI datatype to use as the filetype, which is
    // effectively a mask that selects only the elements for this rank
    MPI_Type_create_indexed_block(nelemlocal, NVERT, disp, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File_open(comm, "mesh.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
    // set the file view appropriate to this rank
    MPI_File_set_view(fh, 0, MPI_INT, filetype, "native", MPI_INFO_NULL);
    // each rank only reads its own set of elements based on file view
    MPI_File_read_all(fh, mesh, nelemlocal*NVERT, MPI_INT, &status);
    MPI_File_close(&fh);

    // check we got the correct data
    for (i=0; i < nelemlocal; i++)
    {
        printf("on rank %d, mesh[%d] = %d, %d, %d\n",
               rank, i, mesh[i][0], mesh[i][1], mesh[i][2]);
    }

    MPI_Finalize();
}
and it seems to give the right answer:
dsh#laptop$ mpicc -o metisio metisio.c
dsh#laptop$ mpirun -n 4 ./metisio | sort
on rank 0, mesh[0] = 3, 5, 4
on rank 0, mesh[1] = 4, 5, 6
on rank 0, mesh[2] = 5, 7, 6
on rank 0, mesh[3] = 3, 8, 5
on rank 0, nelemlocal = 4
on rank 1, mesh[0] = 8, 7, 5
on rank 1, mesh[1] = 9, 7, 8
on rank 1, mesh[2] = 9, 8, 3
on rank 1, nelemlocal = 3
on rank 2, mesh[0] = 1, 3, 2
on rank 2, mesh[1] = 2, 3, 4
on rank 2, mesh[2] = 11, 1, 12
on rank 2, mesh[3] = 12, 1, 2
on rank 2, nelemlocal = 4
on rank 3, mesh[0] = 1, 9, 3
on rank 3, mesh[1] = 10, 9, 1
on rank 3, mesh[2] = 11, 10, 1
on rank 3, nelemlocal = 3
Running on 4 processes
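For comparison, the "read on rank 0 and broadcast" alternative mentioned in the comments of the program above can be sketched as follows (my own minimal example; it assumes part.dat has already been written by that program). It is perfectly fine for small metadata like the partition array, but note that every rank ends up holding the whole array rather than just its own slice.
#include <cstdio>
#include <mpi.h>

#define NELEM 14

int main(void)
{
    int part[NELEM];
    int rank;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
    {
        // only rank 0 touches the file system
        FILE *fp = std::fopen("part.dat", "rb");
        std::fread(part, sizeof(int), NELEM, fp);
        std::fclose(fp);
    }

    // every other rank receives the same partition array
    MPI_Bcast(part, NELEM, MPI_INT, 0, MPI_COMM_WORLD);

    std::printf("rank %d sees part[0] = %d\n", rank, part[0]);

    MPI_Finalize();
    return 0;
}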

How to assign variadic template arguments to std::array

I have a std::array and a variadic template function whose number of parameters matches the size of the array. I need to assign the arguments to the elements of the array. In other words, in the code below I want a to get the values {1, 2, 3} and b to get the values {1, 2, 3, 4, 5}.
std::array<int, 3> a;
std::array<int, 5> b;
assign_values(a, 1, 2, 3);
assign_values(b, 1, 2, 3, 4, 5);
The question is how to implement the assign_values variadic template function.
I'm limited to C++14.
Update:
The arguments can be of different types: assign_values(b, 1, 2u, 3., '4', 5l);
Something like this:
template<class T, size_t N, class ... Values>
void assign_values(std::array<T,N>& arr, Values... vals) {
    static_assert(N == sizeof...(vals), "wrong number of arguments"); // C++14 still requires the message
    int j = 0;
    for (auto i : std::initializer_list< std::common_type_t<Values...> >{vals...})
        arr[j++] = i;
}
Demo
I'm limited to C++14.
The good old trick of the unused array initialization (the pre-C++17 surrogate for fold expressions) should work (in C++11 as well):
template <typename T, std::size_t N, typename ... Values>
void assign_values (std::array<T,N> & arr, Values... vals)
{
    static_assert(N == sizeof...(vals), "wrong number of arguments"); // the message is mandatory before C++17
    using unused = int[];
    int j = 0;
    (void)unused { 0, (arr[j++] = vals, 0)... };
}
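Just as a point of comparison (my own sketch, not from either answer above): in C++14 you can also expand over a std::index_sequence, which assigns each argument directly to its slot without going through a common type or a runtime counter.
#include <array>
#include <cstddef>
#include <utility>

template <typename T, std::size_t N, typename... Values, std::size_t... Is>
void assign_values_impl(std::array<T, N> &arr, std::index_sequence<Is...>, Values&&... vals)
{
    // same unused-array expansion trick, but indexed at compile time
    using unused = int[];
    (void)unused{ 0, (arr[Is] = std::forward<Values>(vals), 0)... };
}

template <typename T, std::size_t N, typename... Values>
void assign_values_seq(std::array<T, N> &arr, Values&&... vals)
{
    static_assert(N == sizeof...(Values), "argument count must match the array size");
    assign_values_impl(arr, std::index_sequence_for<Values...>{}, std::forward<Values>(vals)...);
}
With that, assign_values_seq(b, 1, 2u, 3., '4', 5l); handles the mixed-type case from the update as well, each argument being converted to int on assignment.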

How to make a strided chunk iterator in Thrust (CUDA)

I need an iterator class like this one:
https://github.com/thrust/thrust/blob/master/examples/strided_range.cu
but I need this new iterator to produce the following sequence:
[k * size_stride, k * size_stride + 1, ..., k * size_stride + size_chunk - 1, ...]
with
k = 0, 1, ..., N
Example:
size_stride = 8
size_chunk = 3
N = 3
then the sequence is
[0, 1, 2, 8, 9, 10, 16, 17, 18, 24, 25, 26]
I don't know how to do this efficiently...
The strided range iterator is basically a carefully crafted permutation iterator with a functor that generates the appropriate indices for the permutation.
Here is a modification of the strided range iterator example. The main changes were:
include the chunk size as an iterator parameter
modify the functor that provides the indices for the permutation iterator so that it spits out the desired sequence
adjust the definition of the .end() iterator to provide the appropriate sequence length
Worked example:
$ cat t1280.cu
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <iostream>
#include <assert.h>

// this example illustrates how to make strided-chunk access to a range of values
// examples:
//   strided_chunk_range([0, 1, 2, 3, 4, 5, 6], 1, 1) -> [0, 1, 2, 3, 4, 5, 6]
//   strided_chunk_range([0, 1, 2, 3, 4, 5, 6], 2, 1) -> [0, 2, 4, 6]
//   strided_chunk_range([0, 1, 2, 3, 4, 5, 6], 3, 2) -> [0, 1, 3, 4, 6]
//   ...
template <typename Iterator>
class strided_chunk_range
{
    public:

    typedef typename thrust::iterator_difference<Iterator>::type difference_type;

    struct stride_functor : public thrust::unary_function<difference_type,difference_type>
    {
        difference_type stride;
        int chunk;

        stride_functor(difference_type stride, int chunk)
            : stride(stride), chunk(chunk) {}

        __host__ __device__
        difference_type operator()(const difference_type& i) const
        {
            int pos = i/chunk;
            return ((pos * stride) + (i-(pos*chunk)));
        }
    };

    typedef typename thrust::counting_iterator<difference_type>                   CountingIterator;
    typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
    typedef typename thrust::permutation_iterator<Iterator,TransformIterator>     PermutationIterator;

    // type of the strided_chunk_range iterator
    typedef PermutationIterator iterator;

    // construct strided_chunk_range for the range [first,last)
    strided_chunk_range(Iterator first, Iterator last, difference_type stride, int chunk)
        : first(first), last(last), stride(stride), chunk(chunk) { assert(chunk<=stride); }

    iterator begin(void) const
    {
        return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride, chunk)));
    }

    iterator end(void) const
    {
        int lmf = last-first;
        int nfs = lmf/stride;
        int rem = lmf-(nfs*stride);
        return begin() + (nfs*chunk) + ((rem<chunk)?rem:chunk);
    }

    protected:
    Iterator first;
    Iterator last;
    difference_type stride;
    int chunk;
};

int main(void)
{
    thrust::device_vector<int> data(50);
    thrust::sequence(data.begin(), data.end());

    typedef thrust::device_vector<int>::iterator Iterator;

    // create strided_chunk_range
    std::cout << "stride 3, chunk 2, length 7" << std::endl;
    strided_chunk_range<Iterator> scr1(data.begin(), data.begin()+7, 3, 2);
    thrust::copy(scr1.begin(), scr1.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;

    std::cout << "stride 8, chunk 3, length 50" << std::endl;
    strided_chunk_range<Iterator> scr(data.begin(), data.end(), 8, 3);
    thrust::copy(scr.begin(), scr.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;

    return 0;
}
$ nvcc -arch=sm_35 -o t1280 t1280.cu
$ ./t1280
stride 3, chunk 2, length 7
0 1 3 4 6
stride 8, chunk 3, length 50
0 1 2 8 9 10 16 17 18 24 25 26 32 33 34 40 41 42 48 49
$
This is probably not the most optimal implementation, in particular because we are doing division in the permutation functor, but it should get you started.
I assume (and test for) chunk<=stride, because this seemed reasonable to me, and simplified my thought process. I'm sure it could be modified, with an appropriate example of what sequence you would like to see, for the case where chunk>stride.
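To see what the index functor is computing, here is a tiny host-only sketch of my own (no Thrust involved, names are mine) that applies the same i/chunk arithmetic and reproduces the requested sequence for stride 8, chunk 3:
#include <iostream>

// Same mapping as stride_functor::operator(): which source index the i-th
// output element should read from.
int chunked_index(int i, int stride, int chunk)
{
    int pos = i / chunk;                     // which chunk we are in
    return pos * stride + (i - pos * chunk); // start of that chunk + offset inside it
}

int main()
{
    const int stride = 8, chunk = 3, nchunks = 4;
    for (int i = 0; i < nchunks * chunk; ++i)
        std::cout << chunked_index(i, stride, chunk) << ' ';
    std::cout << '\n'; // prints: 0 1 2 8 9 10 16 17 18 24 25 26
    return 0;
}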

Why does the Man or Boy test seem much faster in Objective-C (blocks) than in C++ (lambdas)?

I've got an assignment for a "performance analysis" class and decided to do some testing in gcc and clang using the Man or Boy test. I've already completed the assignment, but something odd came up.
The Objective-C code (no ARC!) with blocks is as follows:
#import <stdlib.h>
#import <assert.h>
#import <Foundation/Foundation.h>

typedef int (^F)(void);

int A(int kParam, F x1, F x2, F x3, F x4, F x5) {
    __block int k = kParam;
    __block F B;
    B = ^ {
        return A(--k, B, x1, x2, x3, x4);
    };
    return k <= 0 ? x4() + x5() : B();
};

F K(int n) {
    return [[^{
        return n;
    } copy] autorelease];
};

int main(int argc, const char **argv) {
    static int TABLE[] = {1, 0, -2, 0, 1, 0, 1, -1, -10, -30, -67, -138, -291,
                          -642, -1446, -3250};
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    if(argc == 2) {
        int k;
        sscanf(argv[1], "%d", &k);
        int result = A(k, K(1), K(-1), K(-1), K(1), K(0));
        assert(result == TABLE[k]);
    };
    [pool drain];
    return EXIT_SUCCESS;
};
And the C++ code I used is as follows:
#include <cassert>
#include <iostream>
#include <tr1/functional> // I'm using tr1 because I have an old version of libstdc++

using namespace std;
using namespace std::tr1;

typedef function<int()> F;

int A(int k, const F &x1, const F &x2, const F &x3, const F &x4, const F &x5) {
    F B = [=, &k, &B] {
        return A(--k, B, x1, x2, x3, x4);
    };
    return k <= 0 ? x4() + x5() : B();
};

F L(int n) {
    return [n] {
        return n;
    };
};

int main(int argc, char **argv) {
    static int TABLE[] = {1, 0, -2, 0, 1, 0, 1, -1, -10, -30, -67, -138, -291,
                          -642, -1446, -3250};
    if(argc == 2) {
        int k;
        sscanf(argv[1], "%d", &k);
        int result = A(k, L(1), L(-1), L(-1), L(1), L(0));
        assert(result == TABLE[k]);
    };
    return EXIT_SUCCESS;
};
The Objective-C version seems to perform quite well (I tested with k ranging from 1 to 15) on both clang 3.5 and llvm-gcc 4.2. The C++ version, on the other hand, took around 9 seconds with k = 15 on both clang 3.5 and gcc 4.9.
Am I missing something? Why is the C++ version so much slower at higher values? (Here's the data table I generated to use in R, in case anyone wants to check.)
Edit:
Looks like the overhead came from std::function<>, as stated in the comments. In case anyone ever needs it, using this replacement instead resolved the issue, and it got (a little bit) faster than Objective-C, as one would expect.

Resources