How to set interface alternate setting - linux-kernel

I'm trying to set an alternate setting of an interface using USBFS ioctl calls.
The following is my code snippet:
int interface = 3;
int ret;
struct usbdevfs_ioctl command;
struct usbdevfs_getdriver getdrv;
struct usbdevfs_setinterface si;

getdrv.interface = interface;
ret = ioctl(fd, USBDEVFS_GETDRIVER, &getdrv);
if (ret < 0)
{
    print(("get driver failed %d %d", ret, errno));
}

command.ifno = interface;
command.ioctl_code = USBDEVFS_DISCONNECT;
command.data = NULL;
ret = ioctl(fd, USBDEVFS_IOCTL, &command);
if (ret < 0)
{
    print(("detach driver failed %d %d", ret, errno));
}

ret = ioctl(fd, USBDEVFS_CLAIMINTERFACE, &interface);
if (ret < 0)
{
    print(("claim interface failed %d %d", ret, errno));
}

si.interface = interface;
si.altsetting = setZerobandwidth;
ret = ioctl(fd, USBDEVFS_SETINTERFACE, &si);
if (ret < 0)
{
    print(("set interface ioctl failed %d %d", ret, errno));
}

ret = ioctl(fd, USBDEVFS_RELEASEINTERFACE, &interface);
if (ret < 0)
{
    print(("release interface ioctl failed %d %d", ret, errno));
}

command.ifno = interface;
command.ioctl_code = USBDEVFS_CONNECT;
command.data = NULL;
ret = ioctl(fd, USBDEVFS_IOCTL, &command);
if (ret < 0)
{
    print(("attach driver ioctl failed %d %d", ret, errno));
}
The USBDEVFS_SETINTERFACE ioctl itself works fine, but as soon as I release the interface with ioctl(fd, USBDEVFS_RELEASEINTERFACE, &interface), the alternate setting resets to the first altsetting.
As per the libusb API documentation, libusb_release_interface likewise resets the interface to the first alternate setting.
Which sequence of ioctl calls do I need to make the alternate setting stick?

You cannot change the alt setting permanently from user space; that can only be done from kernel space.
I would suggest you simply stop using the endpoint and close the device: the kernel will switch the interface to the zero-bandwidth alternate setting once you close it.
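For comparison, here is a minimal sketch of the same sequence through libusb-1.0 (a hypothetical helper, not from the question; handle and alt are assumed to come from elsewhere and error handling is elided). It runs into the same constraint: the alternate setting only holds while the interface is claimed.

#include <libusb-1.0/libusb.h>

/* Sketch only: libusb equivalent of the USBFS sequence above, for
 * interface 3. The alternate setting persists only while the interface
 * is claimed; libusb_release_interface restores altsetting 0. */
static void demo_alt_setting(libusb_device_handle *handle, int alt)
{
    libusb_detach_kernel_driver(handle, 3);
    libusb_claim_interface(handle, 3);
    libusb_set_interface_alt_setting(handle, 3, alt);
    /* ... perform transfers while the interface is claimed ... */
    libusb_release_interface(handle, 3);  /* back to the first altsetting */
}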

Related

What does `setupkvm` do in xv6? What does it really mean to "set up kernel virtual memory"?

In the copyuvm function, setupkvm is called to set up kernel virtual memory. Why do we need to set up kernel virtual memory when we are copying a user process? Why didn't we need that when we were doing allocuvm?
Code for copyuvm:
// Given a parent process's page table, create a copy
// of it for a child.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i, flags;
  char *mem;

  if((d = setupkvm()) == 0)
    return 0;
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    flags = PTE_FLAGS(*pte);
    if((mem = kalloc()) == 0)
      goto bad;
    memmove(mem, (char*)P2V(pa), PGSIZE);
    if(mappages(d, (void*)i, PGSIZE, V2P(mem), flags) < 0) {
      kfree(mem);
      goto bad;
    }
  }
  return d;

bad:
  freevm(d);
  return 0;
}
and for allocuvm:
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz >= KERNBASE)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if(mappages(pgdir, (char*)a, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0){
      cprintf("allocuvm out of memory (2)\n");
      deallocuvm(pgdir, newsz, oldsz);
      kfree(mem);
      return 0;
    }
  }
  return newsz;
}
copyuvm builds a complete new page directory for the child: the kernel mappings plus a copy of the parent's user mappings. So during copyuvm we need setupkvm to create the kernel part of the new address space.
allocuvm, on the other hand, just extends an existing address space (specifically the heap portion). Since the kernel mappings are already present in that page directory, there is no need to call setupkvm.
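For reference, here is a paraphrased sketch (not the verbatim source) of what setupkvm does in xv6's vm.c: allocate an empty page directory and map the kernel regions listed in the static kmap table into it. This is why every per-process page directory, including the one copyuvm builds for the child, contains the kernel mappings.

// Paraphrased sketch of xv6's setupkvm (vm.c): allocate an empty page
// directory and install the kernel half of the address space from the
// static kmap[] table.
pde_t*
setupkvm(void)
{
  pde_t *pgdir;
  struct kmap *k;

  if((pgdir = (pde_t*)kalloc()) == 0)
    return 0;
  memset(pgdir, 0, PGSIZE);
  for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
    if(mappages(pgdir, k->virt, k->phys_end - k->phys_start,
                (uint)k->phys_start, k->perm) < 0){
      freevm(pgdir);
      return 0;
    }
  return pgdir;
}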

Linux Kernel: manually modify page table entry flags

I am trying to manually mark a certain memory region of a userspace process as non-cacheable (for educational purposes, not intended to be used in production code) by setting a flag in the respective page table entries.
I am running Ubuntu 14.04 (ASLR disabled) with a 4.4 Linux kernel on an x86_64 Intel Skylake processor.
In my kernel module I have the following function:
/*
 * Set memory region [start,end], excluding 'addr', of process with PID 'pid' as uncacheable.
 */
ssize_t set_uncachable(uint32_t pid, uint64_t start, uint64_t end, uint64_t addr)
{
	struct task_struct *ts = NULL;
	struct vm_area_struct *curr, *first = NULL;
	pgd_t *pgd;
	pte_t *pte;
	uint64_t numpages, curr_addr;
	uint32_t level, j, i = 0;

	printk(KERN_INFO "set_uncachable called\n");

	ts = pid_task(find_vpid(pid), PIDTYPE_PID); /* find task from PID */
	pgd = ts->mm->pgd;                          /* page table root of the task */
	first = ts->mm->mmap;
	curr = first;
	if (first == NULL)
		return -1;

	do {
		printk(KERN_INFO "Region %3u [0x%016lx - 0x%016lx]", i, curr->vm_start, curr->vm_end);
		numpages = (curr->vm_end - curr->vm_start) / PAGE_SIZE; /* PAGE_SIZE is 4K for now */
		if (curr->vm_start > curr->vm_end)
			numpages = 0;
		for (j = 0; j < numpages; j++) {
			curr_addr = curr->vm_start + (PAGE_SIZE * j);
			pte = lookup_address_in_pgd(pgd, curr_addr, &level);
			if ((pte != NULL) && (level == 1)) {
				printk(KERN_INFO "PTE for 0x%016llx - 0x%016lx (level %u)\n", curr_addr, pte->pte, level);
				if (curr_addr >= start && curr_addr < end && curr_addr != addr) {
					/* point the page entry at PAT#3 */
					pte->pte |= PWT_BIT | PCD_BIT;
					pte->pte &= ~PAT_BIT;
					printk(KERN_INFO "PTE for 0x%016llx - 0x%016lx (level %u) -- UPDATED\n", curr_addr, pte->pte, level);
				}
			}
		}
		curr = curr->vm_next;
		i++;
	} while (curr != NULL && curr != first); /* the mmap list is NULL-terminated */

	return 0;
}
To test the above code I run an application that allocates a certain region in memory:
//#define BUF_ADDR_START 0x0000000008400000LL /* works */
#define BUF_ADDR_START 0x00007ffff0000000LL /* does not work */
[...]
buffer = mmap((void *)BUF_ADDR_START, BUF_SIZE, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_POPULATE, -1, 0);
if (buffer == MAP_FAILED)
{
    printf("Failed to map buffer\n");
    exit(-1);
}
memset(buffer, 0, BUF_SIZE);
printf("Buffer at %p\n", buffer);
I want to mark the buffer uncacheable using my kernel module. The code in my kernel module works for 0x8400000, but for 0x7ffff0000000 no page table entry is found (i.e. lookup_address_in_pgd returns NULL). The buffer is definitely allocated in the test program, though.
It seems like my kernel module works for low addresses (code, data, and heap sections), but not for memory mapped at higher addresses (stack, shared libraries, etc.).
Does anyone have an idea why it fails for larger addresses? Suggestions on how to implement set_uncachable more elegantly are welcome as well ;-)
Thanks!
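One direction worth trying (a sketch under the assumption of a 4.4-era x86_64 paging API, i.e. four levels and no p4d; untested): walk the task's page tables with the generic accessors instead of lookup_address_in_pgd, and bail out explicitly on holes and huge pages, so you can at least see at which level the translation stops for the high addresses.

/*
 * Hypothetical helper (sketch, untested): resolve a user-space virtual
 * address against a specific mm using the generic accessors of a 4.4
 * kernel. Returns NULL for unmapped addresses and for huge pages, which
 * have no level-1 PTE and must be handled separately.
 */
static pte_t *walk_user_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))	/* huge page: no level-1 PTE exists */
		return NULL;
	return pte_offset_kernel(pmd, address);
}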

OpenCL APIs take almost the same time irrespective of sample size

I've been trying to profile OpenCL host code for FIR filtering on Mac, Ubuntu, and other platforms. My host code and kernel are below.
The issue is that irrespective of the number of samples that I provide to the FIR filter, clEnqueueNDRangeKernel ends up taking the same amount of time. I've profiled clEnqueueReadBuffer and clEnqueueWriteBuffer as well, and somehow they also take the same amount of time. On Mac I'm profiling with mach_absolute_time as well as OpenCL events; on Ubuntu I'm profiling with PAPI. I'm unable to understand why this is happening: ideally, with an increase in the number of samples, clEnqueueReadBuffer and clEnqueueWriteBuffer should take more time, and so should the kernel execution.
Kernel:
__kernel void fir4(
    __global float* input,
    __global float* output)
{
    int i = get_global_id(0);
    int j = 0;
    int coeff[4] = {5,7,5,7};

    /*for(j=0;j<4;j++)
    {
        output[i] += coeff[j]*(input[i+4-j-1]);
    }*/

    //unrolled
    output[i] += coeff[0]*(input[i+4-0-1]);
    output[i] += coeff[1]*(input[i+4-1-1]);
    output[i] += coeff[2]*(input[i+4-2-1]);
    output[i] += coeff[3]*(input[i+4-3-1]);
}

__kernel void fir8(
    __global float* input,
    __global float* output)
{
    int i = get_global_id(0);
    int j = 0;
    int coeff[8] = {5,7,5,7,5,7,5,7};

    for(j=0;j<8;j++)
    {
        output[i] += coeff[j]*(input[i+8-j-1]);
    }
}

__kernel void fir12(
    __global float* input,
    __global float* output)
{
    int i = get_global_id(0);
    int j = 0;
    int coeff[12] = {5,7,5,7,5,7,5,7,5,7,5,7};

    for(j=0;j<12;j++)
    {
        output[i] += coeff[j]*(input[i+12-j-1]);
    }
}
Host code:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <mach/mach_time.h>
#include <OpenCL/opencl.h>

// Use a static data size for simplicity
//
#define DATA_SIZE (48000)
#define NUM_COEFF (4)

char *load_program_source(const char *filename); // author's helper that reads the .cl file

int main(int argc, char** argv)
{
    uint64_t start;
    uint64_t end;
    uint64_t elapsed;
    double elapsedmilli;
    int err;                           // error code returned from api calls

    float data[DATA_SIZE];             // original data set given to device
    float coeff[NUM_COEFF];
    float results_host[DATA_SIZE] = {0};
    float results[DATA_SIZE];          // results returned from device
    unsigned int correct;              // number of correct results returned

    size_t global;                     // global domain size for our calculation
    size_t local;                      // local domain size for our calculation

    cl_event event;                    // event linked to the kernel for profiling
    cl_platform_id platform_id = NULL; // compute device platform id
    cl_device_id device_id;            // compute device id
    cl_context context;                // compute context
    cl_command_queue commands;         // compute command queue
    cl_program program;                // compute program
    cl_kernel kernel;                  // compute kernel

    cl_mem input;                      // device memory used for the input array
    cl_mem output;                     // device memory used for the output array

    // Fill our data set with random float values
    //
    int i, j = 0;
    unsigned int count = DATA_SIZE;
    unsigned int taps = NUM_COEFF;
    for(i = 0; i < count; i++)
        data[i] = rand() / (float)RAND_MAX;

    for(i = 0; i < taps; i++)
    {
        if(!(i % 2))
            coeff[i] = 5;
        else
            coeff[i] = 7;
    }
    // Connect to a platform
    //
    err = clGetPlatformIDs(1, &platform_id, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to locate opencl platform!\n");
        return EXIT_FAILURE;
    }

    // Connect to a compute device
    //
    int gpu = 0;
    err = clGetDeviceIDs(platform_id, gpu ? CL_DEVICE_TYPE_GPU : CL_DEVICE_TYPE_CPU, 1, &device_id, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to create a device group!\n");
        return EXIT_FAILURE;
    }

    // Create a compute context
    //
    context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
    if (!context)
    {
        printf("Error: Failed to create a compute context!\n");
        return EXIT_FAILURE;
    }

    // Create a command queue
    //
    commands = clCreateCommandQueue(context, device_id, CL_QUEUE_PROFILING_ENABLE, &err);
    if (!commands)
    {
        printf("Error: Failed to create a command queue!\n");
        return EXIT_FAILURE;
    }

    // Load the kernel source from the .cl file in the same folder
    //
    char *KernelSource = load_program_source("fir.cl");

    // Create the compute program from the source buffer
    //
    program = clCreateProgramWithSource(context, 1, (const char **) &KernelSource, NULL, &err);
    if (!program)
    {
        printf("Error: Failed to create compute program!\n");
        return EXIT_FAILURE;
    }

    // Build the program executable
    //
    err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        size_t len;
        char buffer[2048];

        printf("Error: Failed to build program executable!\n");
        clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &len);
        printf("%s\n", buffer);
        exit(1);
    }
    // Create the compute kernel in the program we wish to run
    //
    switch(taps)
    {
        case(4):
        {
            kernel = clCreateKernel(program, "fir4", &err);
            break;
        }
        case(8):
        {
            kernel = clCreateKernel(program, "fir8", &err);
            break;
        }
        case(12):
        {
            kernel = clCreateKernel(program, "fir12", &err);
            break;
        }
        default:
        {
            kernel = clCreateKernel(program, "fir4", &err);
            break;
        }
    }
    if (!kernel || err != CL_SUCCESS)
    {
        printf("Error: Failed to create compute kernel! - %d\n", err);
        exit(1);
    }

    // Create the input and output arrays in device memory for our calculation
    //
    input = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float) * count, NULL, NULL);
    output = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(float) * count, NULL, NULL);
    if (!input || !output)
    {
        printf("Error: Failed to allocate device memory!\n");
        exit(1);
    }
    // Write our data set into the input array in device memory
    //
    err = clEnqueueWriteBuffer(commands, input, CL_TRUE, 0, sizeof(float) * count, data, 0, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to write to source array!\n");
        exit(1);
    }

    // Set the arguments to our compute kernel
    //
    err  = clSetKernelArg(kernel, 0, sizeof(cl_mem), &input);
    err |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &output);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to set kernel arguments! %d\n", err);
        exit(1);
    }

    // Get the maximum work group size for executing the kernel on the device
    //
    err = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE, sizeof(local), &local, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to retrieve kernel work group info! %d\n", err);
        exit(1);
    }

    // Execute the kernel over the entire range of our 1d input data set
    // using the maximum number of work group items for this device
    //
    global = count;
    local = 48;
    start = mach_absolute_time();
    err = clEnqueueNDRangeKernel(commands, kernel, 1, NULL, &global, &local, 0, NULL, &event);
    if (err)
    {
        printf("Error: Failed to execute kernel!-%d\n", err);
        return EXIT_FAILURE;
    }

    // Wait for the command queue to get serviced before reading back results
    //
    clWaitForEvents(1, &event);
    clFinish(commands);
    end = mach_absolute_time();

    cl_ulong time_start, time_end;
    double total_time;
    clGetEventProfilingInfo(event, CL_PROFILING_COMMAND_START, sizeof(time_start), &time_start, NULL);
    clGetEventProfilingInfo(event, CL_PROFILING_COMMAND_END, sizeof(time_end), &time_end, NULL);
    total_time = time_end - time_start;
    printf("cl:main timing:opencl clEnqueueNDRangeKernel %0.3f us\n", total_time / 1000.0);

    elapsed = end - start;
    struct mach_timebase_info info;
    mach_timebase_info(&info);
    elapsedmilli = 1e-6 * (elapsed) * info.numer / info.denom;
    printf("cl:main timing:MACH clEnqueueNDRangeKernel %f ms, %llu elapsed\n", elapsedmilli, elapsed);

    // Read back the results from the device to verify the output
    //
    err = clEnqueueReadBuffer(commands, output, CL_TRUE, 0, sizeof(float) * count, results, 0, NULL, NULL);
    if (err != CL_SUCCESS)
    {
        printf("Error: Failed to read output array! %d\n", err);
        exit(1);
    }
    // Validate our results
    //
    correct = 0;
    for(i = 0; i < DATA_SIZE; i++)
    {
        for(j = 0; j < NUM_COEFF; j++)
        {
            results_host[i] += coeff[j]*(data[i+NUM_COEFF-j-1]);
        }
        //printf("Host Output[%d]-%f\n", i, results_host[i]);
    }
    for(i = 0; i < count; i++)
    {
        if(results[i] == results_host[i])
            correct++;
        //printf("CL Output[%d]-%f\n", i, results[i]);
    }

    // Print a brief summary detailing the results
    //
    printf("Computed '%d/%d' correct values! Samples-%d,Taps-%d\n", correct, count, DATA_SIZE, NUM_COEFF);

    // Shutdown and cleanup
    //
    clReleaseMemObject(input);
    clReleaseMemObject(output);
    clReleaseProgram(program);
    clReleaseKernel(kernel);
    clReleaseCommandQueue(commands);
    clReleaseContext(context);

    return 0;
}
Adding just 10-20 multiplications and additions per work item is negligible next to the kernel launch overhead. Try a 100- or 1000-wide coefficient array.
Using more input elements per item that way just increases the cache hit count (and hit ratio), because more threads read from the same locations.
If DATA_SIZE were several millions, the data would no longer fit in cache and execution would slow down linearly with its length. 48000 samples is less than 200 kB; an HD 5850, for example, has 512 kB of L2 cache (3x the bandwidth of main memory) and 8 kB of L1 per compute unit (much faster).
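One way to test this (a sketch: the generic kernel, its TAPS build-time constant, and the extra coeff buffer are hypothetical additions, not part of the code above) is to make the per-item work scalable at build time, e.g. by passing "-DTAPS=1000" as the options string to clBuildProgram:

// Hypothetical generic FIR kernel (sketch): the tap count is injected at
// build time, so per-item work can be scaled until it dominates the
// launch overhead, without editing the host code.
__kernel void fir(__global const float* input,
                  __global const float* coeff,
                  __global float* output)
{
    int i = get_global_id(0);
    float acc = 0.0f;
    for (int j = 0; j < TAPS; j++)
        acc += coeff[j] * (input[i + TAPS - j - 1]);
    output[i] = acc;
}

With TAPS in the hundreds, kernel time should finally scale with the sample count instead of being swamped by the constant enqueue overhead.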

clBuildProgram failed with error: Failed to build program executable

I'm a beginner at OpenCL. I was trying to build a simple app which just adds 2 vectors to get the results. This is my host code:
#include <iostream>
#include <vector>
#include <sys/time.h>
#include <OpenCL/opencl.h>

#define USE_PLATFORM 0
#define USE_DEVICE 2
#define DATA_SIZE 1024
#define USE_KERNEL_PATH "/Users/huangxin/Documents/August13Programming/FirstEGOpenCL/FirstEGOpenCL/kernel.cl"

using namespace std;

// Note: getKernelSource and the START/END_CHECK_RUNNING_TIME timing macros
// (which provide sTime, eTime and timeuse) are the author's helpers, not shown here.
const char *getKernelSource(const char *path);

int main(int argc, const char * argv[]) {
    int err;
    cl_uint numPlatforms;
    cl_uint numDevices;
    cl_command_queue command;
    size_t global;

    //Query the number of platforms supported.
    err = clGetPlatformIDs(0, NULL, &numPlatforms);
    if (err != CL_SUCCESS || USE_PLATFORM >= numPlatforms)
    {
        printf("Error at: clGetPlatformIDs(querying platforms count failed):\n");
        exit(-1);
    }

    //Get all platforms.
    vector<cl_platform_id> platforms(numPlatforms);
    err = clGetPlatformIDs(numPlatforms, &platforms[0], &numPlatforms);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clGetPlatformIDs(getting all platforms failed):\n");
        exit(-1);
    }
    //Query the number of devices supported by the specified platform.
    err = clGetDeviceIDs(platforms[USE_PLATFORM], CL_DEVICE_TYPE_ALL, 0, NULL, &numDevices);
    if (err != CL_SUCCESS || USE_DEVICE >= numDevices)
    {
        printf("Error at: clGetDeviceIDs(querying devices count failed):\n");
        exit(-1);
    }

    //Get all devices.
    vector<cl_device_id> devices(numDevices);
    err = clGetDeviceIDs(platforms[USE_PLATFORM], CL_DEVICE_TYPE_ALL, numDevices, &devices[0], &numDevices);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clGetDeviceIDs(getting all devices failed):\n");
        exit(-1);
    }

    //Get device information.
    char deviceInfo[1024];
    //Get device max work item dimensions.
    size_t maxItemSize[3];
    clGetDeviceInfo(devices[USE_DEVICE], CL_DEVICE_NAME, sizeof(deviceInfo), deviceInfo, NULL);
    clGetDeviceInfo(devices[USE_DEVICE], CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t)*3, maxItemSize, NULL);
    cout << "Device selected: " << deviceInfo << endl;
    cout << "Max item size: " << maxItemSize[0] << "," << maxItemSize[1] << "," << maxItemSize[2] << endl;
    //Set properties for the chosen platform.
    cl_context_properties prop[] = {CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platforms[USE_PLATFORM]), 0};

    //Create a context with those properties.
    cl_context context = clCreateContextFromType(prop, CL_DEVICE_TYPE_ALL, NULL, NULL, &err);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clCreateContextFromType(get context failed):\n");
        exit(-1);
    }

    //Create a command queue using the selected device and context.
    command = clCreateCommandQueue(context, devices[USE_DEVICE], 0, NULL);

    //Create a program with the specified kernel source.
    const char *kernelSource = getKernelSource(USE_KERNEL_PATH);
    cl_program program = clCreateProgramWithSource(context, 1, &kernelSource, 0, &err);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clCreateProgramWithSource(get program failed):\n");
        exit(-1);
    }

    //Since OpenCL is a dynamic-compile architecture, we need to build the program.
    err = clBuildProgram(program, 0, 0, 0, 0, 0);
    if (err != CL_SUCCESS)
    {
        cout << err << endl;
        size_t len;
        char buffer[2048];

        printf("Error: Failed to build program executable!\n");
        clGetProgramBuildInfo(program, devices[USE_DEVICE], CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &len);
        printf("%s\n", buffer);
        exit(1);
    }
    //A kernel is OpenCL's abstraction for the code and arguments that run on one minimum-granularity compute item.
    //Create the kernel function using the built program.
    cl_kernel adder = clCreateKernel(program, "adder", &err);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clCreateKernel(get kernel function failed):\n");
        exit(-1);
    }

    //Create the vectors of random input data.
    vector<float> inA(DATA_SIZE), inB(DATA_SIZE);
    for(int i = 0; i < DATA_SIZE; i++) {
        inA[i] = (float)(random() % DATA_SIZE) / 1000;
        inB[i] = (float)(random() % DATA_SIZE) / 1000;
    }

    //Create the read-only device buffers using the specified context, copying host memory to device memory.
    cl_mem cl_a = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &inA[0], NULL);
    cl_mem cl_b = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &inB[0], NULL);
    //Create the result buffer.
    cl_mem cl_res = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_float) * DATA_SIZE, NULL, NULL);

    //Set up the kernel's memory arguments.
    clSetKernelArg(adder, 0, sizeof(cl_mem), &cl_a);
    clSetKernelArg(adder, 1, sizeof(cl_mem), &cl_b);
    clSetKernelArg(adder, 2, sizeof(cl_mem), &cl_res);

    START_CHECK_RUNNING_TIME
    //Enqueue the kernel on the specified command queue (#TODO: come back later to check the remaining arguments).
    global = DATA_SIZE;
    err = clEnqueueNDRangeKernel(command, adder, 1, 0, &global, 0, 0, 0, 0);
    if (err != CL_SUCCESS)
    {
        printf("Error at: clEnqueueNDRangeKernel(enqueue kernel failed):\n");
        exit(-1);
    }
    printf("*****************FLAG***************");
    //Copy the results from the device back to the host (CPU).
    vector<float> res(DATA_SIZE);
    err = clEnqueueReadBuffer(command, cl_res, CL_TRUE, 0, sizeof(float) * DATA_SIZE, &res[0], 0, 0, 0);
    END_CHECK_RUNNING_TIME

    //Count the number of correct results.
    int cnt = 0;
    for (int i = 0; i < res.size(); i++) {
        cnt += (res[i] == inA[i] + inB[i] ? 1 : 0);
    }
    cout << "Computed " << res.size() << " values\n";
    cout << "Correct values:(" << cnt << "/" << res.size() << "), correct rate:" << (float)cnt / res.size() * 100 << "%" << endl;

    //Time the same computation on the host for comparison.
    gettimeofday(&sTime, NULL);
    for (int i = 0; i < res.size(); i++) {
        for (int j = 0; j < 10000; j++)
            res[i] = inA[i] + inB[i];
    }
    gettimeofday(&eTime, NULL);
    timeuse = 1000000 * (eTime.tv_sec - sTime.tv_sec) + eTime.tv_usec - sTime.tv_usec;
    printf("Running time: %fs\n", (double)timeuse / (1000000));

    //Clean up.
    clReleaseKernel(adder);
    clReleaseProgram(program);
    clReleaseMemObject(cl_a);
    clReleaseMemObject(cl_b);
    clReleaseMemObject(cl_res);
    clReleaseCommandQueue(command);
    clReleaseContext(context);
    return 0;
}
It's a bit long, but it's really doing simple stuff. This is my kernel code:
kernel void adder(global const float* a, global const float* b, global float* result)
{
    size_t idx = get_global_id(0);
    for (int i = 0; i < 10000; i++)
        result[idx] = a[idx] + b[idx];
}
And I got the following result:
Device selected: GeForce GT 650M
-11
Error: Failed to build program executable!
No kernels or only kernel prototypes found.
I don't quite understand what "No kernels or only kernel prototypes found." means, and it's really strange that if I use the first device (CPU) or my second device (HD Graphics 4000), the same code runs perfectly.
I want to know what is wrong and why this happens.
I was running this code in Xcode on Mac OS X 10.10.
As the comments say, it is good practice to use:
__kernel void adder(__global const float* a, __global const float* b, __global float* result)
because that way you clearly mark these as special CL qualifiers. Typically all CL kernels follow that rule, even if the spec allows both forms.
But your problem is probably due to running clBuildProgram() without any device in the device list, and therefore not compiling anything at all.
In CL, every device has a specific compiler (CPUs don't have the same compiler as GPUs, sometimes not even the same instruction set), so you should give the API the list of devices for which the kernels have to be compiled.
The proper way would be this:
err = clBuildProgram(program, 1, &devices[USE_DEVICE], "", 0, 0);
Note: I added "" because you will probably want to add some build parameters in the future; better to have it ready :)
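If you want to check which devices the program was actually built for, you can query the per-device build status (a sketch using the standard CL_PROGRAM_BUILD_STATUS query; CL_BUILD_NONE means no build was ever attempted for that device):

// Sketch: report the build status for every device in the context.
for (cl_uint d = 0; d < numDevices; d++) {
    cl_build_status status;
    clGetProgramBuildInfo(program, devices[d], CL_PROGRAM_BUILD_STATUS,
                          sizeof(status), &status, NULL);
    printf("device %u: build status %d\n", d, (int)status);
}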

MapViewOfFile chunked loading of file

I want to map a file into memory in chunks whose size equals the system allocation granularity. The first chunk maps without error, and all the others fail with error 5 (ERROR_ACCESS_DENIED). I tried running the program with administrator privileges.
My code:
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
    HANDLE hFile = CreateFile( TEXT("db.txt"),
                               GENERIC_READ,
                               FILE_SHARE_READ,
                               NULL,
                               OPEN_EXISTING,
                               FILE_ATTRIBUTE_NORMAL,
                               NULL);
    if (hFile == INVALID_HANDLE_VALUE) {
        printf("[ERROR] File opening error %d\n", GetLastError());
        return 1;
    }
    printf("[DONE] File opened successfully.\n");

    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
    if (hMap == NULL) {
        printf("[ERROR] Create mapping error %d\n", GetLastError());
        return 2;
    }
    printf("[DONE] Create mapping successfully.\n");

    LARGE_INTEGER file_size = { };
    if (!GetFileSizeEx(hFile, &file_size)) {
        printf("[ERROR] Getting filesize error %d\n", GetLastError());
        return 3;
    }
    printf("[DONE] Getting file size.\n");

    SYSTEM_INFO info = { };
    GetSystemInfo(&info);
    printf("[DONE] Getting system memory granularity %d.\n", info.dwAllocationGranularity);

    DWORD offset = 0;
    int size = 0;
    do {
        char* ENTRY = (char*)MapViewOfFile(hMap, FILE_MAP_READ, HIWORD(offset), LOWORD(offset), info.dwAllocationGranularity);
        if (ENTRY == NULL) {
            printf("[ERROR] Map entry error %d\n", GetLastError());
        } else {
            printf("[DONE] MAPPING PART WITH OFFSET %d\n", offset);
            //printf("%s\n", ENTRY);
        }
        if (offset + info.dwAllocationGranularity < file_size.QuadPart) {
            offset += info.dwAllocationGranularity;
        } else {
            offset = file_size.QuadPart;
        }
        //offset += size;
        UnmapViewOfFile(ENTRY);
    } while (offset < file_size.QuadPart);

    CloseHandle(hMap);
    CloseHandle(hFile);
    system("pause");
    return 0;
}
How do I fix it?
You're using HIWORD and LOWORD for the offset in the call to MapViewOfFile, but these only take a 32-bit value and split it into two 16-bit halves - what you want is a 64-bit value split into two 32-bit halves.
Instead you need HIDWORD and LODWORD, which are defined in <intsafe.h>:
#define LODWORD(_qw) ((DWORD)(_qw))
#define HIDWORD(_qw) ((DWORD)(((_qw) >> 32) & 0xffffffff))
Like so:
char* ENTRY = (char*)MapViewOfFile(hMap, FILE_MAP_READ, HIDWORD(offset), LODWORD(offset), info.dwAllocationGranularity);
You need this even though your offset variable is only 32 bits wide; in that case HIDWORD simply returns 0, and the full value of offset is passed as the low-order DWORD.
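Two more details are worth fixing while you are at it (a sketch, untested): make offset a 64-bit ULONGLONG so files larger than 4 GB work, and clamp the length of the final view, because MapViewOfFile fails if the requested number of bytes extends past the end of the file mapping.

/* Sketch (untested): 64-bit offset plus a clamped view size for the
 * last chunk, so the final view never runs past the end of the mapping. */
ULONGLONG offset = 0;
while (offset < (ULONGLONG)file_size.QuadPart) {
    SIZE_T view = info.dwAllocationGranularity;
    if (offset + view > (ULONGLONG)file_size.QuadPart)
        view = (SIZE_T)(file_size.QuadPart - offset);
    char* entry = (char*)MapViewOfFile(hMap, FILE_MAP_READ,
                                       HIDWORD(offset), LODWORD(offset), view);
    if (entry == NULL) {
        printf("[ERROR] Map entry error %lu\n", GetLastError());
        break;
    }
    /* ... use the view ... */
    UnmapViewOfFile(entry);
    offset += view;
}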
