Byte order functions on STM32 microcontroller - endianness

Are there any ready-made functions for htonll, ntohll, ntohl, and htonl, like this one I've found here on the forum:
uint32_t ntohl(uint32_t const net) {
    uint8_t data[4] = {};
    memcpy(&data, &net, sizeof(data));
    return ((uint32_t) data[3] << 0)
         | ((uint32_t) data[2] << 8)
         | ((uint32_t) data[1] << 16)
         | ((uint32_t) data[0] << 24);
}
Or could you just tell me the byte-shifting scheme for each one, so I can write them in the same style as the one above?
EDIT:
Are these correct?
uint32_t htonl(uint32_t const net) {
    uint8_t data[4] = {};
    memcpy(&data, &net, sizeof(data));
    return ((uint32_t) data[0] << 0)
         | ((uint32_t) data[1] << 8)
         | ((uint32_t) data[2] << 16)
         | ((uint32_t) data[3] << 24);
}
uint64_t htonll(uint64_t const net) {
    uint8_t data[4] = {};
    memcpy(&data, &net, sizeof(data));
    return ((uint64_t) data[0] << 0)
         | ((uint64_t) data[1] << 8)
         | ((uint64_t) data[2] << 16)
         | ((uint64_t) data[3] << 24);
}
uint64_t ntohll(uint64_t const net) {
    uint8_t data[4] = {};
    memcpy(&data, &net, sizeof(data));
    return ((uint64_t) data[3] << 0)
         | ((uint64_t) data[2] << 8)
         | ((uint64_t) data[1] << 16)
         | ((uint64_t) data[0] << 24);
}

This isn't STM32 specific and can be answered in general for all ARM MCUs. Assuming you are using GCC, there are built-in functions like __builtin_bswap32.
So you can implement it as:
uint32_t htonl(uint32_t net)
{
    return __builtin_bswap32(net);
}
uint16_t htons(uint16_t net)
{
    return __builtin_bswap16(net);
}
uint64_t htonll(uint64_t net)
{
    return __builtin_bswap64(net);
}
The resulting assembler code is very efficient:
htonl:
        rev     r0, r0
        bx      lr
htons:
        rev16   r0, r0
        uxth    r0, r0
        bx      lr
htonll:
        rev     r3, r0
        rev     r0, r1
        mov     r1, r3
        bx      lr
If you declare the functions as inline, you can even save the function call.
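For instance, a header-friendly variant (a sketch relying on GCC's predefined __BYTE_ORDER__ macro) inlines down to a single rev instruction and also stays correct if the code ever runs on a big-endian core:
#include <stdint.h>

static inline uint32_t htonl(uint32_t net)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    return __builtin_bswap32(net);  // Cortex-M default: little-endian host
#else
    return net;                     // big-endian host is already network order
#endif
}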
BTW: In your code, the 64 bit versions are likely incorrect as they only return a 32 bit quantity.
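For completeness, a corrected byte-wise 64-bit version in the same portable style would copy all eight bytes and shift up to 56. (Note that the 32-bit htonl above is also suspect: it reassembles the bytes in the same order memcpy produced them, so on a little-endian host it returns its input unchanged. Since htonl and ntohl perform the same swap, htonl can simply reuse the ntohl scheme.)
#include <stdint.h>
#include <string.h>

uint64_t ntohll(uint64_t const net) {
    uint8_t data[8] = {0};
    memcpy(&data, &net, sizeof(data));
    return ((uint64_t) data[7] << 0)  | ((uint64_t) data[6] << 8)
         | ((uint64_t) data[5] << 16) | ((uint64_t) data[4] << 24)
         | ((uint64_t) data[3] << 32) | ((uint64_t) data[2] << 40)
         | ((uint64_t) data[1] << 48) | ((uint64_t) data[0] << 56);
}

/* the swap is its own inverse, so htonll can reuse ntohll */
uint64_t htonll(uint64_t const host) {
    return ntohll(host);
}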

Related

Cannot read/write I2C register in Linux device driver (Orange Pi)

I'm using I2C registers to configure a device in my driver, but:
I cannot read/write the I2C registers after request_mem_region() -> ioremap(). This only happens with I2C, UART, SPI and the like; GPIO and TIMER registers still work fine.
I tried changing the config in menuconfig before building the distro, but it had no effect (enabling Embedded system, enabling map RAM...). Of course, I also tried readl(), writel(), iowrite32(), ioread32()...
The code works fine when I touch an I2C register that was already enabled when the distro was configured and that shows up in /proc/iomem.
I think there is a problem with RAM access, but how do I fix it? Thanks!
Test code below:
u8 twi_mem_flag = 0;
int PIOA_Init(void)
{
if (!request_mem_region(TWI_BASE, 0x400, "TWI_MEM")) {
printk(KERN_INFO "TWI_MEM region already claimed\n");
twi_mem_flag = 1;
}
PA_CFG0_REG = (uint32_t *) ioremap(PA_CFG0, 4);
PA_PUL0_REG = (uint32_t *) ioremap(PA_PUL0, 4);
PA_DATA_REG = (uint32_t *) ioremap(PA_DAT, 4);
PA_DRV0_REG = (uint32_t *) ioremap(PA_DRV0, 4);
PA_EINT_CFG0_REG = (uint32_t *) ioremap(PA_EINT_CFG0, 4);
PA_EINT_CTL_REG = (uint32_t *) ioremap(PA_EINT_CTL, 4);
PA_EINT_STATUS_REG = (uint32_t *) ioremap(PA_EINT_STATUS, 4);
PA_EINT_DEB_REG = (uint32_t *) ioremap(PA_EINT_DEB, 4);
TWI_ADDR_REG = (uint32_t *) ioremap(TWI_ADDR, 4);
TWI_XADDR_REG = (uint32_t *) ioremap(TWI_XADDR, 4);
TWI_DATA_REG = (uint32_t *) ioremap(TWI_DATA, 4);
TWI_CNTR_REG = (uint32_t *) ioremap(TWI_CNTR, 4);
*PA_CFG0_REG &= ~(0X77 << 24);
*PA_CFG0_REG |= (0X01 << 24);
*PA_PUL0_REG &= ~(0X0f << 12);
*PA_PUL0_REG |= (0X05 << 12);
*PA_EINT_CFG0_REG = (0x03 << 28);
*PA_EINT_CTL_REG = (0x01 << 7);
*PA_EINT_STATUS_REG = (0x01 << 7);
*PA_EINT_DEB_REG = (0x11);
*TWI_ADDR_REG |= 0x07;
*TWI_DATA_REG |= 0x77;
*TWI_CNTR_REG |= 0x05;
printk( KERN_INFO "GPIO: %x - %x - %x - %x\n", *PA_CFG0_REG, *PA_PUL0_REG, *PA_DATA_REG, *PA_DRV0_REG);
printk( KERN_INFO "TWI: %x - %x - %x - %x\n", *TWI_ADDR_REG, *TWI_XADDR_REG, *TWI_DATA_REG, *TWI_CNTR_REG);
return 0;
}
void PIOA_destroy(void)
{
iounmap(PA_DRV0_REG);
iounmap(PA_DATA_REG);
iounmap(PA_CFG0_REG);
iounmap(PA_PUL0_REG);
iounmap(PA_EINT_CFG0_REG);
iounmap(PA_EINT_CTL_REG);
iounmap(PA_EINT_STATUS_REG);
iounmap(PA_EINT_DEB_REG);
iounmap(TWI_ADDR_REG);
iounmap(TWI_XADDR_REG);
iounmap(TWI_DATA_REG);
iounmap(TWI_CNTR_REG);
if (!twi_mem_flag)
release_mem_region(TWI_BASE, 0x400);
}
[ 228.246399] GPIO: 1227777 - 5400 - 0 - 55555555
[ 228.246420] TWI: 0 - 0 - 0 - 0
[ 228.246428] The blink led device is installed !!

Efficient sse shuffle mask generation for left-packing byte elements

What would be an efficient way to optimize the following code with SSE?
uint16_t change1= ... ;
uint8_t* pSrc = ... ;
uint8_t* pDest = ... ;
if(change1 & 0x0001) *pDest++ = pSrc[0];
if(change1 & 0x0002) *pDest++ = pSrc[1];
if(change1 & 0x0004) *pDest++ = pSrc[2];
if(change1 & 0x0008) *pDest++ = pSrc[3];
if(change1 & 0x0010) *pDest++ = pSrc[4];
if(change1 & 0x0020) *pDest++ = pSrc[5];
if(change1 & 0x0040) *pDest++ = pSrc[6];
if(change1 & 0x0080) *pDest++ = pSrc[7];
if(change1 & 0x0100) *pDest++ = pSrc[8];
if(change1 & 0x0200) *pDest++ = pSrc[9];
if(change1 & 0x0400) *pDest++ = pSrc[10];
if(change1 & 0x0800) *pDest++ = pSrc[11];
if(change1 & 0x1000) *pDest++ = pSrc[12];
if(change1 & 0x2000) *pDest++ = pSrc[13];
if(change1 & 0x4000) *pDest++ = pSrc[14];
if(change1 & 0x8000) *pDest++ = pSrc[15];
So far I am using a quite big lookup table for it, but I really want to get rid of it:
SSE3Shuffle::Entry& e0 = SSE3Shuffle::g_Shuffle.m_Entries[change1];
_mm_storeu_si128((__m128i*)pDest, _mm_shuffle_epi8(*(__m128i*)pSrc, e0.mask));
pDest += e0.offset;
Assuming:
change1 = _mm_movemask_epi8(bytemask);
offset = popcnt(change1);
On large buffers, using two shuffles and a 1 KiB table is only ~10% slower than using one shuffle and a 1 MiB table. My attempts at generating the shuffle mask via prefix sums and bit twiddling were about half the speed of the table-based methods
(solutions using pext/pdep were not explored).
Reducing table size: use two lookups into a 2 KiB table instead of one lookup into a 1 MiB table. Always keep the top-most byte - if that byte is to be discarded then it doesn't matter what byte ends up at that position (down to 7-bit indices, or a 1 KiB table). Further reduce the possible combinations by manually packing the two bytes in each 16-bit lane (down to a 216-byte table).
The following example strips whitespace from text using SSE4.1. If only SSSE3 is available then blendv can be emulated. The 64-bit halves are re-combined by overlapping writes to memory, but they could be re-combined in the xmm register (as seen in the AVX2 example).
#include <stdint.h>
#include <smmintrin.h> // SSE4.1
size_t despacer (void* dst_void, void* src_void, size_t length)
{
uint8_t* src = (uint8_t*)src_void;
uint8_t* dst = (uint8_t*)dst_void;
if (length >= 16) {
// table of control characters (space, tab, newline, carriage return)
const __m128i lut_cntrl = _mm_setr_epi8(' ', 0, 0, 0, 0, 0, 0, 0, 0, '\t', '\n', 0, 0, '\r', 0, 0);
// bits[4:0] = index -> ((trit_d * 0) + (trit_c * 9) + (trit_b * 3) + (trit_a * 1))
// bits[15:7] = popcnt
const __m128i sadmask = _mm_set1_epi64x(0x8080898983838181);
// adding 8 to each shuffle index is cheaper than extracting the high qword
const __m128i offset = _mm_cvtsi64_si128(0x0808080808080808);
// shuffle control indices
static const uint64_t table[27] = {
0x0000000000000706, 0x0000000000070600, 0x0000000007060100, 0x0000000000070602,
0x0000000007060200, 0x0000000706020100, 0x0000000007060302, 0x0000000706030200,
0x0000070603020100, 0x0000000000070604, 0x0000000007060400, 0x0000000706040100,
0x0000000007060402, 0x0000000706040200, 0x0000070604020100, 0x0000000706040302,
0x0000070604030200, 0x0007060403020100, 0x0000000007060504, 0x0000000706050400,
0x0000070605040100, 0x0000000706050402, 0x0000070605040200, 0x0007060504020100,
0x0000070605040302, 0x0007060504030200, 0x0706050403020100
};
const uint8_t* end = &src[length & ~15];
do {
__m128i v = _mm_loadu_si128((__m128i*)src);
src += 16;
// detect spaces
__m128i mask = _mm_cmpeq_epi8(_mm_shuffle_epi8(lut_cntrl, v), v);
// shift w/blend: each word now only has 3 states instead of 4
// which reduces the possibilities per qword from 128 to 27
v = _mm_blendv_epi8(v, _mm_srli_epi16(v, 8), mask);
// extract bitfields describing each qword: index, popcnt
__m128i desc = _mm_sad_epu8(_mm_and_si128(mask, sadmask), sadmask);
size_t lo_desc = (size_t)_mm_cvtsi128_si32(desc);
size_t hi_desc = (size_t)_mm_extract_epi16(desc, 4);
// load shuffle control indices from pre-computed table
__m128i lo_shuf = _mm_loadl_epi64((__m128i*)&table[lo_desc & 0x1F]);
__m128i hi_shuf = _mm_or_si128(_mm_loadl_epi64((__m128i*)&table[hi_desc & 0x1F]), offset);
// store an entire qword then advance the pointer by how ever
// many of those bytes are actually wanted. Any trailing
// garbage will be overwritten by the next store.
// note: little endian byte memory order
_mm_storel_epi64((__m128i*)dst, _mm_shuffle_epi8(v, lo_shuf));
dst += (lo_desc >> 7);
_mm_storel_epi64((__m128i*)dst, _mm_shuffle_epi8(v, hi_shuf));
dst += (hi_desc >> 7);
} while (src != end);
}
// tail loop
length &= 15;
if (length != 0) {
const uint64_t bitmap = 0xFFFFFFFEFFFFC1FF;
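// whitespace map: bits are clear at 9..13 ('\t','\n',VT,FF,'\r') and
// at 32 (' '); for c >= 64 the ((c + 0xC0) >> 8) term becomes 1 and
// forces the advance, since (bitmap >> c) would be undefined there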
do {
uint64_t c = *src++;
*dst = (uint8_t)c;
dst += ((bitmap >> c) & 1) | ((c + 0xC0) >> 8);
} while (--length);
}
// return pointer to the location after the last element in dst
return (size_t)(dst - ((uint8_t*)dst_void));
}
Whether the tail loop should be vectorized or use cmov is left as an exercise for the reader. Writing each byte unconditionally/branchlessly is fast when the input is unpredictable.
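For reference, a minimal harness for the despacer above (the input string is hypothetical; as argued above, the overlapping stores never write more than length bytes to dst):
#include <stdio.h>
#include <string.h>

int main(void)
{
    char src[] = "strip\t all  the\r\n whitespace";
    char dst[sizeof(src)] = {0};
    size_t n = despacer(dst, src, strlen(src));
    fwrite(dst, 1, n, stdout); // prints "stripallthewhitespace"
    return 0;
}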
Generating the shuffle control mask with AVX2 and an in-register table is only slightly slower than using large precomputed tables.
#include <stdint.h>
#include <immintrin.h>
// probably needs improvement...
size_t despace_avx2_vpermd(const char* src_void, char* dst_void, size_t length)
{
uint8_t* src = (uint8_t*)src_void;
uint8_t* dst = (uint8_t*)dst_void;
const __m256i lut_cntrl2 = _mm256_broadcastsi128_si256(_mm_setr_epi8(' ', 0, 0, 0, 0, 0, 0, 0, 0, '\t', '\n', 0, 0, '\r', 0, 0));
const __m256i permutation_mask = _mm256_set1_epi64x( 0x0020100884828180 );
const __m256i invert_mask = _mm256_set1_epi64x( 0x0020100880808080 );
const __m256i zero = _mm256_setzero_si256();
const __m256i fixup = _mm256_set_epi32(
0x08080808, 0x0F0F0F0F, 0x00000000, 0x07070707,
0x08080808, 0x0F0F0F0F, 0x00000000, 0x07070707
);
const __m256i lut = _mm256_set_epi32(
0x04050607, // 0x03020100', 0x000000'07
0x04050704, // 0x030200'00, 0x0000'0704
0x04060705, // 0x030100'00, 0x0000'0705
0x04070504, // 0x0300'0000, 0x00'070504
0x05060706, // 0x020100'00, 0x0000'0706
0x05070604, // 0x0200'0000, 0x00'070604
0x06070605, // 0x0100'0000, 0x00'070605
0x07060504 // 0x00'000000, 0x'07060504
);
// hi bits are ignored by pshufb, used to reject movement of low qword bytes
const __m256i shuffle_a = _mm256_set_epi8(
0x7F, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70,
0x7F, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, 0x70
);
// broadcast 0x08 then blendd...
const __m256i shuffle_b = _mm256_set_epi32(
0x08080808, 0x08080808, 0x00000000, 0x00000000,
0x08080808, 0x08080808, 0x00000000, 0x00000000
);
for( uint8_t* end = &src[(length & ~31)]; src != end; src += 32){
__m256i r0,r1,r2,r3,r4;
unsigned int s0,s1;
r0 = _mm256_loadu_si256((__m256i *)src); // asrc
// detect spaces
r1 = _mm256_cmpeq_epi8(_mm256_shuffle_epi8(lut_cntrl2, r0), r0);
r2 = _mm256_sad_epu8(zero, r1);
s0 = (unsigned)_mm256_movemask_epi8(r1);
r1 = _mm256_andnot_si256(r1, permutation_mask);
r1 = _mm256_sad_epu8(r1, invert_mask); // index_bitmap[0:5], low32_spaces_count[7:15]
r2 = _mm256_shuffle_epi8(r2, zero);
r2 = _mm256_sub_epi8(shuffle_a, r2); // add space cnt of low qword
s0 = ~s0;
r3 = _mm256_slli_epi64(r1, 29); // move top part of index_bitmap to high dword
r4 = _mm256_srli_epi64(r1, 7); // number of spaces in low dword
r4 = _mm256_shuffle_epi8(r4, shuffle_b);
r1 = _mm256_or_si256(r1, r3);
r1 = _mm256_permutevar8x32_epi32(lut, r1);
s1 = _mm_popcnt_u32(s0);
r4 = _mm256_add_epi8(r4, shuffle_a);
s0 = s0 & 0xFFFF; // isolate low oword
r2 = _mm256_shuffle_epi8(r4, r2);
s0 = _mm_popcnt_u32(s0);
r2 = _mm256_max_epu8(r2, r4); // pin low qword bytes
r1 = _mm256_xor_si256(r1, fixup);
r1 = _mm256_shuffle_epi8(r1, r2); // complete shuffle mask
r0 = _mm256_shuffle_epi8(r0, r1); // despace!
_mm_storeu_si128((__m128i*)dst, _mm256_castsi256_si128(r0));
_mm_storeu_si128((__m128i*)&dst[s0], _mm256_extracti128_si256(r0,1));
dst += s1;
}
// tail loop
length &= 31;
if (length != 0) {
const uint64_t bitmap = 0xFFFFFFFEFFFFC1FF;
do {
uint64_t c = *src++;
*dst = (uint8_t)c;
dst += ((bitmap >> c) & 1) | ((c + 0xC0) >> 8);
} while (--length);
}
return (size_t)(dst - ((uint8_t*)dst_void));
}
For posterity, the 1 KiB version (generating the table is left as an exercise for the reader).
static const uint64_t table[128] __attribute__((aligned(64))) = {
0x0706050403020100, 0x0007060504030201, ..., 0x0605040302010700, 0x0605040302010007
};
const __m128i mask_01 = _mm_set1_epi8( 0x01 );
__m128i vector0 = _mm_loadu_si128((__m128i*)src);
__m128i vector1 = _mm_shuffle_epi32( vector0, 0x0E );
__m128i bytemask0 = _mm_cmpeq_epi8( ???, vector0); // detect bytes to omit
uint32_t bitmask0 = _mm_movemask_epi8(bytemask0) & 0x7F7F;
__m128i hsum = _mm_sad_epu8(_mm_add_epi8(bytemask0, mask_01), _mm_setzero_si128());
vector0 = _mm_shuffle_epi8(vector0, _mm_loadl_epi64((__m128i*) &table[(uint8_t)bitmask0]));
_mm_storel_epi64((__m128i*)dst, vector0);
dst += (uint32_t)_mm_cvtsi128_si32(hsum);
vector1 = _mm_shuffle_epi8(vector1, _mm_loadl_epi64((__m128i*) &table[bitmask0 >> 8]));
_mm_storel_epi64((__m128i*)dst, vector1);
dst += (uint32_t)_mm_cvtsi128_si32(_mm_unpackhi_epi64(hsum, hsum));
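As a sketch of that exercise, here is a generator consistent with the entries shown above (pack the indices of kept bytes first, then append the discarded indices in order; a set bit in the 7-bit mask means the byte is omitted):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (unsigned m = 0; m < 128; m++) {
        uint64_t entry = 0;
        int pos = 0, i;
        for (i = 0; i < 8; i++)        // kept bytes first (mask bit clear)
            if (!(m & (1u << i)))
                entry |= (uint64_t)i << (8 * pos++);
        for (i = 0; i < 8; i++)        // then the discarded bytes, in order
            if (m & (1u << i))
                entry |= (uint64_t)i << (8 * pos++);
        printf("0x%016llx,%s", (unsigned long long)entry,
               (m % 4 == 3) ? "\n" : " ");
    }
    return 0;
}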
https://github.com/InstLatx64/AVX512_VPCOMPRESSB_Emu has some benchmarks.
If one is willing to use BMI2 (available on Haswell and later), one can use pext to compress the wanted nibble indices out of a uint64_t, and pdep to scatter the packed result into a byte-per-lane shuffle mask.
#include <immintrin.h> // BMI2 _pdep_u64/_pext_u64, SSSE3 _mm_shuffle_epi8
// Step 1 -- replicate each mask bit to a full nibble
uint64_t change4 = _pdep_u64(change1, 0x1111111111111111ULL) * 0x0F;
// Step 2 -- extract the wanted indices from the array of nibbles
uint64_t indices = _pext_u64(0xfedcba9876543210ULL, change4);
// Step 3 -- spread the nibbles out to octets
uint64_t high = _pdep_u64(indices >> 32, 0x0F0F0F0F0F0F0F0FULL);
uint64_t low  = _pdep_u64(indices, 0x0F0F0F0F0F0F0F0FULL);
// Step 4 -- use these two index masks to compress pSrc
__m128i compressed = _mm_shuffle_epi8(_mm_loadu_si128((const __m128i*)pSrc),
                                      _mm_set_epi64x(high, low));
// Step 5 -- store 16 bytes unaligned
_mm_storeu_si128((__m128i*)pDst, compressed);
// Step 6 -- increment the target pointer
pDst += _mm_popcnt_u32(change1);
Other variants (based on a cumulative sum, or on sorting the 'X's (the zero bits) out of XX23456789abXXef) first require some technique to spread the bits of a uint16_t evenly across a __m128i, i.e. the reverse of movemask_epi8; see the sketch below.
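A sketch of that spreading step, assuming SSSE3 (the helper name is hypothetical; the constants are the usual broadcast-and-test idiom):
#include <stdint.h>
#include <tmmintrin.h> // SSSE3

// expand each of the 16 bits of `mask` into a full byte
// (0xFF if set, 0x00 if clear) -- the reverse of _mm_movemask_epi8
static inline __m128i inverse_movemask_epi8(uint16_t mask)
{
    // broadcast the low mask byte to lanes 0-7, the high byte to lanes 8-15
    __m128i v = _mm_shuffle_epi8(_mm_cvtsi32_si128(mask),
                                 _mm_setr_epi8(0,0,0,0,0,0,0,0,
                                               1,1,1,1,1,1,1,1));
    // isolate one bit per lane and turn it into 0xFF/0x00
    const __m128i bits = _mm_setr_epi8(1,2,4,8,16,32,64,-128,
                                       1,2,4,8,16,32,64,-128);
    return _mm_cmpeq_epi8(_mm_and_si128(v, bits), bits);
}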
The 64K-entry LUT can, however, be split into top and bottom parts:
int c = change1 & 0xff;
int p = __builtin_popcount(c);
uint64_t a = LUT256[c];            // low part of the index
uint64_t b = LUT256[change1 >> 8]; // top part of the index
b += addlut9[p];                   // 0x0101010101010101 * p
// then concatenate b|a, starting at the pth byte of 'a'
if (p < 8)
{
    a |= b << (8 * p);
    b >>= 8 * (8 - p);
}
__m128i d = _mm_shuffle_epi8(_mm_loadu_si128((const __m128i*)pSrc),
                             _mm_set_epi64x(b, a));
// and continue with steps 5 and 6 as before

uint8_t needs uint16_t to store values from a stringstream

Reading an IP address from argv forces me to convert it to a uint32_t.
This function does so:
uint32_t convert_string_ip_to_uint32_t(const string& string_ip)
{
    stringstream s(string_ip);
    uint16_t a, b, c, d; // to store the 4 ints
    char ch;             // to temporarily store the '.'
    s >> a >> ch >> b >> ch >> c >> ch >> d;
    std::cout << a << " " << b << " " << c << " " << d << std::flush;
    return ((uint32_t) a << 24 | b << 16 | c << 8 | d);
}
But when I change the data type of a, b, c and d to uint8_t, the result is rubbish. Why?
e.g. string_ip = '192.168.1.10'
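A likely explanation: uint8_t is almost always a typedef for unsigned char, so `s >> a` extracts a single raw character (e.g. '1') instead of parsing a number. A minimal sketch of a workaround parses into a wider type and masks:
#include <cstdint>
#include <sstream>
#include <string>

uint32_t convert_string_ip_to_uint32_t(const std::string& string_ip)
{
    std::stringstream s(string_ip);
    unsigned int a, b, c, d; // wide enough for operator>> to parse digits
    char ch;                 // swallows the '.' separators
    s >> a >> ch >> b >> ch >> c >> ch >> d;
    return ((uint32_t)(a & 0xFF) << 24) | ((b & 0xFF) << 16)
         | ((c & 0xFF) << 8) | (d & 0xFF);
}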

cublas cublasDgetrfBatched() batched LU factorization doesn't work with matrices bigger than 32x32

I wrote a CUDA MEX function for MATLAB to perform an LU factorization of a batch of matrices using cublasDgetrfBatched(). The toolkit documentation of this function is here.
It works fine for matrices up to size 32x32, but fails with status code CUBLAS_STATUS_INVALID_VALUE for bigger matrices. Below is my source code (gpuBatchedLU.cu):
#include "mex.h"
#include "gpu/mxGPUArray.h"
/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <string>
#include <sstream>
static std::string cublasGetErrorString(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
inline bool cublasAssert(cublasStatus_t code, const char* file, int line) {
if (code != CUBLAS_STATUS_SUCCESS) {
std::stringstream ss;
ss << "cublasAssert: " << cublasGetErrorString(code) << " in "
<< std::string(file) << ", line " << line << ".";
mexErrMsgTxt(ss.str().c_str());
}
return code == CUBLAS_STATUS_SUCCESS;
}
inline bool cudaAssert(cudaError_t code, const char* file, int line) {
if (code != cudaSuccess) {
std::stringstream ss;
ss << "cudaAssert: " << cudaGetErrorString(code) << " in "
<< std::string(file) << ", line " << line << ".";
mexErrMsgTxt(ss.str().c_str());
}
return code == cudaSuccess;
}
inline bool mexGPUAssert(int code, const char* file, int line) {
if (code != MX_GPU_SUCCESS) {
std::stringstream ss;
ss << "mexGPUAssert: could not initialize the Mathworks GPU API in "
<< std::string(file) << ", line " << line << ".";
mexErrMsgTxt(ss.str().c_str());
}
return code == MX_GPU_SUCCESS;
}
#define cublasErrchk(ans) { cublasAssert((ans), __FILE__, __LINE__); }
#define cudaErrchk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define mxGPUErrchk(ans) { mexGPUAssert((ans), __FILE__, __LINE__); }
void mexFunction(int nlhs, mxArray *plhs[], /* Output variables */int nrhs,
const mxArray *prhs[]) /* Input variables */{
if (nrhs != 1) { /* end if not one function arguments */
mexErrMsgTxt("This function requires one input argument.");
return;
}
if (nlhs > 3) { /* take three outputs */
mexErrMsgTxt("This function takes a maximum of three output variables.");
return;
}
mxGPUErrchk(mxInitGPU());
const mxGPUArray* in1_gpu = mxGPUCreateFromMxArray(prhs[0]);
size_t ndims = mxGPUGetNumberOfDimensions(in1_gpu);
const size_t* dim = (const size_t*) mxGPUGetDimensions(in1_gpu);
if (ndims != 3) { /* end if input arguments are of different dimensions */
mexErrMsgTxt("The input argument must be a 3-dimensional array.");
return;
}
cublasHandle_t handle;
cublasErrchk(cublasCreate(&handle));
int no_matrices = dim[2];
int nrow = dim[0];
int ncol = dim[1];
int matrix_size = nrow * ncol;
size_t i;
std::stringstream ss;
ss << "dim[2] = " << dim[2] << "\nno_matrices = " << no_matrices << "\nnrow = " << nrow << "\nmatrix_size = " << nrow << " x " << ncol << " = " << matrix_size << std::endl;
mexPrintf(ss.str().c_str());
mxGPUArray* gpu_array_inout = mxGPUCopyFromMxArray(prhs[0]);
double* inout_storage = (double*) mxGPUGetData(gpu_array_inout);
size_t info_dimensions[1] = { no_matrices };
mxGPUArray* gpu_array_info = mxGPUCreateGPUArray(1, (mwSize*) info_dimensions, mxINT32_CLASS, mxREAL,
MX_GPU_INITIALIZE_VALUES);
int* out_info = (int*) mxGPUGetData(gpu_array_info);
mexPrintf("after defining gpu_array_info\n");
size_t pivot_dimensions[2] = { nrow, no_matrices };
mxGPUArray* gpu_array_pivot = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensions, mxINT32_CLASS, mxREAL,
MX_GPU_DO_NOT_INITIALIZE);
int* out_pivot = (int*) mxGPUGetData(gpu_array_pivot);
mexPrintf("after defining gpu_array_pivot\n");
double** inout_pointers_CPU = (double**) malloc(no_matrices * sizeof(double*));
for (i = 0; i < no_matrices; i++) {
inout_pointers_CPU[i] = (double*) ((char*) inout_storage + i * ((size_t) matrix_size) * sizeof(double));
}
double** inout_pointers_GPU;
cudaErrchk(cudaMalloc((void** )&inout_pointers_GPU, no_matrices * sizeof(double*)));
cudaErrchk(
cudaMemcpy(inout_pointers_GPU, inout_pointers_CPU, no_matrices * sizeof(double*), cudaMemcpyHostToDevice));
free(inout_pointers_CPU);
ss.clear();
ss << "check again before calling cublasDgetrfBatched:\nnrow = " << nrow << "\nno_matrices = " << no_matrices << std::endl;
mexPrintf(ss.str().c_str());
cublasErrchk(cublasDgetrfBatched(handle, nrow, inout_pointers_GPU, nrow, out_pivot, out_info, no_matrices));
cublasErrchk(cublasDestroy(handle));
cudaErrchk(cudaFree(inout_pointers_GPU));
if (mxIsGPUArray(prhs[0])) {
plhs[0] = mxGPUCreateMxArrayOnGPU(gpu_array_inout);
if (nlhs > 1) {
plhs[1] = mxGPUCreateMxArrayOnGPU(gpu_array_pivot);
if (nlhs > 2) {
plhs[2] = mxGPUCreateMxArrayOnGPU(gpu_array_info);
}
}
} else {
plhs[0] = mxGPUCreateMxArrayOnCPU(gpu_array_inout);
if (nlhs > 1) {
plhs[1] = mxGPUCreateMxArrayOnCPU(gpu_array_pivot);
if (nlhs > 2) {
plhs[2] = mxGPUCreateMxArrayOnCPU(gpu_array_info);
}
}
}
mxGPUDestroyGPUArray(gpu_array_inout);
mxGPUDestroyGPUArray(gpu_array_pivot);
mxGPUDestroyGPUArray(gpu_array_info);
mxFree((void*) dim);
return;
}
I compile as follows:
mex -L/usr/local/cuda/lib64 -lcudart -lcublas gpuBatchedLU.cu
And I call from MATLAB:
[a1,b1,c1]=gpuBatchedLU(randn(32,32,5)); %no problem
[a2,b2,c2]=gpuBatchedLU(randn(33,33,5)); %produces CUBLAS_STATUS_INVALID_VALUE
I use MATLAB R2013b with the Parallel Computing Toolbox, CUDA 5.5, and an NVS 5200M graphics chip.
Can anyone replicate this problem? I would appreciate any suggestions on how to solve this problem.
The problem seems to be that MATLAB R2013b ships with libcublas.so version 5.0; the symlink is in /MATLAB/R2013b/bin/glnxa64/. Once I pointed the link at the libcublas.so of my CUDA 5.5 installation, it worked fine.
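To confirm at run time which library the MEX file actually loaded, one can print the cuBLAS version right after cublasCreate() (a small sketch; cublasGetVersion is part of the v2 API already used above):
int cublas_version = 0;
cublasErrchk(cublasGetVersion(handle, &cublas_version));
mexPrintf("cuBLAS version: %d\n", cublas_version); // differs between the 5.0 and 5.5 builds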

Changing the Interrupt Descriptor Table

I am using Linux kernel 2.6.26 and I am trying to change the interrupt descriptor table using a kernel module. I am only trying to change the page-fault entry here, so I make a copy of the original IDT and modify only the page-fault entry. The objective of the ISR is to print out information about the page fault before calling the original page-fault handler.
But the kernel crashes as soon as I load the module with insmod; specifically, it crashes in the load_IDTR function. With further debugging I found that if I load the IDTR without changing any entry, it works fine. I am out of ideas.
I have pasted the code below:
#include <linux/module.h> // for init_module()
#include <linux/init.h>
#include <linux/mm.h> // for get_free_page()
#include <linux/sched.h>
#include <linux/spinlock.h>
#define SUCCESS 0
#define PGFAULT_INT 0x0E
static char modname[] = "pgfaults";
static unsigned short oldidtr[3], newidtr[3];
static unsigned long long *oldidt, *newidt;
static unsigned long isr_orig, kpage;
static char *why[]={ "sra", "srp", "swa", "swp", "ura", "urp", "uwa", "uwp" };
unsigned long long gate_desc_orig,gate_desc_orig1;
static void my_intrept( unsigned long *tos )
{
// stack-layout:
// es,ds,edi,esi,ebp,esp,ebx,edx,ecx,eax,err,eip,cs,efl
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13
volatile unsigned long vaddr;
struct task_struct *task = current;
unsigned long err = tos[ 10 ];
unsigned long eip = tos[ 11 ];
static int count = 0;
int exe, len = 0;
char msg[80]="";
// get the faulting virtual address from register CR2
asm(" mov %%cr2, %%eax ; movl %%eax, %0 " : "=m" (vaddr) );
// construct the diagnostic message
len += sprintf( msg+len, "#%-6d ", ++count );
len += sprintf( msg+len, "%16s ", task->comm );
len += sprintf( msg+len, "pid=%-5d ", task->pid );
len += sprintf( msg+len, "CR2=%08X ", (unsigned int) vaddr );
len += sprintf( msg+len, "EIP=%08X ", (unsigned int) eip );
len += sprintf( msg+len, "%s ", why[ err ] );
// note if an instruction-fetch caused the page-fault
if ( vaddr == eip ) exe = 'x'; else exe = ' ';
len += sprintf( msg+len, "%c ", exe );
// print this diagnostic message to the kernel log
printk( "<1> %s \n", msg );
}
//---------- NEW PAGE-FAULT EXCEPTION-HANDLER ---------//
asmlinkage void isr0x0E( void );
asm(" .text ");
asm(" .type isr0x0E, #function ");
asm("isr0x0E: ");
asm(" pushal ");
asm(" pushl %ds ");
asm(" pushl %es ");
//
asm(" movl %ss, %eax ");
asm(" movl %eax, %ds ");
asm(" movl %eax, %es ");
//
asm(" pushl %esp ");
asm(" call my_intrept ");
asm(" addl $4, %esp ");
//
asm(" popl %es ");
asm(" popl %ds ");
asm(" popal ");
asm(" jmp *isr_orig ");
//-------------------------------------------------------//
static void load_IDTR( void *regimage )
{
asm(" lidt %0 " : : "m" (*(unsigned short*)regimage) );
}
int pgfault_init( void )
{
int i;
unsigned long long gate_desc,gate_desc1,gate_desc2;
spinlock_t lock =SPIN_LOCK_UNLOCKED;
unsigned long flags;
unsigned short selector1;
// allocate a mapped kernel page for our new IDT
kpage =__get_free_page( GFP_KERNEL);
if ( !kpage ) return -ENOMEM;
// initialize our other global variables
asm(" sidt oldidtr ; sidt newidtr ");
memcpy( newidtr+1, &kpage, sizeof( kpage ) );
oldidt = (unsigned long long *)(*(unsigned long*)(oldidtr+1));
newidt = (unsigned long long *)(*(unsigned long*)(newidtr+1));
// extract and save entry-point to original page-pault handler
gate_desc_orig = oldidt[ PGFAULT_INT ];
gate_desc =gate_desc_orig & 0xFFFF00000000FFFF;
gate_desc |= ( gate_desc >> 32 );
isr_orig = (unsigned long)gate_desc;
// initialize our new Interrupt Descriptor Table
memcpy( newidt, oldidt, 256*sizeof( unsigned long long ) );
gate_desc_orig1 = (unsigned long)isr0x0E;
gate_desc = gate_desc_orig1 & 0x00000000FFFFFFFF;
gate_desc = gate_desc | ( gate_desc << 32 );
gate_desc1= 0xFFFF0000;
gate_desc1= gate_desc1 << 32;
gate_desc1= gate_desc1 | 0x0000FFFF;
gate_desc = gate_desc & gate_desc1;
gate_desc2= 0x0000EF00;
gate_desc2= gate_desc2 <<32;
gate_desc2= gate_desc2 | 0x00100000;
gate_desc = gate_desc | gate_desc2; // trap-gate
//Part which is most likely creating a fault when loading the idtr
newidt[ PGFAULT_INT ] = gate_desc;
//**********************************************
// activate the new IDT
spin_lock_irqsave(&lock,flags);
load_IDTR( newidtr );
spin_unlock_irqrestore(&lock,flags);
// smp_call_function( load_IDTR, oldidtr, 1, 1 );
return SUCCESS;
}
void pgfault_exit( void )
{
// reactivate the old IDT
unsigned long flags;
spinlock_t lock =SPIN_LOCK_UNLOCKED;
spin_lock_irqsave(&lock,flags);
load_IDTR( oldidtr );
spin_unlock_irqrestore(&lock,flags);
// smp_call_function( load_IDTR, oldidtr, 1, 1 );
// release allocated kernel page
if ( kpage ) free_page( kpage );
}
EXPORT_SYMBOL_GPL(my_intrept);
MODULE_LICENSE("GPL");
module_init( pgfault_init);
module_exit( pgfault_exit);
Why don't you use the kernel's helper functions instead of fiddling with bits manually?
Check this (it goes in the module's init function):
/* declarations added so the snippet is self-contained */
static struct desc_ptr __IDT_register; /* saved image of the original IDTR */
static unsigned long __IDT_page;       /* page backing the copied IDT */

/* smp_call_function() wants a void(void*) callback, so wrap load_idt() */
static void __load_idt(void *info)
{
    load_idt((struct desc_ptr *)info);
}

/* in the module's init function: */
struct desc_ptr newidtr;
gate_desc *oldidt, *newidt;
store_idt(&__IDT_register);
oldidt = (gate_desc *)__IDT_register.address;
__IDT_page = __get_free_page(GFP_KERNEL);
if (!__IDT_page)
    return -ENOMEM;
newidtr.address = __IDT_page;
newidtr.size = __IDT_register.size;
newidt = (gate_desc *)newidtr.address;
memcpy(newidt, oldidt, __IDT_register.size);
pack_gate(&newidt[PGFAULT_INT], GATE_INTERRUPT, (unsigned long)isr0x0E, 0, 0, __KERNEL_CS);
__load_idt((void *)&newidtr);
smp_call_function(__load_idt, &newidtr, 0, 1);
return 0;
I have tested it and it works!
Your segment selector in your trap gate descriptor appears to be hardcoded to 0x0010, when it should be __KERNEL_CS (which is 0x0060 in the 2.6.26 kernel sources I have).
By the way, this is pretty baroque:
gate_desc_orig1 = (unsigned long)isr0x0E;
gate_desc = gate_desc_orig1 & 0x00000000FFFFFFFF;
gate_desc = gate_desc | ( gate_desc << 32 );
gate_desc1= 0xFFFF0000;
gate_desc1= gate_desc1 << 32;
gate_desc1= gate_desc1 | 0x0000FFFF;
gate_desc = gate_desc & gate_desc1;
gate_desc2= 0x0000EF00;
gate_desc2= gate_desc2 <<32;
gate_desc2= gate_desc2 | 0x00100000;
gate_desc = gate_desc | gate_desc2; // trap-gate
You could simplify that down to (with the __KERNEL_CS fix):
gate_desc = (unsigned long long)isr0x0E * 0x100000001ULL;
gate_desc &= 0xFFFF00000000FFFFULL;
gate_desc |= 0x0000EF0000000000ULL; // trap-gate
gate_desc |= (unsigned long long)__KERNEL_CS << 16;
For reference, here is a working implementation of a custom page-fault handler for the Linux x86_64 architecture. I tested this module myself with kernel 3.2; it works perfectly.
https://github.com/RichardUSTC/intercept-page-fault-handler
