In the code below, the decrypted text does not match the original plaintext. The first 12 bytes are messed up. Note that block cipher padding has been disabled. I have tried different values for BUF_SIZE, all multiples of 16 - every time the first 12 bytes of the decrypted data is wrong. Here's the output:
plain buf[32]:
11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11
outlen=32
outlen=32
dec buf[32]:
0C 08 01 46 6D 3D FC E9 98 0A 2D E1 AF A3 95 3A
0B 31 1B 9D 11 11 11 11 11 11 11 11 11 11 11 11
Here's the code:
#include <stdio.h>
#include <string.h>
#include <CommonCrypto/CommonCryptor.h>
/*
 * Hex-dump `len` bytes of `pkt`, 16 bytes per output row.
 * When `label` is non-NULL, a "label[len]:" header line is printed first.
 */
static void
dumpbuf(const char* label, const unsigned char* pkt, unsigned int len)
{
    const unsigned int bytesPerLine = 16;  /* bytes printed per row */
    if (label) {
        /* %u matches the unsigned parameter (the original used %d). */
        printf("%s[%u]:\n", label, len);
    }
    for (unsigned int i = 0; i < len; i++) {
        /* Break the line before every 16th byte except the first.
         * (The original compared against int(len), a function-style cast
         * that is valid C++ but not C; an unsigned index needs no cast.) */
        if (i && ((i % bytesPerLine) == 0)) {
            printf("\n");
        }
        printf("%02X ", (unsigned int)pkt[i] & 0xFFu);
    }
    printf("\n");
}
int main(int argc, char* argv[])
{
    unsigned char key[16];
    unsigned char iv[16];
    memset(key, 0x22, sizeof(key));
    memset(iv, 0x33, sizeof(iv));
#define BUF_SIZE 32
    unsigned char plainBuf[BUF_SIZE];
    unsigned char encBuf[BUF_SIZE];
    memset(plainBuf, 0x11, sizeof(plainBuf));
    dumpbuf("plain buf", plainBuf, sizeof(plainBuf));

    /*
     * BUG FIX: CCCrypt() reports the moved-byte count through a size_t*.
     * The original declared `int outlen` and cast &outlen to (size_t *);
     * on an LP64 platform CCCrypt then stores 8 bytes into a 4-byte
     * object, clobbering the adjacent stack bytes — which is what
     * corrupted the start of the decrypted buffer.  Use a real size_t
     * and drop the casts.
     */
    size_t outlen = 0;
    CCCryptorStatus status;

    /* AES-128 CBC (CBC is the default; options == 0, no padding flag). */
    status = CCCrypt(kCCEncrypt, kCCAlgorithmAES128, 0,
        key, kCCKeySizeAES128, iv, plainBuf, sizeof(plainBuf),
        encBuf, sizeof(encBuf), &outlen);
    if (kCCSuccess != status) {
        fprintf(stderr, "FEcipher: CCCrypt failure\n");
        return -1;
    }
    /* %zu is the correct conversion for size_t (was %d with int). */
    printf("outlen=%zu\n", outlen);

    /* Decrypt in place back into plainBuf with the same key/IV. */
    status = CCCrypt(kCCDecrypt, kCCAlgorithmAES128, 0,
        key, kCCKeySizeAES128, iv, encBuf, sizeof(encBuf),
        plainBuf, sizeof(plainBuf), &outlen);
    if (kCCSuccess != status) {
        fprintf(stderr, "FEcipher: CCCrypt failure\n");
        return -1;
    }
    printf("outlen=%zu\n", outlen);

    dumpbuf("dec buf", plainBuf, sizeof(plainBuf));
    return 0;
}
Thanks,
Hari
#owlstead, thanks for your response. CBC is the default - you don't need to specify anything special in the options to enable it.
The same code using CCCrypt() was working before. I don't know what changed - may be a new library was installed during an update. Instead of using the convenience function CCCrypt() I'm now using the Create/Update/Final API - it works, so I have a workaround.
outlen should be size_t, not int.
Related
For example, in /crypto/nhpoly1305.c:
42 static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
43 __le64 hash[NH_NUM_PASSES])
44 {
45 u64 sums[4] = { 0, 0, 0, 0 };
46
47 BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
48 BUILD_BUG_ON(NH_NUM_PASSES != 4);
49
50 while (message_len) {
51 u32 m0 = get_unaligned_le32(message + 0);
52 u32 m1 = get_unaligned_le32(message + 4);
53 u32 m2 = get_unaligned_le32(message + 8);
54 u32 m3 = get_unaligned_le32(message + 12);
What is the meaning of these values added to the argument? I assume it has something to do with bits/bytes...
I am implementing a multi-threaded concurrency exercise in C++11, but I am stuck.
int product_val = 0;
- thread 1 : increase product_val, notify thread 2, then wait for thread 2 to print product_val;
- thread 2 : wait for the notification, decrease product_val, and print product_val
1 #include <iostream>
2 #include <thread>
3 #include <condition_variable>
4 #include <mutex>
5 #include <chrono>
6 #include <queue>
7 using namespace std;
8 int product_val = 0;
9 std::condition_variable cond;
10 std::mutex sync;
11 int main() {
12 //thread 2
// Consumer thread: intended to wait on `cond`, then decrement and print.
13 std::thread con = std::thread([&](){
14 while (1)
15 {
16 std::unique_lock<std::mutex> l(sync);
// NOTE(review): wait() without a predicate — a notify_one() issued before
// this wait begins is lost, and a spurious wakeup decrements product_val
// even when nothing was produced.  Prefer the predicate overload, e.g.
// cond.wait(l, []{ return product_val > 0; }).
17 cond.wait(l);
18 product_val--;
19 printf("Consumer product_val = %d \n", product_val);
20 l.unlock();
21 }
22 });
23 //thread 1 (main thread) process
24 for (int i = 0; i < 5; i++)
25 {
26 std::unique_lock<std::mutex> l(sync);
27 product_val++;
28 std::cout << "producer product val " << product_val;
// Wake the consumer; it still has to acquire the mutex released below.
29 cond.notify_one();
30 l.unlock();
31 l.lock();
// NOTE(review): this busy-wait spins while HOLDING the mutex, so the
// consumer can never acquire it to decrement product_val — the loop never
// exits.  Reading product_val here while another thread writes it without
// synchronization is also a data race.  Wait on a condition_variable with
// a predicate (product_val == 0) instead of spinning.
32 while (product_val)
33 {
34
35 }
36 std::cout << "producer product val " << product_val;
37 l.unlock();
38 }
// NOTE(review): `con` is never joined or detached; if this return were
// reached, std::thread's destructor would call std::terminate().
39 return 0;
40 }
The kernel below in C language changes a cell of an array:
// Kernel under discussion: writes 12 into the first cell of the first matrix.
// The parameter `int *mt[matrix_size]` decays to `int **`, so the argument
// must be a DEVICE pointer to an array of device pointers; passing a host
// array of device pointers (as the caller below does) makes this
// dereference an invalid address on the GPU.
__global__ void test(int *mt[matrix_size])
{
mt[0][0]=12;
}
The code below copies kernel results to host but it doesn't send the array to host correctly:
// NOTE(review): several problems in this snippet:
//  - each cudaMalloc reserves only sizeof(int), yet n*n*sizeof(int) bytes
//    are copied out of that allocation below — an out-of-bounds device read;
//  - d_matrix itself is a HOST array of device pointers, so the kernel
//    dereferences host memory (invalid address);
//  - no cudaError_t return value is checked, so everything fails silently.
int *matrix[matrix_size],*d_matrix[matrix_size];
for(int i=0;i<matrix_size;i++)
// Host-side result buffers, n*n ints each.
matrix[i] = (int *)malloc(n*n*sizeof(int));
for(int i=0;i<matrix_size;i++)
// Allocates only ONE int per matrix (see note above).
cudaMalloc((void**)&d_matrix[i],sizeof(int));
test<<<1,1>>>(d_matrix);
cudaMemcpy(*matrix,*d_matrix,n*n*sizeof(int),cudaMemcpyDeviceToHost);
printf("\n\n %d \n\n",matrix[0][0]); //the result is zero instead of 12
How can I fix the problem?
You have gotten a lot wrong here.
The root cause is that d_matrix is in host memory and can't be passed directly to a kernel. If you check runtime errors you will see that first the cudaMemcpy call fails because of the wrong direction argument, and then, when you fix that, the kernel fails with an invalid address error.
To fix this, you need to allocate a copy of d_matrix on the GPU and copy d_matrix to that copy. This is because the array you are passing to the kernel decays to a pointer and is not passed by value.
Something like this:
#include <cstdio>
const int n = 9;
const int matrix_size = 16;
// Writes a distinct value (12 + thread index) into element [0] of each
// matrix.  `mt` must be a device pointer to an array of matrix_size device
// pointers.  Assumes a launch of one block with blockDim.x <= matrix_size
// threads (here: <<<1, matrix_size>>>); there is no bounds check.
__global__
void test(int *mt[matrix_size])
{
mt[threadIdx.x][0] = 12 + threadIdx.x;
}
int main()
{
    // Host result buffers and one device buffer per matrix.
    int *matrix[matrix_size], *d_matrix[matrix_size];
    for (int i = 0; i < matrix_size; i++) {
        matrix[i] = (int *)malloc(n * n * sizeof(int));
        cudaMalloc((void**)&d_matrix[i], n * n * sizeof(int));
    }
    // The kernel parameter decays to int**, so the table of device
    // pointers must itself live in device memory: copy d_matrix over.
    int **dd_matrix;
    cudaMalloc(&dd_matrix, matrix_size * sizeof(int*));
    cudaMemcpy(dd_matrix, d_matrix, matrix_size * sizeof(int *), cudaMemcpyHostToDevice);
    test<<<1,matrix_size>>>(dd_matrix);
    // Launch-configuration errors surface via cudaGetLastError();
    // in-kernel faults surface at the next synchronizing call.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < matrix_size; i++) {
        cudaMemcpy(matrix[i], d_matrix[i], n*n*sizeof(int), cudaMemcpyDeviceToHost);
        printf("%d = %d \n", i, matrix[i][0]);
    }
    // Release device and host allocations (the original leaked all of them).
    for (int i = 0; i < matrix_size; i++) {
        cudaFree(d_matrix[i]);
        free(matrix[i]);
    }
    cudaFree(dd_matrix);
    return 0;
}
Which when run gives this:
$ nvcc -g -G -arch=sm_52 -o bozocu bozocu.cu
$ ./bozocu
0 = 12
1 = 13
2 = 14
3 = 15
4 = 16
5 = 17
6 = 18
7 = 19
8 = 20
9 = 21
10 = 22
11 = 23
12 = 24
13 = 25
14 = 26
15 = 27
is, I believe, more in line with what you were expecting.
A new satellite data processing center has just been completed and is ready for initial testing using live data sent down from an orbiting satellite. As the very first messages are displayed on the screen, you notice that many of the data values are wildly out of range.
For example, on the terminal screen is something defined as “delta time” and it seems to be out of the expected range [0.01 to 10,000.00 seconds], but the value displayed (as a double) is [-4.12318024e-028 seconds]. After further investigation into the raw byte-based data stream, you find the original data being sent down from the satellite for this double word as [0xC0 0x83 0xA1 0xCA 0x66 0x55 0x40 0xBA]. On one of the old terminals, this data is displayed correctly and is within the expected range.
a. [5] What caused this problem?
b. [5] If this is the real problem, what should the actual value be?
Ah, Failure Mode Analysis. Very important indeed!
Well, other terminal shows data correctly --> there is incompatibility between terminal and data.
Big Endian, little Endian perhaps? I am expecting the "old" terminal to be little Endian because it may have been coded in C. Now you can interpret the data.
Here is some code
#include <stdio.h>
// Punning view: a double overlaid with its 8 raw bytes (same storage).
union myW {
double x;
// Received as: [0xC0 0x83 0xA1 0xCA 0x66 0x55 0x40 0xBA]
// Below: the received stream with each adjacent byte pair swapped
// (a 16-bit byte swap) — the ordering that yields the in-range value.
unsigned char d[8] = {0x83, 0xC0,0xCA, 0xA1, 0x55, 0x66, 0xBA, 0x40};
};
// Punning view: the received bytes stored exactly as they arrived.
union myBad {
double x;
// Received as: [0xC0 0x83 0xA1 0xCA 0x66 0x55 0x40 0xBA]
// Reinterpreting these bytes directly as a double reproduces the wildly
// out-of-range value (-4.12...e-28) that the new terminal displayed.
unsigned char d[8] = {0xC0, 0x83,0xA1, 0xCA, 0x66, 0x55, 0x40, 0xBA};
};
int main(void)
{
    // Byte layout of an ordinary double, for reference.
    myW value;
    value.x = 1.0; // a known-good double to compare byte patterns against
    printf("Something reasonable: \n");
    for (int idx = 0; idx != 8; ++idx)
        printf("%u ", value.d[idx]);

    // Default-initialized: carries the pair-swapped received bytes.
    myW received;
    printf("\nWhat shouldve been displayed:\n");
    for (int idx = 0; idx != 8; ++idx)
        printf("%u ", received.d[idx]);
    printf("\n%f\n", received.x);

    // Default-initialized: the raw received bytes, reinterpreted as-is.
    myBad bad;
    printf("\nBad output as:\n");
    for (int idx = 0; idx != 8; ++idx)
        printf("%u ", bad.d[idx]);
    printf("\n%0.30f\n", bad.x);
}
Output:
Something reasonable:
0 0 0 0 0 0 240 63
What shouldve been displayed::
131 192 202 161 85 102 186 64
6758.334500
Bad output as:
192 131 161 202 102 85 64 186
-0.000000000000000000000000000412
Compiled with g++
Is there any constraint on mapping a kernel module's memory via vmap? On my system I wrote a simple kernel module that maps a kernel function (printk) and a module function (printx) and checks whether the mappings are equal. The result shows me that there is a problem with mapping the module's printx — the mapping and the function's code are not equal. Could someone explain to me what I am doing wrong? And here is the code:
// vmap-test.c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
/* Dummy function: exists only to give us an address inside this module's
 * text section, to contrast with the core-kernel symbol printk. */
int printx(void)
{
return 0;
}
/*
 * Remap the page containing `address` through vmap() and dump the first
 * 16 bytes of the original and of the alias side by side via printk.
 *
 * BUG FIX: virt_to_page() is only valid for direct-mapped (lowmem)
 * addresses such as core-kernel text.  Module text is allocated in
 * vmalloc/module space, where virt_to_page() computes a bogus struct
 * page — which is why the printx dump did not match.  For such
 * addresses the page must be looked up by walking the page tables with
 * vmalloc_to_page().
 */
void vmap_action(unsigned long address)
{
	void *mapping;
	struct page *page;

	if (is_vmalloc_or_module_addr((const void *)address))
		page = vmalloc_to_page((const void *)address);
	else
		page = virt_to_page(address);

	mapping = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
	if (mapping) {
		int i = 0;
		/* The alias shares the page, so keep the in-page offset. */
		void *data = mapping + offset_in_page(address);
		printk("VMAP: src %p -> dst %p\n", (void *)address, data);
		for (i = 0; i < 16; i++) {
			printk("%.02x %.02x\n",
			       ((u8 *)address)[i], ((u8 *)data)[i]);
		}
		vunmap(mapping);
	}
}
/* Module entry point: dump a core-kernel symbol (printk) and a module
 * symbol (printx) through their vmap aliases, for comparison. */
int my_module_init(void)
{
vmap_action((unsigned long)printk);
vmap_action((unsigned long)printx);
return 0;
}
module_init(my_module_init);
/* Module exit: nothing to undo — vmap_action() unmaps everything it maps. */
void my_module_exit(void)
{
}
module_exit(my_module_exit);
And the result with dmesg is:
vmap(printk)
[88107.398146] VMAP: src ffffffff813dfaef -> dst ffffc900117ddaef
[88107.398148] 55 55
[88107.398149] 48 48
[88107.398150] 89 89
[88107.398151] e5 e5
[88107.398152] 48 48
[88107.398153] 83 83
[88107.398154] ec ec
[88107.398155] 50 50
[88107.398156] 0f 0f
[88107.398156] 1f 1f
[88107.398157] 44 44
[88107.398158] 00 00
[88107.398159] 00 00
[88107.398160] 48 48
[88107.398161] 8d 8d
[88107.398162] 45 45
vmap(printx)
[88107.398164] VMAP: src ffffffffa009a010 -> dst ffffc900117fd010
[88107.398166] 55 35
[88107.398167] 48 fb
[88107.398168] 89 53
[88107.398169] e5 d5
[88107.398170] 0f f7
[88107.398171] 1f 97
[88107.398171] 44 ee
[88107.398172] 00 fd
[88107.398173] 00 d5
[88107.398174] 31 2d
[88107.398175] c0 bf
[88107.398176] 5d f6
[88107.398177] c3 2d
[88107.398178] 0f bd
[88107.398179] 1f b7
[88107.398180] 00 99
Any suggestions are welcome :) Thanks.
Well, I found that similar function is implemented in KSplice project and here is it:
/*
* map_writable creates a shadow page mapping of the range
* [addr, addr + len) so that we can write to code mapped read-only.
*
* It is similar to a generalized version of x86's text_poke. But
* because one cannot use vmalloc/vfree() inside stop_machine, we use
* map_writable to map the pages before stop_machine, then use the
* mapping inside stop_machine, and unmap the pages afterwards.
*/
static void *map_writable(void *addr, size_t len)
{
void *vaddr;
/* Number of pages spanned by [addr, addr + len), accounting for the
 * offset of addr within its first page. */
int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
/* Start of the first page covered by addr. */
void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
int i;
if (pages == NULL)
return NULL;
for (i = 0; i < nr_pages; i++) {
/* Not inside any module => direct-mapped core kernel memory, so
 * virt_to_page() is the right lookup. */
if (__module_address((unsigned long)page_addr) == NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
pages[i] = virt_to_page(page_addr);
#else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
/* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21
 * This works around a broken virt_to_page() from the RHEL 5 backport
 * of x86-64 relocatable kernel support.
 */
pages[i] =
pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
#endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
WARN_ON(!PageReserved(pages[i]));
} else {
/* Module memory is vmalloc-backed: walk the page tables.
 * NOTE(review): this passes `addr`, not `page_addr`; for a range
 * spanning more than one module page every iteration would
 * resolve the same first page — verify against upstream KSplice. */
pages[i] = vmalloc_to_page(addr);
}
if (pages[i] == NULL) {
kfree(pages);
return NULL;
}
page_addr += PAGE_SIZE;
}
/* Build the writable alias and return it offset back to addr. */
vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (vaddr == NULL)
return NULL;
return vaddr + offset_in_page(addr);
}
So it turns out kernel memory and module memory are handled differently: if the page does not belong to any module, virt_to_page() is used; otherwise vmalloc_to_page() is used instead, since module memory is vmalloc-backed rather than direct-mapped. I'll check if it helps and post the result later.