DPDK changes packet content when sending packets through `rte_eth_tx_burst` - endianness

I construct DPDK packets with the following code:
#define PKG_GEN_COUNT 1
#define EIU_HEADER_LEN 42
#define ETHERNET_HEADER_LEN 14
#define IP_DEFTTL 64 /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
#define MEGA_JOB_GET 0x2
#define MEGA_JOB_SET 0x3
#define MEGA_END_MARK_LEN 2
#define PROTOCOL_TYPE_LEN 2U
#define KEY_LEN 8
#define VAL_LEN 8
#define PROTOCOL_KEYLEN_LEN 2U
#define PROTOCOL_VALLEN_LEN 4U
#define PROTOCOL_HEADER_LEN 8U
struct rte_mbuf *tx_bufs_pt[PKG_GEN_COUNT];
struct rte_ether_hdr *ethh;
struct rte_ipv4_hdr *ip_hdr;
struct rte_udp_hdr *udph;
for (int i = 0; i < PKG_GEN_COUNT; i++) {
    struct rte_mbuf *pkt = (struct rte_mbuf *)rte_pktmbuf_alloc(
        (struct rte_mempool *)send_mbuf_pool);
    if (pkt == NULL)
        rte_exit(EXIT_FAILURE,
                 "Cannot alloc storage memory in port %" PRIu16 "\n", port);
    pkt->data_len = 1484;
    pkt->nb_segs = 1;            // nb_segs
    pkt->pkt_len = pkt->data_len;
    pkt->ol_flags = PKT_TX_IPV4; // ol_flags
    pkt->vlan_tci = 0;           // vlan_tci
    pkt->vlan_tci_outer = 0;     // vlan_tci_outer
    pkt->l2_len = sizeof(struct rte_ether_hdr);
    pkt->l3_len = sizeof(struct rte_ipv4_hdr);

    ethh = (struct rte_ether_hdr *)rte_pktmbuf_mtod(pkt, unsigned char *);
    ethh->s_addr = S_Addr;
    ethh->d_addr = D_Addr;
    ethh->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

    ip_hdr = (struct rte_ipv4_hdr *)((unsigned char *)ethh +
                                     sizeof(struct rte_ether_hdr));
    ip_hdr->version_ihl = IP_VHL_DEF;
    ip_hdr->type_of_service = 0;
    ip_hdr->fragment_offset = 0;
    ip_hdr->time_to_live = IP_DEFTTL;
    ip_hdr->next_proto_id = IPPROTO_UDP;
    ip_hdr->packet_id = 0;
    ip_hdr->total_length = rte_cpu_to_be_16(pktlen);
    ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
    ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);
    ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);

    udph = (struct rte_udp_hdr *)((unsigned char *)ip_hdr +
                                  sizeof(struct rte_ipv4_hdr));
    udph->src_port = 123;
    udph->dst_port = 123;
    udph->dgram_len =
        rte_cpu_to_be_16((uint16_t)(pktlen - sizeof(struct rte_ether_hdr) -
                                    sizeof(struct rte_ipv4_hdr)));

    tx_bufs_pt[i] = pkt;
}
char *ptr = NULL;
uint64_t set_key = 1;
while (1) {
    for (i = 0; i < PKG_GEN_COUNT; i++) {
        /* Load request */
        ptr = (char *)((char *)rte_pktmbuf_mtod(tx_bufs_pt[i], char *) +
                       EIU_HEADER_LEN);
        tx_pkt_load(ptr, &set_key);
    }
    int nb_tx = rte_eth_tx_burst(port, queue_id, tx_bufs_pt, PKG_GEN_COUNT);
}
The tx_pkt_load function fills in the IP packets' payload:
static void tx_pkt_load(char *ptr, uint64_t *start_set_key) {
    uint64_t k, get_key, set_key = *start_set_key;
    for (k = 0; k < number_packet_set[WORKLOAD_ID]; k++) {
        *(uint16_t *)ptr = MEGA_JOB_SET;
        ptr += sizeof(uint16_t);
        *(uint16_t *)ptr = KEY_LEN;
        ptr += sizeof(uint16_t);
        *(uint32_t *)ptr = VALUE_LEN;
        ptr += sizeof(uint32_t);
        set_key++;
        *(uint64_t *)(ptr) = set_key;
        ptr += KEY_LEN;
        *(uint64_t *)(ptr) = set_key + 1;
        ptr += VALUE_LEN;
        *(uint16_t *)ptr = MEGA_JOB_GET;
        ptr += sizeof(uint16_t);
        *(uint16_t *)ptr = KEY_LEN;
        ptr += sizeof(uint16_t);
        get_key = set_key;
        *(uint64_t *)(ptr) = get_key;
        ptr += KEY_LEN;
    }
    *start_set_key = set_key;
    /* pkt ending mark */
    *(uint16_t *)ptr = 0xFFFF;
}
Before I call rte_eth_tx_burst, I use the show_pkt function to dump the IP packets' content.
void show_pkt(struct rte_mbuf *pkt) {
    int pktlen = pkt->data_len - EIU_HEADER_LEN;
    uint8_t *ptr = (uint8_t *)((uint8_t *)rte_pktmbuf_mtod(pkt, uint8_t *) +
                               EIU_HEADER_LEN);
    while (*(uint16_t *)ptr != 0xFFFF) {
        uint32_t key_len = *(uint16_t *)(ptr + PROTOCOL_TYPE_LEN);
        if (*(uint16_t *)ptr == MEGA_JOB_GET) {
            fprintf(
                fp[sched_getcpu()], "GET\t%lu\n",
                *(uint64_t *)(ptr + PROTOCOL_TYPE_LEN + PROTOCOL_KEYLEN_LEN));
            ptr += PROTOCOL_TYPE_LEN + PROTOCOL_KEYLEN_LEN + key_len;
        } else if (*(uint16_t *)ptr == MEGA_JOB_SET) {
            uint32_t val_len =
                *(uint16_t *)(ptr + PROTOCOL_TYPE_LEN + PROTOCOL_KEYLEN_LEN);
            fprintf(fp[sched_getcpu()], "SET\t%lu\t%lu\n",
                    *(uint64_t *)(ptr + PROTOCOL_HEADER_LEN),
                    *(uint64_t *)(ptr + PROTOCOL_HEADER_LEN + key_len));
            ptr += PROTOCOL_TYPE_LEN + PROTOCOL_KEYLEN_LEN +
                   PROTOCOL_VALLEN_LEN + key_len + val_len;
        }
    }
    fprintf(fp[sched_getcpu()], "END_MARK: %04x \n", *(uint16_t *)ptr);
    fprintf(fp[sched_getcpu()], "\n");
    fflush(fp[sched_getcpu()]);
}
The generated file shows the expected packet content. Each GET's argument matches the first argument of the preceding SET, and the GET arguments increase monotonically. Each SET's second argument equals its first argument plus one, and the SET arguments likewise increase monotonically.
SET 82 83
GET 82
SET 83 84
GET 83
SET 84 85
GET 84
SET 85 86
GET 85
SET 86 87
GET 86
SET 87 88
GET 87
SET 88 89
GET 88
SET 89 90
GET 89
SET 90 91
GET 90
SET 91 92
GET 91
SET 92 93
GET 92
SET 93 94
GET 93
SET 94 95
GET 94
SET 95 96
GET 95
SET 96 97
GET 96
SET 97 98
GET 97
SET 98 99
GET 98
SET 99 100
GET 99
SET 100 101
GET 100
SET 101 102
GET 101
SET 102 103
GET 102
SET 103 104
GET 103
SET 104 105
GET 104
SET 105 106
GET 105
SET 106 107
GET 106
SET 107 108
GET 107
SET 108 109
GET 108
SET 109 110
GET 109
SET 110 111
GET 110
SET 111 112
GET 111
SET 112 113
GET 112
SET 113 114
GET 113
SET 114 115
GET 114
SET 115 116
GET 115
SET 116 117
GET 116
SET 117 118
GET 117
SET 118 119
GET 118
SET 119 120
GET 119
SET 120 121
GET 120
SET 121 122
GET 121
END_MARK: ffff
However, when I use tcpdump to capture the received packets on the target machine, the captured packets do not contain the expected content. I also tried using rte_eth_rx_burst to receive the packets and dump their content through the same show_pkt function, and it shows the same result, listed below (a sketch of that receive path follows the listing). It's very odd.
SET 82 83
GET 82
SET 83 84
GET 83
SET 84 85
GET 84
SET 85 86
GET 85
SET 86 87
GET 86
SET 87 88
GET 87
SET 88 89
GET 88
SET 89 90
GET 89
SET 90 91
GET 90
SET 91 92
GET 91
SET 92 93
GET 92
SET 93 94
GET 93
SET 94 95
GET 94
SET 95 96
GET 95
SET 96 97
GET 96
SET 97 98
GET 97
SET 98 99
GET 98
SET 99 100
GET 99
SET 100 101
GET 100
SET 101 102
GET 101
SET 102 103
GET 102
SET 103 104
GET 103
SET 104 105
GET 104
SET 105 106
GET 105
SET 106 107
GET 106
SET 107 108
GET 107
SET 108 109
GET 108
SET 109 110
GET 109
SET 110 111
GET 110
SET 111 112
GET 111
SET 112 113
GET 112
SET 73 74
GET 73
SET 74 75
GET 74
SET 75 76
GET 75
SET 76 77
GET 76
SET 77 78
GET 77
SET 78 79
GET 78
SET 79 80
GET 79
SET 80 81
GET 80
SET 81 82
GET 81
END_MARK: ffff
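For reference, the receive-side check mentioned above was done roughly like this (a sketch, not the original code; port and queue_id are assumed to be set up as in the TX path):
struct rte_mbuf *rx_bufs[PKG_GEN_COUNT];
uint16_t nb_rx = rte_eth_rx_burst(port, queue_id, rx_bufs, PKG_GEN_COUNT);
for (uint16_t j = 0; j < nb_rx; j++) {
    show_pkt(rx_bufs[j]);          /* same dump routine as on the TX side */
    rte_pktmbuf_free(rx_bufs[j]);  /* release the received mbuf */
}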
[UPDATE]
The packets dumped through rte_pktmbuf_dump contain the expected content, while the packets captured by tcpdump are odd.
The packets' content follows this pattern:
uint16_t (0x03)
uint16_t (0x08)
uint32_t (0x08)
uint64_t (x)
uint64_t (x + 1)
uint16_t (0x02)
uint16_t (0x8)
uint64_t (x)
The value x should increase monotonically across all packets. The second packet captured by tcpdump fails to follow this pattern: it starts with x = 82, yet at the end of the packet x is 81.
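For reference, each request in this pattern corresponds roughly to the following packed layouts (a sketch only; the struct names are mine, not from the original code):
#include <stdint.h>

struct __attribute__((packed)) set_request {
    uint16_t type;     /* MEGA_JOB_SET = 0x3 */
    uint16_t key_len;  /* 0x8 */
    uint32_t val_len;  /* 0x8 */
    uint64_t key;      /* x */
    uint64_t value;    /* x + 1 */
};

struct __attribute__((packed)) get_request {
    uint16_t type;     /* MEGA_JOB_GET = 0x2 */
    uint16_t key_len;  /* 0x8 */
    uint64_t key;      /* x */
};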
[UPDATE]
Part of the second packet dumped by rte_pktmbuf_dump:
00000030: 00 00 2A 00 00 00 00 00 00 00 2B 00 00 00 00 00
...
000005C0: 08 00 51 00 00 00 00 00 00 00 FF FF
Part of the second packet captured through tcpdump:
0x0020: 0800 0000 5200 0000 0000 0000 5300 0000
...
0x05b0: 0200 0800 5100 0000 0000 0000 ffff
The byte at offset 0x24 of the packet captured by tcpdump should be 0x2A, matching the byte at offset 0x32 of the packet dumped by rte_pktmbuf_dump, because the last 12 bytes of the two dumps are identical, which indicates they are the same packet.

The DPDK API, with the given options and logic, does not modify the packet content before the NIC transmits it. To confirm this, I tested the logic with tcpdump capturing the packets on the Linux side.
Note: since the exact code or snippet was not available, I have edited the code to meet the requirement. I am able to send and receive the packets without any issues.
DPDK test app cmd: sudo LD_LIBRARY_PATH=[path to shared dpdk library] ./a.out --no-pci --vdev=net_tap0 -l 10 -- -p 0x1
tcpdump cmd: sudo tcpdump -exxxi dtap0 -Q in
Code: https://paste.ubuntu.com/p/zHP5q89yMz/
pktmbuf_dump:
01 02 03 04 05 06 01 02 03 04 05 06 08 00 45 00
05 A2 00 00 00 00 40 11 5B 2E 01 02 03 04 0A 0B
0C 0D 7B 00 7B 00 64 00 00 00 03 00 02 00 02 00
00 00 02 00 03 00 02 00 02 00 02 00 FF FF 00 00
tcpdump:
0x0000: 0102 0304 0506 0102 0304 0506 0800 4500
0x0010: 05a2 0000 0000 4011 5b2e 0102 0304 0a0b
0x0020: 0c0d 7b00 7b00 6400 0000 0300 0200 0200
0x0030: 0000 0200 0300 0200 0200 0200 ffff 0000

Related

Why isn't the smppcxx sample_app working? It returns "SMPP error: Invalid command_length" and crashes

I took the sample_app from the smppcxx library and changed the settings to:
const std::string ipaddr = "194.228.174.1";
const Smpp::Uint16 port = 9111;
const Smpp::SystemId sysid("MaxiTipSMPP");
const Smpp::Password pass(<actual_password>);
const Smpp::SystemType systype("");
const Smpp::Uint8 infver = 0x34;
const Smpp::ServiceType servtype("");
const Smpp::Address srcaddr("234567");
const Smpp::Address dstaddr("420606752839");
const std::string msgtext = "Hello smsc";
The code called is:
Socket sd;
sd.connect(ipaddr.c_str(), port);
send_bind(sd);
read_bind_resp(sd);
//send_enquire_link(sd);
//read_enquire_link_resp(sd);
send_submit_sm(sd);
read_submit_sm_resp(sd);
Smpp::Uint32 seqnum = read_deliver_sm(sd);
send_deliver_sm_resp(sd, seqnum);
//send_data_sm(sd);
//read_data_sm_resp(sd);
//seqnum = read_deliver_sm(sd);
//send_deliver_sm_resp(sd, seqnum);
send_unbind(sd);
read_unbind_resp(sd);
and the problem happens in read_submit_sm_resp(sd) (or in read_enquire_link_resp(sd) if uncommented):
Buffer buf;
buf = read_smpp_pdu(sd, buf);
std::cout << "\nRead a submit sm resp\n";
Smpp::hex_dump(&buf[0], buf.size(), std::cout);
Smpp::SubmitSmResp pdu;
std::cout << "read_submit_sm_resp buf.size() is " << buf.size() << std::endl;
pdu.decode(&buf[0]);
std::string sid = pdu.message_id();
printf("response message_id: \"%s\"\n", sid.c_str());
on the line
pdu.decode(&buf[0]);
Why? The application crashes on that call. I expected the code to work as is, but it just doesn't.
Here is the output:
Sending a bind transceiver
00 00 00 2a 00 00 00 09 00 00 00 00 00 00 00 01 ...*............
4d 61 78 69 54 69 70 53 4d 50 50 00 MaxiTipSMPP.password
Read a bind response
00 00 00 15 80 00 00 09 00 00 00 00 00 00 00 01 ................
53 4d 53 43 00 SMSC.
read_bind_resp buf.size() is 21
response system_id: "SMSC"
Sending a submit sm
00 00 00 3d 00 00 00 04 00 00 00 00 00 00 00 01 ...=............
00 00 00 32 33 34 35 36 37 00 01 01 34 32 30 36 ...234567...4206
30 36 37 35 32 38 33 39 00 00 00 00 00 00 01 00 06752839........
00 00 0a 48 65 6c 6c 6f 20 73 6d 73 63 ...Hello smsc
Read a submit sm resp
00 00 00 a4 00 00 00 05 00 00 00 00 00 00 00 01 ................
00 01 01 39 39 39 30 33 30 00 01 01 34 32 30 36 ...999030...4206
30 36 37 35 32 38 33 39 00 04 00 00 00 00 00 00 06752839........
00 00 47 69 64 3a 66 62 32 37 37 66 62 34 33 66 ..Gid:fb277fb43f
63 31 34 36 66 30 39 61 39 31 37 37 32 63 37 63 c146f09a91772c7c
31 33 64 65 35 62 20 64 6f 6e 65 20 64 61 74 65 13de5b done date
3a 31 37 30 32 30 36 30 35 30 37 30 34 20 73 74 :170206050704 st
61 74 3a 55 4e 44 45 4c 49 56 00 1e 00 21 66 62 at:UNDELIV...!fb
32 37 37 66 62 34 33 66 63 31 34 36 66 30 39 61 277fb43fc146f09a
39 31 37 37 32 63 37 63 31 33 64 65 35 62 00 04 91772c7c13de5b..
27 00 01 05 '...
read_submit_sm_resp buf.size() is 164
SMPP error: Invalid command_length
I added some output: it reports a size of 164, and I see 164 bytes; for the bind response, which works without problems, the size is 21 and I see 21 bytes. Should I fix the decode function somehow?
Smpp::SubmitSmResp::decode(const Smpp::Uint8* buff)
{
    Response::decode(buff);
    Smpp::Uint32 len = Response::command_length();
    Smpp::Uint32 offset = 16;
    const char* err = "Bad length in submit_sm_resp";
    if(len < offset)
        throw Error(err);
    const Smpp::Char* sptr = reinterpret_cast<const Smpp::Char*>(buff);
    message_id_ = &sptr[offset];
    offset += message_id_.length() + 1;
    if(len < offset)
        throw Error(err);
    Header::decode_tlvs(buff + offset, len - offset);
}
I still think the library should work as is, so I guess I may need to change some setting or something. Has anyone had the same problem? Any idea what to do? All I want is to send SMSes, at most about 100 a day...
I managed to fix it.
1) Keep a single open connection instead of opening a new connection for every SMS. The provider banned me because of too many connection openings.
2) The flow isn't send->read->send->read->...; the responses should be read asynchronously, so the client must parse each response to determine what type of response was received (see the sketch below).
3) The connection should be maintained with send_enquire_link.
I guess that programming this gateway for sending SMSes was well beyond the scope of one man-day for someone like me who knew nothing about SMPP. I finished the task in about three days with quite a lot of overnight work. Why do I add that? Because the main problem was my approach, driven by the assigned time: I assumed there had to be a simple solution for a one-man-day task...
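For point 2, a minimal sketch of dispatching on the command_id of each incoming PDU (assuming the standard SMPP 3.4 header of four big-endian 32-bit fields: command_length, command_id, command_status, sequence_number; the function names here are mine, not from smppcxx):
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void dispatch_pdu(const uint8_t *buf) {
    uint32_t command_id = be32(buf + 4);  /* second header field */
    switch (command_id) {
    case 0x80000004: /* submit_sm_resp: read the message_id */        break;
    case 0x00000005: /* deliver_sm: answer with deliver_sm_resp */    break;
    case 0x80000015: /* enquire_link_resp: keep-alive acknowledged */ break;
    case 0x00000015: /* enquire_link: reply with enquire_link_resp */ break;
    default:
        printf("unexpected command_id 0x%08X\n", (unsigned)command_id);
    }
}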

CertCreateCertificateContext returns CRYPT_E_ASN1_BADTAG / 8009310b

I realize this is a very similar post to others (e.g. this one), but there are details missing from the posts which might be significant for my case.
To start with, here's my simplified program:
#include "stdafx.h"
#include <windows.h>
#include <wincrypt.h>
int _tmain(int argc, _TCHAR* argv[])
{
// usage: CertExtract certpath
char keyFile[] = "C:\\Certificates\\public.crt";
BYTE lp[65536];
SECURITY_ATTRIBUTES sa;
HANDLE hKeyFile;
DWORD bytes;
PCCERT_CONTEXT certContext;
sa.nLength = sizeof(sa);
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = FALSE;
hKeyFile = CreateFile(keyFile, GENERIC_READ, FILE_SHARE_READ, &sa, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (hKeyFile) {
if (ReadFile(hKeyFile, lp, GetFileSize(hKeyFile, NULL), &bytes, NULL) && bytes > 0) {
certContext = CertCreateCertificateContext(X509_ASN_ENCODING, lp, bytes);
if (certContext) {
printf("yay!");
CertFreeCertificateContext(certContext);
}
else {
printf("Could not convert certificate to internal form\n");
}
}
else {
printf("Failed to read key file: %s\n", keyFile);
}
}
else {
printf("Failed to open key file: %s\n", keyFile);
}
CloseHandle(hKeyFile);
return 0;
}
In order to create the certificate, I used the following steps with OpenSSL:
C:\Certificates>openssl genrsa -out private.key 1024
Loading 'screen' into random state - done
Generating RSA private key, 1024 bit long modulus
......................................++++++
................++++++
e is 65537 (0x10001)
C:\Certificates>openssl req -new -key private.key -out public.csr
Loading 'screen' into random state - done
C:\Certificates>copy private.key private.key.org
1 file(s) copied.
C:\Certificates>openssl rsa -in private.key.org -out private.key
writing RSA key
C:\Certificates>openssl x509 -req -days 365 -in public.csr -signkey private.key -out public.crt
Loading 'screen' into random state - done
Signature ok
subject=/CN=My Signing Cert
Getting Private key
with the following conf file:
RANDFILE = .rnd
[ req ]
distinguished_name = req_distinguished_name
prompt = no
[ req_distinguished_name ]
commonName = My Signing Cert
The certificate file looks like:
-----BEGIN CERTIFICATE-----
MIIBqzCCARQCCQDUJyWk0OxlRTANBgkqhkiG9w0BAQUFADAaMRgwFgYDVQQDDA9N
eSBTaWduaW5nIENlcnQwHhcNMTYwMTA1MjIzODU5WhcNMTcwMTA0MjIzODU5WjAa
MRgwFgYDVQQDDA9NeSBTaWduaW5nIENlcnQwgZ8wDQYJKoZIhvcNAQEBBQADgY0A
MIGJAoGBAJobIhfSSMLEPeG9SOBelWHo4hjKXe8dT6cllPr6QXdXe2VNLh9fxVlx
spVGFQwjlF3OHYnmSQnY3m2b5wlFNYVuHvy8rUsZWOF4drSbiqWKh0TuJ+4MBeGq
EormTJ+kiGqNm5IVRrTu9OV8f0XQTGV1pxHircQxsGhxY5w0QTjjAgMBAAEwDQYJ
KoZIhvcNAQEFBQADgYEAedqjKfMyIFC8nUbJ6t/Y8D+fJFwCcdwojUFizr78FEwA
IZSas1b1bXSkA+QEooW7pYdBAfzNuD3WfZAIZpqFlr4rPNIqHzYa0OIdDPwzQQLa
3zPKqjj6QeTWEi5/ArzO+sTVv4m3Og3GQjMChb8H/GxsWdbComPVP82DTUet+ZU=
-----END CERTIFICATE-----
Converting the PEM-encoding to hex allows me to identify the parts of the certificate:
30 SEQUENCE //Certificate
(82 01 AB)
30 SEQUENCE //tbsCertificate
(82 01 14)
02 INTEGER //serialNumber
(09)
00 D4 27 25 A4 D0 EC 65 45
30 SEQUENCE //signature
(0D)
06 OBJECT IDENTIFIER
(09)
2A 86 48 86 F7 0D 01 01 05
05 NULL
(00)
30 SEQUENCE //issuer
(1A)
31 SET
(18)
30 SEQUENCE
(16)
06 OBJECT IDENTIFIER
(03)
55 04 03
0C UTF8String
(0F)
4D 79 20 53 69 67 6E 69 6E 67 20 43 65 72 74
30 SEQUENCE //validity
(1E)
17 UTCTime
(0D)
31 36 30 31 30 35 32 32 33 38 35 39 5A
17 UTCTime
(0D)
31 37 30 31 30 34 32 32 33 38 35 39 5A
30 SEQUENCE //subjectName
(1A)
31 SET
(18)
30 SEQUENCE
(16)
06 OBJECT IDENTIFIER
(03)
55 04 03
0C UTF8String
(0F)
4D 79 20 53 69 67 6E 69 6E 67 20 43 65 72 74
30 SEQUENCE //subjectPublicKeyInfo
(81 9F)
30 SEQUENCE //algorithmId
(0D)
06 OBJECT IDENTIFIER //algorithm
(09)
2A 86 48 86 F7 0D 01 01 01
05 NULL
(00)
03 BIT STRING //subjectPublicKey
(81 8D)
[00] //padding bits
30 SEQUENCE //RSAPublicKey
(81 89)
02 INTEGER //modulus
(81 81)
00 9A 1B 22 17 D2 48 C2 C4 3D E1 BD 48 E0 5E 95 61 E8 E2 18 CA 5D EF 1D 4F A7 25 94 FA FA 41 77 57 7B 65 4D 2E 1F 5F C5 59 71 B2 95 46 15 0C 23 94 5D CE 1D 89 E6 49 09 D8 DE 6D 9B E7 09 45 35 85 6E 1E FC BC AD 4B 19 58 E1 78 76 B4 9B 8A A5 8A 87 44 EE 27 EE 0C 05 E1 AA 12 8A E6 4C 9F A4 88 6A 8D 9B 92 15 46 B4 EE F4 E5 7C 7F 45 D0 4C 65 75 A7 11 E2 AD C4 31 B0 68 71 63 9C 34 41 38 E3 02 03 01 00 01
30 SEQUENCE //signatureAlgorithm
(0D)
06 OBJECT IDENTIFIER
(09)
2A 86 48 86 F7 0D 01 01 05
05 NULL
(00)
03 BIT STRING //signatureValue
(81 81)
[00] //padding bits
79 DA A3 29 F3 32 20 50 BC 9D 46 C9 EA DF D8 F0 3F 9F 24 5C 02 71 DC 28 8D 41 62 CE BE FC 14 4C 00 21 94 9A B3 56 F5 6D 74 A4 03 E4 04 A2 85 BB A5 87 41 01 FC CD B8 3D D6 7D 90 08 66 9A 85 96 BE 2B 3C D2 2A 1F 36 1A D0 E2 1D 0C FC 33 41 02 DA DF 33 CA AA 38 FA 41 E4 D6 12 2E 7F 02 BC CE FA C4 D5 BF 89 B7 3A 0D C6 42 33 02 85 BF 07 FC 6C 6C 59 D6 C2 A2 63 D5 3F CD 83 4D 47 AD F9 95
which appears to conform to the X.509 specs (as I would expect it to):
Certificate ::= {
tbsCertificate TBSCertificate,
signatureAlgorithm AlgorithmIdentifier,
signatureValue BIT STRING
}
TBSCertificate ::= SEQUENCE {
version [0] Version DEFAULT v1, <-- what does this mean?
serialNumber INTEGER,
signature AlgorithmIdentifier,
issuer Name,
validity Validity,
subjectName Name,
subjectPublicKeyInfo SubjectPublicKeyInfo
...
}
with the lone exception of the version part; it isn't clear to me whether it is optional or not (though it never seems to be present in certificates I create with OpenSSL).
I can open the certificate to import into a certificate store (and can successfully import to a store), so I don't think anything is specifically wrong with the file/encoding.
When I reach the call to CertCreateCertificateContext, my lp buffer looks like:
-----BEGIN CERTIFICATE-----\nMIIBqzCCARQCCQDUJyWk0OxlRTANBgkqhkiG9w0BAQUFADAaMRgwFgYDVQQDDA9N\neSBTaWduaW5nIENlcnQwHhcNMTYwMTA1MjIzODU5WhcNMTcwMTA0MjIzODU5WjAa\nMRgwFgYDVQQDDA9NeSBTaWduaW5nIENlcnQwgZ8wDQ...
and bytes = 639 -- which is the file size.
I've tried adding logic to strip out the certificate comments, but examples of importing a certificate in this manner don't indicate that should be necessary.
I've tried setting the dwCertEncodingType to X509_ASN_ENCODING | PKCS_7_ASN_ENCODING and PKCS_7_ASN_ENCODING out of desperation (though I don't believe I am using PKCS#7 encoding here...a little fuzzy on that).
Does anyone have any suggestions on what I might be doing incorrectly here? I appreciate it.
I figured out my issue. CertCreateCertificateContext is expecting the binary ASN.1 data, not the PEM-encoded certificate I created with openssl. I figured this out by using a Microsoft certificate generation tool and testing that certificate out:
C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin>makecert.exe -n "CN=Test Signing Cert" -b 01/06/2016 -e 01/06/2017 -len 1024 -r C:\Certificates\public_v2.crt
Succeeded
Looking at the file in a hex editor, it looked precisely like ASN.1 binary data. Next, I used the Copy to File feature of the certificate viewer (the one that launches when you double-click a certificate) to copy my original public.crt file to a DER-encoded binary X.509 (.CER) file, and verified that my program began to work (that is, CertCreateCertificateContext was now happy).
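Equivalently, OpenSSL can convert the PEM file to DER directly:
C:\Certificates>openssl x509 -in public.crt -outform DER -out public.der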
So, in case someone else is bumping up against the same issue I was having, here is a complete solution for importing a PEM-encoded certificate from a file into memory for use with the Crypto API:
#include "stdafx.h"
#include <windows.h>
#include <wincrypt.h>
#define LF 0x0A
int _tmain(int argc, _TCHAR* argv[])
{
char keyFile[] = "C:\\Certificates\\public.crt";
BYTE lp[65536];
SECURITY_ATTRIBUTES sa;
HANDLE hKeyFile;
DWORD bytes;
PCCERT_CONTEXT certContext;
BYTE *p;
DWORD flags;
sa.nLength = sizeof(sa);
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = FALSE;
hKeyFile = CreateFile(keyFile, GENERIC_READ, FILE_SHARE_READ, &sa, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (hKeyFile) {
if (ReadFile(hKeyFile, lp, GetFileSize(hKeyFile, NULL), &bytes, NULL) && bytes > 0) {
p = lp + bytes;
if (CryptStringToBinary((char *)lp, p - lp, CRYPT_STRING_BASE64_ANY, p, &bytes, NULL, &flags) && bytes > 0) {
certContext = CertCreateCertificateContext(X509_ASN_ENCODING, p, bytes);
if (certContext) {
printf("yay!");
CertFreeCertificateContext(certContext);
}
else {
printf("Could not convert certificate to internal form\n");
}
}
else {
printf("Failed to convert from PEM");
}
}
else {
printf("Failed to read key file: %s\n", keyFile);
}
}
else {
printf("Failed to open key file: %s\n", keyFile);
}
CloseHandle(hKeyFile);
return 0;
}
Note:
because I'm lazy, I decode the PEM encoding to binary in the same BYTE array I used to load the file into; for this simple test it was expedient, but if you're looking to implement this sort of thing for real, I wouldn't recommend it
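A sketch of the more conventional approach, decoding into a separately allocated buffer with the usual two-call pattern (reusing lp, bytes, and certContext from the program above; error handling and includes omitted, illustrative only):
DWORD derLen = 0;
BYTE *der;

// First call: ask CryptStringToBinaryA how many bytes the decoded DER data needs.
CryptStringToBinaryA((char *)lp, bytes, CRYPT_STRING_BASE64HEADER, NULL, &derLen, NULL, NULL);
der = (BYTE *)malloc(derLen);

// Second call: decode the PEM (including its BEGIN/END lines) into the new buffer.
CryptStringToBinaryA((char *)lp, bytes, CRYPT_STRING_BASE64HEADER, der, &derLen, NULL, NULL);

certContext = CertCreateCertificateContext(X509_ASN_ENCODING, der, derLen);
// ... use certContext, then CertFreeCertificateContext(certContext) and free(der) ...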

Visual Leak Detector reporting strange leaks in CRT module of VC++

I've just now installed Visual Leak Detector (2.3) on Windows 8. I tested it with a blank CRT program (in Visual Studio 2012) that does nothing.
#include <vld.h>

int main(int argc, char** argv)
{
    return 0;
}
When I run it, VLD reports strange leaks in the VC++ CRT module:
Visual Leak Detector Version 2.3 installed.
WARNING: Visual Leak Detector detected memory leaks!
---------- Block 31 at 0x0000000052C07530: 70 bytes ----------
Call Stack:
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\stdenvp.c (127): my_application.exe!_setenvp + 0x27 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (223): my_application.exe!__tmainCRTStartup + 0x5 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (164): my_application.exe!mainCRTStartup
0x00000000FAF8167E (File and line number not available): KERNEL32.DLL!BaseThreadInitThunk + 0x1A bytes
0x00000000FD8CC3F1 (File and line number not available): ntdll.dll!RtlUserThreadStart + 0x21 bytes
Data:
20 B5 C0 52 50 00 00 00 50 92 C0 52 50 00 00 00 ...RP... P..RP...
20 91 DD E1 F6 07 00 00 7E 00 00 00 02 00 00 00 ........ ~.......
12 00 00 00 00 00 00 00 1F 00 00 00 FD FD FD FD ........ ........
50 52 4F 43 45 53 53 4F 52 5F 4C 45 56 45 4C 3D PROCESSO R_LEVEL=
36 00 FD FD FD FD 6....... ........
---------- Block 40 at 0x0000000052C075D0: 72 bytes ----------
Call Stack:
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\stdenvp.c (127): my_application.exe!_setenvp + 0x27 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (223): my_application.exe!__tmainCRTStartup + 0x5 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (164): my_application.exe!mainCRTStartup
0x00000000FAF8167E (File and line number not available): KERNEL32.DLL!BaseThreadInitThunk + 0x1A bytes
0x00000000FD8CC3F1 (File and line number not available): ntdll.dll!RtlUserThreadStart + 0x21 bytes
Data:
F0 94 C0 52 50 00 00 00 20 76 C0 52 50 00 00 00 ...RP... .v.RP...
20 91 DD E1 F6 07 00 00 7E 00 00 00 02 00 00 00 ........ ~.......
14 00 00 00 00 00 00 00 28 00 00 00 FD FD FD FD ........ (.......
53 45 53 53 49 4F 4E 4E 41 4D 45 3D 43 6F 6E 73 SESSIONN AME=Cons
6F 6C 65 00 FD FD FD FD ole..... ........
---------- Block 41 at 0x0000000052C07620: 67 bytes ----------
Call Stack:
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\stdenvp.c (127): my_application.exe!_setenvp + 0x27 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (223): my_application.exe!__tmainCRTStartup + 0x5 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (164): my_application.exe!mainCRTStartup
0x00000000FAF8167E (File and line number not available): KERNEL32.DLL!BaseThreadInitThunk + 0x1A bytes
0x00000000FD8CC3F1 (File and line number not available): ntdll.dll!RtlUserThreadStart + 0x21 bytes
Data:
D0 75 C0 52 50 00 00 00 D0 96 C0 52 50 00 00 00 .u.RP... ...RP...
20 91 DD E1 F6 07 00 00 7E 00 00 00 02 00 00 00 ........ ~.......
0F 00 00 00 00 00 00 00 29 00 00 00 FD FD FD FD ........ ).......
53 79 73 74 65 6D 44 72 69 76 65 3D 43 3A 00 FD SystemDr ive=C:..
FD FD FD ........ ........
---------- Block 43 at 0x0000000052C07670: 65 bytes ----------
Call Stack:
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\stdenvp.c (127): my_application.exe!_setenvp + 0x27 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (223): my_application.exe!__tmainCRTStartup + 0x5 bytes
f:\dd\vctools\crt_bld\self_64_amd64\crt\src\crt0.c (164): my_application.exe!mainCRTStartup
0x00000000FAF8167E (File and line number not available): KERNEL32.DLL!BaseThreadInitThunk + 0x1A bytes
0x00000000FD8CC3F1 (File and line number not available): ntdll.dll!RtlUserThreadStart + 0x21 bytes
Data:
D0 96 C0 52 50 00 00 00 C0 76 C0 52 50 00 00 00 ...RP... .v.RP...
20 91 DD E1 F6 07 00 00 7E 00 00 00 02 00 00 00 ........ ~.......
0D 00 00 00 00 00 00 00 2B 00 00 00 FD FD FD FD ........ +.......
54 45 4D 50 3D 46 3A 5C 54 45 4D 50 00 FD FD FD TEMP=F:\ TEMP....
FD ........ ........
Visual Leak Detector detected 48 memory leaks (6044 bytes).
Largest number used: 15094 bytes.
Total allocations: 25276 bytes.
Visual Leak Detector is now exiting.
There is not much detail about this on the net; however, in this MSDN forum a comment says:
That's not really a 'leak' so much as 'preparing your environment for
execution'. It's making a writable copy of the process's environment
for programs which expect it, and they will be released when the
process exits. You can safely ignore the report.
However, I want to suppress these lines from appearing in the report (if they genuinely are not leaks).
Has anyone experienced this and knows how to sort it out?
The easiest solution apparently is to add StartDisabled=yes to the vld.ini file and then explicitly enable VLD from the first line of main(). Sure, you'll also miss "memory leaks" from your global objects, but those are usually equally harmless.
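A minimal sketch of that approach (assuming vld.h's VLDEnable() API and StartDisabled=yes in vld.ini):
#include <vld.h>

int main(int argc, char** argv)
{
    VLDEnable();  // re-enable leak detection here, skipping the CRT start-up allocations

    // ... rest of the program ...
    return 0;
}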
I found this was a bug in VLD 2.3. Details of the bug are here. I just downloaded v2.4rc2 (vld-2.4rc2-setup.exe) and the issue no longer seems to appear. (I had downloaded v2.3 because it was marked stable.)

What do these bytes do?

This is the hexdump of a black 1x1 PNG made in Gimp and exported with minimal information:
89 50 4E 47 0D 0A 1A 0A 00 00 00 0D 49 48 44 52
00 00 00 01 00 00 00 01 08 02 00 00 00 90 77 53
DE 00 00 00 0C 49 44 41 54 08 D7 63 60 60 60 00
00 00 04 00 01 27 34 27 0A 00 00 00 00 49 45 4E
44 AE 42 60 82
Now after reading the specification I am quite sure what most of them mean, except for bytes 30-34 between the IHDR and IDAT chunk: 90 77 53 DE
Can someone enlighten me?
Those bytes are the CRC checksum of the preceding chunk. See the official specification: 5 Datastream structure for a general overview, and in particular 5.3 Chunk layout.
A CRC is calculated for, and appended to, each chunk:
A four-byte CRC (Cyclic Redundancy Code) calculated on the preceding bytes in the chunk, including the chunk type field and chunk data fields, but not including the length field. The CRC can be used to check for corruption of the data. The CRC is always present, even for chunks containing no data.
Here is your 1x1 pixel image, annotated byte for byte. Right after the data of each of the chunks IHDR, IDAT, and IEND is a CRC for the preceding data.
File: test.png
89 50 4E 47 0D 0A 1A 0A
Header 0x89 "PNG" CR LF ^Z LF checks out okay
===========
00 00 00 0D
49 48 44 52
00 00 00 01 00 00 00 01 08 02 00 00 00
90 77 53 DE
block: "IHDR", 13 bytes [49484452]
Width: 1
Height: 1
Bit depth: 8
Color type: 2 = Color
(Bits per pixel: 8)
(Bytes per pixel: 3)
Compression method: 0
Filter method: 0
Interlace method: 0 (none)
CRC: 907753DE
===========
00 00 00 0C
49 44 41 54
08 D7 63 60 60 60 00 00 00 04 00 01
27 34 27 0A
block: "IDAT", 12 bytes [49444154]
expanded result: 4 (as expected)
(Row 0 Filter:0)
decompresses into
00 00 00 00
CRC: 2734270A
===========
00 00 00 00
49 45 4E 44
AE 42 60 82
block: "IEND", 0 bytes [49454E44]
CRC: AE426082
The IDAT data decompresses into four 0's: the first one is the row filter (0, meaning 'none') and the next 3 bytes are the Red, Green, and Blue values for the single pixel.
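To double-check such a value yourself, you can recompute the CRC over the chunk type and data bytes. A small sketch using the CRC-32 algorithm described in the PNG specification:
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* CRC-32 as used by PNG (reflected, polynomial 0xEDB88320). */
static uint32_t png_crc(const uint8_t *buf, size_t len)
{
    uint32_t c = 0xFFFFFFFFu;
    for (size_t n = 0; n < len; n++) {
        c ^= buf[n];
        for (int k = 0; k < 8; k++)
            c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : c >> 1;
    }
    return c ^ 0xFFFFFFFFu;
}

int main(void)
{
    /* "IHDR" chunk type followed by its 13 data bytes from the dump above. */
    const uint8_t ihdr[] = { 'I','H','D','R',
                             0x00,0x00,0x00,0x01, 0x00,0x00,0x00,0x01,
                             0x08,0x02,0x00,0x00,0x00 };
    printf("%08" PRIX32 "\n", png_crc(ihdr, sizeof ihdr));  /* should print 907753DE */
    return 0;
}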

How to read binary blocks of a MIFARE card?

I am developing an application which reads an NFC card from the reader.
I know the command for reading a binary block looks like this:
FF B0 00 04 10
04 selects block 4 and 10 requests 16 bytes of data. My card holds the data "TEST009996".
I run 5 commands to read binary blocks 4 through 8:
FF B0 00 04 10
FF B0 00 05 10
FF B0 00 06 10
FF B0 00 07 10
FF B0 00 08 10
I got the following results:
T☻enTEÉ ☺
T☻enTEST00É
T☻enTEST009996É
enTEST009996■ 6É
ST009996■ 6 É
or in hexadecimal:
01 03 A0 10 44 03 11 D1 01 0D 54 02 65 6E 48 43 90 00
44 03 11 D1 01 0D 54 02 65 6E 48 43 49 44 30 30 90 00
01 0D 54 02 65 6E 48 43 49 44 30 30 39 39 39 36 90 00
65 6E 48 43 49 44 30 30 39 39 39 36 FE 00 00 36 90 00
49 44 30 30 39 39 39 36 FE 00 00 36 00 00 00 00 90 00
Should I write an algorithm to cut the results apart to get the data? Is there a better way?
Source:
http://downloads.acs.com.hk/drivers/en/API-ACR122U-2.02.pdf
It seems that your tag is an NFC Forum Type 2 Tag (find the NFC Forum Type 2 Tag Operation specification on the NFC Forum website). As you mention MIFARE this could, for instance, be a MIFARE Ultralight, MIFARE Ultralight C or NTAG tag.
A block on a Type 2 Tag consists of 4 bytes. The read command reads 4 blocks at a time. So the read command gives you 4 blocks (4 bytes each) starting at a given block offset plus a status word for the read command (0x9000 for success). In your case you get:
Read(4, 16): 0103A010 440311D1 010D5402 656E4843 9000
Read(5, 16): 440311D1 010D5402 656E4843 49443030 9000
Read(6, 16): 010D5402 656E4843 49443030 39393936 9000
Read(7, 16): 656E4843 49443030 39393936 FE000036 9000
Read(8, 16): 49443030 39393936 FE000036 00000000 9000
Consequently, the memory of your tag looks like this:
0103A010
440311D1
010D5402
656E4843
49443030
39393936
FE000036
00000000
A Type 2 Tag contains a series of tag-length-value (TLV) structures (by the way, in order to make sure this tag actually conforms to the Type 2 Tag Operation specification, you would also need to read the capability container, which is located in block 3):
01 (Tag: Lock Control TLV)
03 (Length: 3 bytes)
A0 10 44 (Value: Information on position and function of lock bytes)
03 (Tag: NDEF Message TLV)
11 (Length: 17 bytes)
D1010D5402656E48434944303039393936 (Value: NDEF message)
FE (Tag: Terminator TLV; has no length field)
So your tag contains the NDEF message
D1010D5402656E48434944303039393936
This translates to
D1 (Header byte of record 1)
- Message begin is set (= first record of an NDEF message)
- Message end is set (= last record of an NDEF message)
- Short record flag is set (= Payload length field consists of 1 byte only)
- Type Name Format = 0x1 (= Type field contains an NFC Forum well-known type)
01 (Type length: 1 byte)
0D (Payload length: 13 bytes)
54 (Type: "T")
02656E48434944303039393936 (Payload field)
The payload field of a NFC Forum Text record decodes like this:
02 (Status byte: Text is UTF-8 encoded, Language code has a length of 2 bytes)
656E (Language code: "en")
48434944303039393936 (Text: "TEST009996")
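So yes, a small amount of parsing is needed, but it can follow the TLV and NDEF structure directly rather than cutting at fixed offsets. A sketch that walks the reconstructed memory above and prints the text payload (assumes 1-byte TLV length fields and a single short Text record, as on this tag):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* User memory from the reads above (blocks 4..11). */
    const uint8_t mem[] = {
        0x01,0x03,0xA0,0x10, 0x44,0x03,0x11,0xD1,
        0x01,0x0D,0x54,0x02, 0x65,0x6E,0x48,0x43,
        0x49,0x44,0x30,0x30, 0x39,0x39,0x39,0x36,
        0xFE,0x00,0x00,0x36, 0x00,0x00,0x00,0x00
    };

    size_t i = 0;
    while (i < sizeof mem) {
        uint8_t tag = mem[i++];
        if (tag == 0x00) continue;            /* NULL TLV: skip */
        if (tag == 0xFE) break;               /* Terminator TLV: done */
        uint8_t len = mem[i++];               /* assume 1-byte length field */
        if (tag == 0x03) {                    /* NDEF Message TLV */
            const uint8_t *rec = &mem[i];     /* rec[0] = record header, e.g. 0xD1 */
            uint8_t type_len = rec[1];
            uint8_t payload_len = rec[2];     /* short record: 1-byte payload length */
            const uint8_t *payload = &rec[3 + type_len];
            if (type_len == 1 && rec[3] == 'T') {      /* Text record */
                uint8_t lang_len = payload[0] & 0x3F;  /* status byte: language code length */
                printf("Text: %.*s\n", (int)(payload_len - 1 - lang_len),
                       (const char *)payload + 1 + lang_len);
            }
        }
        i += len;                             /* skip over the TLV value */
    }
    return 0;
}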
