I can't understand what "slot" means here.
Is it the device number from the DBDF (Domain:Bus:Device:Function) address?
The code is from here:
https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/pci.h#L32
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
* in a single byte as follows:
*
* 7:3 = slot
* 2:0 = function
*/
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
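For illustration, here is a minimal standalone sketch (the macros are copied from the header quoted above): in a Domain:Bus:Device.Function address such as 0000:00:1f.3, the "slot" is the device number and the final digit is the function, and devfn packs those two into a single byte.

#include <stdio.h>
#include <stdint.h>

/* Macros copied from include/uapi/linux/pci.h (quoted above). */
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)       ((devfn) & 0x07)

int main(void)
{
    /* The device at 0000:00:1f.3 has device (slot) number 0x1f, function 3. */
    uint8_t devfn = PCI_DEVFN(0x1f, 3);
    printf("devfn=0x%02x slot=0x%02x func=%u\n",
           devfn, PCI_SLOT(devfn), PCI_FUNC(devfn)); /* devfn=0xfb slot=0x1f func=3 */
    return 0;
}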
Hello everyone, I am finishing the last section of the firmware for my NFC project. I am attempting to communicate with an AD-740 NFC tag, which uses NXP's MF0ULx1 MIFARE Ultralight EV1 contactless ticket IC. The NFC reader I am using is NXP's CLRC663, and I am controlling it from a PSoC 5LP device over SPI.
Now that you have all of the background information, here is my question.
For this particular NXP read method I need to compute a 2-byte CRC over my Cmd and Addr bytes. The datasheet, which I can link to, references ISO/IEC 14443. Searching for that in Google brings me to the Wikipedia page, which lists the standard's four parts as PDFs. I am assuming that I need part 4, the transmission protocol part. The only problem is that this PDF is behind a paywall. Is this intentional?
Scrounging around on the internet, I have found a few code examples that may save me from purchasing a $170 PDF just to look at a polynomial...
Code example 1 - I can post source link in comments. Don't have rep to do it in main post.
// Calculate an ISO 14443a CRC. Code translated from the code in
// iso14443a_crc().
func ISO14443aCRC(data []byte) [2]byte {
	crc := uint32(0x6363)
	for _, bt := range data {
		bt ^= uint8(crc & 0xff)
		bt ^= bt << 4
		bt32 := uint32(bt)
		crc = (crc >> 8) ^ (bt32 << 8) ^ (bt32 << 3) ^ (bt32 >> 4)
	}
	return [2]byte{byte(crc & 0xff), byte((crc >> 8) & 0xff)}
}
Code example 2 - I can post source link in comments. Don't have rep to do it in main post.
void iso14443a_crc(byte_t* pbtData, size_t szLen, byte_t* pbtCrc)
{
    byte_t bt;
    uint32_t wCrc = 0x6363;

    do {
        bt = *pbtData++;
        bt = (bt ^ (byte_t)(wCrc & 0x00FF));
        bt = (bt ^ (bt << 4));
        wCrc = (wCrc >> 8) ^ ((uint32_t)bt << 8) ^ ((uint32_t)bt << 3) ^ ((uint32_t)bt >> 4);
    } while (--szLen);

    *pbtCrc++ = (byte_t)(wCrc & 0xFF);
    *pbtCrc = (byte_t)((wCrc >> 8) & 0xFF);
}
And now on to my final question with all of this information... Would it be safe to assume that:
CRC polynomial is: 0x6363
Seed Value is: 0x00FF
A visual representation can be seen here
I've found a few helpful resources in calculating CRC_A for ISO/IEC 14443:
Online calculator by Pavel Zhovner
Generic CRC calculator
Libnfc contains implementations for CRC_A and CRC_B as:
iso14443a_crc in iso14443-subr.c
iso14443b_crc in iso14443-subr.c
Each method also has a variant to append it to a byte array
For posterity, here's libnfc's implementation of iso14443a_crc:
void
iso14443a_crc(uint8_t *pbtData, size_t szLen, uint8_t *pbtCrc)
{
    uint32_t wCrc = 0x6363;

    do {
        uint8_t bt;
        bt = *pbtData++;
        bt = (bt ^ (uint8_t)(wCrc & 0x00FF));
        bt = (bt ^ (bt << 4));
        wCrc = (wCrc >> 8) ^ ((uint32_t) bt << 8) ^ ((uint32_t) bt << 3) ^ ((uint32_t) bt >> 4);
    } while (--szLen);

    *pbtCrc++ = (uint8_t)(wCrc & 0xFF);
    *pbtCrc = (uint8_t)((wCrc >> 8) & 0xFF);
}
The ISO 14443-A (CRC_A) polynomial is 0x8408 (the bit-reversed form of 0x1021), and the initial value is 0x6363.
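To make that concrete, here is a minimal bit-by-bit sketch of CRC_A using the reflected polynomial 0x8408 and initial value 0x6363. This is my own illustration, not the libnfc code; the expected output 0xBF05 for the string "123456789" is the catalogued check value for CRC-16/ISO-IEC-14443-3-A.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bit-by-bit CRC_A: reflected polynomial 0x8408 (0x1021 bit-reversed),
 * initial value 0x6363, no final XOR, transmitted low byte first. */
static uint16_t crc_a(const uint8_t *data, size_t len)
{
    uint16_t crc = 0x6363;
    while (len--) {
        crc ^= *data++;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
    }
    return crc;
}

int main(void)
{
    const uint8_t msg[] = "123456789";
    printf("0x%04X\n", crc_a(msg, 9)); /* expect 0xBF05 */
    return 0;
}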
I have read this page regarding how nice levels work: http://oakbytes.wordpress.com/2012/06/06/linux-scheduler-cfs-and-nice/
Does anyone know the file within the kernel code-base where the formula "1024 / (1.25)^ (nice)" is implemented to assign the weight of a process?
Although the calculation referenced in the question is not used as-is (the kernel uses only integer math, with few or no exceptions), the nice syscall is implemented here. After some sanity and security checking, it ultimately calls set_user_nice(), which uses macros to convert the user-specified nice value to a kernel priority value.
void set_user_nice(struct task_struct *p, long nice)
{
    int old_prio, delta, queued;
    unsigned long flags;
    struct rq *rq;

    if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
        return;
    /*
     * We have to be careful, if called from sys_setpriority(),
     * the task might be in the middle of scheduling on another CPU.
     */
    rq = task_rq_lock(p, &flags);
    /*
     * The RT priorities are set via sched_setscheduler(), but we still
     * allow the 'normal' nice value to be set - but as expected
     * it wont have any effect on scheduling until the task is
     * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
     */
    if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
        p->static_prio = NICE_TO_PRIO(nice);
        goto out_unlock;
    }
    queued = task_on_rq_queued(p);
    if (queued)
        dequeue_task(rq, p, 0);

    p->static_prio = NICE_TO_PRIO(nice);
    set_load_weight(p);
    old_prio = p->prio;
    p->prio = effective_prio(p);
    delta = p->prio - old_prio;

    if (queued) {
        enqueue_task(rq, p, 0);
        /*
         * If the task increased its priority or is running and
         * lowered its priority, then reschedule its CPU:
         */
        if (delta < 0 || (delta > 0 && task_running(rq, p)))
            resched_curr(rq);
    }
out_unlock:
    task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
The macros are defined here:
#define MAX_NICE 19
#define MIN_NICE -20
#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
* tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
* values are inverted: lower p->prio value means higher priority.
*
* The MAX_USER_RT_PRIO value allows the actual maximum
* RT priority to be separate from the value exported to
* user-space. This allows kernel threads to set their
* priority to a value higher than any user task. Note:
* MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
*/
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
* and back.
*/
#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
/*
* 'User priority' is the nice value converted to something we
* can work with better when scaling various scheduler parameters,
* it's a [ 0 ... 39 ] range.
*/
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
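Plugging in the numbers: MAX_RT_PRIO is 100 and NICE_WIDTH is 40, so DEFAULT_PRIO is 120 and the nice range [-20, 19] maps onto static priorities [100, 139]. A quick standalone check of the conversion macros:

#include <stdio.h>

#define MAX_NICE         19
#define MIN_NICE        (-20)
#define NICE_WIDTH      (MAX_NICE - MIN_NICE + 1)
#define MAX_RT_PRIO      100
#define DEFAULT_PRIO    (MAX_RT_PRIO + NICE_WIDTH / 2)
#define NICE_TO_PRIO(n) ((n) + DEFAULT_PRIO)

int main(void)
{
    printf("%d %d %d\n",
           NICE_TO_PRIO(MIN_NICE),  /* 100 */
           NICE_TO_PRIO(0),         /* 120 */
           NICE_TO_PRIO(MAX_NICE)); /* 139 */
    return 0;
}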
See this document for an explanation of the design.
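As for the weight itself: set_load_weight() above does not evaluate 1024 / 1.25^nice at runtime; it indexes a precomputed integer table (named prio_to_weight[] in older kernels and sched_prio_to_weight[] in newer ones, under kernel/sched/). A throwaway sketch showing that the table entries approximate the formula; the reference entries in the comment are from that kernel table:

#include <math.h>
#include <stdio.h>

/* Reference entries from the kernel's prio_to_weight[] table:
 * nice -20 -> 88761, nice -1 -> 1277, nice 0 -> 1024,
 * nice 1 -> 820, nice 19 -> 15. */
int main(void)
{
    for (int nice = -20; nice <= 19; nice++)
        printf("nice %3d  ~weight %6.0f\n", nice, 1024.0 / pow(1.25, nice));
    return 0;
}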
I checked the VolumeID tool from the Microsoft TechNet forum and the "Hard Disk Serial Number Change" tool from http://www.xboxharddrive.com/freeware.html.
But these tools only offer to change an existing VolumeID. Is there a safe way to generate a new one without conflicting with the VolumeIDs of other logical drives that may exist on the same PC?
I am assuming that you want to set the volume serial number programmatically.
A Volume Serial Number (VSN) is generated based on the current date/time. Exact implementation details may differ per OS version and/or the tool used for the format.
See the following links for more info:
Volume Serial Numbers and Format Date/Time Verification
Rufus Source Code
From the Rufus source code:
/*
 * 28.2  CALCULATING THE VOLUME SERIAL NUMBER
 *
 * For example, say a disk was formatted on 26 Dec 95 at 9:55 PM and 41.94
 * seconds. DOS takes the date and time just before it writes it to the
 * disk.
 *
 * Low order word is calculated:       Volume Serial Number is:
 *     Month & Day         12/26       0c1ah
 *     Sec & Hundredths    41:94       295eh    3578:1d02
 *                                     -----
 *                                     3578h
 *
 * High order word is calculated:
 *     Hours & Minutes     21:55       1537h
 *     Year                1995        07cbh
 *                                     -----
 *                                     1d02h
 */
static DWORD GetVolumeID(void)
{
    SYSTEMTIME s;
    DWORD d;
    WORD lo, hi, tmp;

    GetLocalTime(&s);

    lo = s.wDay + (s.wMonth << 8);
    tmp = (s.wMilliseconds / 10) + (s.wSecond << 8);
    lo += tmp;

    hi = s.wMinute + (s.wHour << 8);
    hi += s.wYear;

    d = lo + (hi << 16);
    return d;
}
This translates to the following Delphi code:
type
  TVolumeId = record
    case byte of
      0: (Id: DWORD);
      1: (
        Lo: WORD;
        Hi: WORD;
      );
  end;

function GetVolumeID: DWORD;
var
  vlid: TVolumeId;
  st: SYSTEMTIME;
begin
  GetLocalTime(st);
  vlid.Lo := st.wDay + (st.wMonth shl 8);
  vlid.Lo := vlid.Lo + (st.wMilliseconds div 10 + (st.wSecond shl 8));
  vlid.Hi := st.wMinute + (st.wHour shl 8);
  vlid.Hi := vlid.Hi + st.wYear;
  Result := vlid.Id;
end;
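As a quick sanity check of the worked example in the comment (26 Dec 95, 21:55 and 41.94 seconds), a throwaway C snippet:

#include <stdio.h>

int main(void)
{
    unsigned lo = (26 + (12 << 8)) + (94 + (41 << 8)); /* 0x0c1a + 0x295e */
    unsigned hi = (55 + (21 << 8)) + 1995;             /* 0x1537 + 0x07cb */
    printf("%04x:%04x -> id %08x\n", lo, hi, lo + (hi << 16));
    /* prints: 3578:1d02 -> id 1d023578 */
    return 0;
}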
I recently saw a declaration of an enum that looks like this:
<Serializable()>
<Flags()>
Public Enum SiteRoles
ADMIN = 10 << 0
REGULAR = 5 << 1
GUEST = 1 << 2
End Enum
I was wondering if someone could explain what the "<<" syntax does or what it is used for? Thank you...
The enum has a Flags attribute, which means the values are used as bit flags.
Bit flags are useful for representing more than one attribute in a single variable.
These are the flags for a 16-bit (attribute) variable (hopefully you see the pattern, which can continue to any number of bits, limited by the platform/variable type of course):
BIT1 = 0x1 (1 << 0)
BIT2 = 0x2 (1 << 1)
BIT3 = 0x4 (1 << 2)
BIT4 = 0x8 (1 << 3)
BIT5 = 0x10 (1 << 4)
BIT6 = 0x20 (1 << 5)
BIT7 = 0x40 (1 << 6)
BIT8 = 0x80 (1 << 7)
BIT9 = 0x100 (1 << 8)
BIT10 = 0x200 (1 << 9)
BIT11 = 0x400 (1 << 10)
BIT12 = 0x800 (1 << 11)
BIT13 = 0x1000 (1 << 12)
BIT14 = 0x2000 (1 << 13)
BIT15 = 0x4000 (1 << 14)
BIT16 = 0x8000 (1 << 15)
To set a bit (attribute) you simply use the bitwise or operator:
UInt16 flags;
flags |= BIT1; // set bit (Attribute) 1
flags |= BIT13; // set bit (Attribute) 13
To determine if a bit (attribute) is set you simply use the bitwise and operator:
bool bit1 = (flags & BIT1) > 0; // true;
bool bit13 = (flags & BIT13) > 0; // true;
bool bit16 = (flags & BIT16) > 0; // false;
In your example above, ADMIN and REGULAR have the same value, 10 (binary 1010, i.e. BIT2 and BIT4 set), since (10 << 0) and (5 << 1) are both 10; GUEST is 4 (binary 0100, i.e. BIT3).
Therefore you could determine the SiteRole by using the bitwise AND operator, as shown above:
UInt32 SiteRole = ...;
IsAdmin = (SiteRole & ADMIN) > 0;
IsRegular = (SiteRole & REGULAR) > 0;
IsGuest = (SiteRole & GUEST) > 0;
Of course, you can also set the SiteRole by using the bitwise OR operator, as shown above:
UInt32 SiteRole = 0x00000000;
SiteRole |= ADMIN;
The real question is why do ADMIN and REGULAR have the same values? Maybe it's a bug.
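To see the collision concretely, here is a quick C translation of the enum (a sketch, not the original VB.NET):

#include <stdio.h>

enum SiteRoles {
    ADMIN   = 10 << 0, /* 10 = binary 1010 */
    REGULAR = 5  << 1, /* also 10 = binary 1010 */
    GUEST   = 1  << 2  /*  4 = binary 0100 */
};

int main(void)
{
    unsigned role = ADMIN; /* setting ADMIN also makes the REGULAR test true */
    printf("ADMIN=%d REGULAR=%d GUEST=%d\n", ADMIN, REGULAR, GUEST);
    printf("IsAdmin=%d IsRegular=%d IsGuest=%d\n",
           (role & ADMIN) > 0, (role & REGULAR) > 0, (role & GUEST) > 0);
    return 0;
}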
These are bitwise shift operations. Bitwise shifts are used here to transform the integer value of each enum member into a different number; each member actually ends up with the bit-shifted value. This is probably an obfuscation technique, and it is the same as setting a fixed integer value for each enum member.
Every integer has a binary representation (like 0111011); bit shifting moves those bits to the left (<<) or right (>>), depending on which operator is used.
For example:
10 << 0 means:
1010 (10 in binary) shifted left by 0 bits is still 1010.
5 << 1 means:
101 (5 in binary) shifted left by one bit = 1010 (a zero is appended on the right),
so 5 << 1 is 10 (because 1010 represents the number 10),
and so on.
In general, the x << y operation can be seen as a fast way to calculate x * Pow(2, y).
You can read this article for more detailed info on bit shifting in .NET http://www.blackwasp.co.uk/CSharpShiftOperators.aspx
I have some 12-bit RGB image data: each of R, G, B is 12 bits, for 36 bits per pixel in total.
Now I need to combine this 12-bit RGB data into a packed format. I have tried to describe the packing below.
At present my input data is:
B0 (12 bits), G0 (12 bits), R0 (12 bits), B1 (12 bits), G1 (12 bits), R1 (12 bits), and so on.
I need to convert it to a packed format as:
Byte1 - B8 (8 bits of B0 data)
Byte2 - G4B4 (remaining 4 bits of B0 data+ first 4 bits of G0)
Byte3 - G8 (remaining 8 bits of G0)
Byte4 - R8 (first 8 bits of R0)
Byte5 - B4R4 (first 4 bits of B1 + last 4 bits of R0)
I have to write these individual bytes to a file in text format, one byte below another.
I have to do a similar thing for 10-bit RGB input data.
Is there any tool/software that can do the conversion I am looking for?
I am trying to do it in a C program: I form a 64-bit value from the individual 12 bits of R, G, and B (36 bits total), but after that I am not able to come up with the logic to pick the necessary bits from the R, G, B data to form a byte stream and dump them to a text file.
Any pointers will be helpful.
This is pretty much untested, super messy code I whipped together to give you a start. It's probably not packing the bytes exactly as you want, but you should get the general idea.
Apologies for the quick and nasty code; I only had a couple of minutes. Hope it's of some help anyway.
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned short B;
    unsigned short G;
    unsigned short R;
} UnpackedRGB;

UnpackedRGB test[] =
{
    {0x0FFF, 0x000, 0x0EEE},
    {0x000, 0x0FEF, 0xDEF},
    {0xFED, 0xDED, 0xFED},
    {0x111, 0x222, 0x333},
    {0xA10, 0xB10, 0xC10}
};

UnpackedRGB buffer = {0, 0, 0};

int main(int argc, char** argv)
{
    int numSourcePixels = sizeof(test)/sizeof(UnpackedRGB);
    /* 4.5 bytes per pixel; round up to the last byte */
    int destbytes = ((numSourcePixels * 45) + 5) / 10;
    unsigned char* dest = (unsigned char*)malloc(destbytes);
    unsigned char* currentDestByte = dest;
    UnpackedRGB *pixel1;
    UnpackedRGB *pixel2;
    int ixSource;
    for (ixSource = 0; ixSource < numSourcePixels; ixSource += 2)
    {
        pixel1 = &test[ixSource];
        /* pad a trailing odd pixel with zeros */
        pixel2 = ((ixSource + 1) < numSourcePixels ? &test[ixSource + 1] : &buffer);
        *currentDestByte++ = pixel1->B & 0x0FF;                                     /* B0 bits 0-7      */
        *currentDestByte++ = ((pixel1->B >> 8) & 0x0F) | ((pixel1->G & 0x0F) << 4); /* B0 8-11, G0 0-3  */
        *currentDestByte++ = (pixel1->G >> 4) & 0x0FF;                              /* G0 bits 4-11     */
        *currentDestByte++ = pixel1->R & 0x0FF;                                     /* R0 bits 0-7      */
        *currentDestByte++ = ((pixel1->R >> 8) & 0x0F) | ((pixel2->B & 0x0F) << 4); /* R0 8-11, B1 0-3  */
        if ((ixSource + 1) >= numSourcePixels)
        {
            break;
        }
        *currentDestByte++ = (pixel2->B >> 4) & 0x0FF;                              /* B1 bits 4-11     */
        *currentDestByte++ = pixel2->G & 0x0FF;                                     /* G1 bits 0-7      */
        *currentDestByte++ = ((pixel2->G >> 8) & 0x0F) | ((pixel2->R & 0x0F) << 4); /* G1 8-11, R1 0-3  */
        *currentDestByte++ = (pixel2->R >> 4) & 0x0FF;                              /* R1 bits 4-11     */
    }
    FILE* outfile = fopen("output.bin", "wb");
    fwrite(dest, 1, destbytes, outfile);
    fclose(outfile);
    free(dest);
    return 0;
}
Use the bitwise & (AND), | (OR), and shift (<<, >>) operators.
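For instance, a minimal sketch of pulling B0 back out of the first two packed bytes, assuming the nibble layout used in the answer above:

#include <stdio.h>

int main(void)
{
    unsigned char byte0 = 0xFF, byte1 = 0xAF; /* packed: B0 = 0xFFF, G0 low nibble = 0xA */
    /* low 8 bits of B0 come from byte0, high 4 bits from the low nibble of byte1 */
    unsigned short B0 = byte0 | ((byte1 & 0x0F) << 8);
    printf("B0 = 0x%03X\n", B0); /* prints B0 = 0xFFF */
    return 0;
}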