Unable to read Serial-Port (UART) - linux-kernel

Below code is opening the second serial port and I am trying to read and write to it.Now I am using the console functionalities on the first port ( Tera Term Console ) to see the logs there (printf or dmesg ).
But I am not able to read from the port. The console hangs.
int fd; /* File descriptor for the port */
#define BUFF_SIZE 1024
struct termios options;
char to_write[1024];
char to_read[1024];
int bytes_written;
int init_uart()
{
tcgetattr(fd, &options);
/* Set Baud Rate */
cfsetispeed( &options, B115200 );
cfsetospeed( &options, B115200 );
// Set the Charactor size
options.c_cflag &= ~CSIZE; /* Mask the character size bits */
options.c_cflag |= CS8; /* Select 8 data bits */
// Set parity - No Parity (8N1)
options.c_cflag &= ~PARENB;/*no parity bit*/
options.c_cflag &= ~CSTOPB;/* One bit stop bit*/
options.c_cflag &= ~CSIZE;
options.c_cflag |= CS8;/* 8 Bits Character length*/
// Disable Hardware flowcontrol
options.c_cflag &= ~CRTSCTS;
// Enable Raw Input
options.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG);
// Disable Software Flow control
options.c_iflag &= ~(IXON | IXOFF | IXANY);
// Chose raw (not processed) output
options.c_oflag &= ~OPOST;
if ( tcsetattr( fd, TCSANOW, &options ) == -1 ){
printf ("Error with tcsetattr = %s\n", strerror ( errno ) );
return -1;
}
return 0;
}
int write_uart()
{
int i=0;
while (i < BUFF_SIZE-2){
to_write[i]='a';
i++;
}
to_write[i]='\n';
to_write[i+1]='\r';
// Write to the port
bytes_written = write(fd, to_write, BUFF_SIZE);
if(bytes_written < BUFF_SIZE){
fputs("write() of 1024 bytes failed!\n", stderr);
return -1;
}
return 0;
}
int
read_port(void)
{
int n = read(fd, to_read, sizeof(BUFF_SIZE));
if (n < 1024){
fputs("read failed!\n", stderr);
return -1;
}
return 0;
}
int main()
{
int i=0;
fd = open("/dev/ttyS1",O_RDWR | O_NOCTTY);
if(fd == -1)
perror("open_port: Unable to open /dev/ttyS1\n");
else
printf("ttyS0 Opened successfully\n");
if(init_uart())
perror("open_port: Unable to initialize /dev/ttyS0 Port\n");
else
printf("Port Initialization is done successfuly\n");
if(write_uart())
perror("write_port: Unable to write to /dev/ttyS0 Port\n");
else
printf("Write to the port happened successfully\n");
if(read_port())
perror("read_port: Unable to read from /dev/ttyS0 Port\n");
else
printf("Read to the port happened successfully\n");
close(fd);
return 0;
}

#randomization, I think you have to configure control chars i.e. c_cc[VTIME] and c_cc[VMIN].
VTIME: The timer is started when read is called. read returns either when at least one byte of data is available, or when the timer expires. If the timer expires without any input becoming available, read returns 0.
VMIN: read blocks until the lesser of MIN bytes or the number of bytes requested are available, and returns the lesser of these two values.
Try configuring VMIN and VTIME and test your application. You can configure either of them. Will find good description about these things in man pages i.e. #man termios.

Related

Is it normal for winapi to take around 70ms to read from serial port?

The true time it takes from when I send the first bit to a serial port to when I receive the last bit it pings back I measured to be 6ms but ReadFile takes around 70-80ms. I'm wondering if this is expected, is this just Windows or is it my code at fault? Here's the function to send and read from the serial port, in my main I have declared and initialized the HANDLE and called that function.
int sendBytes(char* command, char* COM, HANDLE hSerial, int read) {
BOOL Write_Status;
DCB dcbSerialParams = { 0 }; // Initializing DCB structure
dcbSerialParams.DCBlength = sizeof(dcbSerialParams);
Write_Status = GetCommState(hSerial, &dcbSerialParams); //retreives the current settings
if (Write_Status == FALSE) {
printf("\n Error! in GetCommState()");
CloseHandle(hSerial);
return 1;
}
dcbSerialParams.BaudRate = CBR_57600;
dcbSerialParams.ByteSize = 8;
dcbSerialParams.StopBits = ONESTOPBIT;
dcbSerialParams.Parity = NOPARITY;
Write_Status = SetCommState(hSerial, &dcbSerialParams); //Configuring the port according to settings in DCB
if (Write_Status == FALSE)
{
CloseHandle(hSerial);
return 1;
}
///*----------------------------- Writing a Character to Serial Port----------------------------------------*/
int length = strlen(command);
char send[20];
strcpy(send, command);
send[length + 1] = 13;
send[length + 2] = 10;
DWORD dNoOFBytestoWrite; // No of bytes to write into the port
DWORD dNoOfBytesWritten = 0; // No of bytes written to the port
dNoOFBytestoWrite = length + 2; // Calculating the no of bytes to write into the port
if (!WriteFile(hSerial, send, dNoOFBytestoWrite, &dNoOfBytesWritten, NULL))
printf("Error writing text to %s\n", COM);
if (read) {
int maxChars = 100;
BOOL Read_Status; // Status of the various operations
DWORD dwEventMask; // Event mask to trigger
char SerialBuffer[100]; // Buffer Containing Rxed Data
DWORD NoBytesRead; // Bytes read by ReadFile()
///*------------------------------------ Setting Receive Mask ----------------------------------------------*/
Read_Status = SetCommMask(hSerial, EV_RXCHAR); //Configure Windows to Monitor the serial device for Character Reception
if (Read_Status == FALSE)
printf("\n\n Error! in Setting CommMask");
// else
// printf("\n\n Setting CommMask successfull");
///*------------------------------------ Setting WaitComm() Event ----------------------------------------*/
// printf("\n\n Waiting for Data Reception");
Read_Status = WaitCommEvent(hSerial, &dwEventMask, NULL); //Wait for the character to be received
// /*-------------------------- Program will Wait here till a Character is received ------------------------*/
if (Read_Status == FALSE)
{
printf("\n Error! in Setting WaitCommEvent()");
}
else //If WaitCommEvent()==True Read the RXed data using ReadFile();
{
// printf("\n\n Characters Received \t");
clock_t begin = clock();
if (!ReadFile(hSerial, SerialBuffer, 24, &NoBytesRead, NULL))
{
printf("wrong character");
return 1;
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("time : %f\n",time_spent);
}
}
}
This is not how you measure timing with sub-second precision:
clock_t begin = clock();
// stuff
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
This is how you measure timing:
LARGE_INTEGER before, after, frequency;
QueryPerformanceCounter(&before);
// stuff
QueryPerformanceCounter(&after);
QueryPerformanceFrequency(&frequency);
double time_spent = (after.QuadPart - before.QuadPart) / (double)frequency.QuadPart;
CLOCKS_PER_SEC is imprecise, and then clock() can be even worse, often as bad as the scheduler quantum which is typically 10ms or 15ms.

Why does ioctl FIONREAD from /dev/null return 0 on Mac OS X and a random number on Linux?

I've come across something that seems strange while adding tests to a project I'm working on - I have been using /dev/null as a serial port and not expecting any data to be available for reading.
However, on LINUX there is always data available, and on Mac OS X after a call to srand() there is data available.
Can someone help explain this behaviour?
Here is a minimum viable test C++
#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
int open_serial(const char *device) {
speed_t bd = B115200;
int fd;
int state;
struct termios config;
if ((fd = open(device, O_NDELAY | O_NOCTTY | O_NONBLOCK | O_RDWR)) == -1)
return -1;
fcntl(fd, F_SETFL, O_RDWR);
tcgetattr(fd, &config);
cfmakeraw(&config);
cfsetispeed(&config, bd);
cfsetospeed(&config, bd);
config.c_cflag |= (CLOCAL | CREAD);
config.c_cflag &= ~(CSTOPB | CSIZE | PARENB);
config.c_cflag |= CS8;
config.c_lflag &= ~(ECHO | ECHOE | ICANON | ISIG);
config.c_oflag &= ~OPOST;
config.c_cc[VMIN] = 0;
config.c_cc[VTIME] = 50; // 5 seconds reception timeout
tcsetattr(fd, TCSANOW, &config);
ioctl(fd, TIOCMGET, &state);
state |= (TIOCM_DTR | TIOCM_RTS);
ioctl(fd, TIOCMSET, &state);
usleep(10000); // Sleep for 10 milliseconds
return fd;
};
int serial_data_available(const int fd) {
int result;
ioctl(fd, FIONREAD, &result);
return result;
};
int main() {
int fd = open_serial("/dev/null");
printf("Opened /dev/null - FD: %d\n", fd);
printf("Serial data available : %d\n", serial_data_available(fd));
printf("Serial data available : %d\n", serial_data_available(fd));
printf("Calling srand()\n");
srand(1234);
printf("Serial data available : %d\n", serial_data_available(fd));
printf("Serial data available : %d\n", serial_data_available(fd));
return 0;
}
Under Mac OS X the output is as follows :-
Opened /dev/null - FD: 3
Serial data available : 0
Serial data available : 0
Calling srand()
Serial data available : 148561936
Serial data available : 0
On Linux I get the following :-
Opened /dev/null - FD: 3
Serial data available : 32720
Serial data available : 32720
Calling srand()
Serial data available : 32720
Serial data available : 32720
Two questions -
Shouldn't /dev/null should always have 0 bytes available for reading?
Why does calling srand() on Mac OS X cause the bytes available for reading from /dev/null to change?
The problem was obvious (in hindsight!) - the result int is not initialised, so when ioctl has an error, the function returns a non-zero integer, even though data may not be available.
int serial_data_available(const int fd) {
int result;
ioctl(fd, FIONREAD, &result);
return result;
};
the correct code should be
int serial_data_available(const int fd) {
int result = 0;
ioctl(fd, FIONREAD, &result);
return result;
};

Linux Kernel: manually modify page table entry flags

I am trying to manually mark a certain memory region of a userspace process as non-cacheable (for educational purposes, not intended to be used in production code) by setting a flag in the respective page table entries.
I have an Ubuntu 14.04 (ASLR disabled) with a 4.4 Linux kernel running on an x86_64 Intel Skylake processor.
In my kernel module I have the following function:
/*
* Set memory region [start,end], excluding 'addr', of process with PID 'pid' as uncacheable.
*/
ssize_t set_uncachable(uint32_t pid, uint64_t start, uint64_t end, uint64_t addr)
{
struct task_struct* ts = NULL;
struct vm_area_struct *curr, *first = NULL;
struct mm_struct* mm;
pgd_t * pgd;
pte_t * pte;
uint64_t numpages, curr_addr;
uint32_t level, j, i = 0;
printk(KERN_INFO "set_unacheable called\n");
ts = pid_task(find_vpid(pid), PIDTYPE_PID); //find task from PID
pgd = ts->mm->pgd; //page table root of the task
first = ts->mm->mmap;
curr = first;
if(first == NULL)
return -1;
do
{
printk(KERN_INFO "Region %3u [0x%016llx - 0x%016llx]", i, curr->vm_start, curr->vm_end);
numpages = (curr->vm_end - curr->vm_start) / PAGE_SIZE; //PAGE_SIZE is 4K for now
if(curr->vm_start > curr->vm_end)
numpages = 0;
for(j = 0; j < numpages; j++)
{
curr_addr = curr->vm_start + (PAGE_SIZE*j);
pte = lookup_address_in_pgd(pgd, curr_addr, &level);
if((pte != NULL) && (level == 1))
{
printk(KERN_INFO "PTE for 0x%016x - 0x%016x (level %u)\n", curr_addr, pte->pte, level);
if(curr_addr >= start && curr_addr < end && curr_addr != addr)
{
//setting page entry to PAT#3
pte->pte |= PWT_BIT | PCD_BIT;
pte->pte &= ~PAT_BIT;
printk(KERN_INFO "PTE for 0x%016x - 0x%016x (level %u) -- UPDATED\n", curr_addr, pte->pte, level);
}
}
}
curr = curr->vm_next;
if(curr == NULL)
return -1;
i++;
} while (curr != first);
return 0;
}
To test the above code I run an application that allocates a certain region in memory:
//#define BUF_ADDR_START 0x0000000008400000LL /* works */
#define BUF_ADDR_START 0x00007ffff0000000LL /* does not work */
[...]
buffer = mmap((void *)BUF_ADDR, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_POPULATE, 0, 0);
if ( buffer == MAP_FAILED )
{
printf("Failed to map buffer\n");
exit(-1);
}
memset(buffer, 0, BUF_SIZE);
printf("Buffer at %p\n", buffer);
I want to mark the buffer uncacheable using my kernel module. The code in my kernel module works for 0x8400000, but for 0x7ffff0000000 no page table entry is found (i.e. lookup_address_in_pgd returns NULL). The buffer is definitely allocated in the test program, though.
It seems like my kernel module works for low addresses (code, data, and heap sections), but not for memory mapped at higher addresses (stack, shared libraries, etc.).
Does anyone have an idea why it fails for larger addresses? Suggestions on how to implement set_uncachable more elegantly are welcome as well ;-)
Thanks!

dsPic Receive 11byte usart string

I'm using a dsPic33 to try and receive a 11-byte string and place it in a array, but have not been successful at receiving it completely. The string I send is "$123456789#" which should be received by the pic. I have tried using the code below. Any help will be appreciated.
char inBytes[11];
int i;
unsigned char temp;
while (U1STAbits.URXDA != 0)
{
temp = U1RXREG;
if (temp == '$')
{
inBytes[0] = temp;
for(i = 1; i < 10; i++)
{
if (U1STAbits.URXDA != 0)
inChar = U1RXREG;
inBytes[i] = inChar;
}
}
jolati had a good point about the end value beeing too low to get 11 bytes but I must add that you have to wait for your other bytes to become available before you read them.
In your example;
char inBytes[11];
int i;
unsigned char temp;
while (!U1STAbits.URXDA ); //Wait until at least one byte is available
temp = U1RXREG;
if (temp == '$')
{
inBytes[0] = temp;
for(i = 1; i < 11; i++) //Loop over range i = 1 to 10 inclusively
{
while (!U1STAbits.URXDA ); //Wait until at least one byte is available
inBytes[i] = U1RXREG;
}
}
Ideally, you would do this in a non blocking way with interrupts so you handle your data as it comes in but, if you cant use interrupts, you can always use non blocking polling like:
void AsyncRX()
{
//Note that the static variables keeps their value between invocations of the
// function. i is set to 0 only on the first run of this function, it keeps
// its value on every other run after that.
static int i = 0;
static char inBytes[11];
//Nothing more to do until there is at least 1 byte available
if( !U1STAbits.URXDA )
return;
//Save the byte and test that our message starts with $
inBytes[i] = U1RXREG;
if( inBytes[0] != '$' )
return;
//Test the counter to see if we have a full 11 bytes
i++;
if( i < 11 )
return;
//Do something with your 11 bytes
//...
//...
//Reset the counter for the next message
i = 0;
}
For the interrupt example, you could simply grab the polled version and throw it into a ISR. The following is an example. Note that I do not know which dsp33 you are using and I have not programmed interrupts in high end cores (with vector tables) in a while so you may need to make a change or two. Also note that you need to enable interupts by setting the appropriate registers as they are not enabled by default.
void __attribute__ ((interrupt, no_auto_psv)) _U1RXInterrupt(void)
{
//Note that the static variables keeps their value between invocations of the
// function. i is set to 0 only on the first run of this function, it keeps
// its value on every other run after that.
static int i = 0;
static char inBytes[11];
//Reset the interrupt flag
IFS0bits.U1RXIF = 0;
//Use up all bytes in the buffer (the interrupt can be set to only fire
// once the buffer has multiple bytes in it).
while( U1STAbits.URXDA )
{
//Save the byte and test that our message starts with $
inBytes[i] = U1RXREG;
if( inBytes[0] != '$' )
continue;
//Test the counter to see if we have a full 11 bytes
i++;
if( i < 11 )
continue;
//Do something with your 11 bytes
//...
//...
//Reset the counter for the next message
i = 0;
}
}
for(i = 1; i < 10; i++) starts saving data at index 1 and stops at 9, only 9 bytes. Change < 10 to <= 10 or < 11.

socket "read" hanging if the MacBook sleeps more than 10 minutes

I am writing an app, where a socket is connecting to a host and downloading a file.
The application runs in Mac.
Now, while the app is downloading, if I put the MacBook in sleep mode for more than 10 minutes, 60% of the time the app hangs when the computer wakes up.
The stack trace shows that, it has hanged in the "read" call. I am able to reproduce this with a sample program also. Below, I have pasted the code of the sample program and the stack where it is hanging. How to solve this hanging?
Also, this is not just TCP/IP waiting that will come out in few minutes. I have waited for more than 12 hours, it did not come out.
The stack trace: -
Call graph:
2466 Thread_2507
2466 start
2466 read$UNIX2003
2466 read$UNIX2003
The program :-
#include <stdio.h>
#include <string.h>
#include <netdb.h>
#include <sys/socket.h>
#include <unistd.h>
#define buflen 131072
unsigned int portno = 80;
char hostname[] = "192.168.1.9";
int main()
{
int sd = socket(AF_INET, SOCK_STREAM, 0); /* init socket descriptor */
struct sockaddr_in sin;
struct hostent *host = gethostbyname(hostname);
char buf[buflen];
int len;
int ret;
FILE *fp;
int i;
if(sd == -1){
printf("Could not create client socket\n");
return 1;
}
/*set keep alive*/
int optval = 1;
int optlen = sizeof(optval);
ret = setsockopt(sd, SOL_SOCKET, SO_KEEPALIVE, &optval, optlen);
if(ret != 0){
printf("could not set socket option.\n");
return 1;
}
/*** PLACE DATA IN sockaddr_in struct ***/
memcpy(&sin.sin_addr.s_addr, host->h_addr, host->h_length);
sin.sin_family = AF_INET;
sin.sin_port = htons(portno);
/*** CONNECT SOCKET TO THE SERVICE DESCRIBED BY sockaddr_in struct ***/
if (connect(sd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
perror("connecting");
return 1;
}
char *str = "GET /general-log.exe / HTTP/1.0\n\n";
ret = write(sd, str, strlen(str));
if(ret < 0){
printf("error while writing\n");
return 1;
}
fp = fopen("downloaded.file", "wb+");
if(fp == NULL){
printf("not able to open the file.\n");
return 1;
}
i = 0;
while ((len = read(sd, buf, buflen)) > 0) {
printf("%d\t%d\n", i++, len);
fwrite(buf, len, 1, fp); //we should check for return
}
if(len < 0){
printf("Error while reading\n");
}
fclose(fp);
close(sd);
return 0;
}
Update apparently the SO_RCVTIMEOUT is solving the problem.
struct timeval tv;
tv.tv_sec=10;
tv.tv_usec=0;
setsockopt ( m_sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof ( tv ) );
Is it okay to use SO_RCVTIMEO?
TCP/IP connections don't survive sleep mode. SO_KEEPALIVE doesn't help in this case since it has no effect on the server side. Just wait two minutes and the read will time out. After the timeout, you can connect again.
And that sleep(1) is unnecessary. The server will respond as soon as the data is available. If you don't fetch is right away, you'll allocate a connection on the server for longer than you need.
I couldn't solve it using blocking sockets. I had to change the IMAP library to non-blocking sockets.

Resources