I'm building a device driver of sorts that consumes data from a keyboard emulating device.
The device is a card swipe, so its behavior is as follows:
User walks up, swipes card
I get a string of characters (key codes, really, including modifier keys for capital letters)
I don't know how many characters I'm going to get
I don't know when I'm getting something
Since I don't know how many characters I'm going to get, blocking reads on the keyboard tty aren't useful - I'd end up blocking after the last character. What I'm doing is, in Ruby, using the IO module to perform async reads against the keyboard device, and using a timeout to determine that the end of data was reached. This works fine logically (even a user swiping his or her card fast will do so slower than the send rate between characters).
The issue is that sometimes I lose data from the middle of the string. My hunch is that there's some sort of buffer overflow happening because I'm reading the data too slowly. To try to confirm this, I inserted small waits between processing each key. Longer waits (20 ms+) do exacerbate the problem; however, a wait of around 5 ms actually makes it go away. The only explanation I can come up with is that the async read itself is expensive (because Ruby), and doing the reads without a rate limit is actually slower than doing them with a 5 ms delay.
Does this sound rational? Are there other ideas on what this could be?
The Ruby is actually JRuby 9000. The machine runs Ubuntu 16.04 LTS.
Edit: here's a snippet of the relevant code
private def read_swipe(buffer_size, card_reader_input, pause_between_reads, seconds_to_complete)
  limit = Time.now + seconds_to_complete.seconds
  swipe_data = ''
  begin
    start_time = Time.now
    sleep pause_between_reads
    batch = card_reader_input.read_nonblock(buffer_size)
    swipe_data << batch
  rescue IO::WaitReadable
    IO.select([card_reader_input], nil, nil, 0.5)
    retry unless limit < start_time
  end while start_time < limit
  swipe_data
end
where card_reader_input = File.new(event_handle, 'rb')
I am not sure about the Ruby side, but on Linux you can read the characters coming out of a keyboard-like device directly from its device node, and if feasible you can call C code from your Ruby application. I did this for a barcode reader, and the following is the code:
#include <fcntl.h>     /* open, fcntl */
#include <termios.h>   /* tcgetattr, tcsetattr, cfsetispeed, ... */
#include <unistd.h>    /* usleep */
#include <iostream>

using std::cout;
using std::cerr;
using std::endl;

static int init_barcode_com(char* bcr_portname)
{
    int fd;
    /* Open the file descriptor in non-blocking mode */
    fd = open(bcr_portname, O_RDONLY | O_NOCTTY | O_NDELAY);
    cout << "Barcode Reader FD: " << fd << endl;
    if (fd == -1)
    {
        cerr << "ERROR: Cannot open fd for barcode communication with error " << fd << endl;
        return -1;
    }
    /* Switch back to blocking reads (clears O_NDELAY) */
    fcntl(fd, F_SETFL, 0);
    /* Set up the control structure */
    struct termios toptions;
    /* Get currently set options for the tty */
    tcgetattr(fd, &toptions);
    /* Set custom options */
    /* 9600 baud */
    cfsetispeed(&toptions, B9600);
    cfsetospeed(&toptions, B9600);
    /* 8 bits, no parity, no stop bits */
    toptions.c_cflag &= ~PARENB;
    toptions.c_cflag &= ~CSTOPB;
    toptions.c_cflag &= ~CSIZE;
    toptions.c_cflag |= CS8;
    /* no hardware flow control */
    toptions.c_cflag &= ~CRTSCTS;
    /* enable receiver, ignore status lines */
    toptions.c_cflag |= CREAD | CLOCAL;
    /* disable input/output flow control, disable restart chars */
    toptions.c_iflag &= ~(IXON | IXOFF | IXANY);
    /* disable canonical input, disable echo,
     * disable visually erase chars,
     * disable terminal-generated signals */
    toptions.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG);
    /* disable output processing */
    toptions.c_oflag &= ~OPOST;
    /* VMIN = 0: read() does not wait for a minimum number of characters */
    toptions.c_cc[VMIN] = 0;
    /* VTIME = 100: read() returns as soon as at least one byte arrives,
     * or after 10 seconds (VTIME is in tenths of a second) with nothing */
    toptions.c_cc[VTIME] = 100;
    /* commit the options */
    tcsetattr(fd, TCSANOW, &toptions);
    /* Wait for the Barcode to reset */
    usleep(10*1000);
    return fd;
}
static int read_from_barcode_reader(int fd, char* bcr_buf)
{
    int i = 0, nbytes = 0;
    char buf[1];
    /* Flush anything already in the serial buffer */
    tcflush(fd, TCIFLUSH);
    while (1) {
        nbytes = read(fd, buf, 1); // read a char at a time
        if (nbytes == -1) {
            return -1; // Couldn't read
        }
        if (nbytes == 0) {
            // VTIME expired without any data
            bcr_buf[i] = '\0';
            return 0;
        }
        if (buf[0] == '\n' || buf[0] == '\r') {
            // End of the barcode: terminate the string
            bcr_buf[i] = '\0';
            return 0;
        }
        bcr_buf[i] = buf[0];
        i++;
    }
}
Since you do not know how many characters you're going to get, you can use a VMIN and VTIME combination to address your concern. This document details the various possibilities for VMIN and VTIME.
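For example, here is a minimal sketch of the inter-byte-timeout combination (VMIN > 0 together with VTIME > 0), assuming the reader shows up as a tty device node; the function name and the numbers are illustrative, not taken from your setup:

#include <termios.h>
#include <unistd.h>

/* With VMIN > 0 and VTIME > 0, VTIME acts as an inter-byte timer: read()
 * returns once VMIN bytes have arrived, or once the line stays silent for
 * VTIME tenths of a second after the first byte - i.e. "one whole swipe/scan". */
static ssize_t read_one_scan(int fd, char *buf, size_t bufsize)
{
    struct termios t;

    if (tcgetattr(fd, &t) < 0)
        return -1;
    t.c_cc[VMIN]  = 255;   /* return after at most 255 bytes ...            */
    t.c_cc[VTIME] = 2;     /* ... or 0.2 s of silence after the first byte  */
    if (tcsetattr(fd, TCSANOW, &t) < 0)
        return -1;

    return read(fd, buf, bufsize);
}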
I am successfully programming a PIC32MX250F128B using a PICkit 3. I have written code where, when I press 'a', I get 100 samples of data from a vibration sensor. Now if I want to get another 100 samples, I either have to disconnect and then reconnect the 10 kΩ pull-up resistor connected to the MCLR pin, or run the program again.
Is there any other way I can reset the PICkit?
Here is the code I am using:
#include <p32xxxx.h> // include chip specific header file
#include <plib.h> // include peripheral library functions
// Configuration Bits
#pragma config FNOSC = FRCPLL // Internal Fast RC oscillator (8 MHz) w/ PLL
#pragma config FPLLIDIV = DIV_2 // Divide FRC before PLL (now 4 MHz)
#pragma config FPLLMUL = MUL_20 // PLL Multiply (now 80 MHz)
#pragma config FPLLODIV = DIV_2 // Divide After PLL (now 40 MHz)
// see figure 8.1 in datasheet for more info
#pragma config FWDTEN = OFF // Watchdog Timer Disabled
#pragma config ICESEL = ICS_PGx2 // ICE/ICD Comm Channel Select
#pragma config JTAGEN = OFF // Disable JTAG
#pragma config FSOSCEN = OFF // Disable Secondary Oscillator
#pragma config FPBDIV = DIV_1 // PBCLK = SYCLK
// Defines
#define SYSCLK 40000000L
// Macros
// Equation to set baud rate from UART reference manual equation 21-1
#define Baud2BRG(desired_baud) ( (SYSCLK / (16*desired_baud))-1)
// Function Prototypes
int SerialTransmit(const char *buffer);
unsigned int SerialReceive(char *buffer); //, unsigned int max_size);
int UART2Configure( int baud);
short a2dvals[11000];
int adcptr,num_channels,k,i;
char sampling;
int ADC_RSLT0,totaldata,totaldata1,chunks_sent,data_count,l;
short temp;
BOOL a2don;
volatile unsigned int channel4;
void __ISR(_ADC_VECTOR, IPL2) TIMER3Handler(void) // Timer 3 interrupt function (fires on the ADC vector)
{
temp = ReadADC10(0);
a2dvals[k] = (temp);
k++;
if (k>totaldata1)// && sampling == 's')
{
T3CONCLR = 0x8000;
a2don=FALSE;
chunks_sent = 0;
totaldata = k/2;
k = 1;
}
mAD1ClearIntFlag();
}
int main(void)
{
char buf[1024]; // declare receive buffer with max size 1024
// Peripheral Pin Select
U2RXRbits.U2RXR = 4; //SET RX to RB8
RPB9Rbits.RPB9R = 2; //SET RB9 to TX
SYSTEMConfigPerformance(SYSCLK);
UART2Configure(9600); // Configure UART2 for a baud rate of 9600
U2MODESET = 0x8000; // enable UART2
ANSELBbits.ANSB2 = 1; // set RB2 (AN4) to analog
TRISBbits.TRISB2 = 1; // set RB2 as an input
//adcConfigureManual(); // Configure ADC
//AD1CON1SET = 0x8000; // Enable ADC
SerialTransmit("Hello! Enter 'a' to do ADC conversion \r\n");
unsigned int rx_size;
while( 1){
rx_size = SerialReceive(buf); //, 1024); // wait here until data is received
SerialTransmit(buf); // Send out data exactly as received
SerialTransmit("\r\n");
}
return 1;
} // END main()
/* UART2Configure() sets up the UART2 for the most standard and minimal operation
* Enable TX and RX lines, 8 data bits, no parity, 1 stop bit, idle when HIGH
* Input: Desired Baud Rate
* Output: Actual Baud Rate from baud control register U2BRG after assignment*/
int UART2Configure( int desired_baud){
U2MODE = 0; // disable autobaud, TX and RX enabled only, 8N1, idle=HIGH
U2STA = 0x1400; // enable TX and RX
U2BRG = Baud2BRG(desired_baud); // U2BRG = (FPb / (16*baud)) - 1
// Calculate actual assigned baud rate
int actual_baud = SYSCLK / (16 * (U2BRG+1));
return actual_baud;
} // END UART2Configure()
/* SerialTransmit() transmits a string to the UART2 TX pin MSB first
*
* Inputs: *buffer = string to transmit */
int SerialTransmit(const char *buffer)
{
unsigned int size = strlen(buffer);
while( size)
{
while( U2STAbits.UTXBF); // wait while TX buffer full
U2TXREG = *buffer; // send single character to transmit buffer
buffer++; // transmit next character on following loop
size--; // loop until all characters sent (when size = 0)
}
while( !U2STAbits.TRMT); // wait for last transmission to finish
return 0;
}
/* SerialReceive() is a blocking function that waits for data on
* the UART2 RX buffer and then stores all incoming data into *buffer
*
* Note that when a carriage return '\r' is received, a null character
* is appended, signifying the string's end
*
* Inputs: *buffer = Character array/pointer to store received data into
* max_size = number of bytes allocated to this pointer
* Outputs: Number of characters received */
unsigned int SerialReceive(char *buffer) //, unsigned int max_size)
{
//unsigned int num_char = 0;
/* Wait for and store incoming data until either a carriage return is received
* or the number of received characters (num_chars) exceeds max_size */
while(1)
{
while( !U2STAbits.URXDA); // wait until data available in RX buffer
*buffer = U2RXREG; // empty contents of RX buffer into *buffer pointer
if (*buffer == 'a')
{
int dummy,dummy1;
unsigned char tempstr[5];
SYSTEMConfig(SYSCLK, SYS_CFG_WAIT_STATES | SYS_CFG_PCACHE);
// the ADC ///////////////////////////////////////
// configure and enable the ADC
CloseADC10(); // ensure the ADC is off before setting the configuration
// define setup parameters for OpenADC10
// Turn module on | output in integer | trigger mode auto | enable autosample
// ADC_CLK_AUTO -- Internal counter ends sampling and starts conversion (Auto convert)
// ADC_AUTO_SAMPLING_ON -- Sampling begins immediately after last conversion completes; SAMP bit is automatically set
// ADC_AUTO_SAMPLING_OFF -- Sampling begins with AcquireADC10();
#define PARAM1 ADC_MODULE_ON|ADC_FORMAT_INTG32 | ADC_CLK_TMR | ADC_AUTO_SAMPLING_ON //
// define setup parameters for OpenADC10
// ADC ref external | disable offset test | disable scan mode | do 1 sample | use single buf | alternate mode off
#define PARAM2 ADC_VREF_AVDD_AVSS | ADC_OFFSET_CAL_DISABLE | ADC_SCAN_OFF | ADC_SAMPLES_PER_INT_1 | ADC_ALT_BUF_OFF | ADC_ALT_INPUT_OFF
//
// Define setup parameters for OpenADC10
// use peripheral bus clock | set sample time | set ADC clock divider
// ADC_CONV_CLK_Tcy2 means divide CLK_PB by 2 (max speed)
// ADC_SAMPLE_TIME_5 seems to work with a source resistance < 1kohm
#define PARAM3 ADC_CONV_CLK_SYSTEM | ADC_SAMPLE_TIME_5 | ADC_CONV_CLK_Tcy2 //ADC_SAMPLE_TIME_15| ADC_CONV_CLK_Tcy2
// define setup parameters for OpenADC10
// set AN4 and as analog inputs
#define PARAM4 ENABLE_AN4_ANA
// define setup parameters for OpenADC10
// do not assign channels to scan
#define PARAM5 SKIP_SCAN_ALL
// use ground as neg ref for A | use AN4 for input A
// configure to sample AN4
SetChanADC10( ADC_CH0_NEG_SAMPLEA_NVREF | ADC_CH0_POS_SAMPLEA_AN4 ); // configure to sample AN4
OpenADC10( PARAM1, PARAM2, PARAM3, PARAM4, PARAM5 ); // configure ADC using the parameters defined above
ConfigIntADC10(ADC_INT_PRI_2 | ADC_INT_ON);
EnableADC10(); // Enable the ADC
INTEnableSystemMultiVectoredInt();
OpenTimer3(T3_OFF | T3_SOURCE_INT | T3_PS_1_1 ,0x3e8);
num_channels = 1;
totaldata1 = 10500;
a2don=TRUE;
T3CONSET = 0x8000;
k=0;
while(1)
{
while(a2don);
for(i=0;i<100;i++)
{
dummy = a2dvals[i]/1000 ;
tempstr[0] = dummy + 0x30;
dummy1 = a2dvals[i]- dummy*1000;
dummy = dummy1/100;
tempstr[1] = dummy + 0x30;
dummy1 = dummy1 - dummy*100;
dummy = dummy1/10;
tempstr[2] = dummy + 0x30;
dummy1 = dummy1 - dummy*10;
tempstr[3] = dummy1 + 0x30;
//tempstr[4] = "\0";
printf("%c%c%c%c \n", tempstr[0],tempstr[1],tempstr[2],tempstr[3]);
}
a2don=TRUE;
}
}
}
return 1;
}// END SerialReceive()
Thanks for your advice.
You do not need to reset the PICkit; if anything, that would arguably be the least efficient way to do it.
Instead, try something like this. Please note this is high level; you will need to make it work yourself.
int main(void) {
    // Setup your things here
    while (1) {                  // Your infinite loop
        // Check if you received 'a' here
        if (received_a == 1) {   // You received an 'a'
            send_data();         // Send your data
        }
    }
}
Without seeing the actual code you have written, we would not be able to help you.
You use while(1) loops everywhere, and if you don't use a break; or a return statement you stay in that loop forever.
I don't think you need the while(1) loops in any function except main(). Remove them and it should work.
Try drawing out your program flow in a flow chart; it should clear things up. Also consider implementing a state machine with switch/case: it makes it much clearer where you are in the code and it's easier to debug. It's probably even better to use interrupts for the ADC and the serial port, so the PIC is free to do other things while the peripherals handle work that takes time.
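Here is a rough sketch of the switch/case state-machine idea, not drop-in code for your board: got_char(), start_sampling(), samples_ready() and send_samples() are hypothetical placeholders for your existing UART/ADC routines. The point is that main() owns the single infinite loop and every state does one small step, so there is no nested while(1) to get stuck in.

int  got_char(char c);      /* returns 1 if character c was received on the UART */
void start_sampling(void);  /* arm timer/ADC so the ISR starts filling the buffer */
int  samples_ready(void);   /* returns 1 once the samples have been collected     */
void send_samples(void);    /* print/transmit the collected samples               */

enum state { WAIT_FOR_CMD, SAMPLING, SENDING };

int main(void)
{
    enum state st = WAIT_FOR_CMD;

    /* setup_uart(); setup_adc(); ... */

    while (1) {                          /* the only infinite loop */
        switch (st) {
        case WAIT_FOR_CMD:
            if (got_char('a')) {         /* wait for the 'a' command */
                start_sampling();
                st = SAMPLING;
            }
            break;
        case SAMPLING:
            if (samples_ready())         /* ISR signals the buffer is full */
                st = SENDING;
            break;
        case SENDING:
            send_samples();              /* push the 100 values out */
            st = WAIT_FOR_CMD;           /* ready for the next 'a'  */
            break;
        }
    }
}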
Background
I'm writing a dtrace program that tracks an application's socket file descriptors. The aim is to produce logs that help me spot file descriptor leaks in a very complex OS X application.
Here is my other question, which has a very helpful answer.
Problem
I want my program to log the address that a file descriptor has been connected to. Among the examples there is code which partially does what I need: soconnect_mac.d (there is a link to it on GitHub).
soconnect_mac.d works great when applied to Firefox, but it completely fails with my application. A quick investigation showed that soconnect_mac.d can only interpret AF_INET (value 2) family addresses, while some library used by my application uses AF_SYSTEM (value 32) family addresses.
I can't find anything that would help me convert the received address into something human readable.
So far I've got this:
#!/usr/sbin/dtrace -s
inline int af_inet = 2 ; /* AF_INET defined in Kernel/sys/socket.h */
inline int af_inet6 = 30; /* AF_INET6 defined in Kernel/sys/socket.h */
inline int af_system = 32; /* AF_SYSTEM defined in Kernel/sys/socket.h */
… // some stuff
syscall::connect:entry
/pid == $target && isOpened[pid, arg0] == 1/
{
/* assume this is sockaddr_in until we can examine family */
this->s = (struct sockaddr_in *)copyin(arg1, arg2);
this->f = this->s->sin_family;
self->fileDescriptor = arg0;
}
/* this section is copied with pride from "soconnect_mac.d" */
syscall::connect:entry
/this->f == af_inet/
{
/* Convert port to host byte order without ntohs() being available. */
self->port = (this->s->sin_port & 0xFF00) >> 8;
self->port |= (this->s->sin_port & 0xFF) << 8;
/*
* Convert an IPv4 address into a dotted quad decimal string.
* Until the inet_ntoa() functions are available from DTrace, this is
* converted using the existing strjoin() and lltostr(). It's done in
* two parts to avoid exhausting DTrace registers in one line of code.
*/
this->a = (uint8_t *)&this->s->sin_addr;
this->addr1 = strjoin(lltostr(this->a[0] + 0ULL),
strjoin(".",
strjoin(lltostr(this->a[1] + 0ULL),
".")));
this->addr2 = strjoin(lltostr(this->a[2] + 0ULL),
strjoin(".",
lltostr(this->a[3] + 0ULL)));
self->address = strjoin(this->addr1, this->addr2);
}
/* this section is my */
syscall::connect:entry
/this->f == af_system/
{
/* TODO: Problem how to handle AF_SYSTEM address family */
/* Convert port to host byte order without ntohs() being available. */
self->port = (this->s->sin_port & 0xFF00) >> 8;
self->port |= (this->s->sin_port & 0xFF) << 8; // this also doesn't work as it should
self->address = "system family address needed here";
}
// a fallback
syscall::connect:entry
/this->f && this->f != af_inet && this->f != af_system/
{
/* Convert port to host byte order without ntohs() being available. */
self->port = (this->s->sin_port & 0xFF00) >> 8;
self->port |= (this->s->sin_port & 0xFF) << 8;
self->address = strjoin("Can't handle family: ", lltostr(this->f));
}
syscall::connect:return
/self->fileDescriptor/
{
this->errstr = err[errno] != NULL ? err[errno] : lltostr(errno);
printf("%Y.%03d FD:%d Status:%s Address:%s Port:%d",
walltimestamp, walltimestamp % 1000000000 / 1000000,
self->fileDescriptor, this->errstr, self->address, self->port);
self->fileDescriptor = 0;
self->address = 0;
self->port = 0;
}
What is even more annoying, my code fails to read the port number (I get the value 512 instead of one of these: 443, 8443, 5061).
IMO the problem is the first syscall::connect:entry clause, where it is assumed that the second argument can be treated as a struct sockaddr_in. I'm guessing struct sockaddr_storage should be used in the case of the AF_SYSTEM address family, but I haven't found any documentation or source code that proves this directly.
My clause with the this->f == af_system predicate properly catches the events from the application I'm investigating.
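For completeness, here is a hedged C sketch (not DTrace) of how the same bytes could be interpreted in user space; the sockaddr_ctl fields (sc_id, sc_unit) follow my reading of <sys/kern_control.h>, so treat the whole thing as an assumption rather than a confirmed answer. If that reading is right, an AF_SYSTEM control address carries a control id/unit and no IP port at all, which would explain the bogus 512.

#include <stdio.h>
#include <sys/socket.h>        /* struct sockaddr, AF_INET, AF_SYSTEM */
#include <netinet/in.h>        /* struct sockaddr_in */
#include <arpa/inet.h>         /* ntohs */
#include <sys/kern_control.h>  /* struct sockaddr_ctl (assumption) */

/* Inspect a sockaddr blob such as the one copyin()'d in the script above. */
static void describe_sockaddr(const struct sockaddr *sa)
{
    if (sa->sa_family == AF_INET) {
        const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
        printf("AF_INET, port %u\n", ntohs(sin->sin_port));
    } else if (sa->sa_family == AF_SYSTEM) {
        const struct sockaddr_ctl *sc = (const struct sockaddr_ctl *)sa;
        printf("AF_SYSTEM, control id %u unit %u\n",
               (unsigned)sc->sc_id, (unsigned)sc->sc_unit);
    } else {
        printf("family %d\n", sa->sa_family);
    }
}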
I am writing an I2C slave routine for PIC18F25K80 and I am stuck on a weird problem.
This is my routine:
void interrupt interruption_handler() {
PIE1bits.SSPIE = 0; // Disable Master Synchronous Serial Port Interrupt
if (PIR1bits.SSPIF != 1) {
//This is not I2C interruption;
PIE1bits.SSPIE = 1; // Enable Master Synchronous Serial Port Interrupt
return;
}
//Treat overflow
if ((SSPCON1bits.SSPOV) || (SSPCON1bits.WCOL)) {
dummy = SSPBUF; // Read the previous value to clear the buffer
SSPCON1bits.SSPOV = 0; // Clear the overflow flag
SSPCON1bits.WCOL = 0; // Clear the collision bit
SSPCON1bits.CKP = 1;
board_state = BOARD_STATE_ERROR;
} else {
if (!SSPSTATbits.D_NOT_A) {
//Slave address
debug(0, ON);
//Read address
address = SSPBUF; //Clear BF
while(BF); //Wait until completion
if (SSPSTATbits.R_NOT_W) {
SSPCON1bits.WCOL = 0;
unsigned char a = 0x01;
SSPBUF = a;//0x01 works //Deliver first byte
asm("nop");
}
} else {
if (SSPSTATbits.BF) {
dummy = SSPBUF; // Clear BF (just in case)
while(BF);
}
if (SSPSTATbits.R_NOT_W) {
//Multi-byte read
debug(1, ON);
SSPCON1bits.WCOL = 0;
SSPBUF = 0x02; //Deliver second byte
asm("nop");
} else {
//WRITE
debug(2, ON);
}
}
transmitted = TRUE;
SSPCON1bits.CKP = 1;
PIR1bits.SSPIF = 0;
PIE1bits.SSPIE = 1; // Enable Master Synchronous Serial Port Interrupt
}
}
It works like a charm if I set constant values on SSPBUF. For example, if you do:
SSPBUF = 0x01;
(...)
SSPBUF = 0x02;
I get the two bytes on the master. I can even see the wave forms of the bytes being transmitted on the oscilloscope. Quite fun!
But when I try to set SSPBUF using a variable like:
unsigned char a = 0x01;
SSPBUF = a;
I get zero on the master.
It is driving me crazy.
Some hypotheses I've discarded:
The watchdog timer is messing things up by interrupting in the middle of the protocol: it is not. It is disabled, and the problem happens with both SSPBUF assignments.
I need to wait until BF goes low before continuing: I don't. AFAIK, you set up SSPBUF, clear SSPIF, set CKP and return from the interrupt to take care of life at 4 MHz while the hardware sends the data at a few kHz. It will interrupt you again when it finishes.
It makes no sense to me. What good is the peripheral if you cannot load it with an arbitrary value from a variable?
Please gurus out there, enlighten this poor programmer.
Thanks in advance.
It has something to do with how the compiler generates the code and some undocumented/unknown PIC restriction around SSPBUF (it is a special register, after all).
I found out that it works when the compiler uses movwf and does not work when the compiler uses movff.
I moved the question to another forum because I realized the audience there is more adequate.
You will find more details here:
https://electronics.stackexchange.com/questions/251763/writing-sspbuf-from-variable-in-i2c-slave-protocol-in-pic18/251771#251771
Try moving the declaration "unsigned char a = 0x01;" to the beginning of the function, or try defining it as a volatile global variable.
Take into account that SSPBUF is both a read and a write buffer. Check whether there are conditions that may cause the I2C module to reset this buffer.
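A minimal sketch of that last suggestion, keeping the rest of the ISR from the question as-is; the variable name is just an example:

/* Volatile, file-scope variable as suggested above (name is illustrative). */
volatile unsigned char i2c_reply_byte = 0x01;

void interrupt interruption_handler() {
    /* ... unchanged handling from the question ... */
    if (SSPSTATbits.R_NOT_W) {
        SSPCON1bits.WCOL = 0;
        SSPBUF = i2c_reply_byte;   /* deliver the byte from the volatile global */
        asm("nop");
    }
    /* ... */
}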
I wrote a function that watches a file (given an fd) grow to a certain size, with a timeout. I'm using kqueue()/kevent() to wait for the file to be "extended", but after I get the notification that the file grew I have to check the file size (and compare it against the desired size). That seems easy, but I cannot figure out a way to do it reliably in POSIX.
NB: The timeout hits if the file doesn't grow at all for the specified time. So this is not an absolute timeout, just a timeout on any growth happening to the file. I'm on OS X, but this question is meant for "every POSIX that has kevent()/kqueue()", which should be OS X and the BSDs, I think.
Here's my current version of my function:
/**
* Blocks until `fd` reaches `size`. Times out if `fd` isn't extended for `timeout`
* amount of time. Returns `-1` and sets `errno` to `EFBIG` should the file be bigger
* than wanted.
*/
int fwait_file_size(int fd,
off_t size,
const struct timespec *restrict timeout)
{
int ret = -1;
int kq = kqueue();
struct kevent changelist[1];
if (kq < 0) {
/* errno set by kqueue */
ret = -1;
goto out;
}
memset(changelist, 0, sizeof(changelist));
EV_SET(&changelist[0], fd, EVFILT_VNODE, EV_ADD | EV_ENABLE | EV_CLEAR, NOTE_DELETE | NOTE_RENAME | NOTE_EXTEND, 0, 0);
if (kevent(kq, changelist, 1, NULL, 0, NULL) < 0) {
/* errno set by kevent */
ret = -1;
goto out;
}
while (true) {
{
/* Step 1: Check the size */
int suc_sz = evaluate_fd_size(fd, size); /* IMPLEMENTATION OF THIS IS THE QUESTION */
if (suc_sz > 0) {
/* wanted size */
ret = 0;
goto out;
} else if (suc_sz < 0) {
/* errno and return code already set */
ret = -1;
goto out;
}
}
{
/* Step 2: Wait for growth */
int suc_kev = kevent(kq, NULL, 0, changelist, 1, timeout);
if (0 == suc_kev) {
/* That's a timeout */
errno = ETIMEDOUT;
ret = -1;
goto out;
} else if (suc_kev > 0) {
if (changelist[0].filter == EVFILT_VNODE) {
if (changelist[0].fflags & NOTE_RENAME || changelist[0].fflags & NOTE_DELETE) {
/* file was deleted, renamed, ... */
errno = ENOENT;
ret = -1;
goto out;
}
}
} else {
/* errno set by kevent */
ret = -1;
goto out;
}
}
}
out: {
int errno_save = errno;
if (kq >= 0) {
close(kq);
}
errno = errno_save;
return ret;
}
}
So the basic algorithm works the following way:
Set up the kevent
Check size
Wait for file growth
Steps 2 and 3 are repeated until the file reached the wanted size.
The code uses a function int evaluate_fd_size(int fd, off_t wanted_size) which will return < 0 for "some error happened or file larger than wanted", == 0 for "file not big enough yet", or > 0 for file has reached the wanted size.
Obviously this only works if evaluate_fd_size is reliable in determining the file size. My first go was to implement it with off_t eof_pos = lseek(fd, 0, SEEK_END) and compare eof_pos against wanted_size. Unfortunately, lseek seems to cache the results. So even when kevent returned with NOTE_EXTEND, meaning the file grew, the result may be the same! Then I thought of switching to fstat, but found articles saying that fstat caches as well.
The last thing I tried was using fsync(fd); before off_t eof_pos = lseek(fd, 0, SEEK_END); and suddenly things started working. But:
Nothing states that fsync() really solves my problem
I don't want to fsync() because of performance
EDIT: It's really hard to reproduce but I saw one case in which fsync() didn't help. It seems to take (very little) time until the file size is larger after a NOTE_EXTEND event hit user space. fsync() probably just works as a good enough sleep() and therefore it works most of the time :-.
So, in other words: how do I reliably check the file size in POSIX without opening/closing the file (which I cannot do because I don't know the file name)? Additionally, I can't find a guarantee that even that would help.
By the way: int new_fd = dup(fd); off_t eof_pos = lseek(new_fd, 0, SEEK_END); close(new_fd); did not overcome the caching issue.
EDIT 2: I also created an all in one demo program. If it prints Ok, success before exiting, everything went fine. But usually it prints Timeout (10000000) which manifests the race condition: The file size check for the last kevent triggered is smaller than the actual file size at this very moment. Weirdly when using ftruncate() to grow the file instead of write() it seems to work (you can compile the test program with -DUSE_FTRUNCATE to test that).
Nothing states that fsync() really solves my problem
I don't want to fsync() because of performance
Your problem isn't "fstat caching results", it's the I/O system buffering writes. Fstat doesn't get updated until the kernel flushes the I/O buffers to the underlying file system.
This is why fsync fixes your problem and any solution to your problem more or less has to do the equivalent of fsync. ( This is what the open/close solution does as a side effect. )
I can't help you with point 2 (the performance concern), because I don't see any way to avoid doing the fsync.
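To make that concrete, here is a minimal sketch of evaluate_fd_size() along those lines (fsync() followed by fstat()), keeping the contract described in the question; whether fsync() is permitted on this particular descriptor (it may be read-only on some systems) is an assumption you would need to verify:

#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>

/* < 0: error, or file already bigger than wanted (errno = EFBIG);
 * == 0: not big enough yet; > 0: wanted size reached. */
static int evaluate_fd_size(int fd, off_t wanted_size)
{
    struct stat st;

    if (fsync(fd) < 0)        /* flush buffered writes so st_size is current */
        return -1;
    if (fstat(fd, &st) < 0)   /* errno set by fstat */
        return -1;
    if (st.st_size > wanted_size) {
        errno = EFBIG;        /* file grew past the wanted size */
        return -1;
    }
    return st.st_size == wanted_size ? 1 : 0;
}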
I'm using a library in my C++ application and trying to capture all the output in a file. I tried to redirect the stderr to stdout and then stdout to a file like so:
./a.out 2>&1 > out.txt
This captures pretty much everything from my application, but there is still some output on the console related to the library I'm using. My questions are:
Is there anything besides stdout/stderr (other than stdin)?
If there is, how can I identify it in my case?
And then how can I redirect it to the same file?
Note: In case someone is familiar, library is called SystemC (which is an event driven simulation library/language on top of C++ for mainly system/hardware design).
Redirections are processed left to right: 2>&1 duplicates stderr onto whatever stdout currently points to, so you must redirect stdout to the output file before redirecting stderr to stdout. With your ordering, stderr still points at the terminal, which is why you can still see the stderr output.
See the bash redirections reference manual.
Solution:
./a.out >out.txt 2>&1
Or just:
./a.out &>out.txt
Hmm, well I think what might be happening is that your program is printing to the controlling terminal. One possibility could be to run your program as a daemon with no controlling terminal. I have a C function that I call to turn my code into a daemon; I got it from a book called The Linux Programming Interface, which I highly recommend.
#define BD_NO_CHDIR 01 /* Don't chdir("/") */
#define BD_NO_CLOSE_FILES 02 /* Don't close all open files */
#define BD_NO_REOPEN_STD_FDS 04 /* Don't reopen stdin, stdout, and
stderr to /dev/null */
#define BD_NO_UMASK0 010 /* Don't do a umask(0) */
#define BD_MAX_CLOSE 8192 /* Maximum file descriptors to close if
sysconf(_SC_OPEN_MAX) is indeterminate */
#include <sys/stat.h>   /* umask */
#include <fcntl.h>      /* open */
#include <stdlib.h>     /* EXIT_SUCCESS */
#include <unistd.h>     /* fork, setsid, chdir, close, dup2, sysconf, _exit */

int becomeDaemon(int flags){
int maxfd, fd, new_stdout;
switch (fork()) { /* Become background process */
case -1: return -1;
case 0: break; /* Child falls through... */
default: _exit(EXIT_SUCCESS); /* while parent terminates */
}
if (setsid() == -1) /* Become leader of new session */
return -1;
switch (fork()) { /* Ensure we are not session leader */
case -1: return -1;
case 0: break;
default: _exit(EXIT_SUCCESS);
}
if (!(flags & BD_NO_UMASK0))
umask(0); /* Clear file mode creation mask */
if (!(flags & BD_NO_CHDIR))
chdir("/"); /* Change to root directory */
if (!(flags & BD_NO_CLOSE_FILES)) { /* Close all open files */
maxfd = sysconf(_SC_OPEN_MAX);
if (maxfd == -1) /* Limit is indeterminate... */
maxfd = BD_MAX_CLOSE; /* so take a guess */
for (fd = 0; fd < maxfd; fd++)
close(fd);
}
if (!(flags & BD_NO_REOPEN_STD_FDS)) {
/*
STDIN = 0
STDOUT = 1
STDERR = 2
*/
close(0); /* Reopen standard fd's to /dev/null */
fd = open("/dev/null", O_RDWR);
if (fd != 0) /* 'fd' should be 0 */
return -1;
if (dup2(0, 1) != 1)
return -1;
if (dup2(0, 2) != 2)
return -1;
}
return 0;
}
Now I suppose that you can change the line open("/dev/null", O_RDWR) to open("/home/you/output.txt", O_RDWR) and redirect the output there; a small sketch of that change is below. Of course then you wouldn't be able to input directly from the terminal into your program, but from the sound of the error message you're getting I think you're using a socket anyway, so you could possibly write a client to do that for you if necessary.
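For example, the BD_NO_REOPEN_STD_FDS block could be replaced with something like the following; the path is only an example, and the O_CREAT/O_APPEND flags plus the 0644 mode are my additions so the log file is created and appended to rather than assumed to exist:

#include <fcntl.h>
#include <unistd.h>

/* Keep stdin on /dev/null, but send stdout and stderr to a log file. */
static int reopenStdFdsToLog(const char *logPath)
{
    int fd, logFd;

    close(0);
    fd = open("/dev/null", O_RDWR);   /* 'fd' should be 0 */
    if (fd != 0)
        return -1;

    logFd = open(logPath, O_WRONLY | O_CREAT | O_APPEND, 0644);
    if (logFd == -1)
        return -1;
    if (dup2(logFd, 1) != 1)          /* stdout -> log file */
        return -1;
    if (dup2(logFd, 2) != 2)          /* stderr -> log file */
        return -1;
    return 0;
}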
Hope that helps.