I am trying to use native messaging to send data from a Chrome extension to my native Windows application. It works well with the runtime.sendNativeMessage() method. When I use a long-lived connection through a port, data still reaches my app from Chrome, but the extension only ever receives the first response from my app. I am sure the port is still open, because my app keeps receiving data from Chrome. Here is my code:
Chrome Extension Script:
var port = chrome.runtime.connectNative('com.mydomain.app1');
port.onMessage.addListener(function(msg) {
  console.log("Received from port:", msg);
});
port.onDisconnect.addListener(function() {
  console.log("Disconnected");
});
chrome.tabs.onUpdated.addListener(
  function(tabId, changeInfo, tab) {
    var param = {};
    param['url'] = tab.url;
    port.postMessage(param);
  }
);
My Windows app in C++:
int _tmain(int argc, _TCHAR* argv[])
{
    while (true)
    {
        // read the first four bytes (=> length)
        unsigned int length = 0;
        for (int i = 0; i < 4; i++)
        {
            char c;
            if ((c = getchar()) != EOF)
                length += c << i * 8;
            else
                return 0;
        }
        // read the JSON message
        std::string msg = "";
        for (int i = 0; i < length; i++)
        {
            msg += getchar();
        }
        // .... do something
        // send a response message
        std::string message = "{\"text\": \"This is a response message\"}";
        unsigned int len = message.length();
        // We need to send the 4 bytes of length information
        std::cout << char(((len >> 0) & 0xFF))
                  << char(((len >> 8) & 0xFF))
                  << char(((len >> 16) & 0xFF))
                  << char(((len >> 24) & 0xFF));
        // Now we can output our message
        std::cout << message.c_str();
        std::cout.flush();
    }
}
Notice the last line, "std::cout.flush();": if I comment it out, even the first response is not shown in Chrome. I just can't figure out how Chrome reads from the app's stdout.
Try automatic flushing: std::cout.setf(std::ios_base::unitbuf);
Also, the way you read and write the message length is incorrect and will fail on long messages; on the read side, for example, getchar() is stored in a signed char, so any length byte of 0x80 or above sign-extends before the shift and corrupts the computed length.
This code works well for me:
int main(int argc, char* argv[])
{
    std::cout.setf(std::ios_base::unitbuf);
    while (true)
    {
        unsigned int ch, inMsgLen = 0, outMsgLen = 0;
        std::string input = "", response = "";
        // Read 4 bytes for data length
        std::cin.read((char*)&inMsgLen, 4);
        if (inMsgLen == 0)
        {
            break;
        }
        else
        {
            // Loop getchar to pull in the message until we reach the total length provided.
            for (int i = 0; i < inMsgLen; i++)
            {
                ch = getchar();
                input += ch;
            }
        }
        response.append("{\"echo\":").append(input).append("}");
        outMsgLen = response.length();
        // Send 4 bytes of data length
        std::cout.write((char*)&outMsgLen, 4);
        // Send the data
        std::cout << response;
    }
    return 0;
}
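One Windows-specific detail worth adding (my note, not part of the answer above): stdin and stdout are opened in text mode by default, so newline translation can mangle the binary 4-byte length prefix or the payload. A common guard is to switch both streams to binary mode at startup, for example:
#include <fcntl.h>   // _O_BINARY
#include <io.h>      // _setmode
#include <stdio.h>   // _fileno

int main(int argc, char* argv[])
{
    // Put stdin/stdout into binary mode so the length prefix and the JSON
    // payload pass through Chrome's pipe unmodified on Windows.
    _setmode(_fileno(stdin), _O_BINARY);
    _setmode(_fileno(stdout), _O_BINARY);
    // ... message loop as above ...
    return 0;
}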
Related
I'm trying to format data sent over a USB UART with printf and it's giving me garbage. I can send a simple string and that works, but anything I try to format comes out as junk. Looking through the code, I think it has to do with my string not being in program space, but I'm not sure (see the note after the UART code below).
Here is my main:
void main(void) {
CPU_PRESCALE(CPU_16MHz);
init_uart();
int degree = 0;
char buffer[50];
while(1) {
degree = (degree + 1) % 360;
send_str(PSTR("\n\nHello!!!\n\n"));
memset(buffer, 0, 50);
sprintf_P(buffer, PSTR("%d degrees\n"), degree);
send_str(buffer);
_delay_ms(20);
}
}
The output looks like this:
Hello!!!
����/�������(/����#Q��������
Hello!!!
����/�������(/����#Q��������
The USB UART code is from a tutorial. The relevant parts look like this:
void send_str(const char *s)
{
char c;
while (1) {
c = pgm_read_byte(s++);
if (!c) break;
usb_serial_putchar(c);
}
}
int8_t usb_serial_putchar(uint8_t c)
{
uint8_t timeout, intr_state;
// if we're not online (enumerated and configured), error
if (!usb_configuration) return -1;
// interrupts are disabled so these functions can be
// used from the main program or interrupt context,
// even both in the same program!
intr_state = SREG;
cli();
UENUM = CDC_TX_ENDPOINT;
// if we gave up due to timeout before, don't wait again
if (transmit_previous_timeout) {
if (!(UEINTX & (1<<RWAL))) {
SREG = intr_state;
return -1;
}
transmit_previous_timeout = 0;
}
// wait for the FIFO to be ready to accept data
timeout = UDFNUML + TRANSMIT_TIMEOUT;
while (1) {
// are we ready to transmit?
if (UEINTX & (1<<RWAL)) break;
SREG = intr_state;
// have we waited too long? This happens if the user
// is not running an application that is listening
if (UDFNUML == timeout) {
transmit_previous_timeout = 1;
return -1;
}
// has the USB gone offline?
if (!usb_configuration) return -1;
// get ready to try checking again
intr_state = SREG;
cli();
UENUM = CDC_TX_ENDPOINT;
}
// actually write the byte into the FIFO
UEDATX = c;
// if this completed a packet, transmit it now!
if (!(UEINTX & (1<<RWAL))) UEINTX = 0x3A;
transmit_flush_timer = TRANSMIT_FLUSH_TIMEOUT;
SREG = intr_state;
return 0;
}
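That suspicion matches the code: send_str() pulls each byte with pgm_read_byte(), so it only works for strings in program space (PSTR literals), while sprintf_P() fills an ordinary RAM buffer. A minimal sketch of a RAM-reading variant, assuming the usb_serial_putchar() above (the name send_str_ram is made up here):
// Hypothetical RAM-string counterpart of send_str(): no pgm_read_byte(),
// so it is the right choice for the buffer filled by sprintf_P().
void send_str_ram(const char *s)
{
    while (*s) {
        usb_serial_putchar((uint8_t)*s++);
    }
}
With that split, send_str(PSTR("...")) stays for flash literals and send_str_ram(buffer) handles the formatted RAM buffer.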
This is my client code, using libwebsockets version 1.5:
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <string.h>
#include <assert.h>
#include <signal.h>
#include <syslog.h>
#include <sys/time.h>
#include <unistd.h>
#include <libwebsockets.h>
static volatile int force_exit = 0;
static int state, command_received = 0, forked = 0;
#define MAX_ECHO_PAYLOAD 1400
#define LOCAL_RESOURCE_PATH "./"
struct per_session_data {
unsigned char buf[LWS_SEND_BUFFER_PRE_PADDING + MAX_ECHO_PAYLOAD + LWS_SEND_BUFFER_POST_PADDING];
unsigned int len;
};
//for temporary storing data from tcpdump
struct per_session_data data1;
static int callback_echo(struct libwebsocket_context *context, struct libwebsocket *wsi, enum libwebsocket_callback_reasons reason, void *user, void *in, size_t len)
{
struct per_session_data *pss = (struct per_session_data *)user;
int n;
switch (reason) {
/* when the callback is used for client operations --> */
case LWS_CALLBACK_CLOSED:
case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
printf("Closed\n");
state = 0;
break;
case LWS_CALLBACK_ESTABLISHED:
case LWS_CALLBACK_CLIENT_ESTABLISHED:
printf("Client connected\n");
state = 2;
break;
/* we will receive our packet here*/
case LWS_CALLBACK_RECEIVE:
case LWS_CALLBACK_CLIENT_RECEIVE:
printf("Rx from server: %s\n", (char *)in);
if (!strcmp((char *)in, "tcpdump"))
{
command_received = 1;
}
break;
/* we will send our packet here */
case LWS_CALLBACK_CLIENT_WRITEABLE:
printf("client writing to server\n");
pss->len = sprintf((char *)&pss->buf[LWS_SEND_BUFFER_PRE_PADDING], "%s", data1.buf + LWS_SEND_BUFFER_PRE_PADDING);
n = libwebsocket_write(wsi, &pss->buf[LWS_SEND_BUFFER_PRE_PADDING], pss->len, LWS_WRITE_TEXT);
printf("Data: %s\n\n\n\n\n\n\n", &pss->buf[LWS_SEND_BUFFER_PRE_PADDING]);
//error handling for write fail and partial writes
if (n < 0) {
printf("ERROR %d writing to socket, hanging up\n", n);
return -1;
}
if (n < (int)pss->len) {
printf("Partial write\n");
return -1;
}
break;
default:
printf("default...\n");
break;
}
return 0;
}
/* List of available protocols */
static struct libwebsocket_protocols protocols[] = {
{
"default", /* name */
callback_echo, /* callback */
sizeof(struct per_session_data) /* per_session_data_size */
},
{
NULL, NULL, 0 /* End of list */
}
};
void sighandler(int sig)
{
force_exit = 1;
}
int main(int argc, char **argv)
{
//pipe stuff
int pipe_fd[2];
if (pipe(pipe_fd) < 0)
{
perror("PIPE:");
exit(-1);
}
//for libwebsocket_service
int n = 0;
//test port can be overridden
int port = 9000;
struct libwebsocket_context *context;
int opts = 0;
char interface_name[128] = "";
const char *interface = NULL;
int use_ssl = 0;
char ssl_cert[256] = LOCAL_RESOURCE_PATH"/libwebsockets-test-server.pem";
char ssl_key[256] = LOCAL_RESOURCE_PATH"/libwebsockets-test-server.key.pem";
int listen_port = 80;
struct lws_context_creation_info info;
char passphrase[256];
char uri[256] = "/";
char address[256], ads_port[256 + 30];
//lws servicing time intervals
int rate_us = 250000;
unsigned int oldus = 0;
struct libwebsocket *wsi;
//should check this
int debug_level = 2;
memset(&info, 0, sizeof info);
lwsl_notice("Built to support client operations\n");
//re-configuring server ip and port here
if (argc == 3)
{
strncpy(address, argv[1], sizeof(address) - 1);
address[sizeof(address) - 1] = '\0';
port = atoi(argv[2]);
}
else if (argc == 1)
{
strncpy(address, "localhost", sizeof(address) - 1);
address[sizeof(address) - 1] = '\0';
port = 9000;
}
else
{
printf("Try: ./client.exec <ip> <port>\n");
exit(-1);
}
/* we will only try to log things according to our debug_level */
setlogmask(LOG_UPTO (LOG_DEBUG));
openlog("lwsts", 0, LOG_DAEMON);
/* tell the library what debug level to emit and to send it to syslog */
lws_set_log_level(debug_level, lwsl_emit_syslog);
lwsl_notice("libwebsockets echo test - "
"(C) Copyright 2010-2015 Andy Green <andy#warmcat.com> - "
"licensed under LGPL2.1\n");
lwsl_notice("Running in client mode\n");
listen_port = CONTEXT_PORT_NO_LISTEN;
lwsl_info("requiring server cert validation againts %s\n", ssl_cert);
info.ssl_ca_filepath = ssl_cert;
info.port = listen_port;
info.iface = interface;
info.protocols = protocols;
#ifndef LWS_NO_EXTENSIONS
info.extensions = libwebsocket_get_internal_extensions();
#endif
info.gid = -1;
info.uid = -1;
info.options = opts;
context = libwebsocket_create_context(&info);
if (context == NULL) {
lwsl_err("libwebsocket init failed\n");
return -1;
}
signal(SIGINT, sighandler);
n = 0;
while (n >= 0 && !force_exit)
{
//do connect only once
if (!state) {
state = 1;
printf("Client connecting to %s:%u....\n", address, port);
address[sizeof(address) - 1] = '\0';
sprintf(ads_port, "%s:%u", address, port & 65535);
wsi = libwebsocket_client_connect(context, address, port, use_ssl, uri, ads_port, ads_port, NULL, -1);
if (!wsi) {
printf("Client failed to connect to %s:%u\n", address, port);
goto bail;
}
}
if (command_received == 1 && !forked)
{
printf("Going to fork\n");
forked = 1;
pid_t child_pid = fork();
if (child_pid == -1)
{
perror("FORK:");
exit(-1);
}
else if (child_pid == 0)
{
close(pipe_fd[0]);
printf("Starting tcpdump\n");
if (dup2(pipe_fd[1], 1) < 0)
{
perror("DUP2:");
exit(-1);
}
//closing the connection to server for child
libwebsocket_context_destroy(context);
closelog();
char *cmd[] = {"tcpdump", "-i", "any", NULL};
if (execv("/usr/sbin/tcpdump", cmd) < 0)
{
perror("EXECV:");
exit(-1);
}
}
}
/* if (forked == 1)
{
close(pipe_fd[1]);
}
*/
if (command_received == 1)
{
//stay here if the pipe is empty else try to read max 1400 bytes of data
while ((data1.len = read(pipe_fd[0], data1.buf + LWS_SEND_BUFFER_PRE_PADDING, 1400)) <= 0);
//check if server wants any service
//printf("%s\n\n\n\n\n\n\n", data1.buf + LWS_SEND_BUFFER_PRE_PADDING);
libwebsocket_callback_on_writable(context, wsi);
}
//This fn times out every 10usec
n = libwebsocket_service(context, 10);
}
//bail: jump from while loop also if connect fails
bail:
libwebsocket_context_destroy(context);
printf("libwebsockets-test-echo exited cleanly\n");
closelog();
return 0;
}
This is my server code, using socket.io:
var io = require('socket.io')();
var middleware = require('socketio-wildcard')();
io.use(middleware);
io.on('connection', function(socket) {
console.log('On socket connection')
socket.on('*', function(event, data){
console.log("---- Event ----- : " + JSON.stringify(event));
console.log("---- Data ----- : " + JSON.stringify(data))
});
});
io.listen(9000, 'localhost');
The client is unable to connect to the server. When I traced the client with strace, it loops on recv() forever, as shown below:
recv(8, "", 1, 0) = 0
recv(8, "", 1, 0) = 0
recv(8, "", 1, 0) = 0
...
Please point out my mistake. Any help is appreciated.
Thanks
I am trying to build a client that gets data from a server via a specific protocol.
I know that my code is not the best, but at the moment I am still experimenting with the basic functions of Boost.Asio.
I want to implement a read-from-TCP function that blocks until a specific number of bytes has been received.
My problem:
When I call boost::asio::read or boost::asio::write I get the following error:
error C2039: 'read_some' : is not a member of 'boost::shared_ptr'
I am working with VS2013 Professional and Boost 1.55.0 (precompiled).
Here is my code (you can find the relevant line by the comment "//HEEERE"):
boost::mutex cout_lock;
int main()
{
// creating io_service
boost::shared_ptr<boost::asio::io_service> io_service(new boost::asio::io_service);
// creating work and assigning it to io_service
boost::shared_ptr<boost::asio::io_service::work> work(new boost::asio::io_service::work(*io_service));
// creating strand and assigning it to io_service
boost::shared_ptr<boost::asio::io_service::strand> strand(new boost::asio::io_service::strand(*io_service));
// creating socket
boost::shared_ptr<boost::asio::ip::tcp::socket> socket(new boost::asio::ip::tcp::socket(*io_service));
try {
// creating resolver
boost::asio::ip::tcp::resolver resolver(*io_service);
// creating query
boost::asio::ip::tcp::resolver::query query(IPConfig_str, boost::lexical_cast<std::string>(IPConfig_PortNr));
// creating iterator
boost::asio::ip::tcp::resolver::iterator iterator = resolver.resolve(query);
// creating endpoint
boost::asio::ip::tcp::endpoint endpoint = *iterator;
// connecting synchronously
socket->connect(endpoint);
}
catch(std::exception &ex) {
cout_lock.lock();
std::cout << "[main]:\t" << "Exception:" << ex.what() << std::endl;
cout_lock.unlock();
}
// Create Query
CommandCreator CMDCreator;
Command sendCommand;
CMDCreator.Create_fpga_GetSwVers(&sendCommand);
std::cout << std::endl;
std::cout << "SENT:" << std::endl;
for (int i = 0; i < sendCommand.length; i++)
{
std::cout << std::hex << std::setw(2) << std::setfill('0') << int(sendCommand.buffer[i]) << ", ";
}
std::cout << std::endl;
// Send Query
boost::system::error_code ec;
socket->async_send(boost::asio::buffer(sendCommand.buffer, sendCommand.length), boost::asio::transfer_all());
Sleep(300); // sleep 100 ms (at least 85 <- not stable!)
// Receive Answer - Header
Command receiveCommandHeader;
receiveCommandHeader.InitBuffer(4);
// Async
// socket->async_receive(boost::asio::buffer(receiveCommandHeader.buffer, receiveCommandHeader.length), 0, boost::bind(HandleRead, ec));
//HEEERE
boost::asio::read(socket, boost::asio::buffer(receiveCommandHeader.buffer, receiveCommandHeader.length), boost::asio::transfer_all(), ec);
//shutting down
socket->shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
socket->close(ec);
io_service->stop();
return 0;
}
class Command
{
friend class CommandCreator; // TODO: is there a better and as simple method as a friend class?
public:
Command() : buffer(0)
{}
virtual ~Command()
{
delete[] buffer;
buffer = 0;
}
void InitBuffer(int const len)
{
this->length = len;
this->buffer = new uint8_t[len];
}
uint8_t* buffer;
int length;
};
Actually, the problem is located in this part of Boost, in the file read.hpp, where async_read_some is called on stream_:
void operator()(const boost::system::error_code& ec,
std::size_t bytes_transferred, int start = 0)
{
std::size_t n = 0;
switch (start_ = start)
{
case 1:
n = this->check_for_completion(ec, total_transferred_);
for (;;)
{
stream_.async_read_some(
boost::asio::buffer(buffer_ + total_transferred_, n),
BOOST_ASIO_MOVE_CAST(read_op)(*this));
return; default:
total_transferred_ += bytes_transferred;
if ((!ec && bytes_transferred == 0)
|| (n = this->check_for_completion(ec, total_transferred_)) == 0
|| total_transferred_ == boost::asio::buffer_size(buffer_))
break;
}
handler_(ec, static_cast<const std::size_t&>(total_transferred_));
}
}
Okay, I've just found the problem.
// creating socket
boost::shared_ptr<boost::asio::ip::tcp::socket> socket(new boost::asio::ip::tcp::socket(*io_service));
I created the socket as a shared_ptr, but boost::asio::read, read_some, and the other stream functions expect the stream object itself, not a smart pointer. Dereferencing the pointer fixed it:
boost::asio::async_read(*socket, boost::asio::buffer(receiveCommandHeader.buffer, receiveCommandHeader.length),
boost::asio::transfer_all(), boost::bind(HandleRead, ec));
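For the original goal (block until an exact number of bytes has arrived), the synchronous free function works the same way once the shared_ptr is dereferenced. A short sketch, reusing receiveCommandHeader from the code above:
// Blocks until exactly receiveCommandHeader.length bytes have been read
// (or an error occurs); ec reports the outcome instead of throwing.
boost::system::error_code ec;
std::size_t n = boost::asio::read(*socket,
    boost::asio::buffer(receiveCommandHeader.buffer, receiveCommandHeader.length),
    boost::asio::transfer_exactly(receiveCommandHeader.length),
    ec);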
While working on client-server programming, I send 3 strings from the client, which the server should receive and print, 3 times in total (i.e. I use a 'for' loop that does the write and read operations on the client and server side respectively), but the server only prints the 1st string.
Please explain.
Here is my code
server.c
#include "head.h"
void readstr(int connfd ,char [][20]);
//void writestr(char * ,int);
int main(int c ,char *v[])
{
// socket declarations,etc
sd =socket( AF_INET ,SOCK_STREAM ,0);
// Binding socket
retbind =bind(sd ,(struct sockaddr*)&serveraddress ,sizeof(serveraddress
));
listen(sd ,4);
for(;;)
{
printf("i am waiting for client\n");
len =sizeof(cliaddr);
connfd = accept(sd ,(struct sockaddr*)&cliaddr ,&len);
readstr(connfd ,databuf);
close(connfd);
}
return 0;
}
void readstr(int connfd ,char str[3] [20])
{
int pointer=0 ,i=0, n,pos=0;
memset(str ,'\0',sizeof(str));
for(i=0;i<3;i++)
{
while((n=read(connfd ,str[i] ,20)) >>0)
{
printf("Looping while\n");
pos =pos +n;
}
str[i][pos] ='\0';
}
for(i=0;i<3;i++)
{
printf("\n%s",str[i]);
}
}
client.c
#include "head.h"
void send1(int ,char*);
int main(int c,char*v[])
{
//Socket declarations, etc..
sd = socket(AF_INET ,SOCK_STREAM ,0);
//Connect
if(connect(sd,(struct sockaddr*)&serveraddress ,sizeof(serveraddress)) <
0)
{
printf("cannot connect to server");
exit(1);
}
for(i=0;i<3;i++)
{
memset(buf ,'\0',sizeof(buf));
printf("\n Enter the string : ");
fgets(buf[i],20,stdin);
len =strlen(buf[i]);
if(buf[i][len] =='\n')
buf[i][len]='\0';
send1(sd ,(char *)buf);
}
shutdown(sd ,SHUT_WR);
}
void send1(int sd ,char *str)
{
int n ,byteswritten =0, wr;
char buf[1024];
strcpy(buf ,str);
n =strlen(buf);
while(byteswritten < n)
{
printf("\nStarting to write in client side\n");
wr = write(sd , buf+byteswritten ,(n-byteswritten));
byteswritten+=wr;
}
printf("\n string sent %s" ,buf);
}
In server.c, in readstr(), you are not resetting pos to zero before the next iteration of the for loop.
Also, there is a strange line:
while((n=read(connfd ,str[i] ,20)) >>0)
Note the ">>": that is a right shift by zero, not the "> 0" comparison you presumably meant, so the condition is just the raw return value of read() (and even -1 on error counts as true).
I'm having a problem with a UDPSocket wrapper I've written. I have a high-bandwidth, low-latency local network over which I'm sending UDP packets back and forth. I'm not too concerned about packets occasionally being lost, but it is incredibly important that the packets that do arrive do so quickly. Here's the relevant code for setting up a socket:
bool UDPSocket::create() {
int on = 1;
#ifdef WIN32
if(WSAStartup(MAKEWORD(1,1), &SocketInfo) != 0) {
MessageBox(NULL, "Cannot initialize WinSock", "WSAStartup", MB_OK);
}
#endif
m_sock = socket(PF_INET, SOCK_DGRAM, 0);
#ifdef WIN32
if(setsockopt(m_sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) == SOCKET_ERROR)
return false;
#else
if(setsockopt(m_sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&on, sizeof(on)) == -1)
return false;
#endif
addrLen = sizeof(struct sockaddr);
return true;
}
bool UDPSocket::bind(const int port) {
if(!is_valid())
return false;
m_addr.sin_family = AF_INET;
m_addr.sin_addr.s_addr = htonl(INADDR_ANY);
m_addr.sin_port = htons(port);
if(::bind(m_sock, (struct sockaddr*)&m_addr, sizeof(m_addr))<0) {
std::cout << "UDPSocket: error on bind" << std::endl;
return false;
}
return true;
}
bool UDPSocket::send(const std::string s) const {
const char* buf = s.c_str();
::sendto(m_sock, buf, strlen(buf), 0, (const sockaddr*)&clientAddr, addrLen);
return true;
}
bool UDPSocket::setDestination(const std::string ip, const int port) {
memset(&clientAddr, 0, sizeof(clientAddr));
clientAddr.sin_family = AF_INET;
clientAddr.sin_addr.s_addr = inet_addr(ip.c_str());
clientAddr.sin_port = htons(port);
return true;
}
int UDPSocket::recv(std::string& s) const {
char buffer[MAXRECV + 1];
struct timeval tv;
fd_set fdset;
int rc, nread;
memset(&buffer, 0, sizeof(buffer));
FD_ZERO(&fdset);
FD_SET(m_sock, &fdset);
tv.tv_sec = 0;
tv.tv_usec = m_timeout;
rc = select(m_sock + 1, &fdset, (fd_set *) 0, (fd_set *) 0, &tv);
if(FD_ISSET(m_sock, &fdset)) {
#ifdef WIN32
nread = ::recvfrom(m_sock, buffer, MAXRECV, 0, (sockaddr*)&clientAddr, const_cast< int * __w64 >(&addrLen));
#else
nread = ::recvfrom(m_sock, buffer, MAXRECV, 0, (sockaddr*)&clientAddr, (socklen_t*)&addrLen);
#endif
if(nread < 0) {
return -1;
} else if(nread == 0) {
return 0;
}
s = std::string(buffer);
return nread;
} else {
return 0;
}
}
void UDPSocket::set_non_blocking(const bool b) {
mNonBlocking = b;
#ifdef WIN32
u_long argp = b ? 1 : 0;
ioctlsocket(m_sock, FIONBIO, &argp);
#else
int opts = fcntl(m_sock, F_GETFL);
if(opts < 0) return;
if(b)
opts |= O_NONBLOCK;
else
opts &= ~O_NONBLOCK;
fcntl(m_sock, F_SETFL, opts);
#endif
}
My user code, on both ends, creates a "sending" and a "receiving" UDPSocket and binds them to their respective ports, then uses send() to send data and recv() to receive. On one hand, the Linux side seems to receive practically immediately, but the Windows side has a delay of up to 1 second before receiving data. However, ::recv() never returns 0 during this time. Am I missing something obvious?
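For reference, this is roughly how that usage looks with the wrapper above; the ports, address, and single send/receive here are mine, purely to illustrate the pairing described:
// Hypothetical usage of the UDPSocket wrapper on one host; the peer
// mirrors it with the ports swapped (example addresses/ports only).
UDPSocket rx, tx;

rx.create();
rx.bind(5000);                               // local receive port
rx.set_non_blocking(true);

tx.create();
tx.setDestination("192.168.1.20", 5001);     // peer address and port

tx.send("hello");

std::string msg;
if (rx.recv(msg) > 0)
    std::cout << "got: " << msg << std::endl;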
Have you tried all four combinations (Linux->Linux, Win->Linux, Linux->Win, Win->Win)? Which have delays and which do not?
Also, use a packet sniffer (like tcpdump or Wireshark) to see whether the delay happens before or after the packet hits the wire.
It's not a direct answer, but maybe you can try a tool like ttcp (http://www.pcausa.com/Utilities/pcattcp.htm), which can test both TCP and UDP performance. You may also check its source code, which is in the public domain.
If you've got the time to experiment, try an IOCP version of the code.
I never trusted the Windows select implementation, with its 64-socket limit.