I'm working on a simple service using Node.js. It receives uploaded files, stores them on disk, and records some metadata in an Oracle table. I'm using the db-oracle package together with connection pooling, following this article: http://nodejsdb.org/2011/05/connection-pooling-node-db-with-generic-pool/
However, I've noticed that the data I insert is only sent to the Oracle database when the connection pool closes the idle connection by calling its disconnect() method.
Is there a way to flush the data before sending the 'OK' signal to my client? The way it works now, a crash in my web service or in Oracle itself can cause data loss, and the client of my service would not know about it. I actually tested this by killing my app process after some uploads, and the data was indeed lost.
Here's a simplified version of the code:
var express = require('express');

var app = module.exports = express.createServer();

app.post('/upload', handleUpload);

app.listen(4001, function() {
  console.log("Express server listening on port %d in %s mode", app.address().port, app.settings.env);
});

function handleUpload(req, res) {
  res.contentType('application/xml');
  var buf = '';
  req.on('data', function(chunk) { buf += chunk; });
  req.on('end', function() {
    saveUpload(req, res, buf);
  });
}
function saveUpload(req, res, buf) {
  if (buf.length == 0)
    return sendError(res, 'No data supplied', 422);

  var payload = new Buffer(buf, 'base64');
  files.save(payload, function(err, savedFile) {
    if (err)
      return sendError(res, 'Error while saving', 500);

    var obj = { ip: req.connection.remoteAddress, location: savedFile.path,
                created_at: new Date(), updated_at: new Date() };
    var fields = ['IP', 'LOCATION', 'CREATED_AT', 'UPDATED_AT'];
    var values = fields.map(function(v) { return obj[v.toLowerCase()]; });

    pool.acquire(function(err, conn) {
      if (err)
        return sendError(res, err, 500);

      var q = conn.query().insert('FILES', fields, values);
      q.execute(function(err, result) {
        pool.release(conn);
        if (err)
          return sendError(res, err, 500);
        if (result.affected < 1)
          return sendError(res, 'Error saving the record', 500);

        // The next statement sends the final result to the client.
        // However, the new record was not yet flushed to the database.
        res.end('<ok />');
      });
    });
  });
}
function sendError(res, err, code) {
  console.log(err);
  res.send('<error>' + err + '</error>', code || 500);
}
As a workaround, I've tried to implement a fake connection pool and release all acquired connections, but now my app is dying with the message: pure virtual method called / Abort trap: 6
Here's the fake connection pooling:
var fakePool = {
  acquire: function(callback) {
    new oracle.Database(config.database).connect(function(err, server) {
      callback(err, this);
    });
  },
  release: function(conn) {
    conn.disconnect();
  }
};
Just to be clear, I don't care about the fake connection pooler; it was just a dirty workaround. I want to be able to flush the data to Oracle before sending the 'OK' to my client.
By the way, I also opened a ticket on their GitHub: https://github.com/mariano/node-db-oracle/issues/38
You are obviously missing a transaction commit.
node-db does not need to expose a commit API because in most RDBMSs (including Oracle), COMMIT is a valid query. Since the package allows the execution of arbitrary queries, commit/rollback are supposed to be done with a simple execute().
The code should be changed as follows:
pool.acquire(function(err, conn) {
  if (err)
    return sendError(res, err, 500);

  var q = conn.query().insert('FILES', fields, values);
  q.execute(function(err, result) {
    if (err || result.affected < 1) {
      pool.release(conn);
      return sendError(res, err, 500);
    }
    conn.query().execute("commit", function(err, result) {
      if (err) {
        pool.release(conn);
        return sendError(res, err, 500);
      }
      res.end('<ok />');
      pool.release(conn);
    });
  });
});
This is not an exact answer to your question, but have a look at the node-oracle package.
It lacks connection pooling, but its commit/rollback functionality can at least be controlled from code. And you can always mix it with a generic pooling solution such as node-pool.
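For example, here is a rough sketch of wiring node-oracle into node-pool's factory API; connectData and the pool sizes are placeholders, not values taken from the question:

var oracle = require('oracle');           // node-oracle driver
var poolModule = require('generic-pool'); // node-pool

// connectData stands in for your hostname/user/password/database settings
var pool = poolModule.Pool({
  name: 'oracle',
  create: function(callback) {
    oracle.connect(connectData, callback); // hands (err, connection) to the pool
  },
  destroy: function(connection) {
    connection.close();
  },
  max: 10,
  idleTimeoutMillis: 30000
});

// Usage: acquire a connection, disable auto-commit, and commit explicitly
// before releasing, so nothing is lost when the connection is recycled.
pool.acquire(function(err, conn) {
  if (err) return console.log(err);
  conn.setAutoCommit(false);
  // ... run the INSERT here, then:
  conn.commit(function(err) {
    pool.release(conn);
  });
});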
My amqp system seems to be losing messages, so I'd like a way to see whether messages are actually queued before being consumed.
I have several microservices communicating via amqp messages on Node.js, using CloudAMQP. One of these microservices, MS[B], generates .pdfs; the process is pretty heavy and takes about a minute per request. So I deliver the .pdf asynchronously, triggering a webhook once it is finished, and generate one at a time using prefetch = 1.
MS[A] collects all the requests from the users, answers back saying "ok, request received, listen on the webhook", and in parallel asks MS[B] to generate the pdfs. MS[B] has prefetch = 1, so it consumes just one request at a time. Once finished, it sends the response to the callback queue of MS[A], which triggers the user's webhook saying "the pdf is ready".
The problem is that MS[B] misses all the messages sent while it is busy:
it consumes one request from MS[A]
it starts generating the .pdf
while generating, it discards all the other messages that MS[A] sends, as if there were no queue at all
it finishes the .pdf, sending the ACK to MS[A]
then it starts accepting messages again, taking the last one received after being idle and losing all the previous ones.
Why? How can I find the problem, and what should I monitor?
Communication between the other microservices works well, with messages correctly ordered in queues. Just this one, with prefetch = 1, loses messages.
I am NOT using the no-ack option. I don't know what to try, what to test, or what to monitor to find the problem.
How can I see whether messages are correctly queued before being consumed, or just lost?
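One way to see the backlog from code (besides the RabbitMQ management UI that CloudAMQP exposes, which shows the same counters) is amqplib's checkQueue, whose reply carries a messageCount; a minimal sketch, where 'MSB_worker_queue' is a placeholder for the real queue name and channel is an already-open channel:

// Passively inspect the queue: messageCount = messages ready for delivery,
// consumerCount = consumers currently attached.
channel.checkQueue('MSB_worker_queue')
  .then(function (ok) {
    console.log('ready:', ok.messageCount, 'consumers:', ok.consumerCount);
  });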
Below is the implementation of the messaging system.
Channel Creation
/*
  Starting point of a connection to the CLOUDAMQP_URL server, then exec the callback
*/
start(callback) {
  var self = this;

  // if the connection is closed or fails to be established at all, we will reconnect
  amqp.connect(process.env.CLOUDAMQP_URL + "?heartbeat=60")
    .then(function (conn) {
      // create queues and consume mechanism
      self.amqpConn = conn;
      setTimeout(() => {
        startPublisher();
      }, 200);
      setTimeout(() => {
        createCallbackQueue();
      }, 1000);
      setTimeout(() => {
        callback();
      }, 2000);
    });

  // create publisher channel
  function startPublisher() {
    self.amqpConn.createConfirmChannel()
      .then(function (ch) {
        self.pubChannel = ch;
        logger.debug("🗣️ pubChannel ready");
        // drain messages that were queued while the publisher was offline
        while (true) {
          var m = self.offlinePubQueue.shift();
          if (!m) break;
          self.publish(m[0], // exchange
                       m[1], // routingKey
                       m[2], // content
                       undefined); // correlationId
        }
      });
  }

  // create callback channel
  function createCallbackQueue() {
    self.amqpConn.createChannel()
      .then(function (channel) {
        channel.assertQueue(self.CALLBACK_QUEUE_NAME, {
          durable: true,
          exclusive: true // callback queues are exclusive
        })
        .then(function (q) {
          logger.debug(" 👂 Waiting for RPC RESP in " + self.CALLBACK_QUEUE_NAME);
          channel.consume(q.queue, processCallback, {
            noAck: false
          });
        });

        // process messages of the callback
        function processCallback(msg) {
          var correlationId = msg.properties.correlationId;
          // callback received
          if (self.correlationIds_map[correlationId]) {
            delete self.correlationIds_map[correlationId];
            var content = JSON.parse(msg.content.toString());
            self.eventEmitter.emit(correlationId, content);
          }
        }
      });
  }
}
Consuming Messages
/*
  #worker_queue - the name of the queue
*/
// Consume messages from 'worker_queue'. A worker that acks messages only if they were processed successfully
startWorker(worker_queue, routes) {
  var self = this;
  logger.debug("startWorker " + self.CALLBACK_QUEUE_NAME);
  var channel;
  worker_queue = self.MICROSERVICE_NAME + worker_queue;

  self.amqpConn.createChannel()
    .then(function (ch) {
      channel = ch;
      ch.prefetch(self.opt.prefetch); // = 1 for MS[B] generating pdfs
      channel.assertQueue(worker_queue, {
        durable: true,
        exclusive: true
      })
      .then(function (q) {
        channel.consume(worker_queue, processMsg, {
          noAck: false
        });
      });
    });

  // call the 'function from interface' passing params, and send the ACK
  function processMsg(msg) {
    work(msg)
      .then(function (data) {
        channel.ack(msg, false); // allUpTo = false
      })
      .catch(function (err) {
        channel.ack(msg, false);
        // channel.reject(msg, false); // requeue = false
        // this.closeOnErr(e);
      });
  }

  // execute the command, and queue back a response, checking whether it's an error or not
  function work(msg) {
    var deferred = Q.defer();
    var correlationId;
    try {
      correlationId = msg.properties.correlationId;
    } catch (err) {}

    work_function(msg.content, correlationId)
      .then(function (resp) {
        var content = {
          data: resp
        };
        content = Buffer.from(JSON.stringify(content));
        channel.sendToQueue(msg.properties.replyTo, content, {
          correlationId: correlationId,
          content_type: 'application/json'
        });
        deferred.resolve(resp);
      });

    return deferred.promise;
  }
}
Publish Messages
publish(exchange, routingKey, content, correlationId) {
  var self = this;
  var deferred = Q.defer();

  self.correlationIds_map[correlationId] = true;

  self.pubChannel.publish(exchange, routingKey, content,
    {
      replyTo: self.CALLBACK_QUEUE_NAME,
      content_type: 'application/json',
      correlationId: correlationId,
      persistent: true
    },
    function (err, ok) {
      if (err) {
        self.offlinePubQueue.push([exchange, routingKey, content]); // try again
        self.pubChannel.connection.close();
        deferred.resolve('requeued');
      } else {
        deferred.resolve(ok);
      }
    });

  return deferred.promise;
}
I am trying to develop an AWS WebSocket function using Lambda, but it seems that whenever I call "postToConnection" it just gives me a 500 Internal Server Error.
CloudWatch also doesn't log the error that I am receiving.
What I receive on the terminal once I send the message is this:
"{"message": "Internal server error", "connectionId":"xxx", "requestId":"xxx"}"
(which doesn't give me any information at all)
This is the whole code of my Lambda function.
var AWS = require('aws-sdk');
AWS.config.update({ region: "us-west-2" });

var DDB = new AWS.DynamoDB({ apiVersion: "2012-10-08" });
require('aws-sdk/clients/apigatewaymanagementapi');

exports.handler = function (event, context, callback) {
  var url_handler = event.requestContext.domainName + "/" + event.requestContext.stage;
  // var params = event.requestContext;
  // console.log(params);

  var scanParams = {
    TableName: "tbl-web-socket-connection",
    ProjectionExpression: "id"
  };

  DDB.scan(scanParams, function (err, data) {
    // callback(null, {
    //   statusCode: 200,
    //   body: "Data send to"
    // });
    if (err) {
      callback(null, {
        statusCode: 500,
        body: JSON.stringify(err)
      });
    } else {
      var apigwManagementApi = new AWS.ApiGatewayManagementApi({
        apiVersion: "2018-11-29",
        endpoint: event.requestContext.domainName + "/" + event.requestContext.stage
      });

      var postParams = {
        Data: JSON.parse(event.body).data
      };

      var count = 0;
      data.Items.forEach(function (element) {
        postParams.ConnectionId = element.id.S;
        console.log(postParams);
        apigwManagementApi.postToConnection(postParams, function (err, data) {
          if (err) {
            // API Gateway returns a status of 410 GONE when the connection is no
            // longer available. If this happens, we simply delete the identifier
            // from our DynamoDB table.
            if (err.statusCode === 410) {
              console.log("Found stale connection, deleting " + postParams.connectionId);
              DDB.deleteItem({
                TableName: process.env.TABLE_NAME,
                Key: { connectionId: { S: postParams.connectionId } }
              });
            } else {
              console.log("Failed to post. Error: " + JSON.stringify(err));
            }
          } else {
            count++;
          }
        });
      });

      callback(null, {
        statusCode: 200,
        body: "Data send to " + count + " connection" + (count === 1 ? "" : "s")
      });
    }
  });
};
The aws-sdk is also up to date; I declared it in a Lambda layer and that's what I'm using.
Any idea what's causing this?
This is due to a timeout: the DynamoDB loop goes through all of the records, which causes the Lambda to time out.
It turns out CloudWatch really was logging the error; I was just too focused on the terminal output giving me the 500 Internal Server Error.
To fix this, just go to the Lambda function's configuration and increase the time limit.
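Separately, since the handler's final callback fires before the postToConnection calls have completed, one option is to wait for them all before responding; a rough sketch, assuming the aws-sdk v2 .promise() request style and the same scan results as in the question:

// Inside the else branch: fire all posts, then respond once they settle.
var count = 0;
var posts = data.Items.map(function (element) {
  return apigwManagementApi.postToConnection({
    ConnectionId: element.id.S,
    Data: JSON.parse(event.body).data
  }).promise().then(function () {
    count++;
  }).catch(function (err) {
    console.log("Failed to post. Error: " + JSON.stringify(err));
  });
});

Promise.all(posts).then(function () {
  callback(null, {
    statusCode: 200,
    body: "Data sent to " + count + " connection" + (count === 1 ? "" : "s")
  });
});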
I have a problem with the changefeed feature of RethinkDB. There is no console output when adding a document to the table, BUT "changes" is printed once after starting my application via node app.js. Why is that? Any help is appreciated.
// rethinkdb connection
var connection = null;
r.connect({ db: 'bc', host: 'localhost', port: 1337 }, function(err, conn) {
  if (err) throw err;
  connection = conn;
  observeBets();
});

function observeBets() {
  r.table('bets').changes().run(connection, function(err, betsCursor) {
    if (err) throw err;
    console.log("changes");
  });
}
Insert via
var bet = {
  bet: "test",
  userID: 213
};
r.table('bets').insert(bet);
I think you need to be iterating over betsCursor.
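A minimal sketch (untested) of that iteration, using the driver's cursor.each to pull each change off the feed; everything else is the setup from the question:

function observeBets() {
  r.table('bets').changes().run(connection, function(err, betsCursor) {
    if (err) throw err;
    // The run callback fires only once, with a cursor: that is why "changes"
    // prints a single time. Each subsequent change must be read off the cursor.
    betsCursor.each(function(err, change) {
      if (err) throw err;
      console.log('change:', change.new_val);
    });
  });
}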
In the socket.io acknowledgement example we see a client's send/emit being called back with the server's response. Is the same functionality available in the reverse direction, i.e. how does the server confirm client reception for a send/emit from the server? It would be nice to have a send/emit callback, even just to indicate reception success. I didn't see this functionality documented anywhere...
Thanks!
Looking in the socket.io source, I found that ACKs are indeed supported for server-sent messages (but not in broadcasts!); see lines 115-123 of socket.io/lib/socket.js:
if ('function' == typeof args[args.length - 1]) {
  if (this._rooms || (this.flags && this.flags.broadcast)) {
    throw new Error('Callbacks are not supported when broadcasting');
  }
  debug('emitting packet with ack id %d', this.nsp.ids);
  this.acks[this.nsp.ids] = args.pop();
  packet.id = this.nsp.ids++;
}
An example of how the ack should work (not tested):
// server-side: emit with a callback, which the client's ack will invoke
io.on('connection', (socket) => {
  socket.emit('msg', someData, (answer) => {
    console.log("client's acknowledgement:", answer);
  });
});

// client-side: call the supplied ack callback to confirm reception
socket.on('msg', (data, ackCallback) => {
  console.log('data from server', data);
  ackCallback('roger roger');
});
If we want to be 100% sure of reception success, just adding the ack call is not enough, because we also need to know whether the ack call actually ran. The socket.io 3.0 documentation adds this timeout example to show how to do that, but the timeout value is the tricky part.
const withTimeout = (onSuccess, onTimeout, timeout) => {
  let called = false;
  const timer = setTimeout(() => {
    if (called) return;
    called = true;
    onTimeout();
  }, timeout);
  return (...args) => {
    if (called) return;
    called = true;
    clearTimeout(timer);
    onSuccess.apply(this, args);
  };
};

socket.emit("hello", 1, 2, withTimeout(() => {
  console.log("success!");
}, () => {
  console.log("timeout!");
}, 1000));
Yes, we can send a response back to the server from the client (as an acknowledgment).
According to the new documentation of Socket.IO (4.x):
Server-side
let dataToSend = {
  test: "fromServer"
};

socket.timeout(5000).emit("my-event", dataToSend, (err, response) => {
  if (err) {
    // the other side did not acknowledge the event in the given delay
  } else {
    console.log(response);
  }
});
Client-side
socket.on("my-event", (data, callback) => {
// any logic for data(that data come from server i.e { test:"fromServer" }
callback({ test: "test" });
});
I am currently using this stack: Express, socket.io, sessionstore. I followed the article here: http://www.danielbaulig.de/socket-ioexpress/.
The problem is that I cannot modify the session values in the socket.io callback.
Access from the Express side works well; the item gets incremented after each refresh.
app.get('/mysession', function(req, res) {
  req.session.item++;
  console.log(req.session);
  res.render('session.jade', {
    title: 'Sample title'
  });
});
Using it on the socket.io side does not work, and here is the problem; maybe I am setting the wrong object.
var io = io.listen(app);

io.sockets.on('connection', function(socket) {
  var handshake = socket.handshake;
  onlineCount++;
  console.log('Well done id %s', handshake.sessionID);
  handshake.session.item++;
  console.log(handshake.session);
});
Here is the bridge code.
io.set('authorization', function(data, accept) {
  if (data.headers.cookie) {
    data.cookie = parseCookie(data.headers.cookie);
    data.sessionID = data.cookie['express.sid'];
    sessionStore.get(data.sessionID, function(err, session) {
      if (err || !session) {
        accept('Error', false);
      } else {
        data.session = session;
        accept(null, true);
      }
    });
  } else {
    return accept('No cookie transmitted', false);
  }
});
The only way I found to make this work is to grab the cookie from the request object on the connect event, parse it with your favourite cookie parser (I use connect.utils.parseCookie), and set it on that socket so that I can access it in future events:
socket.on('connection', function(client) {
  var cookie = client.request.headers.cookie;
  var pcookie = connect.utils.parseCookie(cookie);
  var session_id = pcookie["connect.sid"];
  if (session_id) {
    sessionStore.get(session_id, function(err, sess) {
      // do whatever you want with sess here
      // ...
      // if you want to "save" the session for future events
      client.set('session_id', session_id);
    });
  }
});
The sessionStore API changed a little bit; it is now sessionStore.load(sessionId, cb) instead of .get.
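A minimal sketch of the same lookup with the newer call, assuming the store otherwise behaves like .get did above:

sessionStore.load(session_id, function(err, sess) {
  // sess is the session object, just as with .get
  // ...
});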