Flux and AutobahnJS - reactjs-flux

I am using Flux with Autobahn and I have concerns about my architecture.
I have some components that get their state by subscribing to a pub/sub topic.
Currently I am using Flux for my data fetching, and my actions look like this:
// I have a global session
window.AutobahnSession = ..;
// RoomsActions
var RoomsActions = {
  subscribeToRoom: function(roomName) {
    var onData = function(data) {
      Dispatcher.dispatch({
        type: "ROOM_MESSAGE_ARRIVED",
        message: data,
        roomName: roomName
      });
    };
    var subscription = window.AutobahnSession.subscribe('rooms/' + roomName, onData);
    // Send the subscription to be persisted in the store,
    // so I can close & remove it later.
    Dispatcher.dispatch({
      type: "ROOM_SUBSCRIPTION_CREATED",
      subscription: subscription,
      roomName: roomName
    });
  },
  // Called on componentWillUnmount
  unsubscribeToRoom: function(roomName) {
    Dispatcher.dispatch({
      type: "UNSUBSCRIBE_TO_ROOM",
      roomName: roomName
    });
  }
};

// RoomsStore
RoomsStore.dispatchToken = ChatAppDispatcher.register(function(action) {
  switch(action.type) {
    case "ROOM_MESSAGE_ARRIVED":
      // I add the message to a messages array and emitChange
      break;
    case "ROOM_SUBSCRIPTION_CREATED":
      // I add the subscription to a mapping from roomName to subscription
      break;
    case "UNSUBSCRIBE_TO_ROOM":
      // Get the subscription by its room name, close it and
      // remove it from the mapping.
      break;
  }
});
Is this approach for handling the subscriptions right? I don't really like the idea of a subscribeToRoom action that keeps listening for events, nor of sending the subscription object through the dispatcher.
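One common alternative is to keep the Autobahn subscription objects out of the dispatcher and stores entirely, and manage them in a small utility module that only dispatches plain payloads. Below is a rough sketch of that idea, reusing the Dispatcher and global session from the question; the RoomsWebAPIUtils module name and its wiring are hypothetical, while the promise-based subscribe/unsubscribe calls follow the Autobahn|JS API:

// RoomsWebAPIUtils.js - hypothetical module that owns all Autobahn
// subscriptions, so stores only ever receive plain action payloads.
var _subscriptions = {}; // roomName -> Autobahn subscription

var RoomsWebAPIUtils = {
  subscribeToRoom: function(roomName) {
    window.AutobahnSession
      .subscribe('rooms/' + roomName, function(data) {
        Dispatcher.dispatch({
          type: "ROOM_MESSAGE_ARRIVED",
          message: data,
          roomName: roomName
        });
      })
      .then(function(subscription) {
        // the subscription object never leaves this module
        _subscriptions[roomName] = subscription;
      });
  },

  unsubscribeFromRoom: function(roomName) {
    var subscription = _subscriptions[roomName];
    if (subscription) {
      window.AutobahnSession.unsubscribe(subscription);
      delete _subscriptions[roomName];
    }
  }
};

With this split, the actions and stores only ever deal with serializable data, and the store no longer needs the ROOM_SUBSCRIPTION_CREATED case at all.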

Related

Debugging an AMQP queue that loses messages when using a low prefetch

My AMQP system seems to be losing messages, so I'd like a way to see whether messages are actually queued before being consumed.
I have several microservices communicating via AMQP messages on Node.js, using CloudAMQP. One of these microservices, MS[B], generates PDFs; the process is pretty heavy and takes about a minute per request. So I generate the PDFs asynchronously, triggering a webhook once finished, and process one request at a time using prefetch = 1.
One microservice, MS[A], collects all the requests from users, answers back to them saying "ok, request received, listen on the webhook", and in parallel asks MS[B] to generate the PDFs. MS[B] has prefetch = 1, so it consumes just one request at a time. Once finished, it sends the response to MS[A]'s callback queue, which triggers the user's webhook saying "the PDF is ready".
The problem is that MS[B] misses all the messages that arrive while it is busy:
it consumes one request from MS[A]
it starts generating the .pdf
while generating, it discards all the other messages that MS[A] sends, as if there were no queue at all
it finishes the .pdf, sending an ACK to MS[A]
then it starts accepting messages again, taking the last one received after being idle and losing all the previous ones.
Why? How can I find the problem, and what could I monitor?
Communication between the other microservices works well, with messages correctly ordered in queues. Only this one, with prefetch = 1, loses messages.
I am NOT using the no-ack option. I don't know what to try, what to test, or what to monitor to find the problem.
How can I see whether messages are correctly queued before being consumed, or just lost?
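One way to verify whether messages actually pile up in the queue (rather than being dropped) is to watch the queue counters, either in the CloudAMQP/RabbitMQ management interface (the "Ready" and "Unacked" columns) or programmatically. A minimal sketch using amqplib's checkQueue, assuming the existing connection can be reused and the queue name is known:

// Hypothetical monitoring helper: logs how many messages are waiting
// in a queue and how many consumers are currently attached to it.
function logQueueDepth(conn, queueName) {
  return conn.createChannel()
    .then(function (ch) {
      return ch.checkQueue(queueName)   // passive check, does not modify the queue
        .then(function (info) {
          console.log(queueName + ': ' +
            info.messageCount + ' messages ready, ' +
            info.consumerCount + ' consumers');
          return ch.close();
        });
    });
}

Calling this periodically while MS[B] is busy would show whether the requests from MS[A] are sitting in the worker queue or never reaching it.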
Below is the implementation of the messaging system.
Channel Creation
/*
Starting Point of a connection to the CLOUDAMQP_URL server, then exec the callback
*/
start(callback) {
  var self = this;
  // if the connection is closed or fails to be established at all, we will reconnect
  amqp.connect(process.env.CLOUDAMQP_URL + "?heartbeat=60")
    .then(
      function (conn) {
        // create queues and consume mechanism
        self.amqpConn = conn;
        setTimeout(() => {
          startPublisher();
        }, 200);
        setTimeout(() => {
          createCallbackQueue();
        }, 1000);
        setTimeout(() => {
          callback();
        }, 2000);
      });

  // create publisher channels
  function startPublisher() {
    self.amqpConn.createConfirmChannel()
      .then(function (ch) {
        self.pubChannel = ch;
        logger.debug("🗣️ pubChannel ready");
        while (true) {
          var m = self.offlinePubQueue.shift();
          if (!m) break;
          self.publish(m[0], // exchange
                       m[1], // routingKey
                       m[2], // content
                       undefined // correlationId
          );
        }
      });
  }

  // create callback channel
  function createCallbackQueue() {
    self.amqpConn.createChannel()
      .then(function (channel) {
        channel.assertQueue(self.CALLBACK_QUEUE_NAME, {
          durable: true,
          exclusive: true, // callbacks are exclusive
        })
        .then(function (q) {
          logger.debug(" 👂 Waiting for RPC RESP in " + self.CALLBACK_QUEUE_NAME);
          channel.consume(q.queue,
            processCallback, {
              noAck: false
            }
          );
        });

        // process messages of the callback
        function processCallback(msg) {
          var correlationId = msg.properties.correlationId;
          // callback received
          if (self.correlationIds_map[correlationId]) {
            delete self.correlationIds_map[correlationId];
            var content = JSON.parse(msg.content.toString());
            self.eventEmitter.emit(correlationId, content);
          }
        }
      });
  }

  return deferred.promise;
}
Consuming Messages
/*
#worker_queue - the name of the queue
*/
// Consume messages from 'worker_queue'. A worker that acks messages only if processed successfully
startWorker(worker_queue, routes) {
  var self = this;
  logger.debug("startWorker " + self.CALLBACK_QUEUE_NAME);
  var channel;
  worker_queue = self.MICROSERVICE_NAME + worker_queue;
  self.amqpConn.createChannel()
    .then(
      function (ch) {
        channel = ch;
        ch.prefetch(self.opt.prefetch); // = 1 for MS[B] generating pdf
        channel.assertQueue(worker_queue, {
          durable: true,
          exclusive: true
        })
        .then(function (q) {
          channel.consume(worker_queue, processMsg, {
            noAck: false
          });
        });
      });

  // call the 'function from interface' passing params, and send the ACK
  function processMsg(msg) {
    work(msg)
      .then(function (data) {
        channel.ack(msg, false); // allUpTo = false
      })
      .catch(function (err) {
        channel.ack(msg, false);
        // channel.reject(msg, false); // requeue = false
        // this.closeOnErr(e);
      });
  }

  // execute the command, and queue back a response, checking if it's an error or not
  function work(msg) {
    var deferred = Q.defer();
    var correlationId;
    try {
      correlationId = msg.properties.correlationId;
    } catch (err) {}
    work_function(msg.content, correlationId)
      .then(function (resp) {
        var content = {
          data: resp
        };
        content = Buffer.from(JSON.stringify(content));
        channel.sendToQueue(msg.properties.replyTo,
          content, {
            correlationId: correlationId,
            content_type: 'application/json'
          }
        );
        deferred.resolve(resp);
      });
    return deferred.promise;
  }
}
Publish Messages
publish(exchange, routingKey, content, correlationId) {
  var self = this;
  var deferred = Q.defer();
  self.correlationIds_map[correlationId] = true;
  self.pubChannel.publish(exchange, routingKey, content,
    {
      replyTo: self.CALLBACK_QUEUE_NAME,
      content_type: 'application/json',
      correlationId: correlationId,
      persistent: true
    },
    function (err, ok) {
      if (err) {
        self.offlinePubQueue.push([exchange, routingKey, content]); // try again
        self.pubChannel.connection.close();
        deferred.resolve('requeued');
      } else {
        deferred.resolve(ok);
      }
    });
  return deferred.promise;
}
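For context, here is how a caller on the MS[A] side might tie publish and the callback-queue consumer together. This is only an illustrative sketch: requestPdf, the messaging object, the queue name, and the use of the uuid package are assumptions, not part of the original code.

// Hypothetical caller in MS[A]: publish an RPC-style request and wait
// for the reply that the callback-queue consumer emits for this correlationId.
var uuid = require('uuid');

function requestPdf(messaging, payload) {
  var correlationId = uuid.v4();
  var content = Buffer.from(JSON.stringify(payload));

  return new Promise(function (resolve) {
    // processCallback() in the callback queue emits on this correlationId
    messaging.eventEmitter.once(correlationId, resolve);
    messaging.publish('',                  // default exchange
                      'MS_B_worker_queue', // hypothetical worker queue name
                      content,
                      correlationId);
  });
}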

RxJS: subscribing until another event happens

I am new to RxJS and am trying to implement the following workflow:
User clicks on a menu item that triggers an HTTP request
Before the response has arrived, the user clicks on a second request
The subscription to the first request is ended and a subscription to the second request is started
// The code below sits inside the onClick event of my menu
var callAction = function(someParameters) {
  return Rx.Observable.create(function(observer) {
    var subscribed = true;
    myHttpApi.someActionCall(someParameters).then(
      (data: any) => {
        if (subscribed) {
          // Send data to the client
          observer.next(data);
          // Immediately complete the sequence
          observer.complete();
        }
      }
    ).catch((err: any) => {
      if (subscribed) {
        // Inform the client that an error occurred.
        observer.error(err);
      }
    });
    return function () {
      subscribed = false;
    };
  });
};
The observer is further defined below:
var observer = {
  // onNext in RxJS 4
  next: function (data) {
    // Do what you need to do in the interface
  },
  // onError in RxJS 4
  error: function (err) {
    // Handle the error in the interface
  },
  // onComplete in RxJS 4
  complete: function () {
    //console.log("The asynchronous operation has completed.");
  }
};
let subscription = callAction(someParameters).subscribe(observer);
How do I now go about implementing #3, whereby the subscription to the first request is ended and a subscription to the new request (in this example, the same block of code is executed for different menu options and therefore different requests based on the parameters) is started?
Breaking up the steps into discrete functions,
// Inner observable, calls the API
const callAction$ = function(someParameters) {
  return Observable.fromPromise(
    myHttpApi.someActionCall(someParameters)
  )
}

// Outer observable, controls the click chain
const click$ = new Subject();
click$.switchMap(clickParams => {
  return callAction$(clickParams)
})
.subscribe(
  result => console.log('Result: ', result),
  err => console.log('Error: ', err.message)
)

// Handler function, called from menu
const handleClick = function(clickParams) {
  click$.next(clickParams)
}
Working example CodePen
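For newer RxJS versions (6 and up), the same switchMap chain can be written with pipeable operators. This is only a sketch, assuming the same myHttpApi wrapper from the question:

import { Subject, from } from 'rxjs';
import { switchMap } from 'rxjs/operators';

const click$ = new Subject();

click$.pipe(
  // switchMap unsubscribes from the previous inner observable
  // as soon as the next click arrives
  switchMap(clickParams => from(myHttpApi.someActionCall(clickParams)))
).subscribe({
  next: result => console.log('Result: ', result),
  error: err => console.log('Error: ', err.message)
});

// Handler function, called from the menu
const handleClick = clickParams => click$.next(clickParams);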

Lambda that reads from SQS queue - bottleneck?

So I have implemented an email system like the one here: https://cloudonaut.io/integrate-sqs-and-lambda-serverless-architecture-for-asynchronous-workloads/
The flow is as follows:
HTTP request to send an email -> API Gateway -> HttpRequestLambda -> SQS <-> SQSMessageConsumerLambda (scheduled) -> MessageWorkerLambda (sends email via the email service provider)
My SQSMessageConsumerLambda is scheduled to run every minute
I changed the SQS consumer to recursively call itself when the timeout is getting near, rather than just ending. Doing this means the SQS queue has a better chance of not piling up with too many messages.
This seems to work great so far, but I have a couple of questions:
1. If the function times out, the messages that were read from the queue are probably still within their visibility timeout period, so invoking the Lambda recursively means they can't actually be re-read from the queue until their visibility timeout expires, which probably won't have happened right after the recursive call. So would it be an idea to pass these messages into the recursive call itself, then check for these 'passed-in messages' at the beginning of the consumer Lambda and send them directly to workers in that case?
2. SQSMessageConsumerLambda is still a bit of a bottleneck, isn't it? It takes about 40-50 ms to invoke the MessageWorkerLambda for each message it wants to delegate. Or does the 'async.parallel' mitigate this?
3. Would it be better if we could somehow elastically increase the number of SQSMessageConsumerLambdas based on CloudWatch alarms, i.e. alarms that check whether there are more than X messages on the queue for X minutes?
var AWS = require('aws-sdk');
var sqs = new AWS.SQS();
var async = require("async");
var lambda = new AWS.Lambda();
var QUEUE_URL = `https://sqs.${process.env.REGION}.amazonaws.com/${process.env.ACCOUNT_ID}/${process.env.STAGE}-emailtaskqueue`;
var EMAIL_WORKER = `${process.env.SERVICE}-${process.env.STAGE}-emailWorker`
var THIS_LAMBDA = `${process.env.SERVICE}-${process.env.STAGE}-emailTaskConsumer`
function receiveMessages(callback) {
  var numMessagesToRead = 10;
  //console.log('in receiveMessages, about to read ',numMessagesToRead);
  //WaitTimeSeconds : The duration (in seconds) for which the call waits for a message to arrive in the queue before returning
  var params = {
    QueueUrl: QUEUE_URL,
    MaxNumberOfMessages: numMessagesToRead,
    WaitTimeSeconds: 20
  };
  sqs.receiveMessage(params, function(err, data) {
    if (err) {
      console.error(err, err.stack);
      callback(err);
    } else {
      if (data.Messages && data.Messages.length > 0) {
        console.log('Got ', data.Messages.length, ' messages off the queue');
      } else {
        console.log('Got no messages from queue');
      }
      callback(null, data.Messages);
    }
  });
}

function invokeWorkerLambda(task, callback) {
  console.log('Need to invoke worker for this task..', task);
  //task.Body is a json string
  var payload = {
    "ReceiptHandle": task.ReceiptHandle,
    "body": JSON.parse(task.Body)
  };
  console.log('payload:', payload);
  //using 'Event' means use async (http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Lambda.html#invoke-property)
  //TODO need variable here
  var params = {
    FunctionName: EMAIL_WORKER,
    InvocationType: 'Event',
    Payload: JSON.stringify(payload)
  };
  var millis = Date.now();
  lambda.invoke(params, function(err, data) {
    millis = Date.now() - millis;
    console.log('took ', millis, ' to invoke ', EMAIL_WORKER, ' asynchronously');
    if (err) {
      console.error(err, err.stack);
      callback(err);
    } else {
      callback(null, data);
    }
  });
}

function handleSQSMessages(context, callback) {
  //console.log('in handleSQSMessages');
  receiveMessages(function(err, messages) {
    if (messages && messages.length > 0) {
      var invocations = [];
      messages.forEach(function(message) {
        invocations.push(function(callback) {
          invokeWorkerLambda(message, callback);
        });
      });
      async.parallel(invocations, function(err) {
        if (err) {
          console.error(err, err.stack);
          callback(err);
        } else {
          if (context.getRemainingTimeInMillis() > 20000) {
            console.log('there is more time to read more messages for this run of the cron');
            handleSQSMessages(context, callback);
          } else {
            console.log('remaining time in millis:', context.getRemainingTimeInMillis(), ' No more time here, invoking this lambda again');
            lambda.invoke({ FunctionName: THIS_LAMBDA, InvocationType: 'Event', Payload: '{"recursiveMarker":true}' }, function(err, data) {
              if (err) {
                console.error(err, err.stack);
                callback(err);
              } else {
                console.log('data from the invocation:', data);
                callback(null, 'Lambda was just called recursively');
              }
            });
          }
        }
      });
    } else {
      callback(null, "DONE");
    }
  });
}

module.exports.emailTaskConsumer = (event, context, callback) => {
  console.log('in an emailTaskConsumer. Was this a recursive call?', event);
  handleSQSMessages(context, callback);
};
1) The visibility timeout is a great feature of SQS that lets you build resilient systems. I could not find a reason to try to handle failures on your own.
2) You could batch all the messages read from the queue into a single Worker Lambda invocation and process them there at once (a sketch follows below).
3) You could add additional CloudWatch event rules triggering the Consumer Lambda to increase the read throughput.
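A minimal sketch of the batching idea in point 2, assuming the same lambda client, EMAIL_WORKER name, and message shape as in the question; the tasks field name is an arbitrary choice:

// Hand the whole batch of SQS messages to one worker invocation
// instead of one lambda.invoke() per message.
function invokeWorkerWithBatch(messages, callback) {
  var payload = {
    tasks: messages.map(function (message) {
      return {
        ReceiptHandle: message.ReceiptHandle,
        body: JSON.parse(message.Body)
      };
    })
  };
  lambda.invoke({
    FunctionName: EMAIL_WORKER,
    InvocationType: 'Event',          // fire-and-forget, as in the question
    Payload: JSON.stringify(payload)
  }, callback);
}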
Use SNS to trigger the Lambda. This is the correct way of working with Lambda functions. Your HttpRequestLambda would fire an SNS notification, and another Lambda function is immediately triggered in response to that event. Actually, if you are not doing anything else in HttpRequestLambda, you can also replace it with an AWS API Gateway proxy. Here you can see a full tutorial about exposing the SNS API via API Gateway.
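A rough sketch of that SNS-based flow; the topic ARN environment variable, function names, and task shape are assumptions for illustration only:

var AWS = require('aws-sdk');
var sns = new AWS.SNS();

// inside HttpRequestLambda: publish the email task as a notification
function enqueueEmail(task, callback) {
  sns.publish({
    TopicArn: process.env.EMAIL_TOPIC_ARN,
    Message: JSON.stringify(task)
  }, callback);
}

// emailWorker handler, subscribed to the topic; one invocation per message
module.exports.emailWorker = (event, context, callback) => {
  var task = JSON.parse(event.Records[0].Sns.Message);
  // ... send the email via the email service provider, then:
  callback(null, 'sent');
};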

Parse.Push.send success probably called too soon

I'm trying to implement one time PUSH notification subscriptions in the Parse Cloud service. Here's my model:
Installation.user - pointer to User
User - team (String)
Subscription - email (String), status (String), user (pointer to User)
As a user, I can say: send PUSH notification(s) to all my devices when the user with this email reaches this status and that user is in my team. A user can have many subscriptions. A user can unsubscribe manually (via the OS X, iOS, Android, ... apps) or automatically: when the user receives the PUSH notification, the subscription should be automatically cancelled. They can subscribe again, no problem, but auto-cancellation is a must. These subscriptions are user based; the user can see them on their devices.
I wrote a Parse Cloud function that consists of two pieces.
Send PUSH
Parse.Push.send({
  where: query,
  data: pushData
}, {
  success: function() {
    response.success('OK');
  },
  error: function(err) {
    console.log(err);
    response.error({ error: err });
  }
});
This does work. My query works and the PUSH is received.
Delete Subscriptions
deleteSubscriptionQuery.find({
  success: function(objects) {
    Parse.Object.destroyAll(objects, {
      success: function() {
        response.success('OK');
      },
      error: function(err) {
        console.log(err);
        response.error(err);
      }
    });
  },
  error: function(err) {
    console.log(err);
    response.error(err);
  }
});
This does work. My deleteSubscriptionQuery works and the correct subscriptions are deleted.
Combined Together
Parse.Push.send({
  where: query,
  data: pushData
}, {
  success: function() {
    deleteSubscriptionQuery.find({
      success: function(objects) {
        Parse.Object.destroyAll(objects, {
          success: function() {
            response.success('OK');
          },
          error: function(err) {
            console.log(err);
            response.error(err);
          }
        });
      },
      error: function(err) {
        console.log(err);
        response.error(err);
      }
    });
  },
  error: function(err) {
    console.log(err);
    response.error({ error: err });
  }
});
This doesn't work, even though success is reported. It seems that the problem lies in Parse.Push.send, i.e. success is called too soon. I assume Parse.Push.send doesn't really send the notifications but just schedules them, and success is called upon a successful schedule. If so, it should be renamed to Parse.Push.schedule. I also assume they are scheduled with my query (the query is saved, not actually executed), so I suspect this solution doesn't work because of the following:
Parse.Push.send - saves my query and calls success,
Subscription objects are deleted in success,
Parse starts to process my pushes with the saved query, executes the query, and it returns zero objects, because I already deleted the Subscription objects in the success callback of Parse.Push.send.
Am I right? Does anyone know Parse internals? If I am right, what do you propose I should do to implement one time PUSH notifications based on custom queries?
Here's the solution based on #Wain's proposal.
Fetch Subscription objects based on request parameters.
The User is included in this query, so it's fetched along with the Subscription objects.
Make separate Installation query based on User objects from Subscription objects.
Send PUSH to separate Installation query.
Delete already fetched Subscription objects upon success.
I can safely delete the Subscription objects this way, and it doesn't interfere with the Installation query for the PUSH notifications. It works as expected.
Parse.Cloud.define("status", function(request, response) {
  //
  // Input validation
  //
  if (!request.params.hasOwnProperty("status")) {
    response.error("Missing status parameter");
    return;
  }
  var statusQuo = request.params["status"];
  if (!statusQuo.hasOwnProperty("email") || !statusQuo.hasOwnProperty("team") || !statusQuo.hasOwnProperty("status")) {
    response.error("Invalid status dictionary");
    return;
  }
  var status = statusQuo["status"];
  if (status != "Available") {
    response.success('OK');
    return;
  }
  var email = statusQuo["email"];
  var team = statusQuo["team"];
  Parse.Cloud.useMasterKey();
  //
  // Find subscriptions
  //
  var usersQuery = new Parse.Query(Parse.User);
  usersQuery.equalTo('team', team);
  var Subscription = Parse.Object.extend("Subscription");
  var subscriptionsQuery = new Parse.Query(Subscription);
  subscriptionsQuery.include('user');
  subscriptionsQuery.equalTo('status', status);
  subscriptionsQuery.equalTo('email', email);
  subscriptionsQuery.matchesQuery('user', usersQuery);
  subscriptionsQuery.find({
    success: function(subscriptions) {
      var users = subscriptions.map(function(subscription) {
        return subscription.get('user');
      });
      //
      // Query for notifications / installations
      //
      var query = new Parse.Query(Parse.Installation);
      query.equalTo('channels', 'status');
      query.containedIn('user', users);
      //
      // Push notifications data
      //
      var pushData = { 'status': statusQuo };
      var apsData = { 'sound': '', 'alert': email + ' became Available.' };
      pushData['aps'] = apsData;
      Parse.Push.send({
        where: query,
        data: pushData
      }, {
        success: function() {
          Parse.Object.destroyAll(subscriptions, {
            success: function() {
              response.success('OK');
            },
            error: function(err) {
              console.log(err);
              response.error(err);
            }
          });
        },
        error: function(err) {
          console.log(err);
          response.error({ error: err });
        }
      });
    },
    error: function(err) {
      console.log(err);
      response.error(err);
    }
  });
});

Modifying the session data from inside the socket.io callback

I am currently using this stack: express, socket.io, sessionstore. I followed the article here: http://www.danielbaulig.de/socket-ioexpress/.
Well, the problem is that I cannot modify the session values in the socket.io callback.
Access from the express side works well; the item gets increased after each refresh.
app.get('/mysession', function(req, res) {
  req.session.item++;
  console.log(req.session);
  res.render('session.jade', {
    title: 'Sample title'
  });
});
Using it on the socket.io side, it does not, and here is the problem; maybe I am setting the wrong object.
var io = io.listen(app);
io.sockets.on('connection', function(socket) {
  var handshake = socket.handshake;
  onlineCount++;
  console.log('Well done id %s', handshake.sessionID);
  handshake.session.item++;
  console.log(handshake.session);
});
Here is the bridge code.
io.set('authorization', function(data, accept) {
  if (data.headers.cookie) {
    data.cookie = parseCookie(data.headers.cookie);
    data.sessionID = data.cookie['express.sid'];
    sessionStore.get(data.sessionID, function(err, session) {
      if (err || !session) {
        accept('Error', false);
      } else {
        data.session = session;
        accept(null, true);
      }
    });
  } else {
    return accept('No cookie transmitted', false);
  }
});
The only way I found to make this work is to grab the cookie from the request object on the connect event, parse it with your favourite cookie parser (I use connect.utils.parseCookie), and set it on that socket so that I may access it in future events:
socket.on('connection', function(client) {
  var cookie = client.request.headers.cookie;
  var pcookie = connect.utils.parseCookie(cookie);
  var session_id = pcookie["connect.sid"];
  if (session_id) {
    sessionStore.get(session_id, function(err, sess) {
      // do whatever you want with sess here
      // ...
      // if you want to "save" the session for future events
      client.set('session_id', session_id);
    });
  }
});
The sessionStore API changed a little bit; now it's sessionStore.load(sessionId, cb) instead of .get.
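If the goal is for changes made inside the socket handler to be visible to Express afterwards, the loaded session also has to be written back to the store, since the object obtained from sessionStore.get is just a plain copy. A minimal sketch assuming a connect-compatible sessionStore:

sessionStore.get(session_id, function (err, sess) {
  if (err || !sess) return;
  // modify the plain session object loaded from the store
  sess.item = (sess.item || 0) + 1;
  // write it back, otherwise the change only lives inside this callback
  sessionStore.set(session_id, sess, function (err) {
    if (err) console.log('could not save session', err);
  });
});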
