I have a problem with the changefeed feature of RethinkDB. There is no console output when I add a document to the table, but "changes" is printed once after starting my application via node app.js. Why is that? Any help is appreciated.
// rethinkdb connection
var connection = null;
r.connect({ db: 'bc', host: 'localhost', port: 1337}, function(err, conn) {
  if (err) throw err;
  connection = conn;
  observeBets();
});

function observeBets() {
  r.table('bets').changes().run(connection, function(err, betsCursor) {
    if (err) throw err;
    console.log("changes");
  });
}
Insert via
var bet = {
  bet: "test",
  userID: 213
};
r.table('bets').insert(bet);
I think you need to be iterating over betsCursor.
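For example, a minimal sketch of the changefeed handler, using the standard rethinkdb JS driver's cursor API:

function observeBets() {
  r.table('bets').changes().run(connection, function(err, betsCursor) {
    if (err) throw err;
    // "changes" is logged only once, when the feed is opened.
    console.log("changes");
    // Iterate the cursor to see each change as it arrives:
    betsCursor.each(function(err, change) {
      if (err) throw err;
      console.log("change received:", change);
    });
  });
}

Also worth checking: a ReQL query only executes when you call .run(), so the insert shown in the question needs r.table('bets').insert(bet).run(connection, ...) before it will actually write anything for the feed to report.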
I'm using nightwatchjs and the ssh2 npm package to test that a file name is present in a location, and asserting that it's correct.
My code is as follows;
var Client = require('ssh2');
var conn = new Client();
var longFileNamePrdArray;

module.exports = {
  before:
    conn.on('ready', function(browser) {
      console.log('Client :: ready');
      conn.sftp(function(err, sftp) {
        if (err) throw err;
        sftp.readdir('parkersreviewcontent/', function (err, list) {
          if (err) throw err;
          list.map(a => a.longname);
          longFileNamePrdArray = list[1].longname;
          conn.end();
        });
      });
    })
    .connect({
      host: '*hostname*',
      port: 22,
      user: '*user*',
      password: '*password*',
    }),

  'Test Zen Reviews production file is listed': function (browser) {
    console.log(longFileNamePrdArray);
  },

  'Closing the browser': function (browser) {
    browser.browserEnd();
  },
};
However, this results in undefined being outputted for longFileNamePrdArray.
If I move the console.log command inside the before code block, then the filename is correctly displayed as follows:
-rwxr--r-- 1 - - 2492238 Feb 28 06:37 parkers-reviews-staging.xml
but when I move it outside the before block and into my test block, it fails with the undefined output.
I thought that by declaring longFileNamePrdArray at the top of the test script its value would 'carry across' into the test block, but it doesn't.
Any help and assistance would be really appreciated. Thanks.
I fixed this by using the done callback: the before hook was returning before the asynchronous readdir callback had set longFileNamePrdArray, so calling done() there makes Nightwatch wait until the value is available.
So my working code looks like this:
var Client = require('ssh2');
var conn = new Client();
var longFileNamePrdArray;

module.exports = {
  before: function(browser, done) {
    conn.on('ready', function(browser) {
      console.log('Client :: ready');
      conn.sftp(function(err, sftp) {
        if (err) throw err;
        sftp.readdir('parkersreviewcontent/', function (err, list) {
          if (err) throw err;
          list.map(a => a.longname);
          longFileNamePrdArray = list[1].longname;
          conn.end();
          done();
        });
      });
    })
    .connect({
      host: '*hostname*',
      port: 22,
      user: '*user*',
      password: '*password*',
    });
  },

  'Test Zen Reviews production file is listed': function (browser) {
    console.log(longFileNamePrdArray);
  },

  'Closing the browser': function (browser) {
    browser.browserEnd();
  },
};
I wonder if I've hit a bug. I wrote a piece of Node.js code to trigger a "GCS Text to PubSub" Dataflow template. The function is triggered upon file upload into a GCS bucket.
But it never executes successfully: textPayload: "problem running dataflow template, error was: { Error: Invalid JSON payload received. Unknown name "staging_location": Cannot find field." It seems to be an issue with how I specify the staging location for the job. I have tried "staginglocation", "stagingLocation", etc.; none of them have worked.
Here's my code. Thanks for your help.
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }
    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });

    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      projectId: projectId,
      resource: {
        parameters: {
          inputFile: `gs://${file.bucket}/${file.name}`,
          outputTopic: `projects/iot-fitness-198120/topics/MemberFitnessData`,
        },
        jobName: 'CStoPubSub',
        gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
        stagingLocation: 'gs://fitnessanalytics-tmp/tmp'
      }
    }, function(err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};
I don't think this is actually possible.
Looking at the documentation for the Dataflow API itself, there's nothing like a staging location in the parameter section, and the library you're using is basically a wrapper for this API.
I'm a bit surprised it changes the name of the parameter though.
So I finally got this to work. It was indeed a syntax issue in how the request body is structured. The code below works like a charm:
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }
    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });

    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      projectId: projectId,
      resource: {
        parameters: {
          inputFilePattern: `gs://${file.bucket}/${file.name}`,
          outputTopic: 'projects/iot-fitness-198120/topics/MemberFitnessData2'
        },
        environment: {
          tempLocation: 'gs://fitnessanalytics-tmp/tmp'
        },
        jobName: 'CStoPubSub',
        //gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      }
    }, function(err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};
I currently have socket.io with Laravel broadcasting over Redis working perfectly, until I set up an SSL cert on the domain.
I have redis-server running on port 3001.
Then there is a socket.js set to listen on port 3000.
In my JS on the page I listen via io('//{{ $listen }}:3000').
Any guidance would be great on how to get this working over https. Would I just use 443 as the port?
Thanks.
My socket.js
var app = require('express')();
var http = require('http').Server(app);
var io = require('socket.io')(http);
var Redis = require('ioredis');
var redis = new Redis();

redis.subscribe('notifications', function(err, count) {
});

redis.on('message', function(channel, message) {
  console.log('Message Recieved: ' + message);
  message = JSON.parse(message);
  io.emit(channel + ':' + message.event, message.data);
});

http.listen(3000, function(){
  console.log('Listening on Port 3000');
});
First, set up your serverOptions object:
var fs = require('fs');

var serverOptions = {
  port: 3000,
  host: '127.0.0.1', // address of your site
  key: fs.readFileSync('/etc/nginx/ssl/your_site/server.key'),  // or wherever your SSL key lives
  cert: fs.readFileSync('/etc/nginx/ssl/your_site/server.crt'), // https needs the file contents, not the path
  NPNProtocols: ['http/2.0', 'spdy', 'http/1.1', 'http/1.0']
};
For the NPNProtocols, you may not care for all of them, but they're provided for reference.
Now just create the HTTPS server, attach socket.io, and start listening:
var app = require('https').createServer(serverOptions),
    io = require('socket.io')(app);

app.listen(serverOptions.port); // serve socket.io over TLS on port 3000
This should be pretty much plug and play into your source at this point.
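On the client side, the page then connects over https as well. A minimal sketch, reusing the {{ $listen }} host from the question (the event name is only a placeholder for whatever your Laravel event broadcasts as):

// Connecting via an https URL makes the socket.io client use secure websockets (wss).
var socket = io('https://{{ $listen }}:3000');

socket.on('notifications:ExampleEvent', function (data) {
  console.log(data);
});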
As a side note, your setup is completely wide open and anyone can listen on your web socket, so nothing private should be sent through it. If you need to make your data private, then you're going to need one of two things:
Something like JWT-Auth Token
Something custom that interfaces with the Laravel session data in Redis:
Here's an example of the latter:
// Assumed dependencies for the snippet below: the mcrypt, php-unserialize,
// cookie and redis npm packages, providing MCrypt, unserialize(),
// cookie.parse() and the Redis client used as "client".
var mcrypt = require('mcrypt');
var phpunserialize = require('php-unserialize');
var cookie = require('cookie');
var client = require('redis').createClient();

var SECRET_KEY = '<YOUR_LARAVEL_SECRET_KEY>';

var laravel_session_parser = {
  ord: function (string) {
    return string.charCodeAt(0);
  },
  decryptSession: function (cookie, secret) {
    if (cookie) {
      var session_cookie = JSON.parse(new Buffer(cookie, 'base64'));
      var iv = new Buffer(session_cookie.iv, 'base64');
      var value = new Buffer(session_cookie.value, 'base64');
      var rijCbc = new mcrypt.MCrypt('rijndael-128', 'cbc');
      rijCbc.open(secret, iv);
      var decrypted = rijCbc.decrypt(value).toString();
      var len = decrypted.length - 1;
      var pad = laravel_session_parser.ord(decrypted.charAt(len));
      return phpunserialize.unserialize(decrypted.substr(0, decrypted.length - pad));
    }
    return null;
  },
  getUidFromObj: function (obj, pattern) {
    var regexp = /login_web_([a-zA-Z0-9]+)/gi;
    if (pattern) {
      regexp = pattern;
    }
    var u_id = null;
    for (var key in obj) {
      var matches_array = key.match(regexp);
      if (matches_array && matches_array.length > 0) {
        u_id = obj[matches_array[0]];
        return u_id;
      }
    }
    return u_id;
  },
  getRedisSession: function (s_id, cb) {
    var _sessionId = 'laravel:' + s_id;
    client.get(_sessionId, function (err, session) {
      if (err) {
        cb && cb(err);
        return;
      }
      cb && cb(null, session);
    });
  },
  getSessionId: function (session, _callback) {
    var u_id = null,
        err = null;
    try {
      var laravelSession = phpunserialize.unserialize(phpunserialize.unserialize(session));
      u_id = laravel_session_parser.getUidFromObj(laravelSession);
    } catch (err) {
      _callback(err, null);
    }
    _callback(err, u_id);
  },
  ready: function (socket, _callback) {
    if (typeof socket.handshake.headers.cookie === 'string') {
      var cookies = cookie.parse(socket.handshake.headers.cookie);
      var laravel_session = cookies.laravel_session;
      var session_id = laravel_session_parser.decryptSession(laravel_session, SECRET_KEY);
      laravel_session_parser.getRedisSession(session_id, function (err, session) {
        if (!err && session) {
          laravel_session_parser.getSessionId(session, function (err, user_id) {
            if (user_id) {
              _callback(null, session_id, user_id, laravel_session)
            } else {
              _callback(new Error('Authentication error'), null);
            }
          });
        } else {
          _callback(new Error('Authentication error'), null);
        }
      });
    }
  }
};
Now you can just have socket.io grab an instance of the individual's session when they establish a connection:
io.on('connection', function (socket) {
  laravel_session_parser.ready(socket, function(err, session_id, user_id, laravel_session) {
    // log out the variables above to see what they provide
  });
});
Note, I prefer to use dotenv in NodeJS to share environment variables between Laravel and Node.
Then you can do process.env.APP_KEY and you don't need to worry about sharing variables.
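For example, a minimal sketch, assuming the Node process is started from the Laravel project root so dotenv picks up the same .env file:

require('dotenv').config(); // loads Laravel's .env into process.env

var SECRET_KEY = process.env.APP_KEY; // the same key Laravel uses, no copying needed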
Also note that the script above is not complete and not production-ready; it's just meant to be used as an example.
I'm working on a simple service using Node.js. It receives uploaded files, stores them on the disk and records some metadata on an Oracle table. I'm using the db-oracle package together with connection pooling, following this article: http://nodejsdb.org/2011/05/connection-pooling-node-db-with-generic-pool/
However, I've noticed that the data I insert is only sent to the Oracle database after the connection pool closes the idle connection, by calling its disconnect() method.
Is there a way to flush the data before sending the 'OK' signal to my client? The way it is working now, a crash on my webservice or on Oracle itself can cause loss of data, and the client of my service would not know about it. I actually tested this by killing my app process after some uploads, and the data was indeed lost.
Here's a simplified version of the code:
var express = require('express');
var app = module.exports = express.createServer();

app.post('/upload', handleUpload);

app.listen(4001, function(){
  console.log("Express server listening on port %d in %s mode", app.address().port, app.settings.env);
});

function handleUpload(req, res) {
  res.contentType('application/xml');
  var buf = '';
  req.on('data', function(chunk) { buf += chunk; });
  req.on('end', function() {
    saveUpload(req, res, buf);
  });
}

function saveUpload(req, res, buf) {
  if (buf.length == 0)
    return sendError(res, 'No data supplied', 422);

  var payload = new Buffer(buf, 'base64');
  files.save(payload, function(err, savedFile) {
    if (err)
      return sendError(res, 'Error while saving', 500);

    var obj = { ip: req.connection.remoteAddress, location: savedFile.path,
                created_at: new Date(), updated_at: new Date() };
    var fields = ['IP', 'LOCATION', 'CREATED_AT', 'UPDATED_AT'];
    var values = fields.map(function(v) { return obj[v.toLowerCase()] });

    pool.acquire(function(err, conn) {
      if (err)
        return sendError(res, err, 500);

      var q = conn.query().insert('FILES', fields, values);
      q.execute(function(err, result) {
        pool.release(conn);
        if (err)
          return sendError(res, err, 500);
        if (result.affected < 1)
          return sendError(res, 'Error saving the record', 500);

        // The next statement sends the final result to the client.
        // However, the new record was not yet flushed to the database.
        res.end('<ok />');
      });
    });
  });
}

function sendError(res, err, code) {
  console.log(err);
  res.send('<error>' + err + '</error>', code || 500);
}
As a workaround, I've tried to implement a fake connection pool and release all acquired connections, but now my app is dying with the message pure virtual method called, followed by Abort trap: 6.
Here's the fake connection pooling:
var fakePool = {
  acquire: function(callback) {
    new oracle.Database(config.database).connect(function(err, server) {
      callback(err, this);
    });
  },
  release: function(conn) {
    conn.disconnect();
  }
};
Just to be clear, I don't care about the fake connection pooler, it was just a dirty workaround. I want to be able to flush the data to Oracle before sending the 'OK' to my client.
Btw I also opened a ticket on their Github: https://github.com/mariano/node-db-oracle/issues/38
You are obviously missing a transaction commit.
node-db does not need to expose a commit API because in most RDBMS (including Oracle), COMMIT is a valid query. Since the package allows the execution of arbitrary queries, commit/rollback are meant to be done with a plain execute() call.
The code should be changed as follows:
pool.acquire(function(err, conn) {
  if (err)
    return sendError(res, err, 500);

  var q = conn.query().insert('FILES', fields, values);
  q.execute(function(err, result) {
    if (err || result.affected < 1 ) {
      pool.release(conn);
      return sendError(res, err, 500);
    }
    conn.query().execute("commit", function(err,result) {
      if (err) {
        pool.release(conn);
        return sendError(res, err, 500);
      }
      res.end('<ok />');
      pool.release(conn);
    });
  });
});
This is not an exact answer to your question, but have a look at the node-oracle package.
It lacks connection pooling, but its commit/rollback functionality can at least be controlled from code. And you can always mix it with a generic pool solution such as node-pool.
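For reference, here is a rough sketch of that explicit-commit flow with node-oracle. The connection settings and table columns are placeholders, and the setAutoCommit/commit/rollback calls are as I recall them from that package's README, so verify them there before relying on this:

var oracle = require('oracle');

var connectData = { hostname: 'localhost', port: 1521, database: 'XE', user: 'scott', password: 'tiger' };

oracle.connect(connectData, function(err, connection) {
  if (err) throw err;
  connection.setAutoCommit(false); // take manual control of the transaction
  connection.execute(
    "INSERT INTO FILES (IP, LOCATION, CREATED_AT, UPDATED_AT) VALUES (:1, :2, SYSDATE, SYSDATE)",
    ['127.0.0.1', '/tmp/upload.bin'],
    function(err, results) {
      if (err) {
        return connection.rollback(function() { connection.close(); });
      }
      connection.commit(function(err) { // the row is durable once this callback fires
        connection.close();
      });
    });
});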
I am currently using this stack: express, socket.io, sessionstore. I followed the article here: http://www.danielbaulig.de/socket-ioexpress/.
The problem is that I cannot modify the session values in the socket.io callback.
Access from the express side works well; the item gets increased after each refresh.
app.get('/mysession', function(req, res) {
  req.session.item++;
  console.log(req.session);
  res.render('session.jade', {
    title: 'Sample title'
  });
});
On the socket.io side it does not, and here is the problem; maybe I am setting the wrong object.
var io = io.listen(app);

io.sockets.on('connection', function(socket) {
  var handshake = socket.handshake;
  onlineCount++;
  console.log('Well done id %s', handshake.sessionID);
  handshake.session.item++;
  console.log(handshake.session);
});
Here is the bridge code.
io.set('authorization', function(data, accept) {
  if (data.headers.cookie) {
    data.cookie = parseCookie(data.headers.cookie);
    data.sessionID = data.cookie['express.sid'];
    sessionStore.get(data.sessionID, function(err, session) {
      if (err || !session) {
        accept('Error', false);
      } else {
        data.session = session;
        accept(null, true);
      }
    });
  } else {
    return accept('No cookie transmitted', false);
  }
});
The only way I found to make this work is to grab the cookie from the request object on the connect event, parse it with your favourite cookie parser (I use connect.utils.parseCookie), and set it on that socket so that I may access it in future events:
socket.on('connection', function(client) {
  var cookie = client.request.headers.cookie;
  var pcookie = connect.utils.parseCookie(cookie);
  var session_id = pcookie["connect.sid"];

  if (session_id) {
    sessionStore.get(session_id, function(err, sess) {
      // do whatever you want with sess here
      // ...
      // if you want to "save" the session for future events
      client.set('session_id', session_id);
    });
  }
});
The sessionStore API changed a little bit; it's now sessionStore.load(sessionId, cb) instead of .get.
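With that change, the lookup in the snippet above becomes something like:

sessionStore.load(session_id, function(err, sess) {
  // sess is the same session object that .get() used to hand back
  // ...
});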