What is the correct field to pass the --staging-location parameter for a Dataflow job in Node.js? - google-api

I wonder if I've hit a bug. I wrote a piece of Node.js code to trigger a "GCS Text to PubSub" Dataflow template. The function is triggered upon file upload into a GCS bucket.
But it never executes successfully: "textPayload: "problem running dataflow template, error was: { Error: Invalid JSON payload received. Unknown name "staging_location": Cannot find field." It seems to be an issue with the syntax I use to specify the staging location for the job. I have tried "staginglocation", "stagingLocation", etc., but none of them have worked.
Here's my code. Thanks for your help.
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }
    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });
    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      projectId: projectId,
      resource: {
        parameters: {
          inputFile: `gs://${file.bucket}/${file.name}`,
          outputTopic: `projects/iot-fitness-198120/topics/MemberFitnessData`,
        },
        jobName: 'CStoPubSub',
        gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
        stagingLocation: 'gs://fitnessanalytics-tmp/tmp'
      }
    }, function (err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};

I don't think this is actually possible.
Looking at the documentation for the Dataflow API itself, there's nothing like a staging location in the parameters section, and the library you're using is basically a wrapper for this API.
I'm a bit surprised it changes the name of the parameter, though.

So I finally got this to work. It was indeed a syntax issue in the parameters section: the staging path belongs in an environment block as tempLocation rather than a top-level stagingLocation, and this template's input parameter is named inputFilePattern, not inputFile. The code below works like a charm:
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }
    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });
    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      projectId: projectId,
      resource: {
        parameters: {
          inputFilePattern: `gs://${file.bucket}/${file.name}`,
          outputTopic: 'projects/iot-fitness-198120/topics/MemberFitnessData2'
        },
        environment: {
          tempLocation: 'gs://fitnessanalytics-tmp/tmp'
        },
        jobName: 'CStoPubSub',
        //gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      }
    }, function (err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};
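As a cross-check, the v1b3 API also exposes projects.templates.launch, which takes the same RuntimeEnvironment block. The sketch below is my own illustration of that variant (not part of the original answer), reusing the question's buckets and topic; either way, the staging path goes under environment.tempLocation, not under parameters:

dataflow.projects.templates.launch({
  projectId: projectId,
  gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
  resource: {
    jobName: 'CStoPubSub',
    parameters: {
      inputFilePattern: `gs://${file.bucket}/${file.name}`,
      outputTopic: 'projects/iot-fitness-198120/topics/MemberFitnessData2'
    },
    environment: {
      // RuntimeEnvironment field, not a template parameter
      tempLocation: 'gs://fitnessanalytics-tmp/tmp'
    }
  }
}, function (err, response) {
  if (err) {
    console.error("problem launching dataflow template, error was: ", err);
  }
  console.log("Dataflow template launch response: ", response);
});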

Related

async/await, Node.js readFile, and then do other things

I tried to read a file through async/await and update the credentials, but I got the response "HI" first and then "hello" in the console, and the credentials are also not updated in oauth2Client.
const getFile = async (req, res, next) => {
  await fs.readFile(TOKEN_PATH, (err, token) => {
    if (err) return null;
    console.log("hello")
    console.log(JSON.parse(token));
    oauth2Client.credentials = JSON.parse(token);
  });
  console.log("HI")

  var service = google.drive({
    version: 'v3',
    encoding: null
  });
  console.log(oauth2Client);

  await service.files.get({
    auth: oauth2Client,
    fileId: "1ZR8kkvb2JYVxcUjmlgfBJD2IYnisaiFn",
    alt: 'media'
  }, function(err, response) {
    if (err) {
      console.log('The API returned an error: ' + err);
      return;
    }
    responder(res)(null, response);
  });
}
Is there a way to make everything run in order?
Thank you.
The reason you are experiencing this behavior is that the method you are running is asynchronous. You should instead use the synchronous alternative:
let token;
try {
  // Read the token synchronously so the code below runs in order.
  token = fs.readFileSync(TOKEN_PATH, 'utf8');
} catch (err) {
  console.error(err)
}
if (!token) { return; }

// The file contains JSON, so parse it before assigning the credentials.
oauth2Client.credentials = JSON.parse(token);

const service = google.drive({
  version: 'v3',
  encoding: null
});

await service.files.get({
  auth: oauth2Client,
  fileId: "1ZR8kkvb2JYVxcUjmlgfBJD2IYnisaiFn",
  alt: 'media'
}, function(err, response) {
  if (err) {
    console.log('The API returned an error: ' + err);
    return;
  }
  responder(res)(null, response);
});
Ref: https://nodejs.dev/learn/reading-files-with-nodejs
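If you would rather keep the async/await style from the question, a minimal sketch along these lines should also run in order; it assumes the same TOKEN_PATH, oauth2Client, google, and responder as above, and awaits the promise-based fs.promises.readFile instead of passing a callback:

const fs = require('fs').promises;

const getFile = async (req, res, next) => {
  let token;
  try {
    // Awaiting the promise-based API guarantees the token is read first.
    token = await fs.readFile(TOKEN_PATH, 'utf8');
  } catch (err) {
    return null;
  }
  oauth2Client.credentials = JSON.parse(token);

  const service = google.drive({
    version: 'v3',
    encoding: null
  });

  try {
    // Without a callback argument, the client returns a promise we can await.
    const response = await service.files.get({
      auth: oauth2Client,
      fileId: "1ZR8kkvb2JYVxcUjmlgfBJD2IYnisaiFn",
      alt: 'media'
    });
    responder(res)(null, response);
  } catch (err) {
    console.log('The API returned an error: ' + err);
  }
};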

gapi.client.drive.files.create does not work

I'm writing a Vue app. I read this sample code and wrote code like this:
const apiKey = 'mykey';
const discoveryDocs = ["https://www.googleapis.com/discovery/v1/apis/drive/v3/rest"]
const clientId = 'myclientid'
const scopes = 'https://www.googleapis.com/auth/drive.appdata'

function handleClientLoad() {
  gapi.load('client:auth2', initClient);
}

function initClient() {
  gapi.client.init({
    apiKey,
    discoveryDocs,
    clientId,
    scope: scopes
  }).then(function () {
    createFile()
  });
}

function createFile() {
  console.log('createFile')
  var fileMetadata = {
    'name': 'config.json',
    'parents': ['appDataFolder']
  };
  var media = {
    mimeType: 'application/json',
    body: "body"
  };
  gapi.client.drive.files.create({
    resource: fileMetadata,
    media,
    fields: 'id'
  }, function (err, file) {
    console.log('function in createFile')
    if (err) {
      console.error(err);
    } else {
      console.log('Folder Id:', file.id);
    }
  });
}

window.onload = handleClientLoad()
In the console, 'createFile' is logged but 'function in createFile' is not, so I think the function(err, file)... callback never runs.
What is wrong?
I want the sample code to work.
I had the same issue. The function create() returns a promise; to execute the request, it seems to need a then(). See also this post.
The example code still does not work, though, since you will get a 403 "The user does not have sufficient permissions for this file" error. This seems to happen because the example code creates the file not in appDataFolder but in the root directory.
I managed to get it to work using the following code. Putting all request parameters flat into the object passed to create() seems to do the trick.
// Readable comes from Node's stream module; bundlers polyfill it for the browser.
const { Readable } = require('stream');

const s = new Readable();
s.push("beep"); // the string you want
s.push(null);   // signals the end of the stream

gapi.client.drive.files
  .create({
    name: "config.json",
    parents: ["appDataFolder"],
    mimeType: "application/json",
    upload_type: "media",
    fields: "id",
    body: s,
  })
  .then(function (response) {
    if (response.status === 200) {
      var file = response.result;
      console.log(file);
    }
  })
  .catch(function (error) {
    console.error(error);
  });
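To confirm the file actually landed in appDataFolder rather than the root directory, a small follow-up sketch (my own addition, assuming gapi.client is already initialized with the drive.appdata scope as above) can list that space:

// List files in the hidden appDataFolder space to verify the upload.
gapi.client.drive.files
  .list({
    spaces: 'appDataFolder',
    fields: 'files(id, name)',
    pageSize: 10
  })
  .then(function (response) {
    response.result.files.forEach(function (f) {
      console.log('Found file:', f.name, f.id);
    });
  })
  .catch(function (error) {
    console.error(error);
  });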

Codepipeline lambda action never completes

Well, my Lambda function works well according to the logs, but it never completes in the CodePipeline stage. I have already given the role permission to notify the pipeline ("codepipeline:PutJobSuccessResult",
"codepipeline:PutJobFailureResult") and even set the maximum time to 20 seconds, but it is still not working (it actually ends at around 800 ms).
const axios = require('axios')
const AWS = require('aws-sdk');
const url = 'www.exampleurl.com'

exports.handler = async (event, context) => {
  const codepipeline = new AWS.CodePipeline();
  const jobId = event["CodePipeline.job"].id;
  const stage = event["CodePipeline.job"].data.actionConfiguration.configuration.UserParameters;

  const putJobSuccess = function(message) {
    var params = {
      jobId: jobId
    };
    codepipeline.putJobSuccessResult(params, function(err, data) {
      if (err) { context.fail(err); }
      else { context.succeed(message); }
    });
  };

  const putJobFailure = function(message) {
    var params = {
      jobId: jobId,
      failureDetails: {
        message: JSON.stringify(message),
        type: 'JobFailed',
        externalExecutionId: context.invokeid
      }
    };
    codepipeline.putJobFailureResult(params, function(err, data) {
      if (err) console.log(err)
      context.fail(message);
    });
  };

  try {
    await axios.post(url, { content: stage })
    putJobSuccess('all fine')
  } catch (e) {
    putJobFailure(e)
  }
};
The root issue
The SDK call is asynchronous: codepipeline.putJobSuccessResult returns before the request completes, so the Lambda function finishes its execution before the result is ever delivered to CodePipeline.
The solution
Await the promise returned by codepipeline.putJobSuccessResult so that it is forced to complete before the handler returns its response:
const putJobSuccess = function(id) {
  console.log("Telling CodePipeline the test passed for job: " + id)
  var params = {
    jobId: id
  };
  // Using the promise form (no callback) lets the handler await completion.
  return codepipeline.putJobSuccessResult(params).promise()
    .then(function(data) { console.log(data) })
    .catch(function(err) { console.error(err) });
};

exports.lambdaHandler = async (event, context) => {
  ...
  await putJobSuccess(jobId)
  return response
};
Whenever I see this issue, most of the time it is due to PutJobSuccessResult never being invoked. The best way to check this is to go to CloudTrail > Event History and look for Event name = PutJobSuccessResult during the time range in which you expect the Lambda function to call this API. If you don't find PutJobSuccessResult there, have a look at the code again and at the Lambda execution logs in CloudWatch.
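If you prefer to check this programmatically instead of in the console, a rough sketch using the same aws-sdk can query CloudTrail Event History for the call; the region and time window below are placeholders:

const AWS = require('aws-sdk');
const cloudtrail = new AWS.CloudTrail({ region: 'us-east-1' }); // placeholder region

cloudtrail.lookupEvents({
  LookupAttributes: [
    { AttributeKey: 'EventName', AttributeValue: 'PutJobSuccessResult' }
  ],
  StartTime: new Date(Date.now() - 60 * 60 * 1000), // last hour; adjust as needed
  EndTime: new Date()
}, function (err, data) {
  if (err) console.error(err);
  // An empty Events array means the API was never called in that window.
  else console.log(data.Events);
});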

calling ApiGatewayManagementApi.postToConnection() gives me 500 internal server error?

So I am trying to develop an AWS WebSocket function using Lambda, but it seems that whenever I try to call postToConnection it just gives me a 500 Internal Server Error.
CloudWatch also doesn't log the error that I am receiving.
What I'm receiving on the terminal once I send the message is this:
"{"message": "Internal server error", "connectionId":"xxx", "requestId":"xxx"}"
(which doesn't give me any information at all)
This is my whole code on the Lambda function:
var AWS = require('aws-sdk');
AWS.config.update({ region: "us-west-2" });
var DDB = new AWS.DynamoDB({ apiVersion: "2012-10-08" });
require('aws-sdk/clients/apigatewaymanagementapi');

exports.handler = function (event, context, callback) {
  var url_handler = event.requestContext.domainName + "/" + event.requestContext.stage;
  // var params = event.requestContext;
  // console.log(params);

  var scanParams = {
    TableName: "tbl-web-socket-connection",
    ProjectionExpression: "id"
  };

  DDB.scan(scanParams, function (err, data) {
    // callback(null, {
    //   statusCode: 200,
    //   body: "Data send to"
    // });

    if (err) {
      callback(null, {
        statusCode: 500,
        body: JSON.stringify(err)
      });
    } else {
      var apigwManagementApi = new AWS.ApiGatewayManagementApi({
        apiVersion: "2018-11-29",
        endpoint: event.requestContext.domainName + "/" + event.requestContext.stage
      });

      var postParams = {
        Data: JSON.parse(event.body).data
      };

      var count = 0;

      data.Items.forEach(function (element) {
        postParams.ConnectionId = element.id.S;
        console.log(postParams);

        apigwManagementApi.postToConnection(postParams, function (err, data) {
          if (err) {
            // API Gateway returns a status of 410 GONE when the connection is no
            // longer available. If this happens, we simply delete the identifier
            // from our DynamoDB table.
            if (err.statusCode === 410) {
              console.log("Found stale connection, deleting " + postParams.connectionId);
              DDB.deleteItem({ TableName: process.env.TABLE_NAME,
                Key: { connectionId: { S: postParams.connectionId } } });
            } else {
              console.log("Failed to post. Error: " + JSON.stringify(err));
            }
          } else {
            count++;
          }
        });
      });

      callback(null, {
        statusCode: 200,
        body: "Data send to " + count + " connection" + (count === 1 ? "" : "s")
      });
    }
  });
};
The aws-sdk is also up to date; I declared it in a Lambda layer and that's what I'm using.
Any idea what's causing this?
This is due to a timeout: the DynamoDB scan loops through all of the records, which causes the function to exceed its time limit.
It turns out CloudWatch really was logging the error; I was just too focused on the terminal output, which only gives me the 500 Internal Server Error.
To fix this, go to the Lambda function's configuration and increase the time limit.
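Raising the time limit aside, note that the handler above also fires all of the postToConnection calls without waiting for them, so the final callback can run before any post finishes. A hedged rework of the send loop (my own sketch, not the poster's code, keeping the table's id key) uses the SDK's .promise() form and Promise.all to wait for every post:

// Collect one promise per connection and wait for all of them.
const postCalls = data.Items.map(function (element) {
  const postParams = {
    ConnectionId: element.id.S,
    Data: JSON.parse(event.body).data
  };
  return apigwManagementApi.postToConnection(postParams).promise()
    .catch(function (err) {
      if (err.statusCode === 410) {
        // Stale connection: remove its id from the table.
        return DDB.deleteItem({
          TableName: "tbl-web-socket-connection",
          Key: { id: { S: postParams.ConnectionId } }
        }).promise();
      }
      console.log("Failed to post. Error: " + JSON.stringify(err));
    });
});

Promise.all(postCalls).then(function () {
  callback(null, {
    statusCode: 200,
    body: "Data sent to " + postCalls.length + " connections"
  });
});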

CloudFormation Custom Resource not finishing deleting

I have set up my custom resource to return immediately on deletes:
const aws = require('aws-sdk')
const util = require('util')

exports.handler = (event, context) => {
  console.log('Event>>>')
  console.log(JSON.stringify(event))
  aws.config.update({ region: event.ResourceProperties.Region })
  if (event.RequestType === 'Delete') return ApiMethodCustom.sendResponse(event, context, 'SUCCESS') // HERE!
  ApiMethodCustom.setupIntegration(event, context)
}

static async sendResponse(event, context, responseStatus, responseData = {}) {
  var responseBody = JSON.stringify({
    Status: responseStatus,
    Reason: "See the details in CloudWatch Log Stream: " + context.logStreamName,
    PhysicalResourceId: context.logStreamName,
    StackId: event.StackId,
    RequestId: event.RequestId,
    LogicalResourceId: event.LogicalResourceId,
    Data: responseData
  });
  console.log("RESPONSE BODY:\n", responseBody);

  var https = require("https");
  var url = require("url");

  var parsedUrl = url.parse(event.ResponseURL);
  var options = {
    hostname: parsedUrl.hostname,
    port: 443,
    path: parsedUrl.path,
    method: "PUT",
    headers: {
      "content-type": "",
      "content-length": responseBody.length
    }
  };

  console.log("SENDING RESPONSE...\n");

  var request = https.request(options, function (response) {
    console.log("STATUS: " + response.statusCode);
    console.log("HEADERS: " + JSON.stringify(response.headers));
    // Tell AWS Lambda that the function execution is done
    context.done();
  });

  request.on("error", function (error) {
    console.log("sendResponse Error:" + error);
    // Tell AWS Lambda that the function execution is done
    context.done();
  });

  // write data to request body
  request.write(responseBody);
  request.end();
}
But it appears that CloudFormation is stuck in DELETE_IN_PROGRESS. Why is that?
In my logs, it seems like Lambda finished execution correctly:
2018-09-09T01:52:06.913Z f48808d0-b3d2-11e8-9e84-5b218cad3090
{
  "RequestType": "Delete",
  "ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
  "ResponseURL": "https://cloudformation-custom-resource-response-apsoutheast1.s3-ap-southeast-1.amazonaws.com/arn%3Aaws%3Acloudformation%3Aap-southeast-1%3A621567429603%3Astack/test/5a34d100-b370-11e8-b89d-503a138dba36%7CApiTestIntegration%7C979b1814-d94c-4a49-b9f7-2fa352ab88f5?AWSAccessKeyId=AKIAIKQZQ3QDXOJPHOPA&Expires=1536465125&Signature=O2O0entoTXHCYp5jbJehghtE9Ck%3D",
  "StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
  "RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
  "LogicalResourceId": "ApiTestIntegration",
  "PhysicalResourceId": "2018/09/08/[$LATEST]b8a3df0fca884fe3b8abdde3ab525ac0",
  "ResourceType": "Custom::ApiVpcIntegration",
  "ResourceProperties": {
    "ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
    "ConnectionId": "24lbti",
    "ResourceId": "x1gjyy",
    "RestApiId": "aaj0q4dbml",
    "Uri": "http://dropletapi-dev.2359media.net:3001/authentication",
    "HttpMethod": "GET"
  }
}
2018-09-09T01:52:06.914Z f48808d0-b3d2-11e8-9e84-5b218cad3090 RESPONSE BODY:
{
  "Status": "SUCCESS",
  "Reason": "See the details in CloudWatch Log Stream: 2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
  "PhysicalResourceId": "2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
  "StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
  "RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
  "LogicalResourceId": "ApiTestIntegration",
  "Data": {}
}
I had a similar issue today while using the cfn-response package, which your code appears to be based on. The cfn-response package is callback-based, but your code also seems to partially use async/await (an option when using Runtime: nodejs8.10).
In your case I suspect that you never saw the "STATUS: " or "HEADERS: " messages, even though the response body was dumped to the logs (synchronously). That mirrors my experience when mixing callback-based cfn-response with async/await.
In other words, in all circumstances you need to ensure that you send a response to CloudFormation (a PUT to the event's S3 ResponseURL) before your Lambda terminates, or the template can hang for up to an hour before giving up and rolling back (probably with a CloudFormation error along the lines of "Failed to stabilise the resource..."). Rollback (deletion) in turn can also take an hour, because the delete also does not respond appropriately. A bit more information here.
I ended up implementing custom resources much like this example on GitHub by https://github.com/rosberglinhares (MIT license), with a couple of differences: I didn't set up a separate Lambda to handle the sendResponse functionality, and I made the custom resources serverless (using the aws cloudformation package and aws cloudformation deploy commands).
Your ApiMethodCustom is not defined, so it's hard for me to guide you on that implementation; I am therefore including my nodejs8.10 code using async/await for reference.
First, the custom resource in the CloudFormation template:
---
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: CustomResource Example Stack
Resources:
  CustomResource:
    Type: 'AWS::Serverless::Function'
    Properties:
      Runtime: nodejs8.10
      Handler: index.handler
      MemorySize: 128
      Timeout: 15
      Role: !GetAtt CustomResourceRole.Arn
      CodeUri: ./CustomResource/
  CustomResourceUser:
    Type: 'Custom::CustomResourceUser'
    Properties:
      ServiceToken: !GetAtt CustomResource.Arn
...
Note that CodeUri is relative to the template path. You will need to define the IAM role and policies for CustomResourceRole.
Now for the CustomResource/index.js Lambda (you will also need to run "npm install --save axios" in the CustomResource directory):
'use strict';

const AWS = require('aws-sdk');
const axios = require('axios');

exports.handler = async (event, context) => {
  try {
    switch (event.RequestType) {
      case 'Create':
        await ApiMethodCustom.create(...);
        break;
      case 'Update':
        await ApiMethodCustom.update(...);
        break;
      case 'Delete':
        await ApiMethodCustom.delete(...);
        break;
    }
    console.info(`Success for request type ${event.RequestType}`);
    await sendResponse(event, context, 'SUCCESS', {});
  } catch (error) {
    console.error(`Error for request type ${event.RequestType}: `, error);
    await sendResponse(event, context, 'FAILED', {});
  }
}
async function sendResponse (event, context, responseStatus, responseData, physicalResourceId) {
  var reason = responseStatus == 'FAILED' ? ('See the details in CloudWatch Log Stream: ' + context.logStreamName) : undefined;

  var responseBody = JSON.stringify({
    StackId: event.StackId,
    RequestId: event.RequestId,
    Status: responseStatus,
    Reason: reason,
    PhysicalResourceId: physicalResourceId || context.logStreamName,
    LogicalResourceId: event.LogicalResourceId,
    Data: responseData
  });

  var responseOptions = {
    headers: {
      'Content-Type': '',
      'Content-Length': responseBody.length
    }
  };

  console.info('Response body:\n', responseBody);

  try {
    await axios.put(event.ResponseURL, responseBody, responseOptions);
    console.info('CloudFormationSendResponse Success');
  } catch (error) {
    console.error('CloudFormationSendResponse Error:');
    if (error.response) {
      console.error(error.response.data);
      console.error(error.response.status);
      console.error(error.response.headers);
    } else if (error.request) {
      console.error(error.request);
    } else {
      console.error('Error', error.message);
    }
    console.error(error.config);
    throw new Error('Could not send CloudFormation response');
  }
}
For more information on using callbacks vs. async with AWS Lambda, have a look here.
Finally, note the use of Axios: it's promise-based and therefore supports await instead of callbacks.
