I have set up my custom resource to return immediately on deletes:
const aws = require('aws-sdk')
const util = require('util')

exports.handler = (event, context) => {
  console.log('Event>>>')
  console.log(JSON.stringify(event))

  aws.config.update({ region: event.ResourceProperties.Region })

  if (event.RequestType === 'Delete') return ApiMethodCustom.sendResponse(event, context, 'SUCCESS') // HERE!

  ApiMethodCustom.setupIntegration(event, context)
}
static async sendResponse(event, context, responseStatus, responseData = {}) {
  var responseBody = JSON.stringify({
    Status: responseStatus,
    Reason: "See the details in CloudWatch Log Stream: " + context.logStreamName,
    PhysicalResourceId: context.logStreamName,
    StackId: event.StackId,
    RequestId: event.RequestId,
    LogicalResourceId: event.LogicalResourceId,
    Data: responseData
  });

  console.log("RESPONSE BODY:\n", responseBody);

  var https = require("https");
  var url = require("url");

  var parsedUrl = url.parse(event.ResponseURL);
  var options = {
    hostname: parsedUrl.hostname,
    port: 443,
    path: parsedUrl.path,
    method: "PUT",
    headers: {
      "content-type": "",
      "content-length": responseBody.length
    }
  };

  console.log("SENDING RESPONSE...\n");

  var request = https.request(options, function (response) {
    console.log("STATUS: " + response.statusCode);
    console.log("HEADERS: " + JSON.stringify(response.headers));
    // Tell AWS Lambda that the function execution is done
    context.done();
  });

  request.on("error", function (error) {
    console.log("sendResponse Error:" + error);
    // Tell AWS Lambda that the function execution is done
    context.done();
  });

  // write data to request body
  request.write(responseBody);
  request.end();
}
But it appears that CloudFormation is stuck in DELETE_IN_PROGRESS. Why is that?
In my logs, it seems like Lambda finished execution correctly:
2018-09-09T01:52:06.913Z f48808d0-b3d2-11e8-9e84-5b218cad3090
{
  "RequestType": "Delete",
  "ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
  "ResponseURL": "https://cloudformation-custom-resource-response-apsoutheast1.s3-ap-southeast-1.amazonaws.com/arn%3Aaws%3Acloudformation%3Aap-southeast-1%3A621567429603%3Astack/test/5a34d100-b370-11e8-b89d-503a138dba36%7CApiTestIntegration%7C979b1814-d94c-4a49-b9f7-2fa352ab88f5?AWSAccessKeyId=AKIAIKQZQ3QDXOJPHOPA&Expires=1536465125&Signature=O2O0entoTXHCYp5jbJehghtE9Ck%3D",
  "StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
  "RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
  "LogicalResourceId": "ApiTestIntegration",
  "PhysicalResourceId": "2018/09/08/[$LATEST]b8a3df0fca884fe3b8abdde3ab525ac0",
  "ResourceType": "Custom::ApiVpcIntegration",
  "ResourceProperties": {
    "ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
    "ConnectionId": "24lbti",
    "ResourceId": "x1gjyy",
    "RestApiId": "aaj0q4dbml",
    "Uri": "http://dropletapi-dev.2359media.net:3001/authentication",
    "HttpMethod": "GET"
  }
}
2018-09-09T01:52:06.914Z f48808d0-b3d2-11e8-9e84-5b218cad3090 RESPONSE BODY:
{
  "Status": "SUCCESS",
  "Reason": "See the details in CloudWatch Log Stream: 2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
  "PhysicalResourceId": "2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
  "StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
  "RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
  "LogicalResourceId": "ApiTestIntegration",
  "Data": {}
}
I had a similar issue today while using the cfn-response package, which your code appears to be based on. The cfn-response package is callback-based, but your code also seems to partially use async/await (an option with Runtime: nodejs8.10).
In your case I suspect you never saw the "STATUS:" or "HEADERS:" messages, even though the response body was dumped to the logs (synchronously). That mirrors my experience when mixing the callback-based cfn-response with async/await.
In other words, in all circumstances you need to ensure that you send a response to CloudFormation (a PUT to the event's S3 ResponseURL) before your Lambda terminates, or the template can hang for up to an hour before giving up and rolling back (probably with a CloudFormation error along the lines of "Failed to stabilise the resource..."). The rollback (deletion) in turn can also take an hour, because the delete does not respond appropriately either. A bit more information here.
I ended up implementing custom resources much like this example on GitHub by https://github.com/rosberglinhares (MIT license), with a couple of differences: I didn't set up a separate Lambda to handle the sendResponse functionality, and I made the custom resources serverless (using the aws cloudformation package and aws cloudformation deploy commands).
Your ApiMethodCustom is not defined, so it's hard for me to guide you on that implementation; instead I am including my nodejs8.10 code using async/await for reference.
First, the custom resource in the CloudFormation template:
---
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: CustomResource Example Stack
Resources:
  CustomResource:
    Type: 'AWS::Serverless::Function'
    Properties:
      Runtime: nodejs8.10
      Handler: index.handler
      MemorySize: 128
      Timeout: 15
      Role: !GetAtt CustomResourceRole.Arn
      CodeUri: ./CustomResource/

  CustomResourceUser:
    Type: 'Custom::CustomResourceUser'
    Properties:
      ServiceToken: !GetAtt CustomResource.Arn
...
Note that CodeUri is relative to the template path. You will also need to define the IAM role and policies for CustomResourceRole (at minimum, a role that lambda.amazonaws.com can assume, with permission to write to CloudWatch Logs).
Now for the CustomResource/index.js Lambda (you will also need to run "npm install --save axios" in the CustomResource directory):
'use strict';

const AWS = require('aws-sdk');
const axios = require('axios');

exports.handler = async (event, context) => {
  try {
    switch (event.RequestType) {
      case 'Create':
        await ApiMethodCustom.create(...);
        break;
      case 'Update':
        await ApiMethodCustom.update(...);
        break;
      case 'Delete':
        await ApiMethodCustom.delete(...);
        break;
    }

    console.info(`Success for request type ${event.RequestType}`);
    await sendResponse(event, context, 'SUCCESS', { });
  } catch (error) {
    console.error(`Error for request type ${event.RequestType}: `, error);
    await sendResponse(event, context, 'FAILED', { });
  }
}
async function sendResponse (event, context, responseStatus, responseData, physicalResourceId) {
  var reason = responseStatus == 'FAILED' ? ('See the details in CloudWatch Log Stream: ' + context.logStreamName) : undefined;

  var responseBody = JSON.stringify({
    StackId: event.StackId,
    RequestId: event.RequestId,
    Status: responseStatus,
    Reason: reason,
    PhysicalResourceId: physicalResourceId || context.logStreamName,
    LogicalResourceId: event.LogicalResourceId,
    Data: responseData
  });

  var responseOptions = {
    headers: {
      'Content-Type': '',
      'Content-Length': responseBody.length
    }
  };

  console.info('Response body:\n', responseBody);

  try {
    await axios.put(event.ResponseURL, responseBody, responseOptions);

    console.info('CloudFormationSendResponse Success');
  } catch (error) {
    console.error('CloudFormationSendResponse Error:');

    if (error.response) {
      console.error(error.response.data);
      console.error(error.response.status);
      console.error(error.response.headers);
    } else if (error.request) {
      console.error(error.request);
    } else {
      console.error('Error', error.message);
    }

    console.error(error.config);
    throw new Error('Could not send CloudFormation response');
  }
}
For more information on using callback vs. async handlers with AWS Lambda, have a look here.
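As a quick illustration (a generic sketch, not tied to the custom-resource code above; doWork and doWorkPromise are hypothetical stand-ins for your own asynchronous calls), the two handler styles look like this. With the callback style Lambda waits for the callback; with an async handler the invocation ends as soon as the returned promise settles, so pending work must be awaited:
// Hypothetical stand-ins for your own asynchronous work.
const doWork = (event, cb) => setTimeout(() => cb(null, { ok: true }), 100);
const doWorkPromise = (event) => new Promise(resolve => setTimeout(() => resolve({ ok: true }), 100));

// Callback-style handler: completion is signalled explicitly via the callback.
exports.callbackHandler = (event, context, callback) => {
  doWork(event, (err, result) => {
    if (err) return callback(err);
    callback(null, result);
  });
};

// Async handler: the invocation ends when the returned promise resolves,
// so any pending work must be awaited before returning.
exports.asyncHandler = async (event) => {
  return doWorkPromise(event);
};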
Finally, note the use of Axios. It's promise-based and therefore supports await instead of callbacks.
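If you would rather not add the Axios dependency, the built-in https module can be wrapped in a small promise so the PUT can still be awaited from an async handler; a minimal sketch along those lines (untested):
const https = require('https');
const url = require('url');

// Promise wrapper around https.request for the CloudFormation response PUT.
function putResponse(responseUrl, responseBody) {
  return new Promise((resolve, reject) => {
    const parsedUrl = url.parse(responseUrl);
    const request = https.request({
      hostname: parsedUrl.hostname,
      port: 443,
      path: parsedUrl.path,
      method: 'PUT',
      headers: {
        'content-type': '',
        'content-length': Buffer.byteLength(responseBody)
      }
    }, (response) => resolve(response.statusCode));

    request.on('error', reject);
    request.write(responseBody);
    request.end();
  });
}

// Usage inside an async handler: await putResponse(event.ResponseURL, responseBody);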
Related
I have a very simple Lambda function running on LocalStack and accessing DynamoDB.
The function runs fine and I can see the data in DynamoDB with this CLI command:
aws dynamodb scan --endpoint-url=http://localhost:4566 --table-name authors
But I am not able to get output from the Lambda function, nor any logs from it.
That is, console.log('response == ', JSON.stringify(response)) is not getting printed,
and the output from the Lambda's return response is also not available in the JSON file I specified while invoking the function (myop.json).
The myop.json file contains null.
The command used to invoke the Lambda function is:
awslocal lambda invoke --function-name lambda-dynamodb-service-local-LambdaDynamoDB myop.json --region REGION
Can anybody please help here?
The actual Lambda code is here:
let AWS = require("aws-sdk");

AWS.config.update({
  region: "REGION",
});

let docClient = new AWS.DynamoDB.DocumentClient({
  endpoint: "IP ADDRSS:PORT"
});

exports.handler = function (event, context, callback) {
  let data = null;

  docClient.scan({
    TableName: 'authors',
    FilterExpression: '#name > :name',
    ExpressionAttributeNames: {
      '#name': 'name',
    },
    ExpressionAttributeValues: {
      ':name': "",
    },
  }, function (err, data) {
    if (err) { console.log(err); return; }
    console.log(data.Items);
    data = data.Items;
  });

  let response = {
    "statusCode": 200,
    "body": JSON.stringify(data)
  }

  console.log('response == ', JSON.stringify(response));
  return response
}
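For comparison, a minimal sketch of the same handler written with async/await and DocumentClient's .promise(), so the response is only built after the scan has completed (a sketch of the intended behaviour, not tested against LocalStack):
let AWS = require("aws-sdk");

let docClient = new AWS.DynamoDB.DocumentClient({
  endpoint: "IP ADDRSS:PORT"   // same placeholder endpoint as above
});

exports.handler = async function (event) {
  // Wait for the scan to finish before building the response.
  const result = await docClient.scan({
    TableName: 'authors',
    FilterExpression: '#name > :name',
    ExpressionAttributeNames: { '#name': 'name' },
    ExpressionAttributeValues: { ':name': "" },
  }).promise();

  console.log('items == ', JSON.stringify(result.Items));

  return {
    statusCode: 200,
    body: JSON.stringify(result.Items)
  };
};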
So I am trying to develop an AWS WebSocket function using Lambda. But it seems that whenever I try to call "postToConnection" it just gives me a 500 internal server error.
CloudWatch also doesn't log the error that I am receiving.
And what I'm receiving on the terminal once I send the message is this:
"{"message": "Internal server error", "connectionId":"xxx", "requestId":"xxx"}"
(Which doesn't give me any information at all)
This is my whole code for the Lambda function:
var AWS = require('aws-sdk');
AWS.config.update({ region: "us-west-2" });
var DDB = new AWS.DynamoDB({ apiVersion: "2012-10-08" });
require('aws-sdk/clients/apigatewaymanagementapi');

exports.handler = function (event, context, callback) {
  var url_handler = event.requestContext.domainName + "/" + event.requestContext.stage;
  // var params = event.requestContext;
  // console.log(params);

  var scanParams = {
    TableName: "tbl-web-socket-connection",
    ProjectionExpression: "id"
  };

  DDB.scan(scanParams, function (err, data) {
    // callback(null, {
    //   statusCode: 200,
    //   body: "Data send to"
    // });

    if (err) {
      callback(null, {
        statusCode: 500,
        body: JSON.stringify(err)
      });
    } else {
      var apigwManagementApi = new AWS.ApiGatewayManagementApi({
        apiVersion: "2018-11-29",
        endpoint: event.requestContext.domainName + "/" + event.requestContext.stage
      });

      var postParams = {
        Data: JSON.parse(event.body).data
      };

      var count = 0;

      data.Items.forEach(function (element) {
        postParams.ConnectionId = element.id.S;
        console.log(postParams);

        apigwManagementApi.postToConnection(postParams, function (err, data) {
          if (err) {
            // API Gateway returns a status of 410 GONE when the connection is no
            // longer available. If this happens, we simply delete the identifier
            // from our DynamoDB table.
            if (err.statusCode === 410) {
              console.log("Found stale connection, deleting " + postParams.connectionId);
              DDB.deleteItem({
                TableName: process.env.TABLE_NAME,
                Key: { connectionId: { S: postParams.connectionId } }
              });
            } else {
              console.log("Failed to post. Error: " + JSON.stringify(err));
            }
          } else {
            count++;
          }
        });
      });

      callback(null, {
        statusCode: 200,
        body: "Data send to " + count + " connection" + (count === 1 ? "" : "s")
      });
    }
  });
};
The aws-sdk is also updated; I declared it in a Lambda layer and that's what I'm using.
Any idea what's causing this?
This is due to a timeout: the DynamoDB scan loops through all of the records, which causes the Lambda to time out.
It turns out CloudWatch was actually logging the error; I was just too focused on the terminal output, which only gives me the 500 Internal Server Error.
To fix this, go to the Lambda function's configuration and increase the timeout.
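For reference, the timeout can also be raised from the CLI rather than the console (assuming the AWS CLI is configured; the function name below is a placeholder):
aws lambda update-function-configuration --function-name my-websocket-function --timeout 30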
I am getting a 502 error back when I call my Netlify function. Is there something I am doing wrong in my Axios call, or does the "error" sent in the callback need to be an actual Error object?
Below is an example of my function:
const axios = require('axios')
require('dotenv').config()
const https = require('https')

const headers = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'Content-Type'
}

exports.handler = function (event, context, callback) {
  // your server-side functionality
  axios
    .post(
      `https://us18.api.mailchimp.com/3.0/lists/${
        process.env.LIST_ID
      }/members/`,
      {
        email_address: 'deuce3608#gmail.com',
        status: 'subscribed'
      },
      {
        auth: {
          username: 'admin',
          password: process.env.MAILCHIMP_API_KEY
        }
      }
    )
    .then(response => {
      callback(null, {
        statusCode: 200,
        headers,
        body: response.data
      })
    })
    .catch(err => {
      callback(JSON.stringify(err.response.data))
    })
}
Netlify announced in April (2018) that Node.js 8.10 would be the default in Netlify functions.
Using Callback Parameter:
When you need to return an error from a Lambda function on Netlify using the callback parameter, it uses the same format as Lambda functions on AWS.
You will need to pass an Error as the first parameter of the callback, as you can see in the AWS documentation:
callback(Error error, Object result);
The error is used if it is not null, and the result will then be ignored.
Using Async Handler:
You also have the option to return your error in the response with an error status code like the example function below.
import fetch from "node-fetch";

const API_ENDPOINT =
  "https://08ad1pao69.execute-api.us-east-1.amazonaws.com/dev/random_joke";

exports.handler = async (event, context) => {
  return fetch(API_ENDPOINT)
    .then(response => response.json())
    .then(data => ({
      statusCode: 200,
      body: `${data.setup} ${data.punchline} *BA DUM TSSS*`
    }))
    .catch(error => ({ statusCode: 422, body: String(error) }));
};
Showing simple tests
Error
exports.handler = function(event, context, callback) {
  const err = new Error("this is an error")
  callback(err);
}
Response (response status code 502):
{"errorMessage":"this is an error","errorType":"Error","stackTrace":["48.exports.handler (/var/task/showerror.js:75:13)"]}
Object
exports.handler = function(event, context, callback) {
  const err = {statusCode: 422, body: "this is an error"}
  callback(err);
}
Response (response status code 502):
{"errorMessage":"[object Object]"}
String
exports.handler = function(event, context, callback) {
  const err = "this is an error"
  callback(err);
}
Response (response status code 502):
{"errorMessage":"this is an error"}
NOTE:
If you want to use the callback and still have the error status code in the response, you just pass the error as an object in the result (second) parameter of the callback.
exports.handler = function(event, context, callback) {
  const err = {statusCode: 422, body: "this is an error"}
  callback(null, err);
}
Response (response status code 422):
this is an error
You could arrange the response like so:
var response = {
  statusCode: 4xx,
  body: ''
}
and then pass it to the callback to return the error:
response.body = 'some error text here';
callback(response);
Netlify is using AWS Lambda under the hood, so if you can do it in the AWS console you should in theory be able to call things the same way, since they take your code and deploy it via CloudFormation in their infrastructure.
I wonder if I've hit a bug. I wrote a piece of Node.js code to trigger a "GCS Text to PubSub" Dataflow template. The function is triggered upon file upload into a GCS bucket.
But it never executes successfully: "textPayload: "problem running dataflow template, error was: { Error: Invalid JSON payload received. Unknown name "staging_location": Cannot find field."" It seems to be an issue with the syntax I use to specify the staging location for the job. I have tried "staginglocation", "stagingLocation", etc.; none of them have worked.
Here's my code. Thanks for your help.
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }

    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });

    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      projectId: projectId,
      resource: {
        parameters: {
          inputFile: `gs://${file.bucket}/${file.name}`,
          outputTopic: `projects/iot-fitness-198120/topics/MemberFitnessData`,
        },
        jobName: 'CStoPubSub',
        gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
        stagingLocation: 'gs://fitnessanalytics-tmp/tmp'
      }
    }, function (err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};
I don't think this is actually possible.
Looking at the documentation for the Dataflow API itself, there's nothing like a staging location in the parameter section, and the library you're using is basically a wrapper for this API.
I'm a bit surprised it changes the name of the parameter though.
So I finally got this to work. It was indeed a syntax issue in the request: gcsPath belongs at the top level (not inside resource), the input parameter is inputFilePattern rather than inputFile, and the staging location is specified as environment.tempLocation rather than in parameters. The code below works like a charm:
var {google} = require('googleapis');

exports.moveDataFromGCStoPubSub = (event, callback) => {
  const file = event.data;
  const context = event.context;

  console.log(`Event ${context.eventId}`);
  console.log(` Event Type: ${context.eventType}`);
  console.log(` Bucket: ${file.bucket}`);
  console.log(` File: ${file.name}`);
  console.log(` Metageneration: ${file.metageneration}`);
  console.log(` Created: ${file.timeCreated}`);
  console.log(` Updated: ${file.updated}`);

  google.auth.getApplicationDefault(function (err, authClient, projectId) {
    if (err) {
      throw err;
    }

    console.log(projectId);

    const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });

    console.log(`gs://${file.bucket}/${file.name}`);

    dataflow.projects.templates.create({
      gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      projectId: projectId,
      resource: {
        parameters: {
          inputFilePattern: `gs://${file.bucket}/${file.name}`,
          outputTopic: 'projects/iot-fitness-198120/topics/MemberFitnessData2'
        },
        environment: {
          tempLocation: 'gs://fitnessanalytics-tmp/tmp'
        },
        jobName: 'CStoPubSub',
        //gcsPath: 'gs://dataflow-templates/latest/GCS_Text_to_Cloud_PubSub',
      }
    }, function (err, response) {
      if (err) {
        console.error("problem running dataflow template, error was: ", err);
      }
      console.log("Dataflow template response: ", response);
      callback();
    });
  });

  callback();
};
I keep getting a timeout in my Lambda function when I try to call the iotData publish function (code below). It always times out without an error. The function works from the sam local command line, so I imagine this is a permissions issue with Lambda. The strange thing is that I've already given this Lambda function permissions for IoT, Kinesis and SNS, but nothing is working.
'use strict';

console.log('Loading function');

require('dotenv').config();
const {Pool} = require('pg');
const pool = new Pool();
const AWS = require('aws-sdk');

console.log("finished loading");

/**
 * Provide an event that contains the following keys:
 *
 *   - resource: API Gateway resource for event
 *   - path: path of the HTTPS request to the microservices API call
 *   - httpMethod: HTTP method of the HTTPS request from microservices API call
 *   - headers: HTTP headers for the HTTPS request from microservices API call
 *   - queryStringParameters: query parameters of the HTTPS request from microservices API call
 *   - pathParameters: path parameters of the HTTPS request from microservices API call
 *   - stageVariables: API Gateway stage variables, if applicable
 *   - body: body of the HTTPS request from the microservices API call
 */
exports.handler = function(event, context, callback) {
  console.log("starting");

  let _response = "";
  context.callbackWaitsForEmptyEventLoop = false;

  if (event.httpMethod === "POST" && event.resource === "/pings") {
    var body = JSON.parse(event.body);
    console.log("here2");

    pool.query("SELECT name from pings where test = decode($1,'hex');", [body.bid], (err, res) => {
      if (err) {
        console.error(err.stack);
        _response = buildOutput(500, {
          message: "error in pg"
        });
        callback(_response, null);
      }
      console.log("here3");

      var iotdata = new AWS.IotData({endpoint: 'XXXXXXX.iot.us-east-1.amazonaws.com'});
      const publishParams = {
        topic: body.topic,
        payload: Buffer.from(JSON.stringify({
          message: "Welcome to " + res.rows[0].name + " house"
        }), 'utf8'),
        qos: 0
      }
      console.log("here4");

      iotdata.publish(publishParams, function(err, data) {
        if (err) {
          console.error(err.stack);
          _response = buildOutput(500, {
            message: "error in pg"
          });
          callback(_response, null);
        }
        _response = buildOutput(200, {message: "success"});
        callback(null, _response);
      });
    });
  } else {
    _response = buildOutput(500, {
      message: "path not found"
    });
    callback(_response, null);
  }
};

/* Utility function to build HTTP response for the microservices output */
function buildOutput(statusCode, data) {
  let _response = {
    statusCode: statusCode,
    headers: {
      "Access-Control-Allow-Origin": "*"
    },
    body: JSON.stringify(data)
  };
  return _response;
}
The policy:
{
  "Sid": "",
  "Effect": "Allow",
  "Action": [
    "iot:*"
  ],
  "Resource": "*"
},
UPDATE:
I attempted to give the lambda function admin access temporarily and that did not even work.
This is how you can publish to an MQTT topic directly from a Lambda function.
Use the code below, written for the Node.js 10.x runtime.
var AWS = require('aws-sdk');

const IOT_ENDPOINT = "yourIoTEndpoint.iot.region.amazonaws.com";

var iotdata = new AWS.IotData({endpoint: IOT_ENDPOINT});

exports.handler = function(event, context, callback) {
  var params = {
    topic: 'my/topic',
    payload: 'This is my message from the Lambda function.',
    qos: 1
  };

  iotdata.publish(params, function(err, data) {
    if (err) {
      callback(err, null);
    } else {
      callback(null, {"published_message": params.payload, "topic": params.topic});
    }
  });
};
Also, you can check the message being sent by subscribing to the topic my/topic through IoT Core > Test in the AWS Console.