I have very simple lambda function running on local stack and accessing dynamo db.
Function runs fine and I can see data in dynamodb with cli command as -
aws dynamodb scan --endpoint-url=http://localhost:4566 --table-name authors
But I am not able to get output from lambda function nor any logs from lambda function
That is : console.log('response == ',JSON.stringify(response)) is not getting printed
and the output from the lambda's `return response` is also not available in the JSON file I gave while invoking the lambda function (myop.json)
The myop.json file contains null
The command used to invoke lambda function is :
awslocal lambda invoke --function-name lambda-dynamodb-service-local-LambdaDynamoDB myop.json --region REGION
Can anybody please help here ?
Actual lambda code is here :
let AWS = require("aws-sdk");
AWS.config.update({
region: "REGION", // placeholder — replace with a real region, e.g. "us-east-1"
});
// DocumentClient pointed at a local DynamoDB endpoint (LocalStack in this question).
let docClient = new AWS.DynamoDB.DocumentClient({
endpoint:"IP ADDRSS:PORT" // placeholder — e.g. "http://localhost:4566" for LocalStack
});
exports.handler = function (event, context, callback) {
let data = null;
docClient.scan({
TableName: 'authors',
FilterExpression: '#name > :name',
ExpressionAttributeNames: {
'#name': 'name',
},
ExpressionAttributeValues: {
':name': "",
},
}, function(err, data) {
if (err) { console.log(err); return; }
console.log(data.Items);
data = data.Items;
});
let response = {
"statusCode": 200,
"body": JSON.stringify(data)
}
console.log('response == ',JSON.stringify(response));
return response
}
Related
Well, my lambda function works well according to the logs, but it never gets completed in the codepipeline stage. I have already set permissions on the role to allow notifying the pipeline ("codepipeline:PutJobSuccessResult",
"codepipeline:PutJobFailureResult") and even set the maximum time to 20 sec, but it is still not working (it actually ends at 800 ms).
const axios = require('axios')
const AWS = require('aws-sdk');
// Endpoint that is notified about the pipeline stage before reporting success.
const url = 'www.exampleurl.com'
exports.handler = async (event, context) => {
const codepipeline = new AWS.CodePipeline();
const jobId = event["CodePipeline.job"].id;
const stage = event["CodePipeline.job"].data.actionConfiguration.configuration.UserParameters;
const putJobSuccess = function(message) {
var params = {
jobId: jobId
};
codepipeline.putJobSuccessResult(params, function(err, data) {
if (err) {context.fail(err); }
else {context.succeed(message);}
});
};
const putJobFailure = function(message) {
var params = {
jobId: jobId,
failureDetails: {
message: JSON.stringify(message),
type: 'JobFailed',
externalExecutionId: context.invokeid
}
};
codepipeline.putJobFailureResult(params, function(err, data) {
if (err) console.log(err)
context.fail(message);
});
};
try {
await axios.post(url, { content: stage})
putJobSuccess('all fine')
} catch (e) {
putJobFailure(e)
}
};
The root issue
Because nodeJS runs everything async by default, codepipeline.putJobSuccessResult is being run async. The issue seems to be that the Lambda function is finishing its execution before codepipeline.putJobSuccessResult has a chance to complete.
The solution
Await codepipeline.putJobSuccessResult (via its .promise()) so that it is forced to complete before the handler returns its response to Lambda.
// Report success for this CodePipeline job and return a promise the handler
// can await, so the invocation does not end before the API call completes.
// NOTE(review): this passes BOTH a node-style callback and then calls
// .promise() on the returned AWS.Request — confirm against the SDK v2 docs
// that this does not register/send the request twice; normally you use one
// or the other, not both.
const putJobSuccess = function(id) {
  //await sleep(60);
  console.log("Telling Codepipeline test passed for job: " + id)
  var params = {
    jobId: id
  };
  return codepipeline.putJobSuccessResult(params, function(err, data) {
    if(err) {
      console.error(err)
    } else {
      console.log(data)
    }
  }).promise()
};
// The handler must be async and AWAIT putJobSuccess, so the success result is
// delivered to CodePipeline before Lambda considers the invocation complete.
exports.lambdaHandler = async (event, context) => {
  // '...' = the rest of the handler body, elided in the original answer
  ...
  await putJobSuccess( jobId )
  return response
};
Whenever I see this issue, most of the time it is due to 'PutJobSuccessResult' never being invoked. The best way to check this is to go to CloudTrail > 'Event History' and look for 'Event name' = 'PutJobSuccessResult' during the time range you expect the Lambda function calling this API. Probably you will not find the 'PutJobSuccessResult', then please have a look at the code again and the Lambda execution logs in CloudWatch.
so I am trying to develop an aws websocket function using lambda. But it seems that whenever I try to call "postToConnection" it just gives me 500 internal server error.
CloudWatch also doesn't log the error that I am receiving.
And what I'm receiving on the terminal once I send the message is this:
"{"message": "Internal server error", "connectionId":"xxx", "requestId":"xxx"}"
(Which doesn't give me any information at all)
This is my whole code on the lambda function.
var AWS = require('aws-sdk');
AWS.config.update({ region: "us-west-2" });
// NOTE(review): DynamoDB's low-level API version is "2012-08-10"; the
// "2012-10-08" here looks transposed — confirm which version the SDK
// actually resolves this to.
var DDB = new AWS.DynamoDB({ apiVersion: "2012-10-08" });
// Side-effect require: registers the ApiGatewayManagementApi client on the
// AWS namespace (needed on older SDK builds bundled in the runtime/layer).
require('aws-sdk/clients/apigatewaymanagementapi');
exports.handler = function (event, context, callback) {
var url_handler = event.requestContext.domainName + "/" + event.requestContext.stage;
// var params = event.requestContext;
// console.log(params);
var scanParams = {
TableName: "tbl-web-socket-connection",
ProjectionExpression: "id"
};
DDB.scan(scanParams, function (err, data) {
// callback(null, {
// statusCode: 200,
// body: "Data send to"
// });
if (err) {
callback(null, {
statusCode: 500,
body: JSON.stringify(err)
});
} else {
var apigwManagementApi = new AWS.ApiGatewayManagementApi({
apiVersion: "2018-11-29",
endpoint: event.requestContext.domainName + "/" + event.requestContext.stage
});
var postParams = {
Data: JSON.parse(event.body).data
};
var count = 0;
data.Items.forEach(function (element) {
postParams.ConnectionId = element.id.S;
console.log(postParams);
apigwManagementApi.postToConnection(postParams, function (err, data) {
if (err) {
// API Gateway returns a status of 410 GONE when the connection is no
// longer available. If this happens, we simply delete the identifier
// from our DynamoDB table.
if (err.statusCode === 410) {
console.log("Found stale connection, deleting " + postParams.connectionId);
DDB.deleteItem({ TableName: process.env.TABLE_NAME,
Key: { connectionId: { S: postParams.connectionId } } });
} else {
console.log("Failed to post. Error: " + JSON.stringify(err));
}
} else {
count++;
}
});
});
callback(null, {
statusCode: 200,
body: "Data send to " + count + " connection" + (count === 1 ? "" : "s")
});
}
});
};
The aws-sdk is also updated, I declared it on a lambda layer and that's what I'm using.
Any idea what's causing this?
This is due to a timeout: the DynamoDB scan loops through all of the records, which causes the timeout.
It looks like the cloudwatch was really logging the error, but I was just too focused on the terminal error which gives me the 500, Internal Server Error.
To fix this, just go to the lambda function and increase the time limit.
Is there an example how to completely use async/await in aws lambda functions? Most lambda example starts with:
module.exports.handler = (event, context, callback) => {
Now I tried to use:
module.exports.hello = async (event, context) => {
but now I have to call a lambda-function in this lambda function.
Can I just write:
'use strict';
const AWS = require('aws-sdk');
const lambda = new AWS.Lambda({
region: 'my-region' // placeholder — replace with a real region
});
// Module-scope invocation parameters; Payload is filled in per invocation.
let params = {
FunctionName: process.env.lambdafunc, /* required */
Payload: "",
InvocationType: "Event" // fire-and-forget: the response carries no function result
};
/**
 * Run the business logic, then asynchronously invoke the target Lambda
 * (InvocationType "Event") with a JSON payload and return an HTTP-style
 * response.
 *
 * Fixes vs. the original:
 *  - `data` was an implicit global — now a scoped `let`;
 *  - `new Buffer(...)` is deprecated — use `Buffer.from`;
 *  - `await lambda.invokeAsync(params, cb)` awaited an AWS.Request, not a
 *    promise (invokeAsync is a legacy API with different params anyway);
 *    `invoke(params).promise()` returns a real Promise that `await` waits on.
 */
module.exports.hello = async (event, context) => {
  /** HERE COMES SOME CODE AND BUSINESS LOGIC
   * ...
   * ...
   */
  // MYJSONDATA is the asker's placeholder for the payload object.
  params.Payload = Buffer.from(JSON.stringify(MYJSONDATA));
  let data;
  try {
    data = await lambda.invoke(params).promise();
    console.log(data); // successful response
  } catch (err) {
    console.log(err, err.stack); // an error occurred
  }
  return {
    statusCode: 200,
    body: JSON.stringify({
      message: 'Go Serverless v1.0! Your function executed successfully!',
      data: data
    }),
  };
};
I have setup my custom resource to return immediately on deletes
const aws = require('aws-sdk')
const util = require('util')
exports.handler = (event, context) => {
console.log('Event>>>')
console.log(JSON.stringify(event))
aws.config.update({ region: event.ResourceProperties.Region })
if (event.RequestType === 'Delete') return ApiMethodCustom.sendResponse(event, context, 'SUCCESS') // HERE!
ApiMethodCustom.setupIntegration(event, context)
}
static async sendResponse(event, context, responseStatus, responseData = {}) {
var responseBody = JSON.stringify({
Status: responseStatus,
Reason: "See the details in CloudWatch Log Stream: " + context.logStreamName,
PhysicalResourceId: context.logStreamName,
StackId: event.StackId,
RequestId: event.RequestId,
LogicalResourceId: event.LogicalResourceId,
Data: responseData
});
console.log("RESPONSE BODY:\n", responseBody);
var https = require("https");
var url = require("url");
var parsedUrl = url.parse(event.ResponseURL);
var options = {
hostname: parsedUrl.hostname,
port: 443,
path: parsedUrl.path,
method: "PUT",
headers: {
"content-type": "",
"content-length": responseBody.length
}
};
console.log("SENDING RESPONSE...\n");
var request = https.request(options, function (response) {
console.log("STATUS: " + response.statusCode);
console.log("HEADERS: " + JSON.stringify(response.headers));
// Tell AWS Lambda that the function execution is done
context.done();
});
request.on("error", function (error) {
console.log("sendResponse Error:" + error);
// Tell AWS Lambda that the function execution is done
context.done();
});
// write data to request body
request.write(responseBody);
request.end();
}
But it appears that CloudFormation is stuck in DELETE_IN_PROGRESS. Why is that?
In my logs, it seems like Lambda finished execution correctly:
2018-09-09T01:52:06.913Z f48808d0-b3d2-11e8-9e84-5b218cad3090
{
"RequestType": "Delete",
"ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
"ResponseURL": "https://cloudformation-custom-resource-response-apsoutheast1.s3-ap-southeast-1.amazonaws.com/arn%3Aaws%3Acloudformation%3Aap-southeast-1%3A621567429603%3Astack/test/5a34d100-b370-11e8-b89d-503a138dba36%7CApiTestIntegration%7C979b1814-d94c-4a49-b9f7-2fa352ab88f5?AWSAccessKeyId=AKIAIKQZQ3QDXOJPHOPA&Expires=1536465125&Signature=O2O0entoTXHCYp5jbJehghtE9Ck%3D",
"StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
"RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
"LogicalResourceId": "ApiTestIntegration",
"PhysicalResourceId": "2018/09/08/[$LATEST]b8a3df0fca884fe3b8abdde3ab525ac0",
"ResourceType": "Custom::ApiVpcIntegration",
"ResourceProperties": {
"ServiceToken": "arn:aws:lambda:ap-southeast-1:621567429603:function:income2-base-ApiVpcIntegration",
"ConnectionId": "24lbti",
"ResourceId": "x1gjyy",
"RestApiId": "aaj0q4dbml",
"Uri": "http://dropletapi-dev.2359media.net:3001/authentication",
"HttpMethod": "GET"
}
}
2018-09-09T01:52:06.914Z f48808d0-b3d2-11e8-9e84-5b218cad3090 RESPONSE BODY:
{
"Status": "SUCCESS",
"Reason": "See the details in CloudWatch Log Stream: 2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
"PhysicalResourceId": "2018/09/09/[$LATEST]29276598cb9c49c1b1da3672c8707c78",
"StackId": "arn:aws:cloudformation:ap-southeast-1:621567429603:stack/test/5a34d100-b370-11e8-b89d-503a138dba36",
"RequestId": "979b1814-d94c-4a49-b9f7-2fa352ab88f5",
"LogicalResourceId": "ApiTestIntegration",
"Data": {}
}
I had a similar issue today while using the cfn-response package, which your code appears to be based on. The cfn-response package is based on a callback but your code also seems to partially use async/await (option with Runtime: node.js8.10).
In your case I suspect that you never saw the "STATUS: " or "HEADERS: " messages even if the response body was dumped to logs (synchronously). That mirrors my experience when using callback-based cfn-response mixed with async/await.
In other words, in all circumstances you will need to ensure that you send a response to Cloudformation (PUT to the event S3 ResponseURL) before your Lambda terminates or the template could hang for up to an hour before giving up and rolling back (probably with a Cloudformation error along the lines of "Failed to stabilise the resource...". Rollback (deletion) in turn can also take an hour because the delete also does not response appropriately. A bit more information here.
I ended up implementing custom resources much like this example on GitHub by https://github.com/rosberglinhares (MIT license) with a couple of differences; I didn't set-up a separate lambda to handle the sendResponse functionality and I made the custom resources server-less (using aws cloudformation package and aws cloudformation deploy commands).
Your ApiMethodCustom is not defined so it's hard for me to guide you on that implementation and so I am including my node.js8.10 code using async/await for reference.
First the Custom resource in the Cloudformation template:
---
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: CustomResource Example Stack
Resources:
  CustomResource:
    Type: 'AWS::Serverless::Function'
    Properties:
      Runtime: nodejs8.10
      Handler: index.handler
      MemorySize: 128
      Timeout: 15
      Role: !GetAtt CustomResourceRole.Arn
      CodeUri: ./CustomResource/
  CustomResourceUser:
    Type: 'Custom::CustomResourceUser'
    Properties:
      ServiceToken: !GetAtt CustomResource.Arn
...
Note that CodeUri is relative to the template path. You will need to define the IAM role and policies for CustomResourceRole.
Now for the CustomResource/index.js Lambda (you will also need to run "npm install --save axios" in the CustomResource directory):
'use strict';
const AWS = require('aws-sdk');
const axios = require('axios');
exports.handler = async (event, context) => {
try {
switch (event.RequestType) {
case 'Create':
await ApiMethodCustom.create(...);
break;
case 'Update':
await ApiMethodCustom.update(...);
break;
case 'Delete':
await ApiMethodCustom.delete(...);
break;
}
console.info('Success for request type ${event.RequestType}');
await sendResponse(event, context, 'SUCCESS', { } );
} catch (error) {
console.error('Error for request type ${event.RequestType}: ', error);
await sendResponse(event, context, 'FAILED', { } );
}
}
/**
 * PUT the custom-resource result to the CloudFormation pre-signed S3 URL.
 *
 * @param {object} event              CloudFormation custom-resource event
 * @param {object} context            Lambda context (log stream / default id)
 * @param {string} responseStatus     'SUCCESS' or 'FAILED'
 * @param {object} responseData       Data map returned to the template
 * @param {string} [physicalResourceId]  Defaults to the log stream name
 * @throws Error if the response could not be delivered (the template would
 *         otherwise hang until CloudFormation's timeout)
 */
async function sendResponse (event, context, responseStatus, responseData, physicalResourceId) {
  var reason = responseStatus == 'FAILED' ? ('See the details in CloudWatch Log Stream: ' + context.logStreamName) : undefined;
  var responseBody = JSON.stringify({
    StackId: event.StackId,
    RequestId: event.RequestId,
    Status: responseStatus,
    Reason: reason,
    PhysicalResourceId: physicalResourceId || context.logStreamName,
    LogicalResourceId: event.LogicalResourceId,
    Data: responseData
  });
  var responseOptions = {
    headers: {
      'Content-Type': '',
      // fix: Content-Length must be the BYTE length; responseBody.length
      // undercounts when the body contains multi-byte UTF-8 characters.
      'Content-Length': Buffer.byteLength(responseBody)
    }
  };
  console.info('Response body:\n', responseBody);
  try {
    await axios.put(event.ResponseURL, responseBody, responseOptions);
    console.info('CloudFormationSendResponse Success');
  } catch (error) {
    console.error('CloudFormationSendResponse Error:');
    if (error.response) {
      // The server replied with a non-2xx status.
      console.error(error.response.data);
      console.error(error.response.status);
      console.error(error.response.headers);
    } else if (error.request) {
      // The request was sent but no response arrived.
      console.error(error.request);
    } else {
      console.error('Error', error.message);
    }
    console.error(error.config);
    throw new Error('Could not send CloudFormation response');
  }
}
For more information on using callback vs. async with AWS Lambda's have a look here.
Finally, note the use of Axios. It's promise-based and therefore supports await instead of callbacks.
I keep having a timeout in my lambda function when I try to call the iotData publish function. Code below. It always times out without error. This function works from the sam local command line. I imagine this is a permissions error with lambda. The strange thing is I've given permissions for IoT, Kinesis and SNS already to this lambda function but nothing is working.
'use strict';
console.log('Loading function');
// Load PG* connection settings (PGHOST, PGUSER, ...) from .env into process.env.
require('dotenv').config();
const {Pool} = require('pg');
// The pool reads its connection config from the environment variables above.
const pool = new Pool();
const AWS = require('aws-sdk');
console.log("finished loading");
/** * Provide an event that contains the following keys:
* * * - resource: API Gateway resource for event
* * - path: path of the HTTPS request to the microservices API call
* * - httpMethod: HTTP method of the HTTPS request from microservices API call
* * - headers: HTTP headers for the HTTPS request from microservices API call
* * - queryStringParameters: query parameters of the HTTPS request from microservices API call
* * - pathParameters: path parameters of the HTTPS request from microservices API call
* * - stageVariables: API Gateway stage variables, if applicable
* * - body: body of the HTTPS request from the microservices API call
* */
exports.handler = function(event, context, callback) {
console.log("starting");
let _response = "";
context.callbackWaitsForEmptyEventLoop = false;
if(event.httpMethod==="POST" && event.resource==="/pings"){
var body = JSON.parse(event.body);
console.log("here2");
pool.query("SELECT name from pings where test = decode($1,'hex');",[body.bid], (err,res)=>{
if(err){
console.error(err.stack);
_response = buildOutput(500, {
message:"error in pg"
});
callback(_response, null);
}
console.log("here3");
var iotdata = new AWS.IotData({endpoint:'XXXXXXX.iot.us-east-1.amazonaws.com'});
const publishParams = {
topic: body.topic,
payload: Buffer.from(JSON.stringify({
message: "Welcome to "+res.rows[0].name+" house"
}), 'utf8'),
qos: 0
}
console.log("here4");
iotdata.publish(publishParams, function(err, data) {
if(err){
console.error(err.stack);
_response = buildOutput(500, {
message:"error in pg"
});
callback(_response,null);
}
_response = buildOutput(200, {message: "success"});
callback(null, _response);
});
});
} else {
_response = buildOutput(500, {
message:"path not found"
});
callback(_response,null);
}
};
/* Utility function to build HTTP response for the microservices output */
/**
 * Shape a microservice result as an API Gateway proxy response with a
 * permissive CORS header.
 *
 * @param {number} statusCode - HTTP status to return
 * @param {object} data - payload, serialized into the response body
 * @returns {{statusCode: number, headers: object, body: string}}
 */
function buildOutput(statusCode, data) {
  return {
    statusCode: statusCode,
    headers: {
      "Access-Control-Allow-Origin": "*"
    },
    body: JSON.stringify(data)
  };
}
policy
{
"Sid": "",
"Effect": "Allow",
"Action": [
"iot:*"
],
"Resource": "*"
},
UPDATE:
I attempted to give the lambda function admin access temporarily and that did not even work.
This is how you can post on MQTT topic directly from lambda function.
Use the code written in Node.js 10.x
var AWS = require('aws-sdk');
// Account-specific IoT data endpoint (see: aws iot describe-endpoint).
const IOT_ENDPOINT = "yourIoTentPoint.iot.region.amazonaws.com"
// Created once at module scope so warm invocations reuse the client.
var iotdata = new AWS.IotData({endpoint:IOT_ENDPOINT});
exports.handler = function(event, context, callback) {
var params = {
topic: 'my/topic',
payload: 'This is my menssage from Lambda function.',
qos: 1
};
iotdata.publish(params, function(err, data){
if(err){
callback(err, null);
}
else{
callback(null, {"published_message": params.payload, "topic": params.topic});
}
});
};
Also, you can check the message sent by subscribing on the topic my/topic through IoT Core>Test on AWS Console.