Using SNS as a Target to Trigger Lambda Function - amazon-ec2

I have a Lambda function that works 100%. I set up my CloudWatch rule, connected the target to the Lambda directly, and everything works fine.
My manager wants me to change the target in CloudWatch to SNS, and then use that SNS topic as the trigger for my Lambda.
I have made the necessary changes, and now my Lambda function is no longer working.
import os, json, boto3
def validate_instance(rec_event):
    """Build an EC2 client for the account/region named in an SNS-wrapped event.

    Parameters
    ----------
    rec_event : dict
        The Lambda invocation event as delivered by SNS; the original
        CloudWatch event is the JSON string under Records[0].Sns.Message.

    Returns
    -------
    botocore.client.EC2
        An EC2 client authenticated with the assumed cross-account role.
    """
    # SNS wraps the original CloudWatch event as a JSON string in Message.
    sns_msg = json.loads(rec_event['Records'][0]['Sns']['Message'])
    account_id = sns_msg['account']
    event_region = sns_msg['region']
    # Fix: sts_client was referenced without ever being defined, which made
    # this function raise NameError as soon as it ran.
    sts_client = boto3.client('sts')
    assumed_role = sts_client.assume_role(
        RoleArn="arn:aws:iam::{}:role/{}".format(account_id, 'VSC-Admin-Account-Lambda-Execution-Role'),
        RoleSessionName="AssumeRoleSession1",
    )
    credentials = assumed_role['Credentials']
    # Fix: do not print the credentials — temporary or not, secrets would
    # land in CloudWatch Logs.
    ec2_client = boto3.client(
        'ec2',
        event_region,
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken'],
    )
    # Fix: the client was previously built and discarded; return it so
    # callers can actually use the assumed-role session.
    return ec2_client
def lambda_handler(event, context):
    """Revoke world-open ingress rules for sensitive ports.

    Triggered by a CloudWatch event for AuthorizeSecurityGroupIngress; any
    permission item that opens SSH (22), IMAP (143) or RDP (3389) to
    0.0.0.0/0 is collected and revoked.

    Returns a failure dict when the event carries no eventName, None when
    the event is some other API call.
    """
    SENSITIVE_PORTS = {22, 143, 3389}
    print("The event log is " + str(event))
    # Ensure that we have an event name to evaluate.  (The original
    # re-checked `'detail' in event` inside the second clause — redundant.)
    if 'detail' not in event or 'eventName' not in event['detail']:
        return {"Result": "Failure", "Message": "Lambda not triggered by an event"}
    if event['detail']['eventName'] != 'AuthorizeSecurityGroupIngress':
        return None
    request_params = event['detail']['requestParameters']
    items_ip_permissions = request_params['ipPermissions']['items']
    security_group_id = request_params['groupId']
    print("The total items are " + str(items_ip_permissions))
    ip_permissions = []
    for item in items_ip_permissions:
        # Compute the CIDR list once (the original built it twice: once for
        # logging and again inside the condition).  .get guards rules that
        # carry no IPv4 ranges at all.
        cidrs = [val['cidrIp'] for val in item.get('ipRanges', {}).get('items', [])]
        print("The value of ipranges are " + str(cidrs))
        if (item.get('fromPort') == item.get('toPort')
                and item.get('fromPort') in SENSITIVE_PORTS
                and '0.0.0.0/0' in cidrs):
            print("Revoking the security rule for the item" + str(item))
            ip_permissions.append(item)
    if ip_permissions:
        # Single revoke call for all offending items.
        revoke_security_group_ingress(security_group_id, ip_permissions)
def revoke_security_group_ingress(security_group_id, ip_permissions):
    """Revoke the given CloudTrail-style ingress items from a security group.

    NOTE(review): this builds a fresh default-credential EC2 client rather
    than reusing the assumed-role client from validate_instance — confirm
    the Lambda role itself has revoke permission in the target account.
    """
    print("The security group id is " + str(security_group_id))
    print("The ip_permissions value to be revoked is " + str(ip_permissions))
    normalized = normalize_paramter_names(ip_permissions)
    ec2 = boto3.client('ec2')
    response = ec2.revoke_security_group_ingress(
        GroupId=security_group_id,
        IpPermissions=normalized,
    )
    print("The response of the revoke is " + str(response))
def normalize_paramter_names(ip_items):
    """Convert CloudTrail camelCase ipPermissions items to boto3 CamelCase.

    Parameters
    ----------
    ip_items : list of dict
        Items as found under requestParameters.ipPermissions.items in a
        CloudTrail/CloudWatch event (keys like ipProtocol, ipRanges).

    Returns
    -------
    list of dict
        Items shaped like the IpPermissions entries that
        revoke_security_group_ingress expects (IpProtocol, IpRanges, ...).
    """
    # (source list key, source cidr key, dest list key, dest cidr key)
    RANGE_KEYS = (
        ('ipRanges', 'cidrIp', 'IpRanges', 'CidrIp'),
        ('ipv6Ranges', 'cidrIpv6', 'Ipv6Ranges', 'CidrIpv6'),
    )
    new_ip_items = []
    for ip_item in ip_items:
        new_ip_item = {
            "IpProtocol": ip_item['ipProtocol'],
            "FromPort": ip_item['fromPort'],
            "ToPort": ip_item['toPort'],
        }
        # Fix: the original emitted EITHER IPv4 OR IPv6 ranges (IPv6 won when
        # both were present), silently dropping the IPv4 CIDRs of a rule that
        # carries both.  Emit every non-empty range list instead; empty lists
        # are omitted since they contribute nothing to a revoke call.
        for src_list, src_cidr, dst_list, dst_cidr in RANGE_KEYS:
            range_items = ip_item.get(src_list, {}).get('items', [])
            if range_items:
                new_ip_item[dst_list] = [
                    {dst_cidr: entry[src_cidr]} for entry in range_items
                ]
        new_ip_items.append(new_ip_item)
    return new_ip_items

Assuming missing permissions are causing the invocation failure: you need to explicitly grant SNS permission to invoke the Lambda function.
Below is the CLI command:
aws lambda add-permission --function-name my-function --action lambda:InvokeFunction --statement-id sns-my-topic \
--principal sns.amazonaws.com --source-arn arn:aws:sns:us-east-2:123456789012:my-topic
my-function -> Name of the lambda function
my-topic -> Name of the SNS topic
Reference: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html

Related

AWS IOT button toggle for IFTTT

I have an AWS IOT button set up and working with IFTTT and SmartLife to turn a device on/off. Currently I have it set up to use single and double click to turn on and off, because IFTTT doesn't seem to have a toggle app (at least, not for use with SmartLife.)
How can I make it a toggle, so I can use a single click to alternately turn on and off?
Looking for a free solution.
There is a solution using apilio, but it's not a free solution: Create a toggle between two actions in IFTTT .
For a free solution, use DynamoDB from Lambda to save the button state, and invert the state each invocation. It either sends "IotButton2" or "IotButton2Off" to IFTTT.
'''
Example Lambda IOT button IFTTT toggle
Test payload:
{
"serialNumber": "GXXXXXXXXXXXXXXXXX",
"batteryVoltage": "990mV",
"clickType": "SINGLE" # or "DOUBLE" or "LONG"
}
'''
from __future__ import print_function
import boto3
import json
import logging
import urllib2
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
maker_key = 'xxxxxxxxxxxxxxxxx' # change this to your Maker key
def get_button_state(db, name):
    """Return the stored boolean 'on' state for *name*, defaulting to False.

    Parameters
    ----------
    db : DynamoDB service resource (boto3.resource('dynamodb')).
    name : str
        Key in the 'toggles' table (e.g. "IotButton2").

    Returns
    -------
    bool
        The stored state, or False when the item is missing or the
        lookup fails.
    """
    table = db.Table('toggles')
    try:
        response = table.get_item(Key={'name': name})
    except ClientError as e:
        print(e.response['Error']['Message'])
        # Fix: the error path previously fell through and returned an
        # implicit None; return the same False default as "not found".
        return False
    # response['Item'] looks like {'name': 'IotButton2', 'on': False}
    if 'Item' in response:
        return response['Item']['on']
    return False
def set_button_state(db, name, state):
    """Persist the boolean toggle state for *name* in the 'toggles' table.

    Errors are logged and swallowed (best-effort write); always returns None.
    """
    toggles = db.Table('toggles')
    try:
        toggles.put_item(Item={'name': name, 'on': state})
    except ClientError as e:
        print(e.response['Error']['Message'])
def lambda_handler(event, context):
    """Invert the stored button state and fire the matching IFTTT Maker event.

    The DynamoDB 'toggles' table remembers the last state; each click flips
    it and sends either "IotButton2" (turn on) or "IotButton2Off" (turn off).
    Returns the raw body of the IFTTT Maker webhook response.
    """
    logger.info('Received event: ' + json.dumps(event))
    db = boto3.resource('dynamodb')
    maker_event = "IotButton2"
    # Uncomment to drive one switch per click type (SINGLE/DOUBLE/LONG):
    # maker_event += ":" + event["clickType"]
    state = get_button_state(db, maker_event)
    logger.info(maker_event + " state = " + ("on" if state else "off"))
    # Persist the flipped state before notifying IFTTT.
    set_button_state(db, maker_event, not state)
    if state:
        maker_event += "Off"
    logger.info('Maker event: ' + maker_event)
    url = 'https://maker.ifttt.com/trigger/%s/with/key/%s' % (maker_event, maker_key)
    handle = urllib2.urlopen(url)
    body = handle.read()
    handle.close()
    logger.info('"' + maker_event + '" event has been sent to IFTTT Maker channel')
    return body
The above version responds to any type of click (single, double, long.) You can control 3 different switches by uncommenting this line:
maker_event += ":" + event["clickType"]
which would translate to these IFTTT events:
IotButton2:SINGLE
IotButton2:SINGLEOff
IotButton2:DOUBLE
IotButton2:DOUBLEOff
IotButton2:LONG
IotButton2:LONGOff
Create the DynamoDB table. For my example, the table name is "toggles" with one key field "name" and one boolean field "on". The table has to exist, but if the entry does not, it gets created the first time you click the button or test the Lambda function.
You have to update the Lambda function role to include your DynamoDb permissions. Add the following lines to the policy:
{
"Effect": "Allow",
"Action": [
"dynamodb:GetItem",
"dynamodb:PutItem"
],
"Resource": [
"arn:aws:dynamodb:us-east-1:xxxxxxxx:table/toggles"
]
}
(Get the ARN from AWS console DynamoDB -> table -> toggles -> Additional information.)
You can also edit the above function to handle multiple buttons, by checking the serial number.

HIVE_CANNOT_OPEN_SPLIT: Error opening Hive split s3://***/date=2020-05-28/hour=20/part-00000-.c000.snappy.parquet (offset=0, length=22009):

def lambda_handler(event, context):
    """Run a sample Athena query and return one VarChar field from the result.

    Polls the query up to RETRY_COUNT times, stopping the execution and
    raising on timeout.  Returns the extracted value when the result has
    exactly header + one row, otherwise None.
    """
    # Build the query against the configured database/table.
    query = "SELECT * FROM %s.%s limit 50;" % (DATABASE, TABLE)
    client = boto3.client('athena')
    # Kick off the execution; results land in the configured S3 location.
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={'Database': DATABASE},
        ResultConfiguration={'OutputLocation': S3_OUTPUT},
    )
    query_execution_id = response['QueryExecutionId']
    print(query_execution_id)
    # Poll until the query finishes or RETRY_COUNT attempts are exhausted.
    for _ in range(1, 1 + RETRY_COUNT):
        query_status = client.get_query_execution(QueryExecutionId=query_execution_id)
        state = query_status['QueryExecution']['Status']['State']
        if state == 'SUCCEEDED':
            print("STATUS:" + state)
            break
        if state == 'FAILED':
            raise Exception("STATUS:" + state)
        print("STATUS:" + state)
        time.sleep(100)
    else:
        # Loop ran out without a break: cancel the query and give up.
        client.stop_query_execution(QueryExecutionId=query_execution_id)
        raise Exception('TIME OVER')
    result = client.get_query_results(QueryExecutionId=query_execution_id)
    print(result)
    # Rows[0] is the header; expect exactly one data row.
    if len(result['ResultSet']['Rows']) == 2:
        return result['ResultSet']['Rows'][1]['Data'][1]['VarCharValue']
    return None
Error message:
HIVE_CANNOT_OPEN_SPLIT: Error opening Hive split s3://*************/******/date=2020-05-28/hour=20/part-00000-af4a5ab5-d8f9-4868-a974-90b3c2433366.c000.snappy.parquet (offset=0, length=22009):
The above error is showing up and I have no idea what is going on. I tried scanning through some other resources but had no luck finding an explanation.
I am trying to run an AWS Lambda function that reads the snappy.parquet data and writes the results to S3.

Stop all ec2 instances that does not contains a tag with a specific value in AWS

I need to write a Python script for an AWS Lambda function that stops all EC2 instances which do not have a particular tag, or a particular value for that tag.
I am using boto3 with Python to get all instances, and a filter to find the instances with that particular tag and value, but I am not able to get the instances that are running without that tag or value.
import boto3
ec2 = boto3.resource('ec2')
def lambda_handler(event, context):
    """Stop every instance that lacks the state:scheduleName=24x7 tag."""
    tag_filter = [{
        'Name': 'tag:state:scheduleName',
        'Values': ['24x7'],
    }]
    # Collect every instance id, then the ids matching the tag filter.
    all_ids = [inst.id for inst in ec2.instances.all()]
    tagged_ids = [inst.id for inst in ec2.instances.filter(Filters=tag_filter)]
    # Whatever is left over has no (matching) tag.
    untagged_ids = [iid for iid in all_ids if iid not in tagged_ids]
    if untagged_ids:
        print("found instances with out tag")
        ec2.instances.filter(InstanceIds=untagged_ids).stop()  # stop them
        print("instance stopped")
    else:
        print("let it be run as tag value is 24*7")
As John suggested in comments, you are overcomplicating this using a filter.
You want something like this:
import boto3
ec2 = boto3.resource('ec2')
def lambda_handler(event, context):
    """Partition running instances by presence of the scheduleName=24x7 tag.

    Iterates every instance once, skipping non-running ones, and prints the
    instance ids with and without the tag.
    """
    running_with = []
    running_without = []
    for instance in ec2.instances.all():
        if instance.state['Name'] != 'running':
            continue
        has_tag = False
        # Fix: instance.tags is None (not []) when an instance has no tags
        # at all, and iterating None raises TypeError — precisely on the
        # untagged instances this function is meant to find.
        for tag in instance.tags or []:
            if tag['Key'] == 'scheduleName' and tag['Value'] == '24x7':
                has_tag = True
                break
        if has_tag:
            running_with.append(instance.id)
        else:
            running_without.append(instance.id)
    print("With: %s" % running_with)
    print("Without: %s" % running_without)
Key points:
Don't use filter and just make a single call to ec2.instances.all().
Loop through the instances and then through tags and count with and without.

Cloudwatch logs filter to trigger lambda then extract values from log data

Please I have got a question from the subject-line.
I want to create a AWS CloudWatch log or Event to trigger Lambda function from filter pattern then extract values from that log data as output to lambda function in python.
Example:
Filter name: abcd
value to extract: 01234 to the lambda function.
from log data
log Data:
abcd:01234
Any ideas?
Here is a simple way to capture the events from CloudWatch. The log data is in the message. You could process it here or send it on to Firehose and transform it there. Alternatively, you could send CloudWatch logs directly to Firehose with a subscription, but I think that has to be done with the AWS CLI.
import boto3
import gzip
import json
import base64
firehose = boto3.client('firehose',region_name='us-east-2')
def print_result(firehose_return):
    """Summarize a Firehose put_record_batch response as a short message.

    Despite the name it returns the summary string rather than printing it.
    """
    failed = int(firehose_return['FailedPutCount'])
    sent = len(firehose_return['RequestResponses'])
    return 'Firehose sent %d records, %d error(s)' % (sent, failed)
def lambda_handler(events, context):
    """Decode a CloudWatch Logs subscription payload and forward it to Firehose.

    The payload arrives base64-encoded and gzip-compressed under
    events['awslogs']['data']; each log event's message is sent as one
    newline-terminated Firehose record, batched in groups of 500 (the
    PutRecordBatch per-call limit).
    """
    cw_encoded_logs_data = events['awslogs']['data']
    compressed_payload = base64.b64decode(cw_encoded_logs_data)
    cw_decoded_logs_data = gzip.decompress(compressed_payload)
    cw_all_events = json.loads(cw_decoded_logs_data)
    records = []
    for event in cw_all_events['logEvents']:
        # Fix: records.insert(len(records), x) is just append.
        records.append({"Data": str(event['message']) + '\n'})
        if len(records) > 499:
            # Fix: stream name had a stray trailing space here, differing
            # from the final flush below; use the same name in both calls.
            firehose_return = firehose.put_record_batch(
                DeliveryStreamName='streamname',
                Records=records,
            )
            # Fix: print_result returns a string; the original discarded it
            # here (no output) but printed it at the final flush.
            print(print_result(firehose_return))
            records = []
    if records:
        firehose_return = firehose.put_record_batch(
            DeliveryStreamName='streamname',
            Records=records,
        )
        print(print_result(firehose_return))

Connection error to Graphenedb hosted on heroku

Hi, I am getting an "Unable to connect to localhost on port 7687 - is the server running?" error whenever my Python code executes.
import os
import json
from urllib.parse import urlparse, urlunparse
from django.shortcuts import render
# Create your views here.
from py2neo import Graph, authenticate
from bottle import get,run,request,response,static_file
from py2neo.packages import neo4j
# Parse the GrapheneDB connection URL injected by the Heroku add-on.
# NOTE(review): os.environ.get returns None when the variable is unset, and
# urlparse/format below would then fail — confirm GRAPHENEDB_GOLD_URL is
# actually configured on the dyno (see the answer below).
url = urlparse(os.environ.get("GRAPHENEDB_GOLD_URL"))
# Rebuild the URL as scheme://host:port, stripping the embedded credentials.
url_without_auth = urlunparse((url.scheme, ("{0}:{1}").format(url.hostname, url.port), '', None, None, None))
user = url.username
password = url.password
# Register the credentials with py2neo, then open the graph with Bolt
# disabled (presumably to avoid the port-7687 connection the error mentions).
authenticate(url_without_auth,user, password)
graph = Graph(url_without_auth, bolt = False)
#graph = Graph(password='vjsj56#vb')
#get("/")
def get_index():
return static_file("index.html", root="static")
#get("/graph")
def get_graph(self):
print("i was here" )
print("graph start")
results = graph.run(
"MATCH (m:Movie)<-[:ACTED_IN]-(a:Person) "
"RETURN m.title as movie, collect(a.name) as cast "
"LIMIT {limit}", {"limit": 10})
print("graph run the run")
nodes = []
rels = []
i = 0
for movie, cast in results:
#print("i am here")
nodes.append({"title": movie, "label": "movie"})
target = i
i += 1
for name in cast:
print(name)
actor = {"title": name, "label": "actor"}
try:
source = nodes.index(actor)
except ValueError:
nodes.append(actor)
source = i
i += 1
rels.append({"source": source, "target": target})
return {"nodes": nodes, "links": rels}
#get("/search")
def get_search():
try:
q = request.query["q"]
except KeyError:
return []
else:
results = graph.run(
"MATCH (movie:Movie) "
"WHERE movie.title =~ {title} "
"RETURN movie", {"title": "(?i).*" + q + ".*"})
response.content_type = "application/json"
return json.dumps([{"movie": dict(row["movie"])} for row in results])
#get("/movie/<title>")
def get_movie(title):
results = graph.run(
"MATCH (movie:Movie {title:{title}}) "
"OPTIONAL MATCH (movie)<-[r]-(person:Person) "
"RETURN movie.title as title,"
"collect([person.name, head(split(lower(type(r)),'_')), r.roles]) as cast "
"LIMIT 1", {"title": title})
row = results.next()
return {"title": row["title"],
"cast": [dict(zip(("name", "job", "role"), member)) for member in row["cast"]]}
This code runs fine on my local system but gives a connection error when deployed on Heroku with GrapheneDB.
exception location: /app/.heroku/python/lib/python3.6/site-packages/py2neo/packages/neo4j/v1/connection.py in connect, line 387
I'm Juanjo, from GrapheneDB.
At first glance the code looks fine and the error code points to a wrong URL. It might be a problem with the environment variable. Can you please check your GRAPHENEDB_GOLD_URL variable?
You can do it like this:
$ heroku config:get GRAPHENEDB_GOLD_URL
It should be something like:
http://&lt;user&gt;:&lt;pass&gt;@XXX.graphenedb.com:24789/db/data
(please don't share your URL here)
If your variable is empty, please read more here on retrieving GrapheneDB environment variables.
If that's not your issue, or the problem persists, could you please contact us via the support link on our admin panel? Heroku team will forward the support ticket to us and we'll have all the information related to your database injected into the ticket.
Thanks,
Juanjo

Resources