How to create an EC2 instance through boto Python code

I used the following code, but it is not working:

requests = [conn.request_spot_instances(price=0.0034, image_id='ami-6989a659', count=1, type='one-time', instance_type='m1.micro')]

Use the following code to create an instance from the Python command line:

import boto.ec2

conn = boto.ec2.connect_to_region(
    "us-west-2",
    aws_access_key_id="<aws access key>",
    aws_secret_access_key="<aws secret key>",
)

# Alternatively, if your credentials are already configured elsewhere
# (e.g. in ~/.boto or environment variables), connect without passing keys:
# conn = boto.ec2.connect_to_region("us-west-2")

conn.run_instances(
    "<ami-image-id>",
    key_name="myKey",
    instance_type="t2.micro",
    security_groups=["your-security-group-here"],
)
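Note that in legacy boto, run_instances returns a Reservation object rather than the instances themselves; if you need the new instance (for its ID, or to wait for it to start), take it from the reservation's instances list:

reservation = conn.run_instances(
    "<ami-image-id>",
    key_name="myKey",
    instance_type="t2.micro",
)
# The Reservation holds the started instances
instance = reservation.instances[0]
print(instance.id)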

To create an EC2 instance using Python on AWS, you need an "aws_access_key_id_value" and an "aws_secret_access_key_value".
You can store such variables in config.properties and write your code in a create-ec2-instance.py file.
Create a config.properties file and save the following code in it.

aws_access_key_id_value='YOUR-ACCESS-KEY-OF-THE-AWS-ACCOUNT'
aws_secret_access_key_value='YOUR-SECRET-KEY-OF-THE-AWS-ACCOUNT'
region_name_value='region'
ImageId_value = 'ami-id'
MinCount_value = 1
MaxCount_value = 1
InstanceType_value = 't2.micro'
KeyName_value = 'name-of-ssh-key'
Create create-ec2-instance.py and save the following code in it.

import boto3

def getVarFromFile(filename):
    # Load the config file as a Python module so its variables become
    # attributes of `data` (note: imp is deprecated in Python 3)
    import imp
    f = open(filename)
    global data
    data = imp.load_source('data', '', f)
    f.close()

getVarFromFile('config.properties')

ec2 = boto3.resource(
    'ec2',
    aws_access_key_id=data.aws_access_key_id_value,
    aws_secret_access_key=data.aws_secret_access_key_value,
    region_name=data.region_name_value
)

instance = ec2.create_instances(
    ImageId=data.ImageId_value,
    MinCount=data.MinCount_value,
    MaxCount=data.MaxCount_value,
    InstanceType=data.InstanceType_value,
    KeyName=data.KeyName_value
)
print(instance[0].id)
Use the following command to execute the Python code:

python create-ec2-instance.py
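Neither answer above touches the spot-instance part of the original question. In boto3, the equivalent of the old request_spot_instances call lives on the EC2 client; here is a minimal sketch reusing the question's values (note the price becomes a string, the launch parameters move into a LaunchSpecification dict, and since 'm1.micro' is not a valid instance type, 't1.micro' is assumed here):

import boto3

client = boto3.client('ec2', region_name='us-west-2')

response = client.request_spot_instances(
    SpotPrice='0.0034',
    InstanceCount=1,
    Type='one-time',
    LaunchSpecification={
        'ImageId': 'ami-6989a659',
        'InstanceType': 't1.micro',
        'KeyName': 'myKey',
    }
)
print(response['SpotInstanceRequests'][0]['SpotInstanceRequestId'])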

Related

Trying to get EC2 image list including blockDeviceMappings.snapshotId

I need to get a list of EC2 images including each image's block device snapshot IDs, and the list should include only the images that belong to me.
So far I have managed to get the list of image IDs without the extra information.
In addition, is there a way to get the list of images I own instead of hard-coding my account ID?
import boto3

ec2 = boto3.resource('ec2', aws_access_key_id="ID", aws_secret_access_key="ID", region_name="eu-west-1")
filter = [{'Name': 'owner-id', 'Values': ['MY-ID']}]

count_ami = 1
for each_ami in ec2.images.filter(Filters=filter):
    print(each_ami)
    count_ami = count_ami + 1
print(count_ami)
I think the following should do:

from collections import defaultdict
from pprint import pprint
import boto3

ec2 = boto3.resource('ec2', aws_access_key_id='dddddd', aws_secret_access_key='ggggfffff', region_name='eu-west-1')
filter = [{'Name': 'owner-id', 'Values': ['123455']}]

block_ids = defaultdict(list)
for each_ami in ec2.images.filter(Filters=filter):
    for block_map in each_ami.block_device_mappings:
        # Ephemeral (instance-store) mappings have no 'Ebs' entry
        if 'Ebs' in block_map:
            block_ids[each_ami.id].append(block_map['Ebs']['SnapshotId'])
pprint(dict(block_ids))
Example output:
{'ami-02b8a850c975bb610': ['snap-02f277ce5b3b670fc'],
'ami-06422cd44a94bab38': ['snap-0b0c9048f46992ee1']}
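On the second part of the question: DescribeImages also accepts an Owners parameter, so the owned-images list can presumably be requested directly instead of filtering on a hard-coded account ID:

# 'self' restricts results to images owned by the calling account
for image in ec2.images.filter(Owners=['self']):
    print(image.id)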

Mock the result of accessing public GCS bucket

I have the following code:
bucket = get_bucket('bucket-name')
blob = bucket.blob(os.path.join(*pieces))
blob.upload_from_string('test')
blob.make_public()
result = blob.public_url
# result is `<Mock name='mock().get_bucket().blob().public_url'>`
And I would like to mock the result of public_url; my unit test code is something like this:

with ExitStack() as st:
    from google.cloud import storage
    blob_mock = mock.Mock(spec=storage.Blob)
    blob_mock.public_url.return_value = 'http://'
    bucket_mock = mock.Mock(spec=storage.Bucket)
    bucket_mock.blob.return_value = blob_mock
    storage_client_mock = mock.Mock(spec=storage.Client)
    storage_client_mock.get_bucket.return_value = bucket_mock
    st.enter_context(
        mock.patch('google.cloud.storage.Client', storage_client_mock))
    my_function()
Is there something like FakeRedis or moto for Google Storage, so I can mock google.cloud.storage.Blob.public_url?
I found this fake GCS server written in Go, which can be run in a Docker container and consumed by the Python library. See its Python examples.
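As a side note on why the mock in the question yields a Mock instead of the URL: public_url is a property, so setting return_value on it has no effect when the attribute is merely read. A minimal sketch of two ways around this, assuming the same mock setup as in the question:

from unittest import mock
from google.cloud import storage

blob_mock = mock.Mock(spec=storage.Blob)

# Option 1: plain attribute assignment; reading blob_mock.public_url
# now yields the string directly
blob_mock.public_url = 'http://example.test/blob'

# Option 2: attach a PropertyMock to the mock's type
type(blob_mock).public_url = mock.PropertyMock(
    return_value='http://example.test/blob')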

Call a Lambda Function with AWS Glue

I'm trying to use boto3 in an AWS Glue job to call a Lambda function, but without results.
I uploaded a zip with the libraries, like the examples by AWS, and also tried without a zip.
With the zip, the error is "Unable to load data for: endpoints".
When I try to invoke without the zip, the call runs into a timeout exception.
import boto3
client = boto3.client('lambda' , region_name='us-east-1')
r_lambda = client.invoke(FunctionName='S3GlueJson')
Can someone help me?
In Python, use the Boto3 Lambda client's invoke(). For example, you can create a Lambda function, then call it from a Glue job:
import json
import logging

import boto3
import pandas as pd

logger = logging.getLogger()
lambda_client = boto3.client('lambda', region_name='us-east-1')

def get_predictions(df):
    # Call the getPredictions Lambda function synchronously
    response = lambda_client.invoke(
        FunctionName='getPredictions',
        InvocationType='RequestResponse',
        LogType='Tail',
        Payload=df
    )
    logger.info('Received response from Lambda function.')
    # The payload is a streaming body; read it and parse it back into a DataFrame
    data = response["Payload"].read().decode('utf-8')
    x = json.loads(data)
    df_pred = pd.DataFrame.from_dict(x)
    return df_pred

dfjson = df.to_json()  # df is the DataFrame produced earlier in the Glue job
df_pred = get_predictions(dfjson)
df_pred.head()
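One thing to watch: invoke()'s Payload must be a string, bytes, or a file-like object, which is why the DataFrame is serialized with df.to_json() before the call rather than passed directly.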
If you want to start a Glue job from a Lambda function, you can do it like this:
import boto3

glue = boto3.client(service_name='glue', region_name='us-east-1',
                    endpoint_url='https://glue.us-east-1.amazonaws.com')

# Start the job
myNewJobRun = glue.start_job_run(JobName=JOB_NAME)

# Get the current state of the job, to be sure it's running
status = glue.get_job_run(JobName=JOB_NAME, RunId=myNewJobRun['JobRunId'])
logger.info('JOB State {}: {}'.format(
    JOB_NAME, status['JobRun']['JobRunState']))
As job execution can take some time to finish, it's better not to wait for it inside the Lambda function.
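The same advice applies in the other direction: if the Glue job does not need the Lambda result, it can invoke the function asynchronously so it does not block (and cannot hit the synchronous-invocation timeout). A minimal sketch, reusing the function name from the question:

import json
import boto3

lambda_client = boto3.client('lambda', region_name='us-east-1')

# InvocationType='Event' queues the invocation and returns immediately;
# a 202 status code means Lambda accepted the event
response = lambda_client.invoke(
    FunctionName='S3GlueJson',
    InvocationType='Event',
    Payload=json.dumps({'source': 'glue-job'})
)
print(response['StatusCode'])  # expect 202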

How to deploy SAM template using boto3

I am trying to deploy
https://s3.amazonaws.com/serverless-chatbot-code/chatbot.yaml
using the boto3 library function create_stack, but I get the error
"CreateStack cannot be used with templates containing Transforms".
How can I deploy this stack with boto3?
Thanks.
The comment is correct: you have to first create a change set, and then execute it once change set creation is complete. A code outline is given below:
import time

import boto3

client = boto3.client('cloudformation')

# Helper function to retrieve change set status
def change_set_status(change_set_name, client):
    response = client.describe_change_set(
        ChangeSetName=change_set_name,
    )
    return response['Status']

# Create change set
cs_response = client.create_change_set(
    StackName=stackname,
    TemplateURL=templateurl,
    Parameters=params,
    Capabilities=[capabilities],
    ChangeSetType="CREATE",
    ChangeSetName=stackname + "-cs"
)
# Remove print statements; they are here for illustration
print(str(cs_response))
change_set_name = cs_response['Id']

# Wait until change set status is CREATE_COMPLETE
while True:
    response = change_set_status(change_set_name, client)
    print(str(response))
    time.sleep(10)
    if response == 'CREATE_COMPLETE':
        break

# Execute change set
ex_response = client.execute_change_set(
    ChangeSetName=change_set_name
)
I think you should use a waiter instead of a while loop, like below:
from pprint import pprint

import boto3

client = boto3.client('cloudformation')
change_set_name = stack_name + "-cs"

# Create change set
cs_response = client.create_change_set(
    StackName=stack_name,
    TemplateURL=templateurl,
    Parameters=params,
    Capabilities=[capabilities],
    ChangeSetType="CREATE",
    ChangeSetName=change_set_name
)

# Block until the change set has been created
waiter = client.get_waiter('change_set_create_complete')
waiter.wait(
    ChangeSetName=change_set_name,
    StackName=stack_name,
    WaiterConfig={
        'Delay': 3,
        'MaxAttempts': 50
    }
)

desc_response = client.describe_change_set(
    ChangeSetName=change_set_name,
    StackName=stack_name,
)
print("describe_change_set response Changes:")
pprint(desc_response["Changes"], indent=4)

exec_response = client.execute_change_set(
    ChangeSetName=change_set_name,
    StackName=stack_name
)
print("execute_change_set response:")
pprint(exec_response, indent=4)
My work below may be helpful:
https://github.com/LittleWat/cfn-github-action-sample
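One detail worth calling out for SAM templates specifically: they usually create IAM roles for their Lambda functions, so the change set will fail validation unless the appropriate capability is acknowledged. For the chatbot.yaml template above, the create_change_set call would presumably look something like this (the exact capabilities depend on the template):

cs_response = client.create_change_set(
    StackName='chatbot',
    TemplateURL='https://s3.amazonaws.com/serverless-chatbot-code/chatbot.yaml',
    # CAPABILITY_IAM acknowledges that the stack may create IAM resources;
    # use CAPABILITY_NAMED_IAM instead if the template names them explicitly
    Capabilities=['CAPABILITY_IAM'],
    ChangeSetType='CREATE',
    ChangeSetName='chatbot-cs'
)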

Unable to create local directories error while running Ansible in AWS Lambda

I am running the following Python script which runs a playbook:
#!/usr/bin/python
from __future__ import print_function
import json
import os
import ansible.inventory
import ansible.playbook
import ansible.runner
import ansible.constants
from ansible import utils
from ansible import callbacks

print('Loading function')

def run_playbook(**kwargs):
    stats = callbacks.AggregateStats()
    playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    runner_cb = callbacks.PlaybookRunnerCallbacks(
        stats, verbose=utils.VERBOSITY)

    # use /tmp instead of $HOME
    ansible.constants.DEFAULT_REMOTE_TMP = '/test'

    out = ansible.playbook.PlayBook(
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        stats=stats,
        **kwargs
    ).run()
    return out

def lambda_handler(event, context):
    return main()

def main():
    out = run_playbook(
        playbook='/test/little.yml',
    )
    return out

if __name__ == '__main__':
    main()
And this is my ansible.cfg file:

[ssh_connection]
ssh_args=-o ForwardAgent=yes
retries=2
ask_sudo_pass = yes

[defaults]
remote_user = root
host_key_checking = False
#remote_tmp = tmp
local_tmp = ~/tmp
I am getting the following error, when the lambda function is invoked:
START RequestId: ccfe076e-0016-11e7-befa-7ba330223a64 Version: $LATEST
module initialization error: Unable to create local directories(/home/sbx_user1080/tmp): [Errno 30] Read-only file system: '/home/sbx_user1080'
END RequestId: ccfe076e-0016-11e7-befa-7ba330223a64
which means that Ansible is not able to create the tmp directory, due to a permissions problem in the container that Lambda spins up.
So, how do I work around it? Also, according to this discussion, Lambda supports writing files to the /tmp directory. So, how do I set local_tmp to that directory?
You can only write to /tmp in an AWS Lambda environment. You can't create a ~/tmp directory in that environment. It looks like you need to change this:
local_tmp = ~/tmp
to this:
local_tmp = /tmp
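If editing ansible.cfg inside the Lambda package is inconvenient, the same override can presumably be applied from the handler itself. Because the error is a "module initialization error", Ansible resolves (and tries to create) its local tmp directory at import time, so the override must happen before the import. A sketch, assuming an Ansible version that honors the ANSIBLE_LOCAL_TEMP environment variable:

import os

# Must be set before any `import ansible...` line, because Ansible
# reads its configuration when the package is first imported
os.environ['ANSIBLE_LOCAL_TEMP'] = '/tmp/ansible-local'

import ansible.playbook  # noqa: E402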
