I am trying to add data to a Kinesis Firehose delivery stream using put_record with Python 3.6 on AWS Lambda. When calling put_record on the stream I get the following exception:
An error occurred (ResourceNotFoundException) when calling the PutRecord operation: Stream MyStream under account 123456 not found.
I am executing the following Python code to add data to the stream:
import boto3
import json

def lambda_handler(event, context):
    session = boto3.Session(aws_access_key_id=key_id, aws_secret_access_key=access_key)
    kinesis_client = session.client('kinesis', region_name='ap-south-1')
    records = event['Records']
    write_records = list()
    count = 0
    for record in records:
        count += 1
        if str(record['eventName']).lower() == 'insert':
            rec = record['dynamodb']['Keys']
            rec.update(record['dynamodb']['NewImage'])
            new_record = dict()
            new_record['Data'] = json.dumps(rec).encode()
            new_record['PartitionKey'] = 'PartitionKey' + str(count)
            # The following line throws the exception
            kinesis_client.put_record(StreamName="MyStream", Data=new_record['Data'], PartitionKey='PartitionKey' + str(count))
        elif str(record['eventName']).lower() == 'modify':
            pass
    write_records = json.dumps(write_records)
    print(write_records)
MyStream's status is Active, and the source for the stream data is set to "Direct PUT and other sources".
If you are sure that the stream name is correct, you can create the client with the regional endpoint of Kinesis:
kinesis_client = session.client('kinesis', region_name='ap-south-1', endpoint_url='https://kinesis.ap-south-1.amazonaws.com/')
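For completeness, a minimal sketch of putting a record through that endpoint-pinned client; the credentials are assumed to come from the Lambda execution role, and the stream name, payload, and partition key below are placeholders:
import boto3
import json

session = boto3.Session()  # credentials resolved from the Lambda execution role
kinesis_client = session.client('kinesis', region_name='ap-south-1',
                                endpoint_url='https://kinesis.ap-south-1.amazonaws.com/')
payload = json.dumps({'id': {'S': '1'}}).encode()  # placeholder record body
kinesis_client.put_record(StreamName='MyStream', Data=payload, PartitionKey='PartitionKey1')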
AWS Service Endpoints List
https://docs.aws.amazon.com/general/latest/gr/rande.html
Hope this helps!
I'm trying to upload videos to YouTube using Django and MSSQL, and I want to store the user data in the DB so that I can log in from multiple accounts and post videos.
The official documentation provided by YouTube uses the file system: after login, all the user data gets saved to a file. I don't want to store any data in a file, as saving files to the DB would be a huge risk and not good practice. So how can I bypass this step, save the data directly to the DB, and retrieve it when I want to post videos from a specific account?
In short, I want to replace the pickle file implementation with storing the credentials in the database.
Here's my code:
# Imports needed by the snippets below (assuming Django REST framework for
# api_view/Response; SCOPES, RETRIABLE_STATUS_CODES, RETRIABLE_EXCEPTIONS and
# MAX_RETRIES are assumed to be defined at module level, as in the official
# YouTube API upload sample)
import os
import pathlib
import pickle
import random
import time

from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from rest_framework.decorators import api_view
from rest_framework.response import Response

def youtubeAuthenticate():
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"
    client_secrets_file = "client_secrets.json"
    creds = None
    # the file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first time
    if os.path.exists("token.pickle"):
        with open("token.pickle", "rb") as token:
            creds = pickle.load(token)
    # if there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # save the credentials for the next run
        with open("token.pickle", "wb") as token:
            pickle.dump(creds, token)
    return build(api_service_name, api_version, credentials=creds)
@api_view(['GET', 'POST'])
def postVideoYT(request):
    youtube = youtubeAuthenticate()
    print('yt', youtube)
    try:
        initialize_upload(youtube, request.data)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
    return Response("Hello")
def initialize_upload(youtube, options):
    print('options', options)
    print("title", options['title'])
    # tags = None
    # if options.keywords:
    #     tags = options.keywords.split(",")
    body = dict(
        snippet=dict(
            title=options['title'],
            description=options['description'],
            tags=options['keywords'],
            categoryId=options['categoryId']
        ),
        status=dict(
            privacyStatus=options['privacyStatus']
        )
    )
    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True)
    )
    path = pathlib.Path(options['file'])
    ext = path.suffix
    getSize = os.path.getsize(options['file'])
    resumable_upload(insert_request, ext, getSize)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request, ext, getSize):
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print("Uploading file...")
            status, response = insert_request.next_chunk()
            if response is not None:
                respData = response
                if 'id' in response:
                    print("Video id '%s' was successfully uploaded." % response['id'])
                else:
                    exit("The upload failed with an unexpected response: %s" % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = "A retriable error occurred: %s" % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print("Sleeping %f seconds and then retrying..." % sleep_seconds)
            time.sleep(sleep_seconds)
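On the actual question, one way to drop token.pickle is to persist the credentials JSON in the database instead of a file. A minimal sketch, assuming a hypothetical Django model YoutubeCredential with account and token_json fields (the model name and fields are illustrative, not from the original code):
import json

from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build

def youtube_for_account(account_name):
    # load previously stored credentials for this account, if any
    row = YoutubeCredential.objects.filter(account=account_name).first()
    creds = Credentials.from_authorized_user_info(json.loads(row.token_json), SCOPES) if row else None
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file("client_secrets.json", SCOPES)
            creds = flow.run_local_server(port=0)
        # to_json() serialises the access token, refresh token, client id/secret and scopes
        YoutubeCredential.objects.update_or_create(
            account=account_name, defaults={"token_json": creds.to_json()}
        )
    return build("youtube", "v3", credentials=creds)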
I'm trying to add some basic error handling and logging to a working Lambda function. After adding these elements I now receive this error:
{
"errorMessage": "Unable to marshal response: sqs.Queue(url='https://us-west-2.queue.amazonaws.com/225565555556/Messages') is not JSON serializable",
"errorType": "Runtime.MarshalError",
"stackTrace": []
}
My searches on Stack Overflow have led me to believe something needs to be converted to a string object but I don't know where to fix that. Here is the entire function:
# Backport print_function for backwards compatibility
from __future__ import print_function
import logging
# Use built-in package for encoding/decoding JSON data
import json
# Module required to work with Boto3 environment variables
import os
# Module provides classes for manipulating date/time
from datetime import datetime
# AWS Python SDK module
import boto3
from botocore.exceptions import ClientError

# Reference function environment variables
QUEUE_NAME = os.environ['QUEUE_NAME']
MAX_QUEUE_MESSAGES = os.environ['MAX_QUEUE_MESSAGES']
DYNAMODB_TABLE = os.environ['DYNAMODB_TABLE']

# Create AWS service resource objects
sqs = boto3.resource('sqs')
dynamodb = boto3.resource('dynamodb')

logger = logging.getLogger(__name__)

# Define function entry point
def lambda_handler(event, context):
    # Use service resource to call API to retrieve SQS queue name
    try:
        queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
        logger.info("Got queue '%s' with URL=%s", QUEUE_NAME, queue.url)
    except ClientError as error:
        logger.exception("Couldn't get queue named %s.", QUEUE_NAME)
        raise error
    else:
        return queue

    # Print the number of messages waiting in queue for consumer
    print("ApproximateNumberOfMessages:",
          queue.attributes.get('ApproximateNumberOfMessages'))

    # Iterate through message event records
    for message in event['Records']:
        print("Starting your Lambda Function...")
        body = message["body"]
        id = message['messageId']
        print(str(body))

        # Write message to DynamoDB
        table = dynamodb.Table(DYNAMODB_TABLE)
        # Call DDB API to add message item to table variable
        response = table.put_item(
            Item={
                'MessageId': message['messageId'],
                'Body': message['body'],
                'Timestamp': datetime.now().isoformat()
            },
        )
        print("Wrote message to DynamoDB:", json.dumps(response))
The resolution to this error was to change the else statement from:
else:
    return queue
to:
else:
    return str(queue)
Lambda JSON-serializes whatever the handler returns, and an sqs.Queue resource object is not JSON serializable, which is what triggers the Runtime.MarshalError.
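If the return value is only there for logging or inspection, another option (a sketch, not part of the original answer) is to return something that is already JSON serializable, such as the queue URL, and to do so only after the records have been processed so the rest of the handler still runs:
def lambda_handler(event, context):
    try:
        queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
    except ClientError:
        logger.exception("Couldn't get queue named %s.", QUEUE_NAME)
        raise
    # ... process event['Records'] and write to DynamoDB as above ...
    # return only plain, JSON-serializable values
    return {"queueUrl": queue.url, "recordsProcessed": len(event['Records'])}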
I want to get the file name from the CloudWatch log for a file that I've uploaded to an S3 bucket, but it gives me KeyError: 'Records'. I checked in the logs as well; everything in my code is written according to the log event.
Here is my code:
import boto3

def lambda_handler(event, context):
    s3 = boto3.client('s3')
    if event:
        print("Event:", event)
        for Records in event["Records"]:
            file_obj = event["Records"][0]["s3"]["object"]["key"]
            print("FileObj", file_obj)
            filename = str(file_obj['s3']['object']['key'])
            print("Filename:", filename)
            fileObj = s3.get_object(Bucket="prcbucket", key=filename)
            print("FileObj", fileObj)
The following should be sufficient to retrieve the key:
def lambda_handler(event, context):
    key = event['Records'][0]['s3']['object']['key']
    print(key)
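If the goal is also to fetch the object, here is a minimal sketch building on that; the bucket name is taken from the event instead of being hard-coded, and note that get_object takes Key with a capital K:
import boto3

s3 = boto3.client('s3')

def lambda_handler(event, context):
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        print("Bucket:", bucket, "Key:", key)
        obj = s3.get_object(Bucket=bucket, Key=key)  # Key, not key
        print("ContentLength:", obj['ContentLength'])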
I am setting up my first Lambda function on AWS. I use Python 3.6. My code is as follows:
def lambda_handler(event, context):
    result = {}
    result["Log stream name:"] = context.log_stream_name
    result["Log group name:"] = context.log_group_name
    result["Request ID:"] = context.aws_request_id
    result["Mem. limits(MB)"] = context.memory_limit_in_mb
    result["size of event"] = len(event)
    result["type of event"] = str(type(event))
    return result
I also set up an API Gateway to test the Lambda.
However, no matter what query parameters I pass to the API Gateway, the event is always an empty dict. Below is a sample response. What am I missing?
Request: /test/number?input=5
Status: 200
Latency: 223 ms
Response Body
{
    "Log stream name:": "2018/12/05/[$LATEST]9d9fd5dd157046b4a67792aa49f5d71c",
    "Log group name:": "/aws/lambda/test",
    "Request ID:": "dce7beaf-f8c9-11e8-9cc4-85afb50a0e0c",
    "Mem. limits(MB)": "128",
    "size of event": 0,
    "type of event": "<class 'dict'>"
}
Assuming you don't have request mapping templates, you should turn Lambda Proxy integration on. With proxy integration, API Gateway passes the whole request through to Lambda, and the query string shows up in the event under queryStringParameters instead of the event arriving empty.
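A minimal sketch of what the handler sees once proxy integration is enabled (the input parameter name matches the sample request above); note that a proxy-integrated Lambda must also return a statusCode/body-shaped response:
import json

def lambda_handler(event, context):
    # with Lambda Proxy integration, query parameters arrive here
    params = event.get('queryStringParameters') or {}
    number = params.get('input')  # "5" for /test/number?input=5
    return {
        "statusCode": 200,
        "body": json.dumps({"input": number, "size of event": len(event)})
    }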
I have a question, as per the subject line: I want to create an AWS CloudWatch Logs filter (or event) that triggers a Lambda function from a filter pattern, and then extract values from that log data inside the Lambda function in Python.
Example:
Filter name: abcd
Value to extract: 01234, to be passed to the Lambda function.
From log data:
abcd:01234
Any ideas?
Here is a simple way to capture the events from CloudWatch. The log data is in the message field. You could process it here, or send it on to Firehose and transform it there. Alternatively, you could send CloudWatch Logs directly to Firehose with a subscription, but I think that has to be done with the AWS CLI.
import boto3
import gzip
import json
import base64

firehose = boto3.client('firehose', region_name='us-east-2')

def print_result(firehose_return):
    records_error = int(firehose_return['FailedPutCount'])
    records_sent = len(firehose_return['RequestResponses'])
    return 'Firehose sent %d records, %d error(s)' % (records_sent, records_error)

def lambda_handler(events, context):
    # CloudWatch Logs delivers the payload base64-encoded and gzip-compressed
    cw_encoded_logs_data = events['awslogs']['data']
    compressed_payload = base64.b64decode(cw_encoded_logs_data)
    cw_decoded_logs_data = gzip.decompress(compressed_payload)
    cw_all_events = json.loads(cw_decoded_logs_data)
    records = []
    for event in cw_all_events['logEvents']:
        log_event = {
            "Data": str(event['message']) + '\n'
        }
        records.append(log_event)
        # put_record_batch accepts at most 500 records per call
        if len(records) > 499:
            firehose_return = firehose.put_record_batch(
                DeliveryStreamName='streamname',
                Records=records
            )
            print(print_result(firehose_return))
            records = []
    if len(records) > 0:
        firehose_return = firehose.put_record_batch(
            DeliveryStreamName='streamname',
            Records=records
        )
        print(print_result(firehose_return))
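For the specific extraction asked about, a short sketch of pulling the value out of each message before forwarding it, assuming the log lines look like abcd:01234 (the helper name and split logic are illustrative):
def extract_value(message, filter_name='abcd'):
    # "abcd:01234" -> "01234"; returns None when the line doesn't match
    prefix = filter_name + ':'
    if message.startswith(prefix):
        return message[len(prefix):].strip()
    return None

# inside the loop above, instead of forwarding the raw message:
# value = extract_value(event['message'])
# if value is not None:
#     records.append({"Data": value + '\n'})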