I have the following code, which serves up a public Google Cloud Storage URL for images I am uploading:
def to_internal_value(self, data):
    file_name = str(uuid.uuid4())
    # Get the file name extension:
    file_extension = self.get_file_extension(file_name, data)
    complete_file_name = "{}.{}".format(file_name, file_extension)
    uploaded = data.read()
    img = Image.open(io.BytesIO(uploaded))
    new_image_io = io.BytesIO()
    megapixels = img.width * img.height
    # reduce size if image is bigger than MEGAPIXEL_LIMIT
    if megapixels > self.MEGAPIXEL_LIMIT:
        resize_factor = math.sqrt(megapixels / self.MEGAPIXEL_LIMIT)
        resized = resizeimage.resize_thumbnail(img, [img.width / resize_factor,
                                                     img.height / resize_factor])
        resized.save(new_image_io, format=file_extension.upper())
    else:
        img.save(new_image_io, format=file_extension.upper())
    content = ContentFile(new_image_io.getvalue(), name=complete_file_name)
    return super(Base64ImageField, self).to_internal_value(content)
def to_representation(self, value):
    try:
        blob = Blob(name=value.name, bucket=bucket)
        blob.make_public()
        return blob.public_url
    except ValueError as e:
        return value
The problem is that this doubles the time for the request: instead of making the blob public only the first time it is uploaded, this code runs every time the object is serialized to the client. I have tried moving the make_public() call into to_internal_value, but so far I haven't had success, probably because I don't know exactly how to get value.
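One direction that might work (a sketch, not a confirmed fix): instead of touching the blob during serialization, make it public once, right after the owning model instance has been saved and the file is actually in the bucket. Photo and its image field are hypothetical names; bucket is the same Bucket object used in to_representation.

from django.db.models.signals import post_save
from django.dispatch import receiver
from google.cloud.storage import Blob

@receiver(post_save, sender=Photo)  # Photo is a hypothetical model owning the image field
def publish_image(sender, instance, created, **kwargs):
    # Runs once per save, after the storage backend has written the file,
    # so the blob exists and can be made public here instead of on every read.
    if created and instance.image:
        Blob(name=instance.image.name, bucket=bucket).make_public()

With something like this in place, to_representation could simply build and return the public URL without calling make_public() on every serialization.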
I recently developed a program that connects to Twitter and performs some tasks automatically (like tweeting and liking) using only the account information: username;password;email_or_phone.
My problem is that I am now trying to add the ability to tweet with an image, but I can't get it to work.
Here is my code and my error:
async def tweet_success(self, msg: str, img_path: str):
    # Get the number of bytes of the image
    img_bytes = str(os.path.getsize(img_path))
    # Get the media_id to add an image to my tweet
    params = {'command': 'INIT', 'total_bytes': img_bytes, 'media_type': 'image/png', 'media_category': 'tweet_image'}
    response = requests.post('https://upload.twitter.com/i/media/upload.json', params=params, headers=self.get_headers())
    media_id = response.text.split('{"media_id":')[1].split(',')[0]

    params = {'command': 'APPEND', 'media_id': media_id, 'segment_index': '0'}
    # Try to get the raw binary of the image. My problem is here
    data = open(img_path, "rb").read()
    response = requests.post('https://upload.twitter.com/i/media/upload.json', params=params, headers=self.get_headers(), data=data)
{"request":"\/i\/media\/upload.json","error":"media parameter is missing."}
Can someone help me?
I tried:

data = open(img_path, "rb").read()
data = f'------WebKitFormBoundaryaf0mMLIS7kpsKwPv\r\nContent-Disposition: form-data; name="media"; filename="blob"\r\nContent-Type: application/octet-stream\r\n\r\n{data}\r\n------WebKitFormBoundaryaf0mMLIS7kpsKwPv--\r\n'

then:

data = open(img_path, "rb").read()
data = f'------WebKitFormBoundaryaf0mMLIS7kpsKwPv\r\nContent-Disposition: form-data; name="media"; filename="blob"\r\nContent-Type: application/octet-stream\r\n\r\n{data}\r\n------WebKitFormBoundaryaf0mMLIS7kpsKwPv--\r\n'.encode()

and finally:

data = open(img_path, "rb").read()
data = base64.b64encode(data)
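A sketch of what the endpoint seems to expect (untested): for the APPEND command, the image bytes go in a multipart/form-data field named media, and requests builds that body itself when you pass files=..., so no hand-rolled boundary or base64 encoding should be needed. If get_headers() sets a Content-Type, it would have to be dropped here so requests can set the multipart boundary.

# Sketch: send the APPEND chunk as multipart/form-data under the "media" field
params = {'command': 'APPEND', 'media_id': media_id, 'segment_index': '0'}
with open(img_path, 'rb') as f:
    response = requests.post(
        'https://upload.twitter.com/i/media/upload.json',
        params=params,
        headers=self.get_headers(),  # must not force a Content-Type here
        files={'media': f},
    )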
I'm trying to upload videos to YouTube using Django and MSSQL, and I want to store the user credentials in the database so that I can log in from multiple accounts and post videos.
The official documentation provided by YouTube uses the file system: after login, all the user data gets saved to a pickle file. I don't want to store any data in a file, as dumping whole files like that is a big risk and not good practice. So how can I bypass this step, save the data directly to the database, and retrieve it when I want to post videos from a specific account?
In short, I want to replace the pickle file implementation with storing the credentials in the database.
Here's my code
def youtubeAuthenticate():
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"
    client_secrets_file = "client_secrets.json"
    creds = None
    # the file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first time
    if os.path.exists("token.pickle"):
        with open("token.pickle", "rb") as token:
            creds = pickle.load(token)
    # if there are no (valid) credentials available, let the user log in
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # save the credentials for the next run
        with open("token.pickle", "wb") as token:
            pickle.dump(creds, token)
    return build(api_service_name, api_version, credentials=creds)
@api_view(['GET', 'POST'])
def postVideoYT(request):
    youtube = youtubeAuthenticate()
    print('yt', youtube)
    try:
        initialize_upload(youtube, request.data)
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
    return Response("Hello")
def initialize_upload(youtube, options):
    print('options', options)
    print("title", options['title'])
    # tags = None
    # if options.keywords:
    #     tags = options.keywords.split(",")
    body = dict(
        snippet=dict(
            title=options['title'],
            description=options['description'],
            tags=options['keywords'],
            categoryId=options['categoryId']
        ),
        status=dict(
            privacyStatus=options['privacyStatus']
        )
    )

    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=MediaFileUpload(options['file'], chunksize=-1, resumable=True)
    )

    path = pathlib.Path(options['file'])
    ext = path.suffix
    getSize = os.path.getsize(options['file'])
    resumable_upload(insert_request, ext, getSize)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request, ext, getSize):
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print("Uploading file...")
            status, response = insert_request.next_chunk()
            if response is not None:
                respData = response
                if 'id' in response:
                    print("Video id '%s' was successfully uploaded." % response['id'])
                else:
                    exit("The upload failed with an unexpected response: %s" % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
                                                                     e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = "A retriable error occurred: %s" % e

        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print("Sleeping %f seconds and then retrying..." % sleep_seconds)
            time.sleep(sleep_seconds)
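A rough sketch of one way the pickle step could be replaced with a database row (the model and function names here are hypothetical, not from the post): store the authorized-user JSON that the credentials object can emit, and rebuild the credentials from it on the next run.

import json
from django.db import models
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build

class YouTubeCredential(models.Model):            # hypothetical model
    account = models.CharField(max_length=255, unique=True)
    authorized_user_json = models.TextField()     # output of creds.to_json()

def youtube_for(account_name):
    # Load previously stored credentials for this account, if any
    row = YouTubeCredential.objects.filter(account=account_name).first()
    creds = None
    if row:
        creds = Credentials.from_authorized_user_info(
            json.loads(row.authorized_user_json), SCOPES)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file("client_secrets.json", SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new or refreshed credentials for this account
        YouTubeCredential.objects.update_or_create(
            account=account_name,
            defaults={"authorized_user_json": creds.to_json()})
    return build("youtube", "v3", credentials=creds)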
I am new to AWS SageMaker. I have a custom CV PyTorch model locally and deployed it to a SageMaker endpoint. I used custom inference.py code to define the model_fn, input_fn, output_fn and predict_fn methods. So I'm able to generate predictions on JSON input, which contains a URL to the image; the code is quite straightforward:
def input_fn(request_body, content_type='application/json'):
    logging.info('Deserializing the input data...')
    image_transform = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    if content_type:
        if content_type == 'application/json':
            input_data = json.loads(request_body)
            url = input_data['url']
            logging.info(f'Image url: {url}')
            image_data = Image.open(requests.get(url, stream=True).raw)
            return image_transform(image_data)
    raise Exception(f'Requested unsupported ContentType in content_type {content_type}')
Then I am able to invoke the endpoint with this code:
client = boto3.client('runtime.sagemaker')
inp = {"url": url}
inp = json.loads(json.dumps(inp))
response = client.invoke_endpoint(EndpointName='ENDPOINT_NAME',
                                  Body=json.dumps(inp),
                                  ContentType='application/json')
The problem is that, locally, the URL request returns a slightly different image array than it does on SageMaker, which is why I obtain slightly different predictions for the same URL. To check that at least the model weights are the same, I want to generate predictions on the image itself, downloaded both locally and on SageMaker. But I fail when trying to pass the image directly as input to the endpoint. E.g.:
def input_fn(request_body, content_type='application/json'):
    logging.info('Deserializing the input data...')
    image_transform = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    if content_type == 'application/x-image':
        image_data = request_body
        return image_transform(image_data)
    raise Exception(f'Requested unsupported ContentType in content_type {content_type}')
Invoking the endpoint, I get this error:
ParamValidationError: Parameter validation failed:
Invalid type for parameter Body, value: {'img': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=630x326 at 0x7F78A61461D0>}, type: <class 'dict'>, valid types: <class 'bytes'>, <class 'bytearray'>, file-like object
Does anybody know how to generate SageMaker predictions with a PyTorch model on images?
As always, after asking I found a solution. As the error suggested, I had to convert the input to bytes or a bytearray. For those who may need the solution:
from io import BytesIO

img = Image.open(open(PATH, 'rb'))
img_byte_arr = BytesIO()
img.save(img_byte_arr, format=img.format)
img_byte_arr = img_byte_arr.getvalue()

client = boto3.client('runtime.sagemaker')
response = client.invoke_endpoint(EndpointName='ENDPOINT_NAME',
                                  Body=img_byte_arr,
                                  ContentType='application/x-image')
response_body = response['Body']
print(response_body.read())
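For completeness, the server side presumably needs a matching change as well: in the second input_fn above, request_body arrives as raw bytes and has to be wrapped in a file-like object before PIL can open it. A sketch of that assumption:

import io
from PIL import Image
from torchvision import transforms

image_transform = transforms.Compose([
    transforms.Resize(size=(224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

def input_fn(request_body, content_type='application/x-image'):
    if content_type == 'application/x-image':
        # request_body is the raw bytes sent in Body; wrap it so PIL can decode it
        return image_transform(Image.open(io.BytesIO(request_body)))
    raise Exception(f'Requested unsupported ContentType in content_type {content_type}')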
I'm trying to download random public domain images from the Metropolitan Museum collection using their API (more info here: https://metmuseum.github.io/) and Python; unfortunately the images I get are empty. Here is a minimal example:
import urllib
from urllib2 import urlopen
import json
from random import randint

url = "https://collectionapi.metmuseum.org/public/collection/v1/objects"
objectID_list = json.loads(urlopen(url).read())['objectIDs']
objectID = objectID_list[randint(0, len(objectID_list) - 1)]
url_request = url + "/" + str(objectID)
fetched_data = json.loads(urlopen(url_request).read())

if fetched_data['isPublicDomain']:
    name = str(fetched_data['title'])
    ID = str(fetched_data['objectID'])
    url_image = str(fetched_data['primaryImage'])
    urllib.urlretrieve(url_image, 'path/' + name + '_' + ID + '.jpg')
If I print url_image and copy/paste it into a browser, I get to the desired image, but the code retrieves a file that weighs about 1 KB and can't be opened.
Any idea what I'm doing wrong?
Your way of downloading is correct; however, it seems the domain validates request headers to prevent scraping (probably unintended, since they provide an API to pull images).
One way of solving this problem is to change your headers to something realistic, or to use fake_useragent with requests.
import requests
from fake_useragent import UserAgent

def save_image(link, file_path):
    ua = UserAgent(verify_ssl=False)
    headers = {"User-Agent": ua.random}
    r = requests.get(link, stream=True, headers=headers)
    if r.status_code == 200:
        with open(file_path, 'wb') as f:
            f.write(r.content)
    else:
        raise Exception("Error code {}.".format(r.status_code))
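Usage might look like this, with url_image, name and ID taken from the question's snippet (the destination folder is just a placeholder):

# Replaces the urllib.urlretrieve call from the question
save_image(url_image, 'path/' + name + '_' + ID + '.jpg')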
I get the following error while playing YouTube audio with my bot:
[tls # 0000024ef8c4d480] Error in the pull function.
[matroska,webm # 0000024ef8c4a400] Read error
[tls # 0000024ef8c4d480] The specified session has been invalidated for some reason.
Last message repeated 1 times
It seems like the YouTube links expire? I don't really know, but I need to fix this issue. This is my code:
class YTDLSource(discord.PCMVolumeTransformer):
    def __init__(self, source, *, data, requester):
        super().__init__(source)
        self.requester = requester
        self.title = data['title']
        self.description = data['description']
        self.uploader = data['uploader']
        self.duration = data['duration']
        self.web_url = data['webpage_url']
        self.thumbnail = data['thumbnail']

    def __getitem__(self, item: str):
        return self.__getattribute__(item)

    @classmethod
    async def create_source(cls, ctx, player, search: str, *, loop, download=True):
        async with ctx.typing():
            loop = loop or asyncio.get_event_loop()
            to_run = partial(ytdl.extract_info, url=search, download=download)
            raw_data = await loop.run_in_executor(None, to_run)

            if 'entries' in raw_data:
                # take first item from a playlist
                if len(raw_data['entries']) == 1:
                    data = raw_data['entries'][0]
                else:
                    data = raw_data['entries']
                    # loop entries to grab each video_url
                    total_duration = 0
                    try:
                        for i in data:
                            webpage = i['webpage_url']
                            title = i['title']
                            description = i['description']
                            uploader = i['uploader']
                            duration = i['duration']
                            thumbnail = i['thumbnail']
                            total_duration += duration
                            if download:
                                source = ytdl.prepare_filename(i)
                                source = cls(discord.FFmpegPCMAudio(source), data=i, requester=ctx.author)
                            else:
                                source = {'webpage_url': webpage, 'requester': ctx.author, 'title': title, 'uploader': uploader, 'description': description, 'duration': duration, 'thumbnail': thumbnail}
                            player.queue.append(source)
                    except Exception as e:
                        print(e)
                        return

                    embed = discord.Embed(title="Playlist", description="Queued", color=0x30a4fb, timestamp=datetime.now(timezone.utc))
                    embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
                    embed.set_thumbnail(url=data[0]['thumbnail'])
                    embed.add_field(name=raw_data['title'], value=f"{len(data)} videos queued.", inline=True)
                    embed.set_footer(text=raw_data["uploader"] + ' - ' + '{0[0]}m {0[1]}s'.format(divmod(total_duration, 60)))
                    await ctx.send(embed=embed)
                    return

            embed = discord.Embed(title="Playlist", description="Queued", color=0x30a4fb, timestamp=datetime.now(timezone.utc))
            embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
            embed.set_thumbnail(url=data['thumbnail'])
            embed.add_field(name=data['title'], value=(data["description"][:72] + (data["description"][72:] and '...')), inline=True)
            embed.set_footer(text=data["uploader"] + ' - ' + '{0[0]}m {0[1]}s'.format(divmod(data["duration"], 60)))
            await ctx.send(embed=embed)

            if download:
                source = ytdl.prepare_filename(data)
            else:
                source = {'webpage_url': data['webpage_url'], 'requester': ctx.author, 'title': data['title'], 'uploader': data['uploader'], 'description': data['description'], 'duration': data['duration'], 'thumbnail': data['thumbnail']}
                player.queue.append(source)
                return

            source = cls(discord.FFmpegPCMAudio(source), data=data, requester=ctx.author)
            player.queue.append(source)

    @classmethod
    async def regather_stream(cls, data, *, loop):
        loop = loop or asyncio.get_event_loop()
        requester = data['requester']
        to_run = partial(ytdl.extract_info, url=data['webpage_url'], download=True)
        data = await loop.run_in_executor(None, to_run)
        return cls(discord.FFmpegPCMAudio(data['url']), data=data, requester=requester)
I'm using the rewrite branch of discord.py for the bot.
I'm not sure if I need to provide more details? Please let me know, I really need to get this fixed...
In fact it isn't really a problem with your code (many people complain about this error).
It's just a possible issue when streaming a video. If you absolutely want to stream it, you have to accept this as a potential issue. Note how (almost) every music bot sets limitations on the video/music you want to listen to.
If you need to make sure you don't hit this issue, you have to fully download the music (which will also make the bot take longer to start playing).
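With the code from the question, that presumably means keeping download=True when the source is created, so FFmpeg plays a local file instead of an expiring stream URL. Roughly (bot.loop and the surrounding play command are assumptions, not from the post):

# Sketch: queue a fully downloaded file rather than a stream URL
await YTDLSource.create_source(ctx, player, search, loop=bot.loop, download=True)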
Would you be able to post all your code? I may have a solution for you if I can see the whole thing.
The solution I would recommend is to download the song and then delete it afterwards.
You could set your download to True and then add this to your player_loop:
try:
    # We are no longer playing this song... so, let's delete it!
    with YoutubeDL(ytdlopts) as ydl:
        info = ydl.extract_info(source.web_url, download=False)
        filename = ydl.prepare_filename(info)
        try:
            if os.path.exists(filename):
                os.remove(filename)
            else:
                pass
        except Exception as E:
            print(E)
    await self.np.delete()
except discord.HTTPException:
    pass
It's a bit botched but could be cleaned up; this was the best solution I have found for me.