Why does Google AutoML Sample Python Code Not Run? - google-cloud-automl

The sample Google AutoML prediction Python code raises an error on execution. The recommended invocation is "python predict.py YOUR_LOCAL_IMAGE_FILE YOUR_PROJECT_ID YOUR_MODEL_ID". The error is:
File "predict.py", line 25
print get_prediction(content, project_id, model_id)
^
SyntaxError: invalid syntax
(Thanks in advance)
Google sample code
import sys

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import service_pb2


def get_prediction(content, project_id, model_id):
    prediction_client = automl_v1beta1.PredictionServiceClient()
    name = 'projects/{}/locations/us-central1/models/{}'.format(project_id, model_id)
    payload = {'image': {'image_bytes': content}}
    params = {}
    request = prediction_client.predict(name, payload, params)
    return request  # waits till request is returned


if __name__ == '__main__':
    file_path = sys.argv[1]
    project_id = sys.argv[2]
    model_id = sys.argv[3]

    with open(file_path, 'rb') as ff:
        content = ff.read()
        print get_prediction(content, project_id, model_id)

The last line of the code (the print call) should not be indented in line with the body of the with block:
import sys

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import service_pb2


def get_prediction(content, project_id, model_id):
    prediction_client = automl_v1beta1.PredictionServiceClient()
    name = 'projects/{}/locations/us-central1/models/{}'.format(project_id, model_id)
    payload = {'image': {'image_bytes': content}}
    params = {}
    request = prediction_client.predict(name, payload, params)
    return request  # waits till request is returned


if __name__ == '__main__':
    file_path = sys.argv[1]
    project_id = sys.argv[2]
    model_id = sys.argv[3]

    with open(file_path, 'rb') as ff:
        content = ff.read()

    print get_prediction(content, project_id, model_id)

The sample is written for Python 2; under Python 3, print is a function and the call needs parentheses:

print(get_prediction(content, project_id, model_id))
import sys

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import service_pb2


# 'content' is base-64-encoded image data.
def get_prediction(content, project_id, model_id):
    prediction_client = automl_v1beta1.PredictionServiceClient()
    name = 'projects/{}/locations/us-central1/models/{}'.format(project_id, model_id)
    payload = {'image': {'image_bytes': content}}
    params = {}
    request = prediction_client.predict(name, payload, params)
    return request  # waits till request is returned


if __name__ == '__main__':
    file_path = sys.argv[1]
    project_id = sys.argv[2]
    model_id = sys.argv[3]

    with open(file_path, 'rb') as ff:
        content = ff.read()

    print(get_prediction(content, project_id, model_id))
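If the script also has to run under Python 2.7, one standard option (not part of the original answers, just a compatibility note) is the future import, which makes the parenthesized print call valid in both interpreters:

# Add at the very top of predict.py, before any other statements.
from __future__ import print_function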

Related

Python asyncio class to create functions dynamically and execute them in parallel, each with its own interval

I am trying to write a class that creates methods dynamically and executes them in parallel with asyncio, each with its own duration. I am new to asyncio and I am stuck at this point with no idea how to continue.
I collect servers with IP, port and command duration from a config file, try to create the methods in a loop, and then gather them with asyncio. Here is my code:
import asyncio
from datetime import datetime
# from common.config import ConfigConstructor


class RCONserver:
    def __init__(self, game: str, server_name=None):
        self.game = game
        self.server_name = server_name
        # self.game_config = ConfigConstructor('cfg/rcon_server.yml')

    async def send_rcon_command(self, ip: str, port: str, period: int, cnt: int):
        await asyncio.sleep(int(period))
        print(str(datetime.now()) + ": " + ip + " " + port)

    def get_servers(self):
        servers = []
        for server in ['game1', 'game2']:
            print(server)
            if server[:4] == "game":
                # s = self.game_config
                # s.fetch_section(server)
                # print(s)
                servers.append(
                    self.send_rcon_command('192.168.178.1',
                                           '30000',
                                           300,
                                           3))
        return servers


async def main():
    obj = RCONserver('game')
    await asyncio.gather(*obj.get_servers())


asyncio.run(main())
The code runs, but only once for each server in the YAML file.
What do I have to do to run it periodically with the given watch period parameter?
I think this should do the trick: with a while loop inside each coroutine and asyncio.gather, I can create the functions dynamically and run each one in parallel with its own interval:
import asyncio
from datetime import datetime
import random


class RCONServer:
    def __init__(self):
        self.rcon_loop = asyncio.get_event_loop()

    def dt(self):
        return datetime.now().strftime("%Y/%m/%d %H:%M:%S")

    def build_rcon_functions(self):
        # Build one coroutine per server, each with its own random interval.
        rcon_servers = []
        for server in ['game1', 'game2']:
            rcon_servers.append(
                self.rcon_command(server,
                                  "192.168.0.1",
                                  "30000",
                                  "some_password",
                                  random.randint(5, 10))
            )
        return rcon_servers

    async def rcon_command(self, server: str, ip: str, port: str, passwd: str, interval: int):
        # Endless loop: wait this server's interval, then "send" the command, forever.
        while True:
            await asyncio.sleep(int(interval))
            print(self.dt(), ">", server)

    async def run_loop(self):
        rcon_tasks = self.build_rcon_functions()
        try:
            print(self.dt(), "> Start")
            await asyncio.gather(*rcon_tasks)
            # gather() above never returns because every coroutine loops forever,
            # so run_forever() is never actually reached.
            self.rcon_loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            print(self.dt(), "> End")
            self.rcon_loop.close()


obj = RCONServer()
asyncio.run(obj.run_loop())
Any suggestions for optimizing it, or hints on how it could be solved better?
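One possible simplification (a sketch under my own assumptions, not part of the original answer): asyncio.run() already creates and closes an event loop, so the manually stored self.rcon_loop and the run_forever()/close() calls can be dropped; gather() over the endlessly looping coroutines is what keeps everything alive:

import asyncio
import random
from datetime import datetime


async def rcon_command(server: str, ip: str, port: int, interval: int):
    # Run forever: sleep this server's own interval, then "send" the command.
    while True:
        await asyncio.sleep(interval)
        print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"), ">", server, ip, port)


async def main():
    # One long-running task per server, each with its own random interval.
    tasks = [
        asyncio.create_task(rcon_command(name, "192.168.0.1", 30000, random.randint(5, 10)))
        for name in ('game1', 'game2')
    ]
    await asyncio.gather(*tasks)


if __name__ == '__main__':
    try:
        asyncio.run(main())  # creates and closes the event loop for us
    except KeyboardInterrupt:
        pass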

How to pass flowfile attribute inside a python class in NiFi?

Goal: add the filename attribute value as a field in the CSV using ExecuteScript with Python (Jython), reading it from the flowfile attribute.
Problem: how do I pass the flowfile into the callback so I can include the attribute in the output stream write?
The sample code below does not get the filename attribute.
class PyStreamCallback(StreamCallback):
    def __init__(self, flowFile):
        self.ff = flowFile
        pass

    def process(self, inputStream, outputStream):
        text = IOUtils.toString(inputStream, StandardCharsets.UTF_8)
        list_index = 0
        textArr = []
        newText = ''
        for t in text.splitlines():
            list_index += 1
            t = t + '|' + str(list_index) + '|"' + t + '"|' + self.ff.getAttribute('filename')
            textArr.append(t)
        newText = '\n'.join(textArr)
        outputStream.write(bytearray(newText.encode('utf-8')))


flowFile = session.get()
if (flowFile != None):
    flowFile = session.write(flowFile, PyStreamCallback())
    session.transfer(flowFile, REL_SUCCESS)
Declare a global variable to hold the filename attribute value. Sample code snippet:
import sys
import os

from org.apache.commons.io import IOUtils
from java.nio.charset import StandardCharsets
from org.apache.nifi.processor.io import StreamCallback
from org.apache.nifi.processors.script import ExecuteScript
from org.python.core.util.FileUtil import wrap
from io import StringIO

global file_name


# Define a subclass of StreamCallback for use in session.write()
class PyStreamCallback(StreamCallback):
    def __init__(self):
        pass

    def process(self, inputStream, outputStream):
        with wrap(inputStream) as f:
            lines = f.readlines()
        # Append the filename attribute to every line (build a new list so the
        # modified lines are the ones actually written out).
        lines = [line.strip() + '|' + file_name + '\n' for line in lines]
        with wrap(outputStream, 'w') as filehandle:
            filehandle.writelines("%s" % line for line in lines)
# end class


flowFile = session.get()
if (flowFile != None):
    try:
        file_name = flowFile.getAttribute('filename')
        flowFile = session.write(flowFile, PyStreamCallback())
        session.transfer(flowFile, ExecuteScript.REL_SUCCESS)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        excp = str(exc_type) + str(fname) + str(exc_tb.tb_lineno)
        attrMap = {'exception': str(excp)}
        flowFile = session.putAllAttributes(flowFile, attrMap)
        session.transfer(flowFile, ExecuteScript.REL_FAILURE)
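Alternatively, the constructor approach the question started with also works: pass the flowfile into the callback instead of using a global. The following is only a sketch of that variant (not part of the answer above), assuming the usual ExecuteScript globals session and REL_SUCCESS:

from org.apache.commons.io import IOUtils
from java.nio.charset import StandardCharsets
from org.apache.nifi.processor.io import StreamCallback


class PyStreamCallback(StreamCallback):
    def __init__(self, flowFile):
        # Keep a reference so process() can read attributes later.
        self.ff = flowFile

    def process(self, inputStream, outputStream):
        text = IOUtils.toString(inputStream, StandardCharsets.UTF_8)
        file_name = self.ff.getAttribute('filename')
        out_lines = [line + '|' + file_name for line in text.splitlines()]
        outputStream.write(bytearray('\n'.join(out_lines).encode('utf-8')))


flowFile = session.get()
if flowFile is not None:
    # The flowfile is handed to the callback, so no global is needed.
    flowFile = session.write(flowFile, PyStreamCallback(flowFile))
    session.transfer(flowFile, REL_SUCCESS)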

Return a different Content-Type response on error

Given an endpoint with one renderer:
import os

from rest_framework.decorators import renderer_classes
from rest_framework.exceptions import NotFound
from rest_framework.renderers import BaseRenderer
from rest_framework.response import Response


class PNGRenderer(BaseRenderer):
    media_type = 'image/png'
    format = 'png'

    def render(self, data, media_type=None, renderer_context=None):
        return data


@renderer_classes((PNGRenderer, ))
def some_endpoint(request, format=None):
    filename = 'foo.png'
    if not os.path.exists(filename):
        raise NotFound("File not found")
    else:
        with open(filename, 'rb') as f:
            data = f.read()
        return Response(data)
I would like to return application/json in case foo.png is not found. In real life the filename is not always "foo.png" but dynamic, obviously.
How can I return JSON so that Content-Type is properly set to application/json in the response, even though format='png' was passed to the endpoint?
While writing the question, I found the answer for changing the renderer: request.accepted_renderer = JSONRenderer().
In particular:
@renderer_classes((PNGRenderer, ))
def some_endpoint(request, format=None):
    filename = 'foo.png'
    if not os.path.exists(filename):
        # Switch the renderer before raising, so the error is rendered as JSON.
        request.accepted_renderer = JSONRenderer()
        raise NotFound("File not found")
    else:
        with open(filename, 'rb') as f:
            data = f.read()
        return Response(data)
I am not sure if this is a proper way to deal with exceptions for binary Content-Types.
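If the renderer switch should not be repeated in every view, one centralised option (a sketch under my own assumptions, not from the original post) is DRF's custom exception handler hook, which receives the request in its context and can swap the renderer before the error response is rendered:

# settings.py (hypothetical module path):
# REST_FRAMEWORK = {'EXCEPTION_HANDLER': 'myapp.utils.binary_safe_exception_handler'}

from rest_framework.views import exception_handler
from rest_framework.renderers import JSONRenderer


def binary_safe_exception_handler(exc, context):
    # Let DRF build its standard error response first.
    response = exception_handler(exc, context)
    if response is not None:
        # Force JSON rendering even when the view only declares a binary renderer.
        context['request'].accepted_renderer = JSONRenderer()
        context['request'].accepted_media_type = JSONRenderer().media_type
    return response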

Filter tweets in tweepy.StreamListener on_data method

I understand from many articles on Stack Overflow that the filter method in the tweepy.streaming.Stream class uses a logical OR for the track and locations arguments,
so the code below will return tweets either from location=USA or containing one of the tracked keywords:
streamObj = tweepy.streaming.Stream(oauthObject,
                                    EchoStreamListener(api=apiInstance,
                                                       dump_json=args.json,
                                                       numtweets=args.numtweets))
keyWordList = ['panthers', 'falcon']
GEOBOX_USA = [-125, 25.1, -60.5, 49.1]
streamObj.filter(locations=GEOBOX_USA, track=keyWordList, languages=['en'])
This solution (How to add a location filter to tweepy module), which checks keywords in the on_status method, works great, but since I need to store the entire JSON payload I think I have to use on_data instead.
So I changed on_data (as shown in the code below), but I get an error:
File "/Library/Python/2.7/site-packages/tweepy/streaming.py", line 294, in _run
raise exception
KeyError: 'text'
# -*- coding: utf-8 -*-
from types import *
import sys
import tweepy
import json
import argparse
import io


class EchoStreamListener(tweepy.StreamListener):
    def __init__(self, api, dump_json=False, numtweets=0):
        self.api = api
        self.dump_json = dump_json
        self.count = 0
        self.limit = int(numtweets)
        super(tweepy.StreamListener, self).__init__()

    # def on_status(self, status):
    #     if any(keyWord in status.text.lower() for keyWord in keyWordList):
    #         print status.text
    #
    #         self.count += 1
    #         return False if self.count == self.limit else True
    #     else:
    #         return True  # Don't kill the stream

    def on_data(self, tweet):
        tweet_data = json.loads(tweet)  # This allows the JSON data to be used as a normal dictionary
        if any(keyWord in tweet_data['text'] for keyWord in keyWordList):
            if self.dump_json:
                print json.dumps(tweet_data)
                saveFile.write(unicode(tweet) + "\n")
                self.count += 1
                return False if self.count == self.limit else True
            else:
                print tweet_data['created_at', 'name', 'text'].encode("utf-8").rstrip()

    def on_error(self, status_code):
        print >> sys.stderr, 'Encountered error with status code:', status_code
        return True


def get_parser():
    parser = argparse.ArgumentParser(add_help=True)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '-j', '--json',
        action='store_true',
        help='dump each tweet as a json string'
    )
    group.add_argument(
        '-t', '--text',
        dest='json',
        action='store_false',
        help='dump each tweet\'s text'
    )
    parser.add_argument(
        '-n', '--numtweets',
        metavar='numtweets',
        help='set number of tweets to retrieve'
    )
    return parser


if __name__ == '__main__':
    oauthObject = tweepy.OAuthHandler(myconsumer_key, myconsumer_secret)
    oauthObject.set_access_token(myaccess_key, myaccess_secret)
    apiInstance = tweepy.API(oauthObject)

    parser = get_parser()
    args = parser.parse_args()

    streamObj = tweepy.streaming.Stream(oauthObject,
                                        EchoStreamListener(api=apiInstance,
                                                           dump_json=args.json,
                                                           numtweets=args.numtweets))
    keyWordList = ['panthers', 'falcon']
    GEOBOX_USA = [-125, 25.1, -60.5, 49.1]

    saveFile = io.open('/Users/deepaktanna/raw_tweets.json', 'w', encoding='utf-8')
    streamObj.filter(locations=GEOBOX_USA, languages=['en'])
    saveFile.close()
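No answer is attached to this question here, but the traceback itself points at the cause: the streaming API also delivers messages with no 'text' key (limit notices, delete notices, disconnect messages), so indexing tweet_data['text'] unconditionally raises KeyError. A minimal sketch of a guarded on_data for the EchoStreamListener above, keeping the question's Python 2 style:

def on_data(self, tweet):
    tweet_data = json.loads(tweet)
    if 'text' not in tweet_data:
        # Limit/delete/disconnect notices carry no 'text' field;
        # returning True skips them and keeps the stream alive.
        return True
    if any(keyWord in tweet_data['text'].lower() for keyWord in keyWordList):
        if self.dump_json:
            saveFile.write(unicode(tweet) + "\n")
        else:
            print tweet_data['text'].encode("utf-8").rstrip()
        self.count += 1
        return False if self.count == self.limit else True
    return True  # don't kill the stream for non-matching tweets

Note also that tweet_data['created_at','name','text'] in the question indexes the dictionary with a tuple, which would raise a KeyError of its own; the sketch prints only the text field.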

In Pyside, how can I handle close call when clicking a close button?

What I have in place to handle a close doesn't seem to work. The error I get when running this is:
Error: AttributeError: file line 82: 'Ui_Dialog' object has no attribute 'close'
from PySide import QtCore, QtGui, QtUiTools
import maya.cmds as cmds

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(400, 300)
        self.label = QtGui.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(60, 20, 131, 16))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        ............
        ................
        ..................
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        self.swapRefBtn.clicked.connect(self.swapRefBtn_clicked)
        self.closeBtn.clicked.connect(self.close())  # <---- THIS HERE WON'T WORK

    def retranslateUi(self, Dialog):
        ............
        ................
        ..................

    def swapRefBtn_clicked(self):
        pass


if __name__ == "__main__":
    import sys
    app = QtGui.QApplication.instance()
    if app is None:
        app = QtGui.QApplication(sys.argv)
    Dialog = QtGui.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    app.exec_()
Perhaps you want to connect the close button to the QDialog.accept or QDialog.reject slot. As for how to handle them, here is an example:
import sys
import PyQt4.QtGui as qg


class Dialog(qg.QDialog):
    def __init__(self):
        super().__init__()
        self.setup_widgets()

    def setup_widgets(self):
        self.ok = qg.QPushButton('OK')
        self.ok.clicked.connect(self.accept)
        self.cancel = qg.QPushButton('Cancel')
        self.cancel.clicked.connect(self.reject)

        layout = qg.QHBoxLayout()
        layout.addWidget(self.ok)
        layout.addWidget(self.cancel)
        self.setLayout(layout)

    def accept(self):
        # Your logic here
        super().accept()

    def reject(self):
        response = qg.QMessageBox.warning(self, 'Warning', 'Cancel?',
                                          qg.QMessageBox.Yes | qg.QMessageBox.No)
        if response == qg.QMessageBox.Yes:
            super().reject()


if __name__ == '__main__':
    app = qg.QApplication(sys.argv)
    dialog = Dialog()
    dialog.show()
    sys.exit(app.exec_())
See also: QDialog.done()
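Applied to the generated Ui_Dialog in the question (a sketch, not part of the answer above): there are two problems, Ui_Dialog has no close method (the QDialog passed into setupUi does), and connect(self.close()) calls the slot immediately instead of passing it. Connecting the button to the dialog's own slot fixes both:

class Ui_Dialog(object):
    def setupUi(self, Dialog):
        # ... widget setup as before ...
        # Pass the bound slot, do not call it, and use the dialog's close()
        # (or Dialog.reject), since Ui_Dialog itself cannot be closed.
        self.closeBtn.clicked.connect(Dialog.close)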
