Hi, I am getting an "Unable to connect to localhost on port 7687 - is the server running?" error whenever my Python code executes.
import os
import json
from urllib.parse import urlparse, urlunparse
from django.shortcuts import render
# Create your views here.
from py2neo import Graph, authenticate
from bottle import get,run,request,response,static_file
from py2neo.packages import neo4j
# Build the GrapheneDB connection from the Heroku-provided environment variable.
# GRAPHENEDB_GOLD_URL is expected to look like http://user:pass@host:port/db/data.
# NOTE(review): os.environ.get() returns None when the variable is unset, which
# makes urlparse() produce empty fields and the connection fail — a missing or
# malformed variable is the usual cause of the "Unable to connect" error here.
url = urlparse(os.environ.get("GRAPHENEDB_GOLD_URL"))
# Strip the credentials out of the URL: py2neo's authenticate() wants the bare
# scheme://host:port while user/password are registered separately.
url_without_auth = urlunparse((url.scheme, ("{0}:{1}").format(url.hostname, url.port), '', None, None, None))
user = url.username
password = url.password
# Register credentials for this host, then open the Graph over HTTP
# (bolt=False) rather than the Bolt port 7687.
authenticate(url_without_auth,user, password)
graph = Graph(url_without_auth, bolt = False)
# Local-development alternative (hard-coded password), kept for reference:
#graph = Graph(password='vjsj56#vb')
#get("/")  # NOTE(review): presumably a mangled @get("/") decorator — without
# the leading "@" the route is never registered with bottle; verify upstream.
def get_index():
    """Serve the static index.html page for the movie browser UI."""
    return static_file("index.html", root="static")
#get("/graph")  # NOTE(review): presumably a mangled @get("/graph") decorator.
def get_graph():
    """Return the movie graph as a D3-friendly ``{"nodes": ..., "links": ...}`` dict.

    Fix: bottle route handlers are plain functions called with no arguments;
    the original took a stray ``self`` parameter (copy-paste from a class),
    which would make every request fail with a TypeError before the query ran.
    Sibling handlers (get_search, get_movie) correctly take no ``self``.
    """
    results = graph.run(
        "MATCH (m:Movie)<-[:ACTED_IN]-(a:Person) "
        "RETURN m.title as movie, collect(a.name) as cast "
        "LIMIT {limit}", {"limit": 10})
    nodes = []
    rels = []
    i = 0
    for movie, cast in results:
        # One node per movie; its list index is the link target.
        nodes.append({"title": movie, "label": "movie"})
        target = i
        i += 1
        for name in cast:
            actor = {"title": name, "label": "actor"}
            try:
                # Reuse an existing actor node so shared actors connect movies.
                source = nodes.index(actor)
            except ValueError:
                nodes.append(actor)
                source = i
                i += 1
            rels.append({"source": source, "target": target})
    return {"nodes": nodes, "links": rels}
#get("/search")  # NOTE(review): presumably a mangled @get("/search") decorator.
def get_search():
    """Case-insensitive substring search over movie titles, returned as JSON."""
    if "q" not in request.query:
        # No search term supplied — nothing to look for.
        return []
    q = request.query["q"]
    cursor = graph.run(
        "MATCH (movie:Movie) "
        "WHERE movie.title =~ {title} "
        "RETURN movie", {"title": "(?i).*" + q + ".*"})
    response.content_type = "application/json"
    return json.dumps([{"movie": dict(record["movie"])} for record in cursor])
#get("/movie/<title>")  # NOTE(review): presumably a mangled @get(...) decorator.
def get_movie(title):
    """Fetch a single movie plus its cast as (name, job, role) dicts."""
    cursor = graph.run(
        "MATCH (movie:Movie {title:{title}}) "
        "OPTIONAL MATCH (movie)<-[r]-(person:Person) "
        "RETURN movie.title as title,"
        "collect([person.name, head(split(lower(type(r)),'_')), r.roles]) as cast "
        "LIMIT 1", {"title": title})
    row = cursor.next()
    cast = [dict(zip(("name", "job", "role"), member)) for member in row["cast"]]
    return {"title": row["title"], "cast": cast}
This code runs fine on my local system but gives a connection error when deployed on Heroku with GrapheneDB.
exception location: /app/.heroku/python/lib/python3.6/site-packages/py2neo/packages/neo4j/v1/connection.py in connect, line 387
I'm Juanjo, from GrapheneDB.
At first glance the code looks fine and the error code points to a wrong URL. It might be a problem with the environment variable. Can you please check your GRAPHENEDB_GOLD_URL variable?
You can do it like this:
$ heroku config:get GRAPHENEDB_GOLD_URL
It should be something like:
http://<user>:<pass>@XXX.graphenedb.com:24789/db/data
(please don't share your URL here)
If your variable is empty, please read more here on retrieving GrapheneDB environment variables.
If that's not your issue, or the problem persists, could you please contact us via the support link on our admin panel? Heroku team will forward the support ticket to us and we'll have all the information related to your database injected into the ticket.
Thanks,
Juanjo
Related
I want to deploy this code in lambda and run it every hour to generate a CSV. How can I do that and what are the steps for it?
#!/usr/bin/env python3
import argparse
import boto3
import datetime
import re
import csv
import random
import pandas as pd
now = datetime.datetime.utcnow()
start = '2022-12-01'
end = '2022-12-20'
# Renamed from `str`, which shadowed the builtin str() type.
TIME_SUFFIX = ' 00:00'

# to use a specific profile e.g. 'dev'
session = boto3.session.Session(profile_name='dev')
cd = session.client('ce', 'eu-west-2')

results = []
token = None
# Page through Cost Explorer; NextPageToken is absent on the last page.
while True:
    kwargs = {'NextPageToken': token} if token else {}
    response = cd.get_cost_and_usage(
        TimePeriod={'Start': start, 'End': end},
        Granularity='MONTHLY',
        Metrics=['UnblendedCost'],
        Filter={"And": [
            {"Dimensions": {"Key": "SERVICE",
                            "Values": ["Amazon Relational Database Service",
                                       "Amazon Elastic Compute Cloud - Compute"]}},
            {"Tags": {"Key": "Name", "Values": ["qa-mssql"]}},
        ]},
        GroupBy=[{'Type': 'TAG', 'Key': 'app'}, {'Type': 'TAG', 'Key': 'Name'}],
        **kwargs)
    results += response['ResultsByTime']
    token = response.get('NextPageToken')
    if not token:
        break


def data():
    """Print the collected costs as CSV rows and return them as a list.

    Fixes to the original:
    - ``return value`` sat inside the inner loop, so only the first group of
      the first period was ever emitted;
    - ``value = print(...)`` captured print()'s return value, which is always
      None, so the function returned nothing useful;
    - rows are now joined with commas (no stray spaces) so the output is
      valid CSV.
    """
    rows = []
    print(','.join(['date', 'teams', 'resource_names', 'Amounts', 'resource_type']))
    for result_by_time in results:
        for group in result_by_time['Groups']:
            amount = group['Metrics']['UnblendedCost']['Amount']
            resource_type = 'mssql'
            teams = ','.join(group['Keys']).replace("app$", "").replace("Name$", "")
            row = result_by_time['TimePeriod']['End'] + TIME_SUFFIX + ',' + teams + ',' + amount + ',' + resource_type
            print(row)
            rows.append(row)
    return rows


data()
I am pretty new to Lambda and would like to know the basic step-by-step approach to it.
To deploy your code to Lambda, package it as a zip file and deploy it directly to the function from the console, or upload it to S3 and then reference that path in the Lambda function.
If your code doesn't require any dependencies which are not part of lambda environment, you can directly edit the code in console and save it.
Add trigger to the lambda with "CloudWatchEvent Schedule"
Refer this aws doc1 and this doc2
I'm trying to create a cog for my Discord bot that scrapes Indeed and returns info on job postings (position, company, location, etc). My bot is hosted on Heroku, which is where the issues start. I've tested my web scraper by itself and when implemented as a cog for my Discord bot locally. It works both times. However, when I tried to deploy it on Heroku, the cog stopped working.
I read that this was because cloud-hosting services have blacklists or something for web scraping apps and functions. So I tried to use rq as suggested in this post:
https://devcenter.heroku.com/articles/python-rq
I did all the steps, added an additional worker, a worker.py file, and installed the Redis To Go addon. However, when I try to use the following, I receive nothing back:
# Fragment from the question: the same fetch that works locally returns a
# page without job cards when run from Heroku (soup.find() comes back None).
url = get_url(job_title, location)
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
# soup.find() returns None
I'm sure I just implemented something wrong, but can someone help me please? The full code is below:
import discord
from discord.ext import commands
import random
import requests
import time
from bs4 import BeautifulSoup
from rq import Queue
from worker import conn
# Module-level result holder filled in by get_jobs().
ret = []


def get_url(position, location):
    """Build the Indeed search URL for a job title and a location.

    Spaces in either argument are encoded as '+' for the query string.
    """
    base = 'https://www.indeed.com/jobs?q={}&l={}'
    query = position.replace(" ", "+")
    where = location.replace(" ", "+")
    return base.format(query, where)
def get_jobs(job_title, location):
    """Scrape one Indeed results page (max 15 jobs per page).

    Returns ``[job_names, companies, locations, salaries, links]`` and also
    stores it in the module-level ``ret`` for backward compatibility.

    Fix: the original only assigned the global and returned nothing.  When
    this function runs inside an rq worker, that global lives in the worker
    process, so the enqueuing (bot) process never sees it and ``job.result``
    is always None.  Returning the list makes the result retrievable via rq.

    NOTE(review): consistently empty lists with HTTP 200 usually mean Indeed
    served a bot-check or JS-rendered page to the datacenter IP — verify the
    fetched HTML actually contains job cards.
    """
    global ret
    url = get_url(job_title, location)
    response = requests.get(url)
    print(f"Responses: {response}")
    soup = BeautifulSoup(response.text, "html.parser")

    job_names = [tag.get_text() for tag in soup.find_all("h2", class_="jobTitle")]
    companies = [tag.get_text() for tag in soup.find_all("span", class_="companyName")]
    locations = [tag.get_text() for tag in soup.find_all("div", class_="companyLocation")]

    salaries = []
    for tag in soup.find_all("div", class_="attribute_snippet"):
        text = tag.get_text()
        # Anything that doesn't look like a dollar figure is reported as Unknown.
        salaries.append(text if text.startswith("$") else "Unknown")

    links = []
    for anchor in soup.find_all("a", class_=lambda value: value and value.startswith("tapItem fs-unmask result"), href=True):
        links.append("https://indeed.com" + anchor["href"])

    ret = [job_names, companies, locations, salaries, links]
    print(ret)
    return ret
class JobScraper(commands.Cog):
    """Discord cog that queues Indeed scrapes on an rq background worker."""

    def __init__(self, client): # References whatever is passed through the client from discord
        self.client = client
        # rq queue backed by the Redis connection exported from worker.py.
        self.q = Queue(connection=conn)

    #commands.command(aliases=["job", "find_job", "find_jobs", "get_job", "get_jobs"])
    # NOTE(review): the line above is presumably a mangled @commands.command(...)
    # decorator; without the leading "@" the command is never registered.
    async def jobs(self, ctx, *, query):
        '''Scrapes Indeed.com for jobs and returns them.
        The input format should be "eve jobs [job title], [job location], [num returned]
        e.g. eve jobs ai researcher, san francisco, 3'''
        key_terms = query.split(",")
        key_terms = [term.strip() for term in key_terms]
        if len(key_terms) == 3:
            num_jobs = int(key_terms[2])
        else:
            num_jobs = 15
        # ret = get_jobs(key_terms[0], key_terms[1])
        # NOTE(review): enqueue() returns immediately and get_jobs runs in a
        # separate worker process, so the module-level `ret` read below is
        # never updated in this process — the job's result (job.result, once
        # the worker finishes) is where the data ends up. Verify against
        # worker.py.
        job = self.q.enqueue(get_jobs, key_terms[0], key_terms[1])
        await ctx.send("Here is what I found:")
        # NOTE(review): assumes `ret` already holds at least num_jobs entries;
        # with the stale global above this raises IndexError when deployed.
        for i in range(num_jobs):
            await ctx.send("```" +
                           f"\nTitle: {ret[0][i]}" +
                           f"\nCompany: {ret[1][i]}" +
                           f"\nLocation: {ret[2][i]}" +
                           f"\nSalary: {ret[3][i]}" +
                           f"\nLink: {ret[4][i]}" +
                           "\n```")
def setup(client):
    """Entry point used by discord.py's load_extension to register the cog."""
    client.add_cog(JobScraper(client))
I have an AWS IOT button set up and working with IFTTT and SmartLife to turn a device on/off. Currently I have it set up to use single and double click to turn on and off, because IFTTT doesn't seem to have a toggle app (at least, not for use with SmartLife.)
How can I make it a toggle, so I can use a single click to alternately turn on and off?
Looking for a free solution.
There is a solution using apilio, but it's not a free solution: Create a toggle between two actions in IFTTT .
For a free solution, use DynamoDB from Lambda to save the button state, and invert the state each invocation. It either sends "IotButton2" or "IotButton2Off" to IFTTT.
'''
Example Lambda IOT button IFTTT toggle
Test payload:
{
"serialNumber": "GXXXXXXXXXXXXXXXXX",
"batteryVoltage": "990mV",
"clickType": "SINGLE" # or "DOUBLE" or "LONG"
}
'''
from __future__ import print_function
import boto3
import json
import logging
import urllib2
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
maker_key = 'xxxxxxxxxxxxxxxxx' # change this to your Maker key
def get_button_state(db, name):
    """Look up the stored on/off state for *name* in the 'toggles' table.

    Returns the boolean 'on' attribute, or False when the item does not
    exist yet or the DynamoDB lookup fails (the error is printed).
    """
    table = db.Table('toggles')
    try:
        # Item shape: {'name': 'IotButton2', 'on': False}
        item = table.get_item(Key={'name': name}).get('Item')
    except ClientError as e:
        print(e.response['Error']['Message'])
        return False
    if item is not None:
        return item['on']
    return False
def set_button_state(db, name, state):
    """Persist *state* for *name* in the 'toggles' table.

    Failures are printed rather than raised, mirroring get_button_state.
    """
    try:
        db.Table('toggles').put_item(Item={'name': name, 'on': state})
    except ClientError as e:
        print(e.response['Error']['Message'])
def lambda_handler(event, context):
    """Toggle the stored button state and fire the matching IFTTT Maker event.

    If the saved state was on, the "...Off" event is sent; the inverted
    state is written back to DynamoDB either way.
    """
    logger.info('Received event: ' + json.dumps(event))
    db = boto3.resource('dynamodb')
    maker_event = "IotButton2"
    # Uncomment to get per-click-type events (SINGLE/DOUBLE/LONG):
    # maker_event += ":" + event["clickType"]
    state = get_button_state(db, maker_event)
    logger.info(maker_event + " state = " + ("on" if state else "off"))
    set_button_state(db, maker_event, not state)
    if state:
        maker_event += "Off"
    logger.info('Maker event: ' + maker_event)
    url = 'https://maker.ifttt.com/trigger/%s/with/key/%s' % (maker_event, maker_key)
    f = urllib2.urlopen(url)
    body = f.read()
    f.close()
    logger.info('"' + maker_event + '" event has been sent to IFTTT Maker channel')
    return body
The above version responds to any type of click (single, double, long.) You can control 3 different switches by uncommenting this line:
maker_event += ":" + event["clickType"]
which would translate to these IFTTT events:
IotButton2:SINGLE
IotButton2:SINGLEOff
IotButton2:DOUBLE
IotButton2:DOUBLEOff
IotButton2:LONG
IotButton2:LONGOff
Create the DynamoDB table. For my example, the table name is "toggles" with one key field "name" and one boolean field "on". The table has to exist, but if the entry does not, it gets created the first time you click the button or test the Lambda function.
You have to update the Lambda function role to include your DynamoDb permissions. Add the following lines to the policy:
{
"Effect": "Allow",
"Action": [
"dynamodb:GetItem",
"dynamodb:PutItem"
],
"Resource": [
"arn:aws:dynamodb:us-east-1:xxxxxxxx:table/toggles"
]
}
(Get the ARN from AWS console DynamoDB -> table -> toggles -> Additional information.)
You can also edit the above function to handle multiple buttons, by checking the serial number.
I tried a sample program for getting email messages from an Outlook account using IMAP. The account has 20 folders, and the program fetches all email messages except for three folders (Contacts, Calendar, Tasks), where it gets no data and throws a server error instead. How do I fix this error?
Code
import imaplib
import pprint
import email
import base64
import json
import re
import os
import fileinput
imap_host = 'outlook.office365.com'
imap_user = 'XXXXXXXXXXX'
imap_pass = 'XXXXXXXXXXXXX'
count = 0
file_path = 'geek.txt'

# connect to host using SSL and log in
imap = imaplib.IMAP4_SSL(imap_host, 993)
l = imap.login(imap_user, imap_pass)

# Parses one LIST response line into (flags, delimiter, mailbox name).
list_response_pattern = re.compile(r'\((?P<flags>.*?)\) "(?P<delimiter>.*)" (?P<name>.*)')

# Get list of synced folders
list_data = imap.list()

# If local storage is empty, sync all folder details.
# (Fixes vs. the pasted original: indentation restored; the pointless
# module-level `global day` removed; the inner loop variable renamed so it
# no longer shadows the folder loop variable; the state file is closed via
# a context manager instead of an open() bound to the builtin name `file`.)
print(os.stat(file_path).st_size)
if os.stat(file_path).st_size == 0:
    for folder_line in list_data[1]:
        # Folder name is everything after the '"/"' delimiter.
        sample = re.findall('"\/"(.*)', folder_line.decode("utf-8"))
        try:
            print("message")
            print(sample[0].lstrip().strip('"'))
            # NOTE(review): non-mail folders (Contacts, Calendar, Tasks) are
            # not message stores; Exchange rejects SELECT/FETCH on them with
            # "Retrieval using the IMAP4 protocol failed".  Skipping folders
            # whose LIST flags include \Noselect before selecting avoids the
            # server error — confirm against the LIST output.
            data = imap.select(sample[0].lstrip())
            search_resp, search_data = imap.search(None, "ALL")
            match = list_response_pattern.match(folder_line.decode("utf-8"))
            flags, delimiter, mailbox_name = match.groups()
            print("1")
            print(mailbox_name)
            mailbox_name = mailbox_name.strip('"')
            print(mailbox_name)
        except Exception as e:
            print(e)
            continue

        # Get current status of the folder.
        current_status = imap.status(
            '"{}"'.format(mailbox_name),
            '(MESSAGES RECENT UIDNEXT UIDVALIDITY UNSEEN)',
        )
        print(current_status)

        # Fetch each message by UID.
        msg_ids = search_data[0].split()
        print("total count: ", len(msg_ids))
        for msg_id in msg_ids:
            print("$$$$$$$$$$$$$$$$$$$$")
            print("Message Ids: ", msg_id)
            count = count + 1
            fetch_resp, fetch_UID = imap.fetch(msg_id, 'UID')
            print("Fetch UID: ", fetch_UID)
            # Extract the numeric UID out of a response like [b'3 (UID 11)'].
            day = bytes(str(fetch_UID[0].split()[2]).split("'")[1].split(')')[0], 'utf-8')
            print("ID: ", day)
            fetch_resp, fetch_mdg = imap.uid('fetch', day, '(RFC822)')
            print(fetch_mdg)
            print("$$$$$$$$$$$$$$$$$$$$$")
            email_msg = fetch_mdg[0][1]
            if email_msg and isinstance(email_msg, str):
                try:
                    email_msg = email.message_from_string(email_msg)
                except Exception:
                    email_msg = None
            elif email_msg and isinstance(email_msg, bytes):
                try:
                    email_msg = email.message_from_bytes(email_msg)
                except Exception:
                    email_msg = None
            print("*********************************")
            print("Count: ", count)
            print("UID: ", day)
            print(mailbox_name)
            # Guard: parsing may have failed, leaving email_msg as None.
            if email_msg is not None:
                print(email_msg['To'])
                print(email_msg['From'])
                print(email_msg['subject'])
            print(email_msg)
            print("*********************************")

        # Store folder details in the state file.
        status_details = current_status[1][0].decode("utf-8")
        status_details = status_details.split('(')[1].split(')')[0].split(' ')
        print(status_details)
        if len(msg_ids) == 0:
            json1 = json.dumps({'total_count': int(status_details[1]), 'UID': 0, 'UIDNext': int(status_details[5]), 'UIDValidity': int(status_details[7]), 'Folder name': mailbox_name})
        else:
            json1 = json.dumps({'total_count': int(status_details[1]), 'UID': int(day), 'UIDNext': int(status_details[5]), 'UIDValidity': int(status_details[7]), 'Folder name': mailbox_name})
        with open(file_path, 'a') as state_file:
            state_file.write(json1)
            state_file.write("\n")
        print('hi')
Response
$$$$$$$$$$$$$$$$$$$$
Message Ids: b'3'
Fetch UID: [b'3 (UID 11)']
ID: b'11'
[(b'3 (RFC822 {757}', b'MIME-Version: 1.0\r\nContent-Type: text/plain; charset="us-ascii"\r\nFrom: Microsoft Exchange Server\r\nTo: "\r\nSubject: Retrieval using the IMAP4 protocol failed for the following message:\r\n 11\r\nContent-Transfer-Encoding: quoted-printable\r\n\r\nThe server couldn\'t retrieve the following message:\r\n\r\nSubject: "Test email Sync 3"\r\nFrom: "Imap Testing" ("/O=3DEXCHANGELABS/OU=3DEXCHANGE ADMINISTRATIVE GROUP=\r\n (FYDIBOHF23SPDLT)/CN=3DRECIPIENTS/CN=3DEBF2483D9A0145A59A48B829B12A45E4-MA=\r\nILBOX1")\r\nSent date: 5/6/2020 2:02:59 AM\r\n\r\nThe message hasn\'t been deleted. You might be able to view it using either =\r\nOutlook or Outlook Web App. You can also contact the sender to find out wha=\r\nt the message says.=\r\n'), b' UID 11 FLAGS (\\Seen))']
$$$$$$$$$$$$$$$$$$$$$
Server Error
Subject: Retrieval using the IMAP4 protocol failed for the following message:
7
Content-Transfer-Encoding: quoted-printable
The server couldn't retrieve the following message:
Subject: "Testing"
Sent date: 5/6/2020 2:01:54 AM
The message hasn't been deleted. You might be able to view it using either =
Outlook or Outlook Web App. You can also contact the sender to find out wha=
t the message says.=
I have around 20 folders. I iterate over them one by one, get the current status of each folder, and store it in a sample file; that works successfully. But when I try to print the email messages of some folders (Contacts, Calendar, Tasks), I get the response shown above.
Does AppHub let us see reviews of our apps from all marketplaces at once? As I didn't find any way to do that, I took some time writing code to print them all to a file, so I won't waste my time looking for them in every single language.
I'd appreciate any better solution. In the worst case, I'm glad to share the code with anyone who finds it useful.
It uses BeautifulSoup.
The only parameter is the id of the app, like this:
wp7reviews.py 62289160-6970-4674-85a0-aef3dbe3f93d
Here is the code
import sys
import getopt
from urllib2 import URLError
from urllib2 import HTTPError
import urllib2
from BeautifulSoup import BeautifulStoneSoup
# Parse command-line options (no option letters accepted; everything lands
# in extraparams).
opts, extraparams = getopt.getopt(sys.argv[1:], '')
# starts at the second element of argv since the first one is the script name
# extraparms are extra arguments passed after all option/keywords are assigned
# opts is a list containing the pair "option"/"value"
#print 'Opts:',opts
#print 'Extra parameters:',extraparams
try:
    appid = extraparams[0]
except IndexError:
    # Fix: the original used a bare `except:`, which would also swallow
    # unrelated errors; only a missing positional argument should fall back.
    # Awsome Linkit appid as default appid
    appid = "62289160-6970-4674-85a0-aef3dbe3f93d"
# Report file that collects every review; truncated on each run.
allreviewsFILE = open("allreviews.txt", "w")


def output(text):
    """Append *text* to the report file without a trailing newline."""
    allreviewsFILE.write(text)
    #print text,


def outputln(text):
    """Append *text* to the report file followed by a newline."""
    allreviewsFILE.write(text + '\n')
    #print text
def geturl(lang):
    """Zune catalog URL for this app in the given marketplace language."""
    return "http://catalog.zune.net/v3.2/" + lang + "/apps/" + appid
# NOTE(review): everything below is Python 2 (print statements, urllib2,
# `except HTTPError, e` syntax) and the indentation was lost when the code
# was pasted; it is restored here without changing any code.  The outer
# try wraps the whole fetch-and-report run so any HTTP/URL failure aborts
# with a message.
try:
    # Fetch the en-us catalog entry once, just to print the app title.
    request = urllib2.Request(geturl("en-us"))
    fd = urllib2.urlopen(request)
    content = fd.read()
    fd.close()
    soup = BeautifulStoneSoup(content)
    try:
        outputln("App title: "+soup.findAll("a:title")[0].string)
        outputln("");
    except:
        print "Failed to get App Title"
    # Marketplace languages to query for reviews.
    langs = ["en-us", "en-gb", "de-de",
             "fr-fr", "es-es", "it-it",
             "en-au", "de-at", "fr-be",
             "fr-ca", "en-ca", "en-hk",
             "en-in", "en-ie", "es-mx",
             "en-nz", "en-sg", "de-ch",
             "fr-ch", "zh-hk", "zh-cn",
             "en-hk"]
    outputln("Here we got reviews from each marketplace")
    for lang in langs:
        # One /reviews request per marketplace.
        request = urllib2.Request(geturl(lang)+"/reviews")
        fd = urllib2.urlopen(request)
        print "Fetching "+lang+"...",
        content = fd.read()
        fd.close()
        print "OK"
        soup = BeautifulStoneSoup(content)
        #print soup.prettify()
        # Review bodies and their matching star ratings (parallel lists).
        contents = soup.findAll("a:content")
        ratings = soup.findAll("userrating")
        l = len(contents)
        if l > 0:
            outputln("----------- "+lang+" ---------------------------------------------------")
            outputln("")
            for i in range(0, l):
                output(ratings[i].string+"/10 - ")
                if len(contents[i].contents) > 0:
                    try:
                        outputln(contents[i].contents[0])
                    except:
                        # Non-encodable characters in the review body.
                        outputln("*** Unknown chars ***")
                else:
                    # A rating was left without any review text.
                    outputln("Rating only")
            outputln("")
except HTTPError, e:
    print("Error during request!\n")
    print("Cod.: ", e.code)
except URLError, e:
    print("Invalid URL!\n")
    print("Message: ", e.reason)
There already is a site that gives you this information. Take a look at http://wp7reviews.tomverhoeff.com/
There is also a free WP7 app called AppTracker which allows you to track reviews from different regions, as well as translate them into your native language