I'm trying to connect remotely to a MongoDB server from PyCharm (over RDP).
This is the function that I run:
cluster = MongoClient("mongodb://admin:Passw0rd!@147.234.32.246:27017/NEG")
db = cluster["NEG"]
for word in Setting.dictionary_global.keys():
    if word in db.list_collection_names():
        collection = db[word]
        for file in Setting.dictionary_global[word].keys():
            if collection.find({"url": Setting.dictionary_global[word][file].url}):
                continue
            num_of_appearance = len(Setting.dictionary_global[word][file].indexes.get(word))
            post = {"url": file, "title": Setting.dictionary_global[word][file].title,
                    "description": Setting.dictionary_global[word][file].description,
                    "word in page": Setting.dictionary_global[word][file].indexes,
                    "appearance": num_of_appearance,
                    "date modified": Setting.dictionary_global[word][file].time}
            collection.insert_one(post)
    else:
        collection = db.create_collection(word)
        for file in Setting.dictionary_global[word].keys():
            # print(Setting.dictionary_global)
            num_of_appearance = len(Setting.dictionary_global[word][file].indexes.get(word))
            post = {"url": file, "title": Setting.dictionary_global[word][file].title,
                    "description": Setting.dictionary_global[word][file].description,
                    "word in page": Setting.dictionary_global[word][file].indexes,
                    "appearance": num_of_appearance,
                    "date modified": Setting.dictionary_global[word][file].time}
            collection.insert_one(post)
and I get this error:
'''
Traceback (most recent call last):
File "C:/Users/edend/PycharmProjects/pythonProject11/main.py", line 118, in
crawler.start()
File "C:/Users/edend/PycharmProjects/pythonProject11/main.py", line 110, in start
insertDB()
File "C:\Users\edend\PycharmProjects\pythonProject11\DB.py", line 10, in insertDB
if word in db.list_collection_names():
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\database.py", line 863, in list_collection_names
for result in self.list_collections(session=session, **kwargs)]
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\database.py", line 825, in list_collections
return self.__client._retryable_read(
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\mongo_client.py", line 1460, in _retryable_read
server = self._select_server(
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\mongo_client.py", line 1278, in _select_server
server = topology.select_server(server_selector)
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\topology.py", line 241, in select_server
return random.choice(self.select_servers(selector,
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\topology.py", line 199, in select_servers
server_descriptions = self._select_servers_loop(
File "C:\Users\edend\PycharmProjects\pythonProject11\venv\lib\site-packages\pymongo\topology.py", line 215, in _select_servers_loop
raise ServerSelectionTimeoutError(
pymongo.errors.ServerSelectionTimeoutError: 147.234.32.246:27017: timed out, Timeout: 30s, Topology Description: <TopologyDescription id: 5ff3d15a2dcaa1e4fb3db4cd, topology_type: Single, servers: [<ServerDescription ('147.234.32.246', 27017) server_type: Unknown, rtt: None, error=NetworkTimeout('147.234.32.246:27017: timed out')>]>
'''
Please help me, I'm stuck and I've tried everything.
Thank you in advance!
Common causes:
MongoDB server is not running
MongoDB server is running on a different port
No connectivity between the client and the server (can you ping it?)
mongod.conf is configured to only allow local connections by default (have you set bind_ip_all, or added a non-localhost address to bindIp?)
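A quick way to rule the application code out is to ping the server directly with a short server-selection timeout. This is a minimal sketch using the address and credentials from the question; adjust them to your setup:

from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError

# Fail after 5 seconds instead of the default 30 so connectivity problems surface quickly.
client = MongoClient("mongodb://admin:Passw0rd!@147.234.32.246:27017/NEG",
                     serverSelectionTimeoutMS=5000)
try:
    client.admin.command("ping")
    print("MongoDB is reachable")
except ServerSelectionTimeoutError as exc:
    print("Cannot reach MongoDB:", exc)

If this also times out, the problem is at the network level (a firewall blocking port 27017, or mongod bound only to 127.0.0.1), not in PyMongo or your insert code.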
Related
I'm running an Ansible playbook from a Debian machine that connects to 3 hosts to deploy some features.
Since upgrading Ansible from 2.8 to 2.9.2, I've repeatedly started to get errors from Python libraries such as:
res = self.send_message(xmltodict.unparse(req))
File "/usr/local/lib/python3.7/dist-packages/winrm/protocol.py", line 243, in send_message
resp = self.transport.send_message(message)
File "/usr/local/lib/python3.7/dist-packages/winrm/transport.py", line 323, in send_message
response = self._send_message_request(prepared_request, message)
File "/usr/local/lib/python3.7/dist-packages/winrm/transport.py", line 328, in _send_message_request
response = self.session.send(prepared_request, timeout=self.read_timeout_sec)
File "/usr/local/lib/python3.7/dist-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/usr/local/lib/python3.7/dist-packages/urllib3/connectionpool.py", line 672, in urlopen
chunked=chunked,
File "/usr/local/lib/python3.7/dist-packages/urllib3/connectionpool.py", line 447, in _make_request
exc_info=True,
Message: 'Failed to parse headers (url=%s): %s'
Arguments: ('http://{{MY.DEST.IP}}:5985/wsman', HeaderParsingError("[StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''"))
MY.DEST.IP = my public IP
Port 5985 is open on the destination and I could connect with telnet.
Is there any known issue with the newest Ansible version?
Thanks, any help would be appreciated.
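One way to narrow this down is to hit the WinRM endpoint directly with pywinrm (which the traceback shows Ansible is using underneath), outside of Ansible. A rough sketch with placeholder credentials; the transport would need to match how WinRM is configured on the Windows host:

import winrm

# Placeholder address/credentials; transport might be 'ntlm', 'basic' or 'credssp'
# depending on the WinRM configuration of the destination host.
session = winrm.Session('http://MY.DEST.IP:5985/wsman',
                        auth=('Administrator', 'password'),
                        transport='ntlm')
result = session.run_cmd('ipconfig', ['/all'])
print(result.status_code)
print(result.std_out.decode(errors='replace'))

If this works cleanly, basic connectivity is fine and the problem more likely lies in the library versions (urllib3/requests/pywinrm) that were pulled in alongside Ansible 2.9.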
I get "Lost connection to MySQL server during query". How can I fix this? Ideally I'd like to handle it in my program.
import pymysql

connection = pymysql.connect(host='***',
                             user='***',
                             password='***',
                             db='***',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
    sql = "SELECT MAX(group_id) FROM topic_duplicate_check"
    cursor.execute(sql)  # Exception
    r = cursor.fetchone()
max_gid = None
try:
    max_gid = r['MAX(group_id)']
except:
    pass
print(max_gid)
C:\ProgramData\Anaconda3\python.exe F:/group_topics/main.py
Traceback (most recent call last):
File "F:/group_topics/main.py", line 41, in
cursor.execute(sql)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\cursors.py", line 166, in execute
result = self._query(query)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\cursors.py", line 322, in _query
conn.query(q)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\connections.py", line 856, in query
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\connections.py", line 1057, in _read_query_result
result.read()
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\connections.py", line 1340, in read
first_packet = self.connection._read_packet()
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\connections.py", line 987, in _read_packet
packet_header = self._read_bytes(4)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymysql\connections.py", line 1033, in _read_bytes
CR.CR_SERVER_LOST, "Lost connection to MySQL server during query")
pymysql.err.OperationalError: (2013, 'Lost connection to MySQL server during query')
Process finished with exit code 1
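One way to make the program tolerate a dropped connection is to ping (and transparently reconnect) before the query and catch OperationalError around the call. This is a rough sketch built on the code from the question, with placeholder credentials:

import pymysql
from pymysql.err import OperationalError

connection = pymysql.connect(host='***', user='***', password='***', db='***',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor,
                             connect_timeout=10)

def fetch_max_gid():
    # Re-establish the connection if the server has dropped it since the last query.
    connection.ping(reconnect=True)
    with connection.cursor() as cursor:
        cursor.execute("SELECT MAX(group_id) AS max_gid FROM topic_duplicate_check")
        row = cursor.fetchone()
    return row['max_gid'] if row else None

try:
    print(fetch_max_gid())
except OperationalError as exc:
    # Error 2013 usually points at server-side limits (wait_timeout,
    # max_allowed_packet) or the network dropping long-running queries.
    print("Query failed:", exc)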
I get an error when I perform an AJAX request. Without the authentication line user = authenticate(username=user_username, password=user_password) in views.py, the success function is called. If I add it, the error function is called with Errno 10053.
I am using MySQL in WAMP. Why is this happening?
views.py
class LoginVerify(View):
    print('login')

    def post(self, request, *args, **kwargs):
        if request.is_ajax():
            print("post called")
            user_email = request.POST.get('email', False)
            user_password = request.POST.get('pswd', False)
            print(user_email)
            try:
                user_username = User.objects.get(email=user_email).username
                user = authenticate(username=user_username, password=user_password)
            except:
                print("error occured")
        return HttpResponse("response form server")

    def get(self, request, *args, **kwargs):
        print("get method")
        return render(request, 'feeds/feeds_home.html')
AJAX request:
$(document).ready(function(){
    $("#submit").on("click", function(){
        var $email = $("#signin-email").val();
        var $pswd = $("#signin-password").val();
        alert($pswd);
        $.ajax({
            url: '{% url "feeds:login_view" %}',
            type: "POST",
            data: {csrfmiddlewaretoken: "{{ csrf_token }}", pswd: $pswd, email: $email},
            success: function(data){
                location.reload();
            },
            error: function(){
                alert("fails");
            }
        });
    });
});
Traceback:
post called
vivek.ananthan.m.s#gmail.com
[19/Apr/2015 11:10:22] "POST / HTTP/1.1" 200 12
Traceback (most recent call last):
File "C:\Python27\lib\wsgiref\handlers.py", line 86, in run
self.finish_response()
File "C:\Python27\lib\wsgiref\handlers.py", line 127, in finish_response
self.write(data)
File "C:\Python27\lib\wsgiref\handlers.py", line 210, in write
self.send_headers()
File "C:\Python27\lib\wsgiref\handlers.py", line 268, in send_headers
self.send_preamble()
File "C:\Python27\lib\wsgiref\handlers.py", line 192, in send_preamble
'Date: %s\r\n' % format_date_time(time.time())
File "C:\Python27\lib\socket.py", line 324, in write
self.flush()
File "C:\Python27\lib\socket.py", line 303, in flush
self._sock.sendall(view[write_offset:write_offset+buffer_size])
error: [Errno 10053] An established connection was aborted by the software in your host machine
Traceback (most recent call last):
File "C:\Python27\lib\SocketServer.py", line 582, in process_request_thread
self.finish_request(request, client_address)
File "C:\Python27\lib\SocketServer.py", line 323, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "C:\Python27\lib\site-packages\django-1.7-py2.7.egg\django\core\servers\basehttp.py", line 129, in __init__
super(WSGIRequestHandler, self).__init__(*args, **kwargs)
File "C:\Python27\lib\SocketServer.py", line 640, in __init__
self.finish()
File "C:\Python27\lib\SocketServer.py", line 693, in finish
self.wfile.flush()
File "C:\Python27\lib\socket.py", line 303, in flush
self._sock.sendall(view[write_offset:write_offset+buffer_size])
error: [Errno 10053] An established connection was aborted by the software in your host machine
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 52490)
Please explain where I made the mistake and why it is happening.
Thanks in advance!
Reference: https://stackoverflow.com/a/17854758/3940406
From the Windows Sockets Error Codes list:
WSAECONNABORTED 10053 Software caused connection abort. An established
connection was aborted by the software in your host computer, possibly
due to a data transmission time-out or protocol error.
There was a timeout or other network-level error. This is your operating system closing the socket, nothing to do with Python, django or Flask, really.
It could be the remote browser stopped responding, the network connection died, or a firewall closed the connection because it was open too long, or any other number of reasons.
I came across this question while researching a problem with running MySQL code using PyMySQL as the Python SQL client, and I happened to have this same issue. Renaming the MySQL config setting and then restarting your system (or the MySQL server) works to fix this issue.
I'm using django-haystack with Elasticsearch, but there is a problem with indexing. When rebuilding my index with python manage.py rebuild_index, the following error is raised:
Traceback (most recent call last):
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 210, in handle_label
self.update_backend(label, using)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 256, in update_backend
do_update(backend, index, qs, start, end, total, self.verbosity)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 78, in do_update
backend.update(index, current_qs)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/backends/elasticsearch_backend.py", line 177, in update
self.conn.bulk_index(self.index_name, 'modelresult', prepped_docs, id_field=ID)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 95, in decorate
return func(*args, query_params=query_params, **kwargs)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 366, in bulk_index
query_params=query_params)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 221, in send_request
**({'data': request_body} if body else {}))
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 387, in post
return self.request('POST', url, data=data, **kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 345, in request
resp = self.send(prep, **send_kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 448, in send
r = adapter.send(request, **kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/adapters.py", line 324, in send
raise Timeout(e)
Timeout: HTTPConnectionPool(host='127.0.0.1', port=9200): Request timed out. (timeout=10)
I'm using django-haystack 2.0.0-beta, pyelasticsearch 0.5, Elasticsearch 0.20.6, and Java 1.6.0_24.
Haystack Settings
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'haystack',
    },
}
And I'm sure my Elasticsearch service is running.
This does not necessarily mean that your es server is down, especially if you get something reasonable returned with curl -I "127.0.0.1:9200". More likely, this is an issue of your request simply not getting enough time given the speed of connections involved.
Interestingly, the default timeout set in pyelasticsearch is 60 seconds, see def __init__(self, urls, timeout=60, max_retries=0, revival_delay=300): in https://github.com/rhec/pyelasticsearch/blob/master/pyelasticsearch/client.py. However, haystack overwrites that with its default setting, which is 10 seconds, as per self.timeout = connection_options.get('TIMEOUT', 10) in https://github.com/toastdriven/django-haystack/blob/master/haystack/backends/__init__.py.
As you can see though, haystack allows you to easily modify your setting, by adding 'TIMEOUT': 60, to your engine configuration.
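Applied to the settings from the question, that would look like this (60 seconds chosen to match pyelasticsearch's own default; tune it to your needs):

HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'haystack',
        'TIMEOUT': 60,  # seconds; haystack's default is 10
    },
}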
And solved :)
I too had a similar problem.
sudo service elasticsearch restart
and then it worked.
Are you running bin/elasticsearch -f?
I think you are not running the search engine.
I am trying to set up a Pyramid app using mod_wsgi and Apache2.
I keep getting an Internal Server Error, and the contents of the Apache log are:
mod_wsgi (pid=11200): Exception occurred processing WSGI script '/home/ubuntu/modwsgi/env/pyramid.wsgi'.
Traceback (most recent call last):
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/pyramid-1.3-py2.6.egg/pyramid/router.py", line 191, in __call__
request._process_response_callbacks(response)
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/pyramid-1.3-py2.6.egg/pyramid/request.py", line 243, in _process_response_callbacks
callback(self, response)
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/pyramid_beaker-0.6.1-py2.6.egg/pyramid_beaker/__init__.py", line 26, in session_callback
self.persist()
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/Beaker-1.6.3-py2.6.egg/beaker/session.py", line 706, in persist
self._session().save()
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/Beaker-1.6.3-py2.6.egg/beaker/session.py", line 400, in save
**self.namespace_args)
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/Beaker-1.6.3-py2.6.egg/beaker/container.py", line 622, in __init__
util.verify_directory(self.file_dir)
File "/home/ubuntu/modwsgi/env/lib/python2.6/site-packages/Beaker-1.6.3-py2.6.egg/beaker/util.py", line 85, in verify_directory
os.makedirs(dir)
File "/usr/lib/python2.6/os.py", line 150, in makedirs
makedirs(head, mode)
File "/usr/lib/python2.6/os.py", line 157, in makedirs
mkdir(name, mode)
OSError: [Errno 13] Permission denied: 'beaker_dir'
I can serve the page with plain pserve and run wget http://localhost:6543/user/form in the terminal; that works and the form gets downloaded. But through the browser I get the 500 error.
I am using EC2 to host the app.
In the [app:main] stanza I am using the following settings:
session.type = file
session.data_dir = beaker_dir
session.key = mvc
session.encrypt_key = mysecretencryptionkey
session.validate_key = mysecretvalidationkey
session.cookie_on_exception = true
Thanks a lot for reading
Supply an absolute path for:
session.data_dir = beaker_dir
The current working directory could be anything (usually '/'), so a relative path will point to the wrong place.
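For example, a path relative to the ini file itself can be written with the %(here)s substitution; the directory name below is just an illustration, and whichever directory you pick must be writable by the user Apache/mod_wsgi runs as:

session.data_dir = %(here)s/data/sessions
# or an explicit absolute path, e.g.
# session.data_dir = /home/ubuntu/modwsgi/env/beaker_data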