from elasticsearch import Elasticsearch
from elasticsearch import helpers

es_url = '*****.us-east-1.es.amazonaws.com'
# es_conn = Elasticsearch(es_url)
es_conn = Elasticsearch([{'host': es_url, 'port': 443, 'use_ssl': True}])

actions = []  # buffer of bulk actions (initialization implied in the original)
while 1:
    for ....:
        actions.append(....)
        if len(actions) >= 5000:
            # flush a full batch of 5000 actions
            helpers.bulk(es_conn, actions)
            actions = []
    # flush whatever is left in the buffer
    helpers.bulk(es_conn, actions)
    actions = []
The code above runs on an EC2 instance, and it often throws the following error:
helpers.bulk(es_conn, actions)
File "/usr/local/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py", line 194, in bulk
for ok, item in streaming_bulk(client, actions, **kwargs):
File "/usr/local/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py", line 162, in streaming_bulk
for result in _process_bulk_chunk(client, bulk_actions, raise_on_exception, raise_on_error, **kwargs):
File "/usr/local/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py", line 91, in _process_bulk_chunk
raise e
ConnectionTimeout: ConnectionTimeout caused by - ReadTimeoutError(HTTPSConnectionPool(host='search-shinezoneels-pc3ib5rkhuylqynfoz6rph7gh4.us-east-1.es.amazonaws.com', port=443): Read timed out.)
At the same time, I ran the same code on an EMR instance, where the error did not happen at all. Bulk speed on the EC2 instance is about twice that of the EMR instance, but it errors often. How can I fix this?
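One common mitigation (not shown in the post) is to raise the client's read timeout and enable retry-on-timeout in elasticsearch-py. A minimal sketch against the client above; the values are illustrative:

es_conn = Elasticsearch(
    [{'host': es_url, 'port': 443, 'use_ssl': True}],
    timeout=60,             # read timeout in seconds (the library default is 10)
    max_retries=3,
    retry_on_timeout=True,  # retry instead of raising ConnectionTimeout
)
# the timeout can also be raised per call:
helpers.bulk(es_conn, actions, request_timeout=60)

Reducing the batch size (say 1000 instead of 5000) also shortens each bulk request and makes read timeouts less likely.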
I have a simple Python app that invokes a Vertex AI API, but it fails when it runs and I can't understand why. The application is as follows:
from google.cloud import aiplatform_v1

def sample_list_datasets():
    client = aiplatform_v1.DatasetServiceClient()
    request = aiplatform_v1.ListDatasetsRequest(
        parent="projects/MYPROJECT/locations/us-central1",
    )
    page_result = client.list_datasets(request=request)
    for response in page_result:
        print(response)

sample_list_datasets()
When run, it fails with:
E0126 03:52:04.146970105 22462 hpack_parser.cc:1218] Error parsing metadata: error=invalid value key=content-type value=text/html; charset=UTF-8
Traceback (most recent call last):
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 72, in error_remapped_callable
return callable_(*args, **kwargs)
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/grpc/_channel.py", line 946, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/grpc/_channel.py", line 849, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.UNIMPLEMENTED
details = "Received http2 header with status: 404"
debug_error_string = "UNKNOWN:Error received from peer ipv4:108.177.120.95:443 {created_time:"2023-01-26T03:52:04.147076255+00:00", grpc_status:12, grpc_message:"Received http2 header with status: 404"}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "run.py", line 25, in <module>
sample_list_datasets()
File "run.py", line 19, in sample_list_datasets
page_result = client.list_datasets(request=request)
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/google/cloud/aiplatform_v1/services/dataset_service/client.py", line 1007, in list_datasets
metadata=metadata,
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 113, in __call__
return wrapped_func(*args, **kwargs)
File "/home/kolban/projects/vertex-ai/datasets/env/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 74, in error_remapped_callable
raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.MethodNotImplemented: 501 Received http2 header with status: 404
What might I be doing wrong?
Changing the code to the following caused it to work:
from google.cloud import aiplatform_v1
from google.api_core.client_options import ClientOptions

def sample_list_datasets():
    service_base_path = 'aiplatform.googleapis.com'
    region = 'us-central1'
    # point the client at the regional endpoint: us-central1-aiplatform.googleapis.com
    client_options = ClientOptions(api_endpoint=f"{region}-{service_base_path}")
    client = aiplatform_v1.DatasetServiceClient(client_options=client_options)
    request = aiplatform_v1.ListDatasetsRequest(
        parent="projects/MYPROJECT/locations/us-central1",
    )
    # Make the request
    page_result = client.list_datasets(request=request)
    # Handle the response
    for response in page_result:
        print(response)

sample_list_datasets()
The resolution was hinted at in the documentation for the API request. That article contains a code sample, and in the sample's comments the following is written:
It may require specifying regional endpoints when creating the service
client as shown in:
https://googleapis.dev/python/google-api-core/latest/client_options.html
And this was the core clue. When we make Vertex AI calls we must specify where the request is to be sent. We do this by setting the api_endpoint option to an endpoint of the form [REGION]-aiplatform.googleapis.com.
I'm having very inconsistent results when trying to use boto3 DynamoDB resources from my local machine vs. from within a Lambda function in LocalStack. I have the following simple Lambda handler that just queries a table based on the hash key:
import os  # needed for os.environ below

import boto3
from boto3.dynamodb.conditions import Key

def handler(event, context):
    dynamodb = boto3.resource(
        "dynamodb", endpoint_url=os.environ["AWS_EP"]
    )
    table = dynamodb.Table("precalculated_scores")
    items = table.query(
        KeyConditionExpression=Key("customer_id").eq(event["customer_id"])
    )
    return items
The environment variable "AWS_EP" is set to my LocalStack DNS when prototyping (http://localstack:4566).
When I call this Lambda I get the following error:
{
"errorMessage": "Parameter validation failed:\nInvalid type for parameter KeyConditionExpression, value: <boto3.dynamodb.conditions.Equals object at 0x7f7440201960>, type: <class 'boto3.dynamodb.conditions.Equals'>, valid types: <class 'str'>",
"errorType": "ParamValidationError",
"stackTrace": [
" File \"/opt/code/localstack/localstack/services/awslambda/lambda_executors.py\", line 1423, in do_execute\n execute_result = lambda_function_callable(inv_context.event, context)\n",
" File \"/opt/code/localstack/localstack/services/awslambda/lambda_api.py\", line 782, in exec_local_python\n return inner_handler(event, context)\n",
" File \"/var/lib/localstack/tmp/lambda_script_l_dbef16b3.py\", line 29, in handler\n items = table.query(\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/boto3/resources/factory.py\", line 580, in do_action\n response = action(self, *args, **kwargs)\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/boto3/resources/action.py\", line 88, in __call__\n response = getattr(parent.meta.client, operation_name)(*args, **params)\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/client.py\", line 514, in _api_call\n return self._make_api_call(operation_name, kwargs)\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/client.py\", line 901, in _make_api_call\n request_dict = self._convert_to_request_dict(\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/client.py\", line 962, in _convert_to_request_dict\n request_dict = self._serializer.serialize_to_request(\n",
" File \"/opt/code/localstack/.venv/lib/python3.10/site-packages/botocore/validate.py\", line 381, in serialize_to_request\n raise ParamValidationError(report=report.generate_report())\n"
]
}
This is a weird error: from what I researched in other questions, it usually happens when using the boto3 client, but I am using boto3 resources. Furthermore, when I run the code locally on my machine, it runs fine.
At first I thought it might be due to different boto3 versions (my local machine uses 1.24.96, while the version inside the Lambda runtime is 1.16.31). However, I downgraded my local version to match the one in the runtime, and I keep getting the same results.
After some answers on this question I managed to get the code working against actual AWS services, but it still won't work when running against localstack.
Am I doing anything wrong? Or might this be a bug with LocalStack?
--- Update 1 ---
Changing the return didn't solve the problem:
return {"statusCode": 200, "body": json.dumps(items)}
--- Update 2 ---
The code works when running against actual AWS services instead of running against localstack. Updating the question with this information.
This works fine from both my local machine and Lambda:
import json, boto3
from boto3.dynamodb.conditions import Key

def lambda_handler(event, context):
    dynamodb = boto3.resource(
        "dynamodb",
        endpoint_url="https://dynamodb.eu-west-1.amazonaws.com"
    )
    table = dynamodb.Table("test")
    items = table.query(
        KeyConditionExpression=Key("pk").eq("1")
    )
    print(items)
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Lambda!')
    }
Also be sure that event["customer_id"] is in fact a string value as expected by the eq function.
I would check to ensure you have the endpoint set up correctly and that you have the current version deployed.
It may also be that you are trying to return the raw results of your API call from the handler instead of a proper JSON response, as expected:
return {
    'statusCode': 200,
    'body': json.dumps(items)
}
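If the older botocore bundled into the LocalStack Lambda runtime really does insist on a string KeyConditionExpression, a possible workaround (a sketch, untested against LocalStack) is to pass the condition as an expression string with placeholder values instead of a Key object:

import os
import boto3

def handler(event, context):
    dynamodb = boto3.resource("dynamodb", endpoint_url=os.environ["AWS_EP"])
    table = dynamodb.Table("precalculated_scores")
    # a plain expression string instead of Key("customer_id").eq(...)
    items = table.query(
        KeyConditionExpression="customer_id = :cid",
        ExpressionAttributeValues={":cid": event["customer_id"]},
    )
    return items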
I very often get an error when trying to stop or start EC2 instances through AWS Lambda. It's quite strange, because sometimes it works (for both starting and stopping instances).
The error I get is shown below. When I run a test in the Lambda console, it executes successfully most of the time. But when it is triggered through AWS CloudWatch Event Rules, the function very often fails.
This is the error; line 48 is in my code:
[ERROR] ConnectTimeoutError: Connect timeout on endpoint URL: "https://ec2.ap-southeast-2.amazonaws.com/"
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 48, in lambda_handler
if stop_ec2_instances():
File "/var/task/lambda_function.py", line 155, in stop_ec2_instances
ec2_client.stop_instances(InstanceIds=ec2_instances)
File "/var/task/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/task/botocore/client.py", line 621, in _make_api_call
http, parsed_response = self._make_request(
File "/var/task/botocore/client.py", line 641, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/var/task/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/var/task/botocore/endpoint.py", line 136, in _send_request
while self._needs_retry(attempts, operation_model, request_dict,
File "/var/task/botocore/endpoint.py", line 253, in _needs_retry
responses = self._event_emitter.emit(
File "/var/task/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/var/task/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/var/task/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/var/task/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/var/task/botocore/retryhandler.py", line 250, in __call__
should_retry = self._should_retry(attempt_number, response,
File "/var/task/botocore/retryhandler.py", line 277, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/var/task/botocore/retryhandler.py", line 316, in __call__
checker_response = checker(attempt_number, response,
File "/var/task/botocore/retryhandler.py", line 222, in __call__
return self._check_caught_exception(
File "/var/task/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/var/task/botocore/endpoint.py", line 200, in _do_get_response
http_response = self._send(request)
File "/var/task/botocore/endpoint.py", line 269, in _send
return self.http_session.send(request)
File "/var/task/botocore/httpsession.py", line 287, in send
raise ConnectTimeoutError(endpoint_url=request.url, error=e)
This is my code for starting and stopping the instances. I even already moved the instantiation of ec2_res and ec2_client inside the functions, but it did not help:
import boto3
from botocore import exceptions as bex  # for bex.ClientError below

# ec2_instances and logger are defined elsewhere in the module

def start_ec2_instances():
    try:
        ec2_res = boto3.resource('ec2', region_name="ap-southeast-2")
        ec2_client = boto3.client('ec2', region_name="ap-southeast-2")
        ec2_client.start_instances(InstanceIds=ec2_instances)
        for ec2_id in ec2_instances:
            instance = ec2_res.Instance(id=ec2_id)
            logger.info("Waiting instance " + ec2_id + " to start")
            instance.wait_until_running()
        return True
    except bex.ClientError as err:
        logger.error(err.response['Error']['Message'])
        return False

def stop_ec2_instances():
    try:
        ec2_res = boto3.resource('ec2', region_name="ap-southeast-2")
        ec2_client = boto3.client('ec2', region_name="ap-southeast-2")
        ec2_client.stop_instances(InstanceIds=ec2_instances)
        for ec2_id in ec2_instances:
            instance = ec2_res.Instance(id=ec2_id)
            logger.info("Waiting instance " + ec2_id + " to stop")
            instance.wait_until_stopped()
        return True
    except bex.ClientError as err:
        logger.error(err.response['Error']['Message'])
        return False
Has any one of you ever faced the same issue?
Thanks
Edit: I set the function timeout to 8 minutes. Under normal conditions, the time required to execute the function is less than 5 minutes.
Additional note:
Sometimes I work over a VPN whose endpoint is in a different region from where I live. The instances (and other components) are also deployed in that VPN's region (ap-southeast-2).
Your code to start and stop the instances looks right to me. The timeout is happening because your operation does not complete within the configured timeout for your Lambda function.
You can measure the time taken by your function by simply subtracting its start time from its end time.
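For example, a minimal sketch (reusing stop_ec2_instances from the question):

import time

def lambda_handler(event, context):
    start = time.time()
    ok = stop_ec2_instances()
    print("stop_ec2_instances took %.1f seconds" % (time.time() - start))
    # context.get_remaining_time_in_millis() shows how close you are to the configured timeout
    return ok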
The default timeout is 3 seconds, so you should consider increasing the timeout interval for your Lambda function, say to 5 minutes.
Please note that the maximum value for this timeout is 900 seconds (15 minutes), and you cannot configure a value higher than that. I am sure the above code would complete within that limit, so it should not be a problem for you.
How do I increase the timeout interval for my Lambda function?
There are multiple ways to do this: the AWS CLI, the AWS Console, or probably some other way.
In the AWS Console, open your function's configuration, change the timeout value, and click the Save button after making this change.
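With the AWS CLI, for example (the function name is a placeholder):

aws lambda update-function-configuration \
    --function-name my-ec2-scheduler \
    --timeout 300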
Hope this helps.
I am new to multiprocessing in Python. I am extracting some features from a list of 70,000 URLs, which come from two different files. After the feature extraction process I pass the results to a list and then to a CSV file.
The code runs but then stops with an error. I tried to catch the error, but it produced another one.
Python version = 3.5
from feature_extractor import Feature_extraction
import pandas as pd
from pandas.core.frame import DataFrame
import sys
from multiprocessing.dummy import Pool as ThreadPool
import threading as thread
from multiprocessing import Process, Manager, Array
import time

class main():
    lst = None

    def __init__(self):
        manager = Manager()
        self.lst = manager.list()
        self.dostuff()
        self.read_lst()

    def feature_extraction(self, url):
        if self.lst is None:
            self.lst = []
        features = Feature_extraction(url)
        self.lst.append(features.get_features())
        print(len(self.lst))

    def Pool(self, url):
        pool = ThreadPool(8)
        results = pool.map(self.feature_extraction, url)

    def dostuff(self):
        df = pd.read_csv('verified_online.csv', encoding='latin-1')
        df['label'] = df['phish_id'] * 0
        mal_urls = df['url']
        df2 = pd.read_csv('new.csv')
        df2['label'] = df['phish_id'] / df['phish_id']
        ben_urls = df2['urls']
        t = Process(target=self.Pool, args=(mal_urls,))
        t2 = Process(target=self.Pool, args=(ben_urls,))
        t.start()
        t2.start()
        t.join()
        t2.join  # note: missing parentheses, so this never actually waits for t2

    def read_lst(self):
        nw_df = DataFrame(list(self.lst))
        nw_df.columns = ['Redirect count', 'ssl_classification', 'url_length', 'hostname_length', 'subdomain_count', 'at_sign_in_url', 'exe_extension_in_request_url', 'exe_extension_in_landing_url',
                         'ip_as_domain_name', 'no_of_slashes_in requst_url', 'no_of_slashes_in_landing_url', 'no_of_dots_in_request_url', 'no_of_dots_in_landing_url', 'tld_value', 'age_of_domain',
                         'age_of_last_modified', 'content_length', 'same_landing_and_request_ip', 'same_landing_and_request_url']
        frames = [df['label'], df2['label']]  # note: df and df2 are locals of dostuff(), not visible here
        new_df = pd.concat(frames)
        new_df = new_df.reset_index()
        nw_df['label'] = new_df['label']
        nw_df.to_csv('dataset.csv', sep=',', encoding='latin-1')

if __name__ == '__main__':
    start_time = time.clock()
    try:
        main()
    except BrokenPipeError:
        print("broken pipe....")
        pass
    print(time.clock() - start_time, "seconds")
Error Traceback
Process Process-3:
Traceback (most recent call last):
File "F:\Continuum\Anaconda3\lib\multiprocessing\connection.py", line 312, in _recv_bytes
nread, err = ov.GetOverlappedResult(True)
BrokenPipeError: [WinError 109] The pipe has been ended
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "F:\Continuum\Anaconda3\lib\multiprocessing\process.py", line 249, in _bootstrap
self.run()
File "F:\Continuum\Anaconda3\lib\multiprocessing\process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "H:\Projects\newoproject\src\main.py", line 33, in Pool
results = pool.map(self.feature_extraction, url)
File "F:\Continuum\Anaconda3\lib\multiprocessing\pool.py", line 260, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "F:\Continuum\Anaconda3\lib\multiprocessing\pool.py", line 608, in get
raise self._value
File "F:\Continuum\Anaconda3\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "F:\Continuum\Anaconda3\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "H:\Projects\newoproject\src\main.py", line 26, in feature_extraction
self.lst.append(features.get_features())
File "<string>", line 2, in append
File "F:\Continuum\Anaconda3\lib\multiprocessing\managers.py", line 717, in _callmethod
kind, result = conn.recv()
File "F:\Continuum\Anaconda3\lib\multiprocessing\connection.py", line 250, in recv
buf = self._recv_bytes()
File "F:\Continuum\Anaconda3\lib\multiprocessing\connection.py", line 321, in _recv_bytes
raise EOFError
EOFError
My response is late and does not address the posted problem directly, but hopefully it will provide a clue to others who encounter similar errors.
Errors that I encountered:
BrokenPipeError
WinError 109 The pipe has been ended &
WinError 232 The pipe is being closed
Observed with Python 3.6 on Windows 7, when:
(1) the same async function was submitted multiple times, each time with a different instance of a multiprocessing data store, a Queue in my case (multiprocessing.Manager().Queue())
AND
(2) the references to the Queues were saved in short-life local variables in the enveloping function.
The errors were occurring despite the fact that the Queues, shared with the successfully spawned and executing async functions, had items and would still be in active use (put() & get()) at the time of the exception.
The error consistently occurred when the same async_func was called the second time with a second instance of the Queue: immediately after apply_async() of the function, the connection to the first Queue, supplied to the async_func the first time, would get broken.
The issue was resolved when the references to the Queues were saved in non-overlapping, longer-lived variables in the enveloping function (e.g., a list of Queues, or variables returned to functions higher in the call stack).
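A minimal sketch of the pattern described above (names are illustrative): keep each Manager Queue in a long-lived list instead of rebinding a short-lived local between apply_async() calls.

from multiprocessing import Manager, Pool

def worker(q):
    q.put("done")
    return q.get()

if __name__ == '__main__':
    manager = Manager()
    queues = []  # long-lived container: keeps every Queue proxy alive
    with Pool(2) as pool:
        results = []
        for _ in range(2):
            q = manager.Queue()
            queues.append(q)  # saving the reference here is the point
            results.append(pool.apply_async(worker, (q,)))
        for r in results:
            print(r.get())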
I'm using django-haystack with Elasticsearch, but there is a problem with indexing. When rebuilding my index with python manage.py rebuild_index, the following error is raised:
Traceback (most recent call last):
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 210, in handle_label
self.update_backend(label, using)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 256, in update_backend
do_update(backend, index, qs, start, end, total, self.verbosity)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/management/commands/update_index.py", line 78, in do_update
backend.update(index, current_qs)
File "/home/palo/.virtualenvs/toro/local/lib/python2.7/site-packages/haystack/backends/elasticsearch_backend.py", line 177, in update
self.conn.bulk_index(self.index_name, 'modelresult', prepped_docs, id_field=ID)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 95, in decorate
return func(*args, query_params=query_params, **kwargs)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 366, in bulk_index
query_params=query_params)
File "/home/palo/.virtualenvs/toro/src/pyelasticsearch/pyelasticsearch/client.py", line 221, in send_request
**({'data': request_body} if body else {}))
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 387, in post
return self.request('POST', url, data=data, **kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 345, in request
resp = self.send(prep, **send_kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/sessions.py", line 448, in send
r = adapter.send(request, **kwargs)
File "/home/palo/.virtualenvs/toro/src/requests/requests/adapters.py", line 324, in send
raise Timeout(e)
Timeout: HTTPConnectionPool(host='127.0.0.1', port=9200): Request timed out. (timeout=10)
I used django-haystack 2.0.0-beta, pyelasticsearch 0.5, elasticsearch 0.20.6, and Java version "1.6.0_24".
Haystack Settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'haystack',
},
}
And I'm sure my Elasticsearch service is running.
This does not necessarily mean that your ES server is down, especially if you get something reasonable returned by curl -I "127.0.0.1:9200". More likely, this is an issue of your request simply not getting enough time, given the speed of the connections involved.
Interestingly, the default timeout set in pyelasticsearch is 60 seconds (see def __init__(self, urls, timeout=60, max_retries=0, revival_delay=300): in https://github.com/rhec/pyelasticsearch/blob/master/pyelasticsearch/client.py). However, haystack overrides that with its own default of 10 seconds, as per self.timeout = connection_options.get('TIMEOUT', 10) in https://github.com/toastdriven/django-haystack/blob/master/haystack/backends/__init__.py.
As you can see, though, haystack lets you easily modify this setting by adding 'TIMEOUT': 60, to your engine configuration.
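Applied to the settings from the question, that would look like:

HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'haystack',
        'TIMEOUT': 60,  # seconds; haystack's default is 10
    },
}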
And solved :)
I too had a similar problem.
sudo service elasticsearch restart
Then it worked.
Are you running
bin/elasticsearch -f
I think you are not running the search engine.