I have the method below that creates an Excel file, but how can I return it to the client as an Excel report?
def report_excel(self, results, report_name, header, indice, title_report):
    fileExcel = xlsxwriter.Workbook('C:\\Users\\Pc-Pc\\Desktop\\411\\' + report_name + '.xlsx')
    listUsersSheet = fileExcel.add_worksheet(report_name)
    column = 0
    row = 15
    for res in results:
        # index, ind, a and cell_format are defined elsewhere in the full method
        listUsersSheet.merge_range(index[ind][0] + str(row + 1) + ':' + index[ind][0] + str(row + a), res[index[ind][1]], cell_format)
    fileExcel.close()
How can the client download it as a report?
You can use a Web Controller for that:
from openerp.http import request
from openerp import http
from openerp.addons.web.controllers.main import serialize_exception, content_disposition

class Binary(http.Controller):
    @http.route('/web/binary/download_report', type='http', auth="public")
    @serialize_exception
    def download_xls_document(self, path, filename="My report.xlsx", **kw):
        with open(path, "rb") as xlsx_file:
            return request.make_response(
                xlsx_file.read(),
                [('Content-Type', 'application/octet-stream'),
                 ('Content-Disposition', content_disposition(filename))])
and the report_excel method should return:
return {
    'type': 'ir.actions.act_url',
    'url': '/web/binary/download_report?path=%s&filename=%s' % (path, "The report name.xlsx"),
    'target': 'blank',
}
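Putting the two pieces together, here is a minimal sketch of report_excel returning that action (a sketch, assuming Python 2 as the openerp imports suggest; the path and filename are URL-quoted with urllib because they travel in a query string):

import urllib
import xlsxwriter

def report_excel(self, results, report_name, header, indice, title_report):
    path = 'C:\\Users\\Pc-Pc\\Desktop\\411\\' + report_name + '.xlsx'
    fileExcel = xlsxwriter.Workbook(path)
    # ... build the worksheet as in the original method ...
    fileExcel.close()
    return {
        'type': 'ir.actions.act_url',
        'url': '/web/binary/download_report?path=%s&filename=%s' % (
            urllib.quote(path), urllib.quote(report_name + '.xlsx')),
        'target': 'blank',
    }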
I want to create a user / role / privilege in Elasticsearch using the API from Flask.
The documentation for creating a user provides an example, and it works fine in the Elastic Dev Tools console, but how can I convert it into a Python POST request?
My code:
from flask import Flask, request, jsonify, render_template
from elasticsearch import Elasticsearch

CLOUD_ID = "myfirstdeployment:XXX"
ELASTIC_PASS = 'XXX'
ELASTIC_USER = 'XXX'

client = Elasticsearch(cloud_id=CLOUD_ID, basic_auth=(ELASTIC_USER, ELASTIC_PASS))

app = Flask(__name__)

import requests
from requests.structures import CaseInsensitiveDict

@app.route('/get')
def getting():
    data = client.search(index="kibana_sample_data_ecommerce", body={"query": {"match_all": {}}})
    return f'{[x["_source"]["category"] for x in data["hits"]["hits"]]}'

es = Elasticsearch(hosts="https://localhost:9200", basic_auth=('elastic', 'zoU_Ec8JjbPnQNG4b8kY'), verify_certs=False)

@app.route('/local')
def local():
    return f'{es.info()}'

@app.route('/users')
def getAllUser():
    uri = 'https://localhost:9200/_security/user/'
    es = Elasticsearch(hosts=uri, basic_auth=('elastic', 'zoU_Ec8JjbPnQNG4b8kY'), ca_certs="872ee6c0879fc0cfe73054c3ba7afb5902dbb171a2c215af35a5faab1206b924", verify_certs=False)
    return f'{es.info()}'

@app.route('/users/<name>')
def getSingleUser(name):
    try:
        uri = f'https://localhost:9200/_security/user/{name}'
        es = Elasticsearch(hosts=uri, basic_auth=('elastic', 'zoU_Ec8JjbPnQNG4b8kY'), ca_certs="872ee6c0879fc0cfe73054c3ba7afb5902dbb171a2c215af35a5faab1206b924", verify_certs=False)
        return f'{es.info()}'
    except Exception:
        content = {'error': 'User Not Found'}
        return content, 404

@app.route('/create-new-user', methods=['GET', 'POST'])
def createUser():
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        fullname = request.form.get('fullname')
        role = request.form.getlist('role')
        body = {"password": password, "username": username, "email": email, "fullname": fullname, "role": role}
        try:
            uri = f'https://localhost:9200/_security/user/{username}'
            es = Elasticsearch(hosts=uri, basic_auth=('elastic', 'zoU_Ec8JjbPnQNG4b8kY'), ca_certs="872ee6c0879fc0cfe73054c3ba7afb5902dbb171a2c215af35a5faab1206b924", verify_certs=False)
            return f'{es.info()}'
        except Exception:
            content = {'error': 'something went wrong'}
            return content, 501
    return render_template('add_user.html')

if __name__ == "__main__":
    app.run(debug=True)
When I create a user from Stack Management > Security > Users > Create, a POST request is sent to _security/user/new_user_username with
post data = {password: password, username: username, email: email, role: [], fullname: fullname}
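For reference, that same call written as a direct Python POST request with requests would look roughly like this (a sketch, not part of the original post; verify=False only mirrors the self-signed-certificate setup above):

import requests

resp = requests.post(
    f'https://localhost:9200/_security/user/{username}',
    json={'password': password, 'email': email, 'full_name': fullname, 'roles': roles},
    auth=('elastic', 'zoU_Ec8JjbPnQNG4b8kY'),
    verify=False,  # the cluster above uses a self-signed certificate
)
resp.raise_for_status()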
First, thanks to Paulo.
Using the put_user() method we can easily create a user.
The username, password and email fields are mandatory when creating a user through the API.
from elasticsearch.client import SecurityClient

@app.route('/create-new-user', methods=['GET', 'POST'])
def createUser():
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        fullname = request.form.get('fullname')
        roles = request.form.getlist('role')
        # put_user() expects full_name, not fullname
        body = {"password": password, "username": username, "email": email, "full_name": fullname, "roles": roles}
        try:
            client = Elasticsearch(hosts='https://localhost:9200', basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD), ca_certs=CERTIFICATE, verify_certs=False)
            es = SecurityClient(client)
            es.put_user(**body)
            return {'message': 'User created'}, 201
        except Exception:
            return {'message': 'something went wrong'}, 501
    return render_template('add_user.html')
Remember to pass roles as a keyword argument to put_user.
Edit: if you are experimenting, you can also try perform_request.
Edit 2: a simpler and better solution:
body = {"password": password, "username": username, "email": email, "full_name": fullname, 'enabled': True, 'roles': roles}
uri = 'https://localhost:9200/'
client = Elasticsearch(hosts=uri, basic_auth=(ELASTIC_USER, ELASTIC_PASS), ca_certs=CERTIFICATE, verify_certs=False)
client.perform_request(body=body, method='POST', path=f'/_security/user/{username}', headers={'content-type': 'application/json', 'accept': 'application/json'})
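Recent elasticsearch-py also exposes the security API as a namespaced accessor, so the SecurityClient wrapper is not needed; a minimal sketch using the same hypothetical credential constants as above:

client = Elasticsearch(hosts='https://localhost:9200', basic_auth=(ELASTIC_USER, ELASTIC_PASS), ca_certs=CERTIFICATE, verify_certs=False)
client.security.put_user(username=username, password=password, email=email, full_name=fullname, roles=roles, enabled=True)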
Graphene sends the email, but the URL doesn't exist. How should I set up the token URL for this?
I can't find docs on how to configure urls.py so that the link it sends through the email works.
http://127.0.0.1:8090/activate/eyJ1c2VybmFtZSI6ImtkZWVuZXl6eiIsImFjdGlvbiI6ImFjdGl2YXRpb24ifQ:1m2a0v:04V3Ho0msVn7nHuFW469DC9GBYuUz2czfsFai09EOyM
settings.py
GRAPHQL_JWT = {
    'JWT_VERIFY_EXPIRATION': True,
    'JWT_LONG_RUNNING_REFRESH_TOKEN': True,
    'ALLOW_LOGIN_NOT_VERIFIED': True,
    'JWT_ALLOW_ARGUMENT': True,
    "JWT_ALLOW_ANY_CLASSES": [
        "graphql_auth.mutations.Register",
        "graphql_auth.mutations.VerifyAccount",
        "graphql_auth.mutations.ResendActivationEmail",
        "graphql_auth.mutations.SendPasswordResetEmail",
        "graphql_auth.mutations.PasswordReset",
        "graphql_auth.mutations.ObtainJSONWebToken",
        "graphql_auth.mutations.VerifyToken",
        "graphql_auth.mutations.RefreshToken",
        "graphql_auth.mutations.RevokeToken",
        "graphql_auth.mutations.VerifySecondaryEmail",
    ],
}
schema.py
class AuthMutation(graphene.ObjectType):
    register = mutations.Register.Field()
    verify_account = mutations.VerifyAccount.Field()
    token_auth = mutations.ObtainJSONWebToken.Field()
    update_account = mutations.UpdateAccount.Field()
    resend_activation_email = mutations.ResendActivationEmail.Field()
    send_password_reset_email = mutations.SendPasswordResetEmail.Field()
    password_reset = mutations.PasswordReset.Field()
    password_change = mutations.PasswordChange.Field()
You need to create a view to handle this URL.
from django.http import HttpResponseRedirect
from django.views import View
from graphql_auth.models import UserStatus

class ActivateView(View):
    def get(self, request, **kwargs):
        try:
            token = kwargs.get("token")
            UserStatus.verify(token)
        except Exception:
            return HttpResponseRedirect("/some/error/url")
        return HttpResponseRedirect("/activate/thankyou")
And in your urls.py:
urlpatterns = [
    ....
    path("activate/<str:token>", ActivateView.as_view()),
    ...
]
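To sanity-check the route, a quick sketch with Django's test client (the token placeholder is hypothetical; a real one comes from the activation email):

from django.test import Client

c = Client()
# paste a real token from the activation email in place of <token>
resp = c.get("/activate/<token>")
assert resp.status_code == 302  # redirects to the thank-you or error URL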
from datetime import datetime
from elasticsearch_dsl import Date, Document, Integer, Keyword, Text, connections

# a default connection is required before init()/save()
connections.create_connection(hosts=['localhost'])

class Article(Document):
    title = Text(analyzer='snowball', fields={'raw': Keyword()})
    body = Text(analyzer='snowball')
    tags = Keyword()
    published_from = Date()
    lines = Integer()

    class Index:
        name = 'blog45'
        settings = {
            "number_of_shards": 2,
        }

    def save(self, **kwargs):
        self.lines = len(self.body.split())
        return super(Article, self).save(**kwargs)

    def is_published(self):
        return datetime.now() >= self.published_from

# create the mappings in elasticsearch
Article.init()

# create and save an article
article = Article(meta={'id': 42}, title='Hello world!', tags=['test'])
article.body = ''' looong text '''
article.published_from = datetime.now()
article.save()  ### BOMBS HERE!!! ###
My save() always throws the error:
TypeError: index() missing 1 required positional argument: 'doc_type'
The example above was taken from the documentation but does not work correctly. How can I specify the doc_type?
In elasticsearch-py, it's:
res = elastic_client.index(index="bb8_index", body=doc, doc_type='_doc')
As of 2/1/2020:
I would avoid using the elasticsearch_dsl library as it isn't able to save/index a document to Elasticsearch. The library is not compatible with the latest Elasticsearch (7.5+).
Use the regular elasticsearch-py library instead.
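For example, indexing a document with plain elasticsearch-py looks like this (a minimal sketch; the host, index name and document are illustrative):

from elasticsearch import Elasticsearch

elastic_client = Elasticsearch(hosts=["http://localhost:9200"])

doc = {
    'title': 'Hello world!',
    'tags': ['test'],
}
res = elastic_client.index(index="bb8_index", body=doc, doc_type='_doc')
print(res['result'])  # 'created' or 'updated'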
I am looking for some help with adding one additional variable to the httpapi plugin.
I want to pass the variable ansible_domain from the inventory or a variable file.
I am able to pass the username and password, but additional variables are not accepted from an inventory file or vars file.
The endpoint requires one more parameter to log in for a few functions, and I am unable to proceed.
How can I fix this?
I am using Ansible 2.8.
options:
  domain:
    type: str
    description:
      - Specifies the api token path of the FTD device
    env:
      - name: ansible_domain
    vars:
      - name: ansible_domain
"""
import json

from ansible.module_utils.basic import to_text
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.connection import ConnectionError

BASE_HEADERS = {
    'Content-Type': 'application/json',
}

DOMAIN1 = 'domain'

class HttpApi(HttpApiBase):
    def login(self, username, password):
        if username and password:
            payload = {'user': username, 'password': password, 'domain': DOMAIN1}
            url = '/web_api/login'
            response, response_data = self.send_request(url, payload)
        else:
            raise AnsibleConnectionFailure('Username and password are required for login')
        try:
            self.connection._auth = {'X-chkp-sid': response_data['sid']}
            self.connection._session_uid = response_data['uid']
        except KeyError:
            raise ConnectionError(
                'Server returned response without token info during connection authentication: %s' % response)

    def logout(self):
        url = '/web_api/logout'
        response, dummy = self.send_request(url, None)

    def get_session_uid(self):
        return self.connection._session_uid

    def send_request(self, path, body_params):
        data = json.dumps(body_params) if body_params else '{}'
        try:
            self._display_request()
            response, response_data = self.connection.send(path, data, method='POST', headers=BASE_HEADERS)
            value = self._get_response_value(response_data)
            return response.getcode(), self._response_to_json(value)
        except AnsibleConnectionFailure as e:
            return 404, 'Object not found'
        except HTTPError as e:
            error = json.loads(e.read())
            return e.code, error

    def _display_request(self):
        self.connection.queue_message('vvvv', 'Web Services: %s %s' % ('POST', self.connection._url))

    def _get_response_value(self, response_data):
        return to_text(response_data.getvalue())

    def _response_to_json(self, response_text):
        try:
            return json.loads(response_text) if response_text else {}
        # JSONDecodeError only available on Python 3.5+
        except ValueError:
            raise ConnectionError('Invalid JSON response: %s' % response_text)
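One way to pick the option up (a sketch against the plugin above, not a tested drop-in): httpapi plugins inherit get_option() from AnsiblePlugin, and the vars/env entries in the DOCUMENTATION block map ansible_domain onto the domain option, so login() can read it instead of the hard-coded DOMAIN1:

class HttpApi(HttpApiBase):
    def login(self, username, password):
        if not (username and password):
            raise AnsibleConnectionFailure('Username and password are required for login')
        # get_option() resolves the value from the vars/env entries declared
        # in DOCUMENTATION, so ansible_domain from the inventory lands here
        domain = self.get_option('domain')
        payload = {'user': username, 'password': password}
        if domain:
            payload['domain'] = domain
        response, response_data = self.send_request('/web_api/login', payload)
        try:
            self.connection._auth = {'X-chkp-sid': response_data['sid']}
            self.connection._session_uid = response_data['uid']
        except KeyError:
            raise ConnectionError(
                'Server returned response without token info during connection authentication: %s' % response)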
I have a spider which fetches both the data and the images. I want to rename the images with the respective 'title' which I'm fetching.
Following is my code:
spider1.py
from imageToFileSystemCheck.items import ImagetofilesystemcheckItem
import scrapy

class TestSpider(scrapy.Spider):
    name = 'imagecheck'

    def start_requests(self):
        searchterms = ['keyword1', 'keyword2']
        for item in searchterms:
            yield scrapy.Request('http://www.example.com/s?=%s' % item, callback=self.parse, meta={'item': item})

    def parse(self, response):
        start_urls = []
        item = response.meta.get('item')
        for i in range(0, 2):
            link = str(response.css("div.tt a.chek::attr(href)")[i].extract())
            start_urls.append(link)
        for url in start_urls:
            print(url)
            yield scrapy.Request(url=url, callback=self.parse_info, meta={'item': item})

    def parse_info(self, response):
        url = response.url
        title = str(response.xpath('//*[@id="Title"]/text()').extract_first())
        img_url_1 = response.xpath("//img[@id='images']/@src").extract_first()
        scraped_info = {
            'url': url,
            'title': title,
            'image_urls': [img_url_1],
        }
        yield scraped_info
items.py
import scrapy

class ImagetofilesystemcheckItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
pipelines.py
class ImagetofilesystemcheckPipeline(object):
    def process_item(self, item, spider):
        return item
settings.py
BOT_NAME = 'imageToFileSystemCheck'
SPIDER_MODULES = ['imageToFileSystemCheck.spiders']
NEWSPIDER_MODULE = 'imageToFileSystemCheck.spiders'
ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
IMAGES_STORE = '/home/imageToFileSystemCheck/images/'
ROBOTSTXT_OBEY = True
Can you please help me with the required changes so that Scrapy saves the scraped images as 'title'.jpg, where title is scraped by the spider?
Create a spider like this:
class ShopeeSpider(scrapy.Spider):
    _TEMP_IMAGES_STORE = "/home/crawler/scrapers/images"
    custom_settings = {
        'ITEM_PIPELINES': {
            'coszi.pipelines.CustomImagePipeline': 400,
        },
        "IMAGES_STORE": _TEMP_IMAGES_STORE,
    }

    def parse(self, response):
        data = {}
        data['images'] = {"image_link_here": "image_name_here"}
Then your pipelines.py should look like this:
import os

import scrapy
from scrapy.pipelines.images import ImagesPipeline

class CustomImagePipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        if 'images' in item:
            # 'images_path' and 'img_name_here' are placeholder keys from the original answer
            for image_url, img_name in item['images'].items():
                if not os.path.exists(os.path.join(item['images_path'], img_name)):
                    request = scrapy.Request(url=image_url)
                    request.meta['img_name'] = img_name
                    request.meta['this_prod_img_folder'] = item['img_name_here']
                    request.dont_filter = True
                    yield request

    def file_path(self, request, response=None, info=None):
        return os.path.join(info.spider._TEMP_IMAGES_STORE, request.meta['this_prod_img_folder'], request.meta['img_name'])
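Applied to the spider in the question, a minimal sketch (assumptions: each item carries 'title' and 'image_urls' exactly as parse_info yields them; the class name TitleImagePipeline and the crude title sanitization are illustrative, not from the original answer):

import scrapy
from scrapy.pipelines.images import ImagesPipeline

class TitleImagePipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # carry the scraped title along with each image request
        for image_url in item.get('image_urls', []):
            yield scrapy.Request(image_url, meta={'title': item.get('title', 'untitled')})

    def file_path(self, request, response=None, info=None):
        # strip filesystem-unfriendly characters, then save as <title>.jpg
        safe_title = ''.join(c for c in request.meta['title'] if c.isalnum() or c in ' _-').strip()
        return '%s.jpg' % safe_title

Enable it in settings.py with ITEM_PIPELINES = {'imageToFileSystemCheck.pipelines.TitleImagePipeline': 1} in place of the stock ImagesPipeline, keeping IMAGES_STORE as it is.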