Django-import-export: Importing ForeignKey fields succeeds only for 1st Value - django-import-export

When trying to import data via Excel, the code only reads the first entry from the database.
#resource.py
from .models import(
Country, GeoMaster
)
from import_export import resources, fields
from import_export.widgets import ForeignKeyWidget
class CountryResource(resources.ModelResource):
geo_master_id= fields.Field(
column_name= 'geo_master_id',
attribute='geo_master_id',
widget=ForeignKeyWidget(GeoMaster, 'id')
)
class Meta:
model = Country
import_id_fields = ['country_name']
fields = ( 'id', 'country_name', 'geo_master_id')
My model GeoMaster contains multiple entries with id's ranging from 1-10
my successful import dataset (geo_master_id=1 is the first entry in the database):
id|country_name|geo_master_id
--|--------------|--------------|
|India |1.0 |
unsuccessful import dataset (geo_master_id=2 is NOT the first entry in the database):
id|country_name|geo_master_id
--|--------------|--------------|
|India |2.0 |
Error: GeoMaster matching query does not exist.
Is there something wrong with my resources.py?
UPDATE:
I added custom widget:
class GeoMasterForeignKeyWidget(ForeignKeyWidget):
def get_queryset(self, value, row):
return self.model.objects.filter(
id=row["geo_master_id"],
)
#to be used in subsequent field
geo_master_id= fields.Field(
column_name= 'geo_master_id',
attribute='geo_master_id',
widget=GeoMasterForeignKeyWidget(GeoMaster, 'id')
)
Still it returns Error: GeoMaster matching query does not exist.

I tried this and it worked for me
class GeoMasterForeignKeyWidget(ForeignKeyWidget):
def get_queryset(self, value, row):
#print(int(value), row)
qs= GeoMaster.objects.filter(
id=int(value),
)
print("returning:", qs)
return qs

Related

Polars string column to pl.datetime in Polars: conversion issue

working with a csv file with the following schema
'Ticket ID': polars.datatypes.Int64,
..
'Created time': polars.datatypes.Utf8,
'Due by Time': polars.datatypes.Utf8,
..
Converting to Datetime:
df = (
df.lazy()
.select(list_cols)
.with_columns([
pl.col(convert_to_date).str.strptime(pl.Date, fmt='%d-%m-%Y %H:%M',strict=False).alias("Create_date") #.cast(pl.Datetime)
])
)
Here is the output. 'Created time' is the original str and 'Create_date' is the conversion:
Created time
Create_date
str
date
04-01-2021 10:26
2021-01-04
04-01-2021 10:26
2021-01-04
04-01-2021 10:26
2021-01-04
04-01-2021 11:48
2021-01-05
...
...
22-09-2022 22:44
null
22-09-2022 22:44
null
22-09-2022 22:44
null
22-09-2022 22:47
null
Getting a bunch of nulls and some of the date conversions seems to be incorrect (see 4th row in the output above). Also, how may I keep the time values?
Sure I am doing something wrong and any help would be greatly appreciated.
import polars as pl
from datetime import datetime
from datetime import date, timedelta
import pyarrow as pa
import pandas as pd
convert_to_date = ['Created time','Due by Time','Resolved time','Closed time','Last update time','Initial response time']
url = 'https://raw.githubusercontent.com/DOakville/PolarsDate/main/3000265945_tickets-Dates.csv'
df = (
pl.read_csv(url,parse_dates=True)
)
df = df.with_column(
pl.col(convert_to_date).str.strptime(pl.Date, fmt='%d-%m-%Y %H:%M',strict=False).alias("Create_date") #.cast(pl.Datetime)
)
Ahhh... I think I see what is happening - your with_columns expression is successfully converting all of the columns given in the "convert_to_date" list, but assigning the result of each conversion to the same name: "Create_date".
So, the values you finally get are coming from the last column to be converted ("Initial response time"), which does have nulls where you see them.
If you want each column to be associated with a separate date-converted entry, you can use the suffix expression to ensure that each conversion is mapped to its own distinct output column (based on the original name).
For example:
df.with_columns(
pl.col(convert_to_date).str.strptime(
datatype = pl.Date,
fmt = '%d-%m-%Y %H:%M',
).suffix(" date") # << adds " date" to the existing column name
)
Or, if you prefer to overwrite the existing columns with the converted ones, you could keep the existing column names:
df.with_columns(
pl.col(convert_to_date).str.strptime(
datatype = pl.Date,
fmt = '%d-%m-%Y %H:%M'
).keep_name() # << keeps original name (effectively overwriting it)
)
Finally, if you actually want datetimes (not dates), just change the value of the datatype param in the strptime expression to pl.Datetime.

How to solve for pyodbc.ProgrammingError: The second parameter to executemany must not be empty

Hi, I'm having an issue with the transfer of data from one database to another. I created a list using a field in a table on an MSSQL DB, used that list to query an Oracle DB table (using the initial list in the WHERE statement to filter results), and I then load the query results back into the MSSQL DB.
The program runs for the first few iterations but then errors out, with the following error (
Traceback (most recent call last):
File "C:/Users/1/PycharmProjects/DataExtracts/BuyerGroup.py", line 67, in
insertIntoMSDatabase(idString)
File "C:/Users/1/PycharmProjects/DataExtracts/BuyerGroup.py", line 48, in insertIntoMSDatabase
mycursor.executemany(sql, val)
pyodbc.ProgrammingError: The second parameter to executemany must not be empty.)
I can't seem to find any guidance online to troubleshoot this error message. I feel it may be a simple solution but I just can't get there...
# import libraries
import cx_Oracle
import pyodbc
import logging
import time
import re
import math
import numpy as np
logging.basicConfig(level=logging.DEBUG)
conn = pyodbc.connect('''Driver={SQL Server Native Client 11.0};
Server='servername';
Database='dbname';
Trusted_connection=yes;''')
b = conn.cursor()
dsn_tns = cx_Oracle.makedsn('Hostname', 'port', service_name='name')
conn1 = cx_Oracle.connect(user=r'uid', password='pwd', dsn=dsn_tns)
c = conn1.cursor()
beginTime = time.time()
bind = (b.execute('''select distinct field1
from [server].[db].[dbo].[table]'''))
print('MSQL table(s) queried, List Generated')
# formats ids for sql string
def surroundWithQuotes(id):
return "'" + re.sub(",|\s$", "", str(id)) + "'"
def insertIntoMSDatabase(idString):
osql = '''SELECT distinct field1, field2
FROM Database.Table
WHERE field2 is not null and field3 IN ({})'''.format(idString)
c.execute(osql)
claimsdata = c.fetchall()
print('Oracle table(s) queried, Data Pulled')
mycursor = conn.cursor()
sql = '''INSERT INTO [dbo].[tablename]
(
[fields1]
,[field2]
)
VALUES (?,?)'''
val = claimsdata
mycursor.executemany(sql, val)
conn.commit()
ids = []
formattedIdStrings = []
# adds all the ids found in bind to an iterable array
for row in bind:
ids.append(row[0])
# splits the ids[] array into multiple arrays < 1000 in length
batchedIds = np.array_split(ids, math.ceil(len(ids) / 1000))
# formats the value inside each batchedId to be a string
for batchedId in batchedIds:
formattedIdStrings.append(",".join(map(surroundWithQuotes, batchedId)))
# runs insert into MS database for each batch of IDs
for idString in formattedIdStrings:
insertIntoMSDatabase(idString)
print("MSQL table loaded, Data inserted into destination")
endTime = time.time()
print("Program Time Elapsed: ",endTime-beginTime)
conn.close()
conn1.close()
mycursor.executemany(sql, val)
pyodbc.ProgrammingError: The second parameter to executemany must not be empty.
Before calling .executemany() you need to verify that val is not an empty list (as would be the case if .fetchall() is called on a SELECT statement that returns no rows) , e.g.,
if val:
mycursor.executemany(sql, val)

copying custom field value from sale order line to stock.move in odoo

When copying the value from the sale order line to the stock move, it shows only the second row's value in both rows. Code:
[enter image description here][1]
from odoo import models, fields, api
class StockMove(models.Model):
@api.multi
def get_data(self):
data = self.env['sale.order.line'].search([])
print("Get Data Function")
for rec in data:
print(rec.x_serialnumber)
for record in self:
record.x_serialnumber = rec.x_serialnumber
_inherit = 'stock.move'
x_serialnumber = fields.Text(string="Serial Number", compute='get_data')
On Sale order line it shows like this:
Serial Number: in both lines
112233
445566
But in stock move:
Serial Number: in both lines
445566
445566
Aurangzaib
Update your code:
@api.multi
def get_data(self):
for move in self:
if not (move.picking_id and move.picking_id.group_id):
continue
picking = move.picking_id
sale_order = self.env['sale.order'].sudo().search([
('procurement_group_id', '=', picking.group_id.id)], limit=1)
for line in sale_order.order_line:
if line.product_id.id != move.product_id.id:
continue
move.update({
'x_serialnumber': line.x_serialnumber,
})

scrapy / python 3.5 : targeting and filtering

i want to extract the following field : movie's,director's,actors' name
on the page allocine.fr
This will help me to make my template for further scraps.
Here is my bad working code (inside spiders directory)
from scrapy.contrib.spiders import CrawlSpider, Rule
from cinefil.items import Article
#from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor ==> deprecated
from scrapy.linkextractors import LinkExtractor
from scrapy import log
class CinefilSpider(CrawlSpider):
name="cinefil"
allowed_domains = ["allocine.fr"]
start_urls = ["http://www.allocine.fr/film/fichefilm_gen_cfilm=29007.html"]
rules = [
Rule(LinkExtractor(allow=('(/film/)((?!:).)*$'),), callback="parse_item", follow=False)
]
def parse_item(self, response):
ROOTPATH = '//div[@class="meta-body-item"]'
item = Article()
casiers = response.xpath(ROOTPATH).extract()
for matos in casiers:
print("\n----- ------ ------ -------- ---------")
print(matos)
return item
For extracting the movie's,director's,actors' name on the page allocine.fr
Movie name
#get from <div class="titlebar-title titlebar-title-lg">
>>> movie=response.xpath('//div[@class="titlebar-title titlebar-title-lg"]/text()').extract_first()
>>> movie
u'Spider-Man'
Director name
#start from
#<span itemprop="director">
#<a>
#<span itemprop="name">
>>> director=response.xpath('//span[@itemprop="director"]/a/span[@itemprop="name"]/text()').extract()
>>> director
u'Sam Raimi'
Actors name
#Take the word "Avec" as landmark and get its siblings <spans>
>>> movie_stars=response.xpath('//span[contains(text(),"Avec")]/following-sibling::span/text()').extract()
>>> movie_stars
[u'Tobey Maguire', u'Willem Dafoe', u'Kirsten Dunst', u' plus ']
#remove last item 'plus'
>>> movie_stars.pop()
u' plus '
>>> movie_stars
[u'Tobey Maguire', u'Willem Dafoe', u'Kirsten Dunst']
And the items.py should be declared as :
import scrapy
class Movie(scrapy.Item):
name = scrapy.Field()
director = scrapy.Field()
actors = scrapy.Field()

Printing QModelIndex vs QModelIndex.model(): different hex values?

When you print out a QModelIndex in Pyside, the object representation shows the row, column, parent, model, and memory address. However, if you print out index.model(), the memory address for the model is different.
Here is some code that demonstrates what I mean:
from PySide import QtGui, QtCore
class TestQModelIndexModelWin(QtGui.QMainWindow):
def __init__(self, parent=None):
super(TestQModelIndexModelWin, self).__init__(parent)
self.listView = QtGui.QListView()
self.setCentralWidget(self.listView)
listModel = QtGui.QStringListModel(['foo', 'bar', 'baz'])
self.listView.setModel(listModel)
numItems = len(listModel.stringList())
for i in range(numItems):
index = listModel.index(i, 0)
print index
print index.model()
When running this code, the results look something like the following:
<PySide.QtCore.QModelIndex(0,0,0x0,QStringListModel(0xef1b7e0) ) at 0x0000000017656D08>
<PySide.QtGui.QStringListModel object at 0x0000000017656948>
<PySide.QtCore.QModelIndex(1,0,0x0,QStringListModel(0xef1b7e0) ) at 0x00000000176564C8>
<PySide.QtGui.QStringListModel object at 0x0000000017656948>
<PySide.QtCore.QModelIndex(2,0,0x0,QStringListModel(0xef1b7e0) ) at 0x0000000017656D08>
<PySide.QtGui.QStringListModel object at 0x0000000017656948>
Why does the QModelIndex show the QStringListModel hex value as 0xef1b7e0 but the QStringListModel shows its address as 0x0000000017656948?
The repr for index is showing the C++ address of the model it is associated with. Whereas the repr for index.model() is showing the address of the python object that wraps the C++ model.
You can verify this by using the shiboken module:
import shiboken
...
print index
print index.model()
print shiboken.dump(index.model())
which will produce output like this:
<PySide.QtCore.QModelIndex(2,0,0x0,QStringListModel(0x17b0b40) ) at 0x7ff1a3715998>
<PySide.QtGui.QStringListModel object at 0x7ff1a3715950>
C++ address....... PySide.QtGui.QStringListModel/0x17b0b40
hasOwnership...... 1
containsCppWrapper 1
validCppObject.... 1
wasCreatedByPython 1

Resources