Drop Mime data (csv file) into QTableView with custom QSqlTableModel is not working

I'm using a QTableView along with a subclassed QSqlTableModel to display an SQLite table in Qt, and I want to insert new records by dropping a CSV file onto the view.
I have followed the docs and came up with the example below. It is a very light reproducible example showing what happens in my code; no sanity checks or quality code have been intentionally used. It's tested against PySide6.
import sys
from qtpy.QtWidgets import QApplication, QTableView, QWidget
from qtpy.QtCore import QModelIndex, QMimeData, Qt
from qtpy.QtSql import QSqlDatabase, QSqlTableModel, QSqlQuery
from pandas import read_csv


def create_table():
    # Dummy, very simple table
    _query_str = """CREATE TABLE MyTable (
        ID INTEGER PRIMARY KEY AUTOINCREMENT,
        Field1 INTEGER,
        Field2 TEXT);"""
    query = QSqlQuery(db=db, query=_query_str)
    query.exec_()


class MyTableModel(QSqlTableModel):
    def __init__(self, table_name, db):
        QSqlTableModel.__init__(self, db=db)
        self.setTable(table_name)

    def canDropMimeData(self, data: QMimeData, action: Qt.DropAction, row: int, column: int, parent: QModelIndex) -> bool:
        return True  # <-- Just for the example

    def supportedDropActions(self) -> Qt.DropAction:
        return Qt.DropAction.CopyAction | Qt.DropAction.MoveAction | Qt.DropAction.LinkAction

    def dropMimeData(self, data: QMimeData, action: Qt.DropAction, row: int, column: int, parent: QModelIndex) -> bool:
        csv_filename = data.urls()[0].toLocalFile()
        df = read_csv(csv_filename, delimiter=',', header=0)
        for _, csv_row in df.iterrows():  # renamed to avoid shadowing the 'row' argument
            record = self.record()
            record.remove(0)  # <-- Remove the ID field
            record.setValue('Field1', int(csv_row['Field1']))
            record.setValue('Field2', str(csv_row['Field2']))
            self.insertRecord(-1, record)
        return True


if __name__ == '__main__':
    # In-memory database just for the purpose of the example
    db = QSqlDatabase.addDatabase("QSQLITE")
    db.setDatabaseName(":memory:")
    if not db.open():
        raise RuntimeError("Database not opened")
    create_table()
    app = QApplication([])
    table = QTableView()
    model = MyTableModel('MyTable', db)
    table.setModel(model)
    table.setAcceptDrops(True)
    table.show()
    sys.exit(app.exec_())
What I get is that canDropMimeData and supportedDropActions are correctly called, but (checking with the debugger) dropMimeData is never called.
And the image below shows that, even though canDropMimeData returns True, the file does not seem to be accepted.
Edit 1 - QSqlTableModel issue
I found out that the problem is with QSqlTableModel. If I use a bare QStandardItemModel, everything works fine. Any work-around?

By default, item models don't provide drag and drop support.
In order to properly allow it, several aspects have to be checked, including that the flags() returned for any index that should accept a drop must also include Qt.ItemIsDropEnabled.
If you want to allow that only for the model itself (a drop on an empty area, not on items), that index is the root index, i.e. an invalid index:
def flags(self, index):
    flags = super().flags(index)
    if not index.isValid():
        flags |= Qt.ItemIsDropEnabled
    return flags
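For the question's case, a minimal sketch of how that override might be slotted into the MyTableModel subclass; the placement is an assumption, and canDropMimeData, supportedDropActions and dropMimeData stay exactly as in the question:

class MyTableModel(QSqlTableModel):
    def __init__(self, table_name, db):
        super().__init__(db=db)
        self.setTable(table_name)

    def flags(self, index):
        # Base flags from QSqlTableModel, plus drop support on the root
        # (invalid) index so files can be dropped on the empty area of the view.
        flags = super().flags(index)
        if not index.isValid():
            flags |= Qt.ItemIsDropEnabled
        return flags

    # canDropMimeData, supportedDropActions and dropMimeData as in the question.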

Related

Is there any good way to rewrite old edgetpu code using the pycoral API?

I'm a beginner using the Coral Dev Board Mini.
I want to start a Smart Bird Feeder project:
https://coral.ai/projects/bird-feeder/
I've been trying to execute the code by referring to that page, but I can't run bird_classify.py.
The error is as follows:
RuntimeError: Internal: Unsupported data type in custom op handler: 0
Node number 0 (edgetpu-custom-op) failed to prepare.
Originally, the samples in this project seem to be deprecated, and the edgetpu API requires the old runtime version 13 instead of the current 14 (tflite is 2.5). I downloaded it directly and re-installed it in /usr/lib/python3/dist-packages, but I cannot uninstall the new version and cannot get the versions to match.
Is there a better way to do this?
Also, I've decided to give up on reproducing the same environment as the sample and to use the pycoral API to run the code instead.
If there is a good way to rewrite the code to use pycoral, please let me know.
Thanks
#!/usr/bin/python3
"""
Coral Smart Bird Feeder

Uses ClassificationEngine from the EdgeTPU API to analyze animals in
camera frames. Sounds a deterrent if a squirrel is detected.

Users define model, labels file, storage path, deterrent sound, and
optionally can set this to training mode for collecting images for a custom
model.
"""
import argparse
import time
import re
import imp
import logging
import gstreamer
import sys
sys.path.append('/usr/lib/python3/dist-packages/edgetpu')
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
from playsound import playsound

from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter


def save_data(image, results, path, ext='png'):
    """Saves camera frame and model inference results
    to user-defined storage directory."""
    tag = '%010d' % int(time.monotonic() * 1000)
    name = '%s/img-%s.%s' % (path, tag, ext)
    image.save(name)
    print('Frame saved as: %s' % name)
    logging.info('Image: %s Results: %s', tag, results)


def load_labels(path):
    """Parses provided label file for use in model inference."""
    p = re.compile(r'\s*(\d+)(.+)')
    with open(path, 'r', encoding='utf-8') as f:
        lines = (p.match(line).groups() for line in f.readlines())
        return {int(num): text.strip() for num, text in lines}


def print_results(start_time, last_time, end_time, results):
    """Print results to terminal for debugging."""
    inference_rate = ((end_time - start_time) * 1000)
    fps = (1.0 / (end_time - last_time))
    print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
    for label, score in results:
        print(' %s, score=%.2f' % (label, score))


def do_training(results, last_results, top_k):
    """Compares current model results to previous results and returns
    true if at least one label difference is detected. Used to collect
    images for training a custom model."""
    new_labels = [label[0] for label in results]
    old_labels = [label[0] for label in last_results]
    shared_labels = set(new_labels).intersection(old_labels)
    if len(shared_labels) < top_k:
        print('Difference detected')
        return True


def user_selections():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True,
                        help='.tflite model path')
    parser.add_argument('--labels', required=True,
                        help='label file path')
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of classes with highest score to display')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='class score threshold')
    parser.add_argument('--storage', required=True,
                        help='File path to store images and results')
    parser.add_argument('--sound', required=True,
                        help='File path to deterrent sound')
    parser.add_argument('--print', default=False, required=False,
                        help='Print inference results to terminal')
    parser.add_argument('--training', default=False, required=False,
                        help='Training mode for image collection')
    args = parser.parse_args()
    return args


def main():
    """Creates camera pipeline, and pushes pipeline through ClassificationEngine
    model. Logs results to user-defined storage. Runs either in training mode to
    gather images for custom model creation or in deterrent mode that sounds an
    'alarm' if a defined label is detected."""
    args = user_selections()
    print("Loading %s with %s labels." % (args.model, args.labels))
    engine = ClassificationEngine(args.model)
    labels = load_labels(args.labels)
    storage_dir = args.storage

    # Initialize logging file
    logging.basicConfig(filename='%s/results.log' % storage_dir,
                        format='%(asctime)s-%(message)s',
                        level=logging.DEBUG)
    last_time = time.monotonic()
    last_results = [('label', 0)]

    def user_callback(image, svg_canvas):
        nonlocal last_time
        nonlocal last_results
        start_time = time.monotonic()
        results = engine.classify_with_image(image, threshold=args.threshold, top_k=args.top_k)
        end_time = time.monotonic()
        results = [(labels[i], score) for i, score in results]

        if args.print:
            print_results(start_time, last_time, end_time, results)

        if args.training:
            if do_training(results, last_results, args.top_k):
                save_data(image, results, storage_dir)
        else:
            # Custom model mode:
            # The labels can be modified to detect/deter user-selected items
            if results[0][0] != 'background':
                save_data(image, results, storage_dir)  # arguments reordered to match save_data's signature
            if 'fox squirrel, eastern fox squirrel, Sciurus niger' in results:
                playsound(args.sound)
                logging.info('Deterrent sounded')

        last_results = results
        last_time = end_time

    result = gstreamer.run_pipeline(user_callback)


if __name__ == '__main__':
    main()
I suggest that you follow one of the examples available in the Coral examples repository. There is an example named classify_image.py which uses the Edge TPU (tflite) and which I found works. After you install the Coral examples, you have to drill down through the directory hierarchy; in my case, from root it is /home/pi/ml-projects/coral/pycoral/tensorflow/examples/lite/examples. There are 17 files in that last examples directory. I'm using: numpy 1.19.3, pycoral 2.0.0, scipy 1.7.1, tensorflow 2.4.0, tflite-runtime 2.5.0.post1. I've installed the following Edge TPU runtime: edgetpu_runtime_20201105.zip.
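For the "rewrite with pycoral" part of the question, here is a minimal sketch (modelled on that classify_image.py example) of how the inference step could look with the pycoral API instead of the deprecated ClassificationEngine. The model and label paths are placeholders, and the top_k/threshold values are just example defaults:

from PIL import Image
from pycoral.adapters import classify, common
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter

# Placeholder paths - substitute your own Edge TPU-compiled model and label file.
interpreter = make_interpreter('model_edgetpu.tflite')
interpreter.allocate_tensors()
labels = read_label_file('labels.txt')

def classify_frame(image, top_k=3, threshold=0.1):
    # Resize the frame to the model's input size and copy it into the input tensor.
    size = common.input_size(interpreter)
    common.set_input(interpreter, image.convert('RGB').resize(size, Image.LANCZOS))
    interpreter.invoke()
    # Return (label, score) pairs, matching the structure the old callback used.
    classes = classify.get_classes(interpreter, top_k, threshold)
    return [(labels.get(c.id, c.id), c.score) for c in classes]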

Getting java.lang.RuntimeException: driver class not found when using JdbcHook more than once in an Airflow operator

The use case is to run a list of SQL statements in Hive and then update the Impala metadata. As shown below, the two methods for Hive and Impala both use JdbcHook. In whichever order I call these methods, only the first one runs and the second one throws ERROR - java.lang.RuntimeException: Class <driver name of hive/impala> not found. Each method runs fine when used on its own.
Please find the execute method of the Airflow custom operator below.
Note: I can't use hive_operator to run the Hive statements, and I don't see any suitable methods in HiveServer2Hook. I'm new to Airflow; any help is much appreciated.
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.jdbc_hook import JdbcHook
import sqlparse


class CustomHiveOperator(BaseOperator):
    """
    Executes hql code and invalidates/computes stats in Impala for that table.

    Requires JdbcHook, sqlparse.

    :param hive_jdbc_conn: reference to a predefined hive database
    :type hive_jdbc_conn: str
    :param impala_jdbc_conn: reference to a predefined impala database
    :type impala_jdbc_conn: str
    :param table_name: hive table name, used for post-processing in impala
    :type table_name: str
    :param script_path: hql script path to run in hive
    :type script_path: str
    :param autocommit: if True, each command is automatically committed.
        (default value: False)
    :type autocommit: bool
    :param parameters: (optional) the parameters to render the SQL query with.
    :type parameters: mapping or iterable
    """

    @apply_defaults
    def __init__(
            self,
            hive_jdbc_conn: str,
            impala_jdbc_conn: str,
            table_name: str,
            script_path: str,
            autocommit=False,
            parameters=None,
            *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.hive_jdbc_conn = hive_jdbc_conn
        self.impala_jdbc_conn = impala_jdbc_conn
        self.table_name = table_name
        self.script_path = script_path
        self.autocommit = autocommit
        self.parameters = parameters

    def execute(self, context):
        self.hive_run()
        self.impala_post_process()

    def format_string(self, x):
        return x.replace(";", "")

    def hive_run(self):
        with open(self.script_path) as f:
            data = f.read()
        hql_temp = sqlparse.split(data)
        hql = [self.format_string(x) for x in hql_temp]
        self.log.info('Executing: %s', hql)
        self.hive_hook = JdbcHook(jdbc_conn_id=self.hive_jdbc_conn)
        self.hive_hook.run(hql, self.autocommit, parameters=self.parameters)

    def impala_post_process(self):
        invalidate = 'INVALIDATE METADATA ' + self.table_name
        compute_stats = 'COMPUTE STATS ' + self.table_name
        hql = [invalidate, compute_stats]
        self.log.info('Executing: %s', hql)
        self.impala_hook = JdbcHook(jdbc_conn_id=self.impala_jdbc_conn)
        self.impala_hook.run(hql, self.autocommit, parameters=self.parameters)
This is actually an issue with how Airflow uses jaydebeapi and the underlying JPype modules to facilitate the JDBC connection.
A Java virtual machine is started when JPype is first used (the first JdbcHook.get_conn call), and the only library that the virtual machine is made aware of is the specific driver used for whichever JDBC connection is being made. When you create another connection, the virtual machine is already started and isn't aware of the library needed for a different connection type.
The only way that I have found around this is to use an extension of JdbcHook which overrides the get_conn method to gather the paths of all JDBC drivers that are defined as a Connection object in Airflow. See here for the Airflow implementation.
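For illustration, a rough sketch of that kind of JdbcHook extension. It assumes the connection extras use the stock extra__jdbc__drv_path / extra__jdbc__drv_clsname fields of the built-in JdbcHook; treat those field names and the session handling as assumptions that may need adjusting for your Airflow version:

import jaydebeapi
from airflow.hooks.jdbc_hook import JdbcHook
from airflow.models import Connection
from airflow.settings import Session


class MultiDriverJdbcHook(JdbcHook):
    """JdbcHook variant that hands jaydebeapi every known driver jar, so the
    JVM started by JPype on the first connection can load all of them."""

    def _all_driver_jars(self):
        # Collect the driver jar paths stored in every JDBC connection's extras.
        session = Session()
        try:
            jars = []
            for conn in session.query(Connection).filter(Connection.conn_type == 'jdbc'):
                path = conn.extra_dejson.get('extra__jdbc__drv_path')  # assumed extras field
                if path:
                    jars.extend(path.split(','))
            return jars
        finally:
            session.close()

    def get_conn(self):
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        driver_class = conn.extra_dejson.get('extra__jdbc__drv_clsname')  # assumed extras field
        return jaydebeapi.connect(
            jclassname=driver_class,
            url=str(conn.host),
            driver_args=[str(conn.login), str(conn.password)],
            jars=self._all_driver_jars(),  # all drivers, not just this connection's
        )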

TypeError: Object of type RowProxy is not JSON serializable - Flask

I am using SQLAlchemy to query the database from my Flask web application through an engine. I run a SELECT query, call fetchall() on the ResultProxy it returns, which gives me a list of RowProxy objects, and then store that list in the session.
Here is my code:
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from flask import Flask, session

engine = create_engine(os.environ.get('DATABASE_URL'))
db = scoped_session(sessionmaker(bind=engine))

app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY')


@app.route('/')
def index():
    session['list'] = db.execute("SELECT title,author,year FROM books WHERE year = 2011 LIMIT 4").fetchall()
    print(session['list'])
    return "<h1>hello world</h1>"


if __name__ == "__main__":
    app.run(debug=True)
Here is the output:
[('Steve Jobs', 'Walter Isaacson', 2011), ('Legend', 'Marie Lu', 2011), ('Hit List', 'Laurell K. Hamilton', 2011), ('Born at Midnight', 'C.C. Hunter', 2011)]
Traceback (most recent call last):
File "C:\Users\avise\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "C:\Users\avise\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "C:\Users\avise\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\avise\AppData\Local\Programs\Python\Python38\Lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type RowProxy is not JSON serializable
The session item stores the data, as I can see in the output, but "hello world" is not rendered.
If I replace the session variable with an ordinary variable, say x, it seems to work.
But I think I need to use sessions so that my application can be used simultaneously by two users and display different things to each. So, how can I use sessions in this case, or is there any other way?
Any help will be appreciated as I am new to Flask and web development.
From what I understand about the Flask session object, it acts as a Python dictionary; however, its values must be JSON serializable. In this case, just as the error suggests, the RowProxy object returned by fetchall is not JSON serializable.
A solution to this problem is to instead store the result of your query as a dictionary (which is JSON serializable).
It looks like the result of your query is a list of tuples, so we can do the following:
res = db.execute("SELECT title,author,year FROM books WHERE year = 2011 LIMIT 4").fetchall()
user_books = {}
index = 0
for entry in res:
    user_books[index] = {'title': res[index][0],
                         'author': res[index][1],
                         'year': res[index][2],
                         }
    index += 1
session['list'] = user_books
A word of caution, however: if you were to key the dictionary by something like the book title instead of a running index, two books with the same title would overwrite each other, so consider using a unique id as the key.
Also note that the dictionary construction above only works for the query you already have; if you added another column to the SELECT statement, you would have to edit the code to include the extra column information.
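As a slightly shorter variant (a sketch assuming the same query and column names), you can also build a JSON-serializable list of dicts straight from the rows, since each RowProxy can be indexed by column name:

res = db.execute("SELECT title,author,year FROM books WHERE year = 2011 LIMIT 4").fetchall()
# Each RowProxy supports lookup by column name, so a plain dict per row works.
session['list'] = [
    {'title': row['title'], 'author': row['author'], 'year': row['year']}
    for row in res
]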

Syncing DynamoDB with ElasticSearch for old Data

I'm using this function https://github.com/bfansports/dynamodb-to-elasticsearch to sync my DynamoDB table with ElasticSearch. Unfortunately, it only processes newly added and updated items, not the rows that already existed in the table, even though I chose "New and old images - both the new and the old images of the item" in the Manage Stream section.
How can I fix that?
OK, I ended up updating the items in the DynamoDB table, which triggers the stream, so the sync between ElasticSearch and DynamoDB gets done.
This is the script that I use:
import json
import boto3
import random


def lambda_handler(event, context):
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('image-library')
    response = table.scan(
        ProjectionExpression='#k',
        ExpressionAttributeNames={
            '#k': 'id',  # partition key
        }
    )
    items = response['Items']
    random_number = random.randint(0, 1000)
    for item in items:
        response = table.update_item(
            Key=item,
            UpdateExpression='SET #f = :f',
            ExpressionAttributeNames={
                '#f': 'force_update'
            },
            ExpressionAttributeValues={
                ':f': random_number
            }
        )
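One caveat: Scan returns at most 1 MB of items per call, so for larger tables the forced update needs to follow LastEvaluatedKey to touch every existing item. A sketch of the same loop with pagination (same table name, key name and force_update attribute as above; the function name is just for illustration):

import random
import boto3


def touch_all_items(table_name='image-library', key_name='id'):
    """Force-update every item, page by page, so the stream fires for all of them."""
    table = boto3.resource('dynamodb').Table(table_name)
    random_number = random.randint(0, 1000)
    scan_kwargs = {
        'ProjectionExpression': '#k',
        'ExpressionAttributeNames': {'#k': key_name},
    }
    while True:
        response = table.scan(**scan_kwargs)
        for item in response['Items']:
            table.update_item(
                Key=item,
                UpdateExpression='SET #f = :f',
                ExpressionAttributeNames={'#f': 'force_update'},
                ExpressionAttributeValues={':f': random_number},
            )
        if 'LastEvaluatedKey' not in response:
            break
        scan_kwargs['ExclusiveStartKey'] = response['LastEvaluatedKey']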

WTForms custom validator- RequiredIf on a RadioField

I borrowed a validator that requires the user to input data if the value of another field is a certain value:
class RequiredIf(object):
    def __init__(self, *args, **kwargs):
        self.conditions = kwargs

    def __call__(self, form, field):
        for name, data in self.conditions.items():
            if name not in form._fields:
                Optional()(form, field)
            else:
                condition_field = form._fields.get(name)
                if condition_field.data == data and not field.data:
                    DataRequired()(form, field)
        Optional()(form, field)
This works really well when the field carrying the validator is a TextField, but it doesn't seem to work when the field is a RadioField. How can I adapt the validator so that it also works on RadioFields?
As it stands, regardless of whether the validation condition applies or not, "not a valid choice" is always returned for the RadioField.
Thanks in advance.
For example:
class new_form(Form):
    code = BooleanField('Do you code?')
    code2 = RadioField('If so, what languages do you use?',
                       choices=[('python', 'python'), ('C++', 'C++')],
                       validators=[RequiredIf(code=1)])
Regardless of whether the BooleanField code is checked or not, "this is not a valid choice" is always returned for code2. I would like a validator that requires input for any type of field (including RadioField), conditional on the value of another field (code=1 in this case).
Updated! You can implement any custom processing in __call__. Example:
from multidict import CIMultiDict
from wtforms import Form, RadioField, BooleanField


class RequiredIf(object):
    def __init__(self, **kwargs):
        self.conditions = kwargs

    def __call__(self, form, field):
        # NOTE! you can create here any custom processing
        current_value = form.data.get(field.name)
        if current_value == 'None':
            for condition_field, reserved_value in self.conditions.items():
                dependent_value = form.data.get(condition_field)
                if condition_field not in form.data:
                    continue
                elif dependent_value == reserved_value:
                    # just an example of error
                    raise Exception(
                        'Invalid value of field "%s". Field is required when %s==%s' % (
                            field.name,
                            condition_field,
                            dependent_value
                        ))


class NewForm(Form):
    code = BooleanField('Do you code?')
    code2 = RadioField(
        'If so, what languages do you use?',
        choices=[('python', 'python'), ('C++', 'C++')],
        validators=[RequiredIf(code=True)])


form = NewForm(formdata=CIMultiDict(code=True, code2='python'))
form.validate()  # valid data - without errors

# invalid data
form = NewForm(formdata=CIMultiDict(code=True))
form.validate()  # invalid data - Exception: Invalid value of field "code2". Field is required when code==True
One more example with two RadioFields:

class NewForm(Form):
    list_one = RadioField('City/Country', choices=['city', 'country'])
    list_two = RadioField(
        'Cities',
        choices=[('minsk', 'Minsk'), ('tbilisi', 'Tbilisi')],
        validators=[RequiredIf(list_one='city')])


form = NewForm(formdata=CIMultiDict(list_one='city', list_two='minsk'))
form.validate()  # without errors

form = NewForm(formdata=CIMultiDict(list_one='country'))
form.validate()  # without errors

form = NewForm(formdata=CIMultiDict(list_one='city'))
form.validate()  # invalid data - Exception: Invalid value of field "list_two". Field is required when list_one==city
Hope this helps.
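One design note on the error handling: instead of raising a bare Exception, a WTForms validator would normally raise wtforms.validators.ValidationError (or StopValidation), so the message ends up in field.errors and validate() simply returns False. A minimal sketch of __call__ written that way, using the same conditions dictionary as above:

from wtforms.validators import ValidationError

def __call__(self, form, field):
    # Only complain when the field itself is empty / unselected.
    if field.data in (None, 'None', ''):
        for condition_field, reserved_value in self.conditions.items():
            if form.data.get(condition_field) == reserved_value:
                raise ValidationError(
                    'Field is required when %s == %s'
                    % (condition_field, reserved_value))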
