I made a program using PySimpleGUI. It basically shows some images from a database based on a scanned number. It works, but sometimes it would be useful to be able to increase the size of an image, and it would make my user interface a bit more interactive.
I want to have the possibility to either:
have the image increase in size when I hover over it with the mouse, or
click on the image and have a pop-up of the image show up (in a bigger size).
I am not sure how to interact with a sg.Image().
Below you will find a truncated part of my code showing how I get the images to show up.
layout = [
    [
        sg.Text("Numéro de boîte"),
        sg.Input(size=(25, 1), key="-FILE-"),
        sg.Button("Load Image"),
        sg.Button("Update DATA"),
        sg.Text("<- useless text "),
    ],
    [sg.Text("Indicateur au max", size=(120, 1), font=("Arial", 18), justification="center")],
    [sg.Image(key="-ALV1-"), sg.Image(key="-ALV2-"), sg.Image(key="-ALV3-"), sg.Image(key="-ALV4-"), sg.Image(key="-ALV5-")],
    [sg.Image(key="-ALV6-"), sg.Image(key="-ALV7-"), sg.Image(key="-ALV8-"), sg.Image(key="-ALV9-"), sg.Image(key="-ALV10-")],
    [sg.Text("_" * 350, size=(120, 1), justification="center")],
    [sg.Text("Indicateur au milieu", size=(120, 1), font=("Arial", 18), justification="center")],
    [sg.Image(key="-ALV11-"), sg.Image(key="-ALV12-"), sg.Image(key="-ALV13-"), sg.Image(key="-ALV14-"), sg.Image(key="-ALV15-")],
    [sg.Image(key="-ALV16-"), sg.Image(key="-ALV17-"), sg.Image(key="-ALV18-"), sg.Image(key="-ALV19-"), sg.Image(key="-ALV20-")],
    [sg.Text("↓↓↓ ↓↓↓", size=(120, 1), font=("Arial", 18), justification="center")],
]
ImageAlv1 = Image.open(PathAlv1)
ImageAlv1.thumbnail((250, 250))
bio1 = io.BytesIO()
ImageAlv1.save(bio1, format="PNG")
window["-ALV1-"].update(data=bio1.getvalue())
Use the bind method for the events you need, like:
"<Enter>": the user moved the mouse pointer into a visible part of an element.
"<Double-1>": two click events happening close together in time.
Use PIL.Image to resize the image and io.BytesIO as a buffer.
import base64
from io import BytesIO
from PIL import Image
import PySimpleGUI as sg

def resize(image, size=(256, 256)):
    imgdata = base64.b64decode(image)
    im = Image.open(BytesIO(imgdata))
    width, height = size
    w, h = im.size
    scale = min(width/w, height/h)
    new_size = (int(w*scale+0.5), int(h*scale+0.5))
    new_im = im.resize(new_size, resample=Image.LANCZOS)
    buffer = BytesIO()
    new_im.save(buffer, format="PNG")
    return buffer.getvalue()

sg.theme('DarkBlue3')

number = 4
column_layout, line = [], []
limit = len(sg.EMOJI_BASE64_HAPPY_LIST) - 1
for i, image in enumerate(sg.EMOJI_BASE64_HAPPY_LIST):
    line.append(sg.Image(data=image, size=(64, 64), pad=(1, 1), background_color='#10C000', expand_y=True, key=f'IMAGE {i}'))
    if i % number == number-1 or i == limit:
        column_layout.append(line)
        line = []

layout = [
    [sg.Image(size=(256, 256), pad=(0, 0), expand_x=True, background_color='green', key='-IMAGE-'),
     sg.Column(column_layout, expand_y=True, pad=(0, 0))],
]
window = sg.Window("Title", layout, margins=(0, 0), finalize=True)

for i in range(limit+1):
    window[f'IMAGE {i}'].bind("<Enter>", "")        # Binding for mouse enter on sg.Image
    #window[f'IMAGE {i}'].bind("<Double-1>", "")    # Binding for mouse double click on sg.Image

element = window['-IMAGE-']
now = None
while True:
    event, values = window.read()
    if event == sg.WINDOW_CLOSED:
        break
    elif event.startswith("IMAGE"):
        index = int(event.split()[-1])
        if index != now:
            element.update(data=resize(sg.EMOJI_BASE64_HAPPY_LIST[index]))
            now = index

window.close()
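For the pop-up-on-click variant the asker also wanted, here is a minimal sketch (an extension of the code above, not part of the original answer): bind "<Double-1>" with a distinct suffix so its event can be told apart from the "<Enter>" one, then open a small modal window with a larger copy of the image. The "+DOUBLE" suffix and the 512x512 size are assumptions; if combined with the hover handler above, test the "+DOUBLE" case first so the generic "IMAGE" branch doesn't swallow it.

# Hedged sketch of the double-click pop-up, reusing window, limit, resize()
# and sg.EMOJI_BASE64_HAPPY_LIST from the answer code above.
for i in range(limit+1):
    window[f'IMAGE {i}'].bind("<Double-1>", "+DOUBLE")   # event becomes e.g. "IMAGE 3+DOUBLE"

while True:
    event, values = window.read()
    if event == sg.WINDOW_CLOSED:
        break
    elif event.endswith("+DOUBLE"):
        index = int(event.split('+')[0].split()[-1])     # "IMAGE 3+DOUBLE" -> 3
        big = resize(sg.EMOJI_BASE64_HAPPY_LIST[index], size=(512, 512))
        sg.Window("Preview", [[sg.Image(data=big)]], modal=True).read(close=True)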
Once I have created the rating scales in PsychoPy, I cannot get them to show after showing my stimuli. What am I doing wrong?
Preparing experiment
Define window:
win = visual.Window(fullscr=True, color = 'Black')
Preparing stimuli
Pictures
Make list of images in my folder:
images = glob.glob("Yellow/*.jpg")
Randomize the order for pictures:
random.shuffle(images)
Prepare Fixation cross
stim_fix_left = visual.TextStim(win, '+') # Fixation cross is just the character "+". Units are inherited from Window when not explicitly specified.
stim_fix_left.pos = (0.5,0)
stim_fix_right = visual.TextStim(win, '+') # Fixation cross is just the character "+". Units are inherited from Window when not explicitly specified.
stim_fix_right.pos = (-0.5, 0)
Ratings
# the item to-be-rated or respond to:
whichred_left = visual.TextStim(win, text="Does left or right picture contain most red?", height=.04, units='norm')
whichred_left.pos = (0.5,0)
whichred_right = visual.TextStim(win, text="Does left or right picture contain most red?", height=.04, units='norm')
whichred_right.pos = (-0.5, 0)
Create stimuli
Show introduction message:
msg(intro, "white")
Show introduction to block:
msg(block_red, "red")
for i in images:
    ## Pictures
    stim_fix_right.draw()
    stim_fix_left.draw()
    win.flip()  # flip screen
    core.wait(1.0)

    # Picture 1
    img1 = visual.ImageStim(win, image = i)  # create visual stimuli
    img1.pos = (0.4, 0)
    img1.size = (0.5)
    # Picture 2
    img2 = visual.ImageStim(win, image = images[2])  # create visual stimuli
    img2.pos = (0.7, 0)
    img2.size = (0.5)
    # Picture 3
    img3 = visual.ImageStim(win, image = i)  # create visual stimuli
    img3.pos = (-0.4, 0)
    img3.size = (0.5)
    # Picture 4
    img4 = visual.ImageStim(win, image = images[2])  # create visual stimuli
    img4.pos = (-0.7, 0)
    img4.size = (0.5)

    # Drawing pictures
    img1.draw()  # draw picture
    img2.draw()  # draw picture
    img3.draw()  # draw picture
    img4.draw()  # draw picture
    win.flip()  # flip screen
    stopwatch.reset()  # set clock
    core.wait(1.0)

    # Ratings
    event.clearEvents()
    # define window
    winrating = visual.Window(size = (1280, 720), color = 'black', units='pix')
    x, y = winrating.size  # for converting norm units to pix
    leftward = -0.35 * x / 2  # use pix units, because the drawing window's units are pix
    rightward = -1 * leftward

    # create a RatingScale object:
    RatingLeft = visual.RatingScale(winrating, choices=map(str, range(1, 8)), mouseOnly=True, pos=(leftward, -y/6),
                                    marker='circle', size=0.85, name='left')
    RatingRight = visual.RatingScale(winrating, choices=map(str, range(1, 8)), low=0, high=8, markerStart=4,
                                     leftKeys='left', rightKeys = 'right', acceptKeys='down', pos=(rightward, -y/6),
                                     marker='circle', size=0.85, name='right')

    while RatingLeft.noResponse or RatingRight.noResponse:
        whichred_left.draw
        whichred_right.draw
        RatingLeft.draw()
        RatingRight.draw()
        winrating.flip()
        if event.getKeys(['escape']):
            core.quit()
There is no need to define a second window. Do all of your drawing into your originally defined window, win. This is created as a full screen window, so I'd assume that the second window, winrating is hidden behind it and any drawing wouldn't be visible.
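A minimal sketch of that suggestion, using the names from the question (positions in norm units are an assumption, since win was created without explicit units; note also that whichred_left.draw in the question's loop needs parentheses to actually draw anything):

# Sketch: create the rating scales on the original full-screen window `win`
# instead of a second window, and draw everything into it.
RatingLeft = visual.RatingScale(win, choices=[str(n) for n in range(1, 8)],
                                mouseOnly=True, pos=(-0.5, -0.4),
                                marker='circle', size=0.85, name='left')
RatingRight = visual.RatingScale(win, choices=[str(n) for n in range(1, 8)],
                                 markerStart=4, leftKeys='left', rightKeys='right',
                                 acceptKeys='down', pos=(0.5, -0.4),
                                 marker='circle', size=0.85, name='right')

while RatingLeft.noResponse or RatingRight.noResponse:
    whichred_left.draw()   # draw() must be called; a bare .draw does nothing
    whichred_right.draw()
    RatingLeft.draw()
    RatingRight.draw()
    win.flip()
    if event.getKeys(['escape']):
        core.quit()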
For a while I have tried to figure out which coordinate or body frame the "LandmarkDetection" module is based on. I need the (0, 0, 0) position for the calculation to achieve self-localization for the robot with the help of landmark detection, so I think this is important...
I have run it with a Pepper robot under NAOqi 2.5.5, which I hope is compatible.
You can read the whole code of "LandmarkDetection.py" here:
"LandmarkDetection.py":
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Demonstrates a way to localize the robot with
ALLandMarkDetection"""

import qi
import time
import sys
import argparse
import math
import almath

class LandmarkDetector(object):
    """
    We first instantiate a proxy to the ALLandMarkDetection module.
    Note that this module should be loaded on the robot's naoqi.
    The module outputs its results in ALMemory in a variable
    called "LandmarkDetected".
    We then read this ALMemory value and check whether we get
    interesting things.
    After that we get the position of the landmark relative to the robot.
    """

    def __init__(self, app):
        """
        Initialisation of qi framework and event detection.
        """
        super(LandmarkDetector, self).__init__()
        app.start()
        session = app.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        # Connect the event callback.
        # Get the services ALMotion & ALRobotPosture.
        self.motion_service = session.service("ALMotion")
        self.posture_service = session.service("ALRobotPosture")
        self.subscriber = self.memory.subscriber("LandmarkDetected")
        print "self.subscriber = self.memory.subscriber(LandmarkDetected)"
        self.subscriber.signal.connect(self.on_landmark_detected)
        print "self.subscriber.signal.connect(self.on_landmark_detected)"
        # Get the services ALTextToSpeech, ALLandMarkDetection and ALMotion.
        self.tts = session.service("ALTextToSpeech")
        self.landmark_detection = session.service("ALLandMarkDetection")
        # print "self.landmark_detection" is repr(self.landmark_detection)
        self.motion_service = session.service("ALMotion")
        self.landmark_detection.subscribe("LandmarkDetector", 500, 0.0)
        print "self.landmark_detection.subscribe(LandmarkDetector, 500, 0.0 )"
        self.got_landmark = False
        # Set here the size of the landmark in meters.
        self.landmarkTheoreticalSize = 0.06  # in meters; 0.05 or 0.06?
        # Set here the current camera ("CameraTop" or "CameraBottom").
        self.currentCamera = "CameraTop"

    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        while markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
        if not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            # stop.motion_service.moveTo
            print "Ich sehe eine Landmarke! "  # German: "I see a landmark!"
            self.tts.say("Ich sehe eine Landmarke! ")
            # Retrieve landmark center position in radians.
            wzCamera = markData[1][0][0][1]
            wyCamera = markData[1][0][0][2]
            # Retrieve landmark angular size in radians.
            angularSize = markData[1][0][0][3]
            # Compute distance to landmark.
            distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
            # Get current camera position in NAO space.
            transform = self.motion_service.getTransform(self.currentCamera, 2, True)
            transformList = almath.vectorFloat(transform)
            robotToCamera = almath.Transform(transformList)
            # Compute the rotation to point towards the landmark.
            cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
            # Compute the translation to reach the landmark.
            cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
            # Combine all transformations to get the landmark position in NAO space.
            robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
            # robotTurnAroundAngle = almath.rotationFromAngleDirection(0, 1, 1, 1)
            # print "robotTurnAroundAngle = ", robotTurnAroundAngle
            print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
            print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
            print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        # Wake up robot
        self.motion_service.wakeUp()
        # Send robot to Pose Init
        self.posture_service.goToPosture("StandInit", 0.5)
        # Example showing how to get a simplified robot position in world.
        useSensorValues = False
        result = self.motion_service.getRobotPosition(useSensorValues)
        print "Robot Position", result
        # Example showing how to use this information to know the robot's displacement.
        useSensorValues = False
        # initRobotPosition = almath.Pose2D(self.motion_service.getRobotPosition(useSensorValues))
        # Make the robot move
        for i in range(1, 12, 1):
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
            print "self.motion_service.moveTo(0, 0, (0.1)*math.pi)"
        print "Starting LandmarkDetector"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping LandmarkDetector"
            self.landmark_detection.unsubscribe("LandmarkDetector")
            # stop
            sys.exit(0)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="10.0.0.10",
                        help="Robot IP address. On robot or Local Naoqi: use '10.0.0.10'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    try:
        # Initialize qi framework.
        connection_url = "tcp://" + args.ip + ":" + str(args.port)
        app = qi.Application(["LandmarkDetector", "--qi-url=" + connection_url])
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) + ".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    landmark_detector = LandmarkDetector(app)
    landmark_detector.run()
At the comment "Get current camera position in NAO space" the code refers to "NAO space", but after googling and searching for this concept on the official Aldebaran site, I didn't find it.
In the call transform = self.motion_service.getTransform(self.currentCamera, 2, True), combined with the information from the Aldebaran website (Cartesian control API), the parameter "2" could represent "FRAME_ROBOT".
According to the official documentation, "FRAME_ROBOT" (I think for NAO) is one of the 3 spatial references used by ALMotion. It is the average of the two feet positions, projected around a vertical z axis. This space is useful because the x axis is always forwards, so it provides a natural ego-centric reference.
But which of the following body frames is this FRAME_ROBOT coordinate for Pepper?
Body frames - Aldebaran
And basically the sensor values should be based on the coordinate frame shown in this picture:
I have just tracked the values of the transformation:
('wzCamera = markData[1][0][0][1] = :', 0.20727014541625977)
('wyCamera = markData[1][0][0][2] = :', 0.13570936024188995)
robotToCamera = [[0.984479, 0.00163947, -0.175494, 0.0191678]
[-0.00241521, 0.999988, -0.00420686, -0.00231434]
[0.175485, 0.00456542, 0.984471, 1.16607]]
cameraToLandmarkRotationTransform =[[0.969599, -0.205789, 0.132397, 0]
[0.203897, 0.978596, 0.0278419, 0]
[-0.135293, 0, 0.990806, 0]]
cameraToLandmarkTranslationTransform=[[1, 0, 0, 1.32545]
[0, 1, 0, 0]
[0, 0, 1, 0]]
robotToLandmark = [[0.978627, -0.200991, -0.0434926, 1.31629]
[0.202122, 0.979082, 0.0233536, 0.265588]
[0.0378889, -0.0316453, 0.998781, 1.21629]]
Combining these with the information in the following picture
from the video: Robotics: 3D World to First Person Transformation
I am really wondering whether the "robot" in "robotToCamera" means the original camera point… Does anyone have an idea how to interpret these matrix values of "robotToCamera"?
Observing the values of "robotToCamera", they change a little when the robot rotates itself (the whole body + head; it doesn't nod at the same time) to look for the landmarks on the walls, like this:
1. robotToCamera =
[[0.979136, 0.0719852, -0.190025, 0.0157459]
[-0.0700786, 0.997401, 0.0167426, -0.00282726]
[0.190737, -0.00307653, 0.981636, 1.16688]]
2. robotToCamera =
[[0.981156, 0.0583434, -0.184198, 0.0168686]
[-0.057959, 0.998291, 0.00747475, -0.00518941]
[0.184319, 0.00334202, 0.982861, 1.16652]]
3. robotToCamera =
[[0.981628, 0.0598889, -0.18116, 0.0196037]
[-0.0594456, 0.9982, 0.0078808, -0.00525115]
[0.181306, 0.00303314, 0.983422, 1.16649]]
4. robotToCamera =
[[0.982268, 0.0583533, -0.178169, 0.0191384]
[-0.0579817, 0.998291, 0.0072969, -0.00522032]
[0.17829, 0.00316301, 0.983973, 1.16627] ]
5. robotToCamera =
[[0.984, 0.021, -0.178, 0.016]
[-0.021, 1.00, 0.0027, -0.0019]
[0.178, 0.001, 0.984, 1.17] ]
Here are a couple of my questions:
Could we just calculate the position of the robot with the help of "robotToCamera"?
Could we get a value for "cameraToRobot" from the values of "robotToCamera", e.g. by calculating the inverse matrix or the adjoint matrix?
For the fifth "robotToCamera" matrix, if we calculate the inverse of the rotation part
[[0.984, 0.021, -0.178]
 [-0.021, 1.00, 0.0027]
 [0.178, 0.001, 0.984]]
the result would be
[[0.984, -0.021, -0.178]
 [0.021, 1.00, -0.001]
 [-0.178, 0.0027, 0.984]]
which is equal to its adjoint matrix. Then it should be an orthogonal matrix, according to Wikipedia.
What info can I get from this?
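A quick way to check this numerically (a numpy sketch, not from the original post; it assumes the 3x3 block is a pure rotation, in which case its inverse equals its transpose and the full homogeneous transform can be inverted in closed form):

import numpy as np

# Fifth robotToCamera sample from above, padded to a homogeneous 4x4 matrix.
robotToCamera = np.array([
    [0.984, 0.021, -0.178, 0.016],
    [-0.021, 1.00, 0.0027, -0.0019],
    [0.178, 0.001, 0.984, 1.17],
    [0.0, 0.0, 0.0, 1.0],
])

R = robotToCamera[:3, :3]   # rotation part
t = robotToCamera[:3, 3]    # translation part

# Orthogonality check: R @ R.T should be close to the identity matrix.
print(np.allclose(R @ R.T, np.eye(3), atol=1e-2))

# cameraToRobot via the closed-form inverse of a rigid transform:
# the inverse rotation is R.T and the inverse translation is -R.T @ t.
cameraToRobot = np.eye(4)
cameraToRobot[:3, :3] = R.T
cameraToRobot[:3, 3] = -R.T @ t
print(cameraToRobot)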
At 6:10 in the video, the teacher goes on to explain something about the lens:
In my situation, is there any need to learn about the lens (L), or not?
Landmark detection detects the NAOmarks with the robot's top camera. The doc explains that you get a "ShapeInfo" in the event value, such as:
ShapeInfo = [ 1, alpha, beta, sizeX, sizeY, heading]
alpha and beta represent the Naomark's location in terms of camera angles
sizeX and sizeY are the mark's size in camera angles
the heading angle describes how the Naomark is oriented about the vertical axis with regards to the robot's head
The code then calls getTransform which will give you the direction of the camera, so you can relate the detected mark angles to an actual direction (X,Y,Z) in front of the robot (becoming the robot frame).
marks are here if needed: http://doc.aldebaran.com/2-5/naoqi/vision/allandmarkdetection.html
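For illustration, a small sketch (hedged; it follows the markData[1][0][0][...] indexing used in the question's on_landmark_detected callback) of how those ShapeInfo fields map onto the event value:

# Sketch: unpacking the first mark from the "LandmarkDetected" event value.
def unpack_first_mark(markData):
    if not markData:                                   # empty list when no mark is visible
        return None
    shape_info = markData[1][0][0]                     # ShapeInfo of the first detected mark
    alpha, beta = shape_info[1], shape_info[2]         # mark center, in camera angles (radians)
    size_x, size_y = shape_info[3], shape_info[4]      # angular size (radians)
    heading = shape_info[5]                            # orientation about the vertical axis
    return alpha, beta, size_x, size_y, heading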
Hello!
I'm trying to have my images appear on a label using this code.
This part saves a random image from a particular URL:
def photo_1():
    urls = "https://www.flickr.com/photos/flickr/galleries/72157644537473411/"  # You may change this into other websites!
    regex = '<img src="([^"]+)".*>'
    pattern = re.compile(regex)
    photofile = urllib.urlopen(urls)
    raw_data = photofile.read()
    download = re.findall(pattern, raw_data)
    randomdownload = random.choice(download)
    urllib.urlretrieve(randomdownload, "1.gif")
    global done_button2
    done_button2 = Button(photo_window, text = 'Click here to display your chosen image on the black screen!', width = 53, command = generate_1)
    done_button2.grid(row = 5, sticky = N)
    done_button.config(state='disabled')
and this part is supposed to have the saved image appear on a label, but apparently it's not working so well...
def generate_1():
    img = ImageTk.PhotoImage(Image.open("1.gif"))
    image_area = Label(photo_window, image = img, width = 55, height = 5).grid(row=2)
    global done_button3
    done_button3 = Button(photo_window, text = 'Click here to save the second random image locally! ', width = 53, command = photo_2)
    done_button3.grid(row = 6, sticky = N)
    done_button2.config(state='disabled')
This was a part of my code, and when I run the application I made,
the only thing I can see is a white rectangular shape. I would like it to be as big as the black label underneath (size (55, 5)), with the actual image appearing...
Can anyone help me with this problem?
You might have thought that my English is not that great haha
but please have mercy!
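A hedged guess at what is going on here, since a blank label with ImageTk usually points at the well-known Tkinter garbage-collection pitfall: img is a local variable, so the PhotoImage is freed as soon as generate_1 returns, and Label(...).grid(...) returns None rather than the label widget. A sketch of the usual fix:

def generate_1():
    img = ImageTk.PhotoImage(Image.open("1.gif"))
    image_area = Label(photo_window, image = img)   # create first, grid separately
    image_area.image = img                          # keep a reference so it isn't garbage collected
    image_area.grid(row = 2)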
I've got a (working) application done with ttk. It uses a self-created module for showing comport-related controls and a canvas which draws a few graphs on it. When I create an instance of my object, it starts a thread that processes serial input and appends it to a list (one list per graph). When I have 3-6 graphs, the application gets noticeably slow. It also has a few bugs, but I will address them when I'm done with the general concept.
Things that may help you help me:
comport is an instance of a self-written object that derives from LabelFrame and Serial.Serial.
Coordinates for the graphs are stored in a dictionary of lists: self.graphs = {}; self.graphs['name1'] = []. The number of coordinates stored is up to the width of the canvas, so about 1000-2000 per graph. I have six graphs, so please multiply by 6.
With every new coordinate arriving, I pop(0) from the list and append() the new coordinate.
I forgot: I also store the timing of each new set of coordinates arriving, in a separate list.
I use a periodic call function to process the lists: self.after(100, func=self.periodicCall). Thus every 100 ms I delete(ALL) from the canvas and draw every graph with its set of lines. So if I have 1000 coords in 6 graphs, I draw 6000 small lines.
Plus some service info of course, such as a few rulers.
So I guess the idea is clear. I want to figure out what the better approach would be. I'm just a starter in Python and in programming in general, so please excuse the code I'm going to post and the pain in your eyes it's going to cause. I don't have any programming style and I want to fix that, at least a bit. Any other comments on anything you'll see in the code are welcome.
#-------------------------------------------------------------------------------
# Name:        dataVisualizer
# Purpose:
#
# Author:      dccharacter
#
# Created:     23.03.2012
# Copyright:   (c) dccharacter 2012
# Licence:     <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python

from tkinter import *
from tkinter.ttk import *
from robowidgets.serialPortGui import *
import threading
import re
import atexit
import random
from datetime import datetime
import time

class dataVisualizer(LabelFrame):

    def __init__(self, master, comport, cnf={}, **kw):
        self.master = master
        self.comport = comport
        LabelFrame.__init__(self, *cnf, **kw)
        self.messageVar = StringVar()
        Label(self, text="Message format regexp:").pack()
        self.messagePattern = Entry(self, width = 20, text = 234234, textvariable = self.messageVar)
        self.messageVar.set(r'(-*\d+),(-*\d+),(-*\d+),(-*\d+),(-*\d+),(-*\d+)')
        self.messagePattern.pack()
        Button(self, text = "Pause", command = self.pause).pack()
        self.pauseFlag = TRUE
        self.canvWidth, self.canvHeight = 1000, 700
        self.density = 1  ## width of pixel - the bigger, the wider the graph
        self.numOfDots = self.canvWidth//self.density
        self.graphs = {}
        self.graphs['name1'] = []
        self.graphs['name2'] = []
        self.graphs['name3'] = []
        self.graphs['name4'] = []
        self.graphs['name5'] = []
        self.graphs['name6'] = []
        self.timings = []
        self.zeroTiming = datetime.now()
        self.colors = ['red', 'blue', 'green', 'orange', 'violet', 'black', 'cyan']
        self.canv = Canvas(self, width = self.canvWidth, height = self.canvHeight)
        self.canv.pack()
        self.thread = threading.Thread(target = self.workerThread)
        self.thread.start()
        self.serialData = []
        self.periodicCall()

    def pause(self):
        self.pauseFlag = ~self.pauseFlag

    def redraw(self):
        self.canv.delete(ALL)
        colorIndex = 0
        for graphName in self.graphs:
            runningAverage = sum(self.graphs[graphName][-10:])//10
            text = str(runningAverage)
            self.canv.create_text(self.canvWidth-60, 20*(colorIndex+1), text = text,
                fill = self.colors[colorIndex], anchor = W)
            prev_xxx, prev_yyy = 0, 0
            for yyy in self.graphs[graphName]:
                self.canv.create_line(prev_xxx, prev_yyy, prev_xxx+self.density, self.canvHeight//2 - yyy,
                    width = 1.4, fill = self.colors[colorIndex])
                prev_xxx, prev_yyy = prev_xxx+self.density, self.canvHeight//2 - yyy
            colorIndex = colorIndex + 1
        self.drawMesh()

    def drawMesh(self):
        self.canv.create_rectangle(3, 3, self.canvWidth,
            self.canvHeight, outline = 'black', width = 2)
        self.canv.create_line(0, self.canvHeight/2, self.canvWidth,
            self.canvHeight/2, fill="black", width = 1)
        mouseX = self.canv.winfo_pointerx() - self.canv.winfo_rootx()
        mouseY = self.canv.winfo_pointery() - self.canv.winfo_rooty()
        if mouseY < 60: aaa = -1
        else: aaa = 1
        if mouseX > self.canvWidth - 200: bbb = -12
        else: bbb = 1
        try:
            self.canv.create_rectangle(mouseX + 10*bbb - 5, mouseY - 20*aaa + 10,
                mouseX + 10*bbb + 115, mouseY - 20*aaa - 30, outline = "black",
                fill = "red")
            self.canv.create_text(mouseX + 10*bbb, mouseY - 40*aaa,
                text = "t="+str(self.timings[mouseX//self.density]),
                anchor = W)
            self.canv.create_text(mouseX + 10*bbb, mouseY - 20*aaa,
                text = "value="+str(self.canvHeight//2 - mouseY),
                anchor = W)
        except IndexError:
            pass
        self.canv.create_line(mouseX, 0, mouseX,
            self.canvHeight, fill="blue", dash = [4, 1, 2, 1], width = 1)
        self.canv.create_line(0, mouseY, self.canvWidth,
            mouseY, fill="blue", dash = [4, 1, 2, 1], width = 1)

    def periodicCall(self):
        self.redraw()
        self.after(100, func=self.periodicCall)

    def workerThread(self):
        while (1):
            try:
                if self.comport.isOpen() and (self.pauseFlag == TRUE):
                    comLine = self.comport.readline()
                    if len(self.timings) == self.numOfDots:
                        self.timings.pop(0)
                    td = datetime.now() - self.zeroTiming
                    ## b'271;-3:-50\r\n'
                    parsedLine = re.search(self.messagePattern.get(), str(comLine))
                    index = 1
                    if parsedLine:
                        self.timings.append(td)
                        for graphName in self.graphs:
                            if len(self.graphs[graphName]) == self.numOfDots:
                                self.graphs[graphName].pop(0)
                            try:
                                self.graphs[graphName].append(int(parsedLine.group(index)))
                            except IndexError:
                                self.graphs[graphName].append(0)
                            index = index + 1
                else:
                    self.comport.flush()
                    time.sleep(1)
            except TclError:
                self.thread._stop()

def main():
    root = Tk()
    mainWindow = Frame(root)
    mainWindow.pack()
    port = comPortWidget(mainWindow)
    port.pack()
    dv = dataVisualizer(mainWindow, port)
    dv.pack()
    root.mainloop()

if __name__ == '__main__':
    main()
And the serial part, which may lag as well (it used to lag when I re-enumerated the ports every second or so...):
#-------------------------------------------------------------------------------
# Name:        robowidgets
# Purpose:
#
# Author:      dccharacter
#
# Created:     10.03.2012
# Copyright:   (c) dccharacter 2012
# Licence:     <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python

import serial
from serial.tools.list_ports_windows import comports
from tkinter import *
from tkinter.ttk import *

class comPortWidget(LabelFrame, serial.Serial):

    commonComPortSpeeds = ["1200", "2400", "4800", "9600", "14400", "19200", "38400", "57600", "115200"]

    def __init__(self, master=None, cnf={}, **kw):
        """Construct a comPortWidget widget with the parent MASTER.

        STANDARD OPTIONS

            borderwidth, cursor, font, foreground,
            highlightbackground, highlightcolor,
            highlightthickness, padx, pady, relief,
            takefocus, text, background, class, colormap, container,
            height, labelanchor, labelwidget,
            visual, width

        WIDGET-SPECIFIC OPTIONS

        """
        self.master = master
        LabelFrame.__init__(self, master, text="Serial settings", *cnf, **kw)
        serial.Serial.__init__(self)
        self.parent = master
        self.draw()

    def draw(self):
        self.strVarComPort = StringVar()
        self.comboComport = Combobox(self,
            textvariable=self.strVarComPort)
        self.comboComport.grid(row=0, column=1)
        self.labelComportName = Label(self, text="Com port:")
        self.labelComportName.grid(row=0, column=0)
        self.strVarComSpeed = StringVar()
        self.comboComSpeed = Combobox(self,
            textvariable=self.strVarComSpeed, values=self.commonComPortSpeeds)
        self.comboComSpeed.current(len(self.commonComPortSpeeds)-1)
        self.comboComSpeed.grid(row=1, column=1)
        self.labelComSpeed = Label(self, text="Com speed:")
        self.labelComSpeed.grid(row=1, column=0)
        self.buttonComOpen = Button(self, text="Open port", command=self.openPort)
        self.buttonComOpen.grid(row=0, column=2)
        self.buttonComClose = Button(self, text="Close port", command=self.closePort)
        self.buttonComClose.grid(row=1, column=2)
        self.buttonRefreshPorts = Button(self, text="Re", width=3, command=self.refreshComPortsCombo)
        ##self.buttonRefreshPorts.grid(row=0, column=2)
        self.refreshComPortsCombo()

    def refreshComPortsCombo(self):
        listComs = self.enumerateComPorts()
        if not listComs:
            listComs.append("No com ports found")
            self.disableControls(~self.isOpen())
            self.buttonComClose.configure(state=DISABLED)
        else:
            self.disableControls(self.isOpen())
            self.buttonRefreshPorts.configure(state=NORMAL)
        self.comboComport.config(values=listComs)
        self.comboComport.current(len(listComs)-1)
        ##self.after(500, func=self.refreshComPortsCombo)

    def enumerateComPorts(self):
        """
        Returns the list of com port names in the system, or an empty list if
        no ports are found.
        """
        listComs = []
        for port, desc, hwid in sorted(comports()):
            listComs.append(port)
        return listComs

    def openPort(self):
        if self.isOpen():
            return
        self.port = self.comboComport.get()
        self.baudrate = int(self.comboComSpeed.get())
        self.timeout = 1
        try:
            self.open()
            self.disableControls(self.isOpen())
        except IOError:
            pass

    def closePort(self):
        if self.isOpen():
            self.flush()
            self.close()
            self.disableControls(self.isOpen())

    def disableControls(self, isConnected):
        if isConnected:
            self.labelComportName.configure(state=DISABLED)
            self.labelComSpeed.configure(state=DISABLED)
            self.comboComport.configure(state=DISABLED)
            self.comboComSpeed.configure(state=DISABLED)
            self.buttonComClose.configure(state=NORMAL)
            self.buttonComOpen.configure(state=DISABLED)
            self.buttonRefreshPorts.configure(state=DISABLED)
        else:
            self.labelComportName.configure(state=NORMAL)
            self.labelComSpeed.configure(state=NORMAL)
            self.comboComport.configure(state=NORMAL)
            self.comboComSpeed.configure(state=NORMAL)
            self.buttonComClose.configure(state=DISABLED)
            self.buttonComOpen.configure(state=NORMAL)
            self.buttonRefreshPorts.configure(state=NORMAL)

def main():
    pass

if __name__ == '__main__':
    main()
UPDATE: I did as Brian advised. Now I have two screen redraw functions. The difference between them is that the first moves all the lines to the left, adding new ones on the right and deleting those that fall off the canvas. The second one moves the lines to the left and re-deploys the elements that fall off the canvas back to the right (without creating new ones). There's a huge improvement with either of these compared to my initial variant, but I don't see a big difference between the two with the naked eye - maybe if I had more elements I would. The latter works better specifically for my application, though, as I don't have to track the elements that fall off the cliff.
Here are the functions:
def drawGraph(self):  ### needed for self.updateGraph2() only, as it creates the lines
    for graphNum in range(0, self.numOfGraphs):
        self.graphLines.append([])
        self.graphData.append([0,]*self.numOfDots)
        for iii in range(0, self.numOfDots):
            self.graphLines[graphNum].append(
                self.canv.create_line(0, 0, 0, 0, fill=self.colors[graphNum],
                    width=1.2, tags=('graphLines', 'graph'+str(graphNum)))
                )

def updateGraph2(self):
    while not self.queue.empty():
        iTuple = self.queue.get()
        self.canv.move('graphLines', -self.density, 0)
        for graphNum in range(0, self.numOfGraphs):
            try:
                self.graphData[graphNum].append(iTuple[graphNum])
            except IndexError:
                self.graphData[graphNum].append(0)
            self.graphData[graphNum].pop(0)
            self.graphLines[graphNum].append(self.graphLines[graphNum].pop(0))
            self.canv.coords(self.graphLines[graphNum][-1],
                self.canv.winfo_width()-self.density,
                int(int(self.graphData[graphNum][-2])+int(self.canv.winfo_height()//2)),
                self.canv.winfo_width(),
                int(int(self.graphData[graphNum][-1])+int(self.canv.winfo_height()//2))
                )

def updateGraph(self):
    while not self.queue.empty():
        self.timingIndex = self.timingIndex + 1
        self.canv.move('graphLines', -self.density, 0)
        iTuple = self.queue.get()
        for iii in range(0, len(iTuple)):
            yyy = int(iTuple[iii])+self.canv.winfo_height()//2
            if yyy < 0: yyy = 0
            if yyy > self.canv.winfo_height(): yyy = self.canv.winfo_height()
            prev_yyy = int(self.prevTuple[iii])+self.canv.winfo_height()//2
            if prev_yyy < 0: prev_yyy = 0
            if prev_yyy > self.canv.winfo_height(): prev_yyy = self.canv.winfo_height()
            self.canv.create_line(
                self.canv.winfo_width()-self.density, prev_yyy,
                self.canv.winfo_width(), yyy,
                width = 1.4, fill = self.colors[iii], tags=('graphLines', 'graph'+str(iii)))
        self.prevTuple = iTuple
        self.canv.addtag_overlapping('todelete', -1, -1, -3, self.canv.winfo_height()+1)
        self.canv.dtag('preserve', 'todelete')
        self.canv.delete('todelete')
My understanding of the canvas is that the more item ids that have been allocated, the slower it gets. It can handle tens of thousands without much problem (and maybe even hundreds of thousands), but if you're creating and deleting 6000 items every 100 ms, that is likely your problem. Even though you are deleting the items, it still affects performance, especially when you are creating 60,000 per second.
Instead of deleting all items every 100 ms, simply move the items off screen and remember them, then reuse them by using the coords method to change their coordinates for the new graph.
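A minimal sketch of that reuse pattern (the names, pool size, and canvas dimensions are hypothetical, not from the answer): allocate a fixed pool of line items once, then only update their coordinates on each refresh instead of delete(ALL) followed by create_line.

import tkinter as tk

root = tk.Tk()
canv = tk.Canvas(root, width=400, height=200)
canv.pack()

# Allocate a fixed pool of line items once, up front.
pool = [canv.create_line(0, 0, 0, 0, fill='red') for _ in range(399)]

def refresh(data):
    """Redraw one graph from a list of y-values by moving existing items."""
    for i, item in enumerate(pool):
        if i + 1 < len(data):
            canv.coords(item, i, 100 - data[i], i + 1, 100 - data[i + 1])
        else:
            canv.coords(item, -10, -10, -10, -10)  # park unused items off screen

refresh([x % 80 for x in range(400)])  # example data
root.mainloop()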