Don't see images on my buttons if I create them with a loop

I'm trying to make a program in Python 3.3.0 to practice with Tkinter, but when I try to put images on buttons created in a loop, I get a few buttons that don't work (I can't click on them and they have no images), while the last one works and shows its image. Here's the code:
import PIL.Image
import PIL.ImageTk
from tkinter import *

elenco = ["immagine1", "immagine2", "immagine3", "immagine4"]

class secondWindow:
    def __init__(self):
        self.secondWindow = Tk()
        self.secondWindow.geometry('500x650+400+30')

class mainWindow:
    def __init__(self):
        self.mainWindow = Tk()
        self.mainWindow.geometry('1100x650+100+10')
        self.mainWindow.title('MainWindow')

    def Buttons(self, stringa):
        i = 0
        for _ in elenco:
            if stringa in _.lower():
                j = int(i/10)
                self.IM = PIL.Image.open(_ + ".jpg")
                self.II = PIL.ImageTk.PhotoImage(self.IM)
                self.button = Button(text=_, compound='top', image=self.II,
                                     command=secondWindow).grid(row=j, column=i-j*10)
            i += 1

    def mainEnter(self):
        testoEntry = StringVar()
        self.mainEntry = Entry(self.mainWindow, textvariable=testoEntry).place(x=900, y=20)

        def search():
            testoEntry2 = testoEntry.get()
            if testoEntry2 == "":
                pass
            else:
                testoEntry2 = testoEntry2.lower()
                mainWindow.Buttons(self, testoEntry2)

        self.button4Entry = Button(self.mainWindow, text='search', command=search).place(x=1050, y=17)

MW = mainWindow()
MW.mainEnter()
mainloop()
If I try to create buttons in a loop without images, they work:
def Buttons(self, stringa):
    i = 0
    for _ in elenco:
        if stringa in _.lower():
            j = int(i/10)
            self.button = Button(text=_, command=secondWindow).grid(row=j, column=i-j*10)
        i += 1
And if I try to create a button with an image but not in a loop, it works too:
im = PIL.Image.open("immagine1.jpg")
ge = PIL.ImageTk.PhotoImage(im)
butt = Button(text='immagine', compound='top', image=ge, command=secondWindow).grid(row=0, column=0)

Let's say you have images named "image-i.png", i = 0, ..., 9.
When I execute the following code (with Python 3.5) I obtain ten working buttons with image and text. The crucial detail is the images list: Tkinter does not keep its own reference to a PhotoImage, so in your loop each new image assigned to self.II replaces the previous reference and all but the last one get garbage-collected, which leaves blank, broken buttons. Storing every image in a list keeps them alive:
from tkinter import Tk, Button, PhotoImage

root = Tk()
images = []
buttons = []
for i in range(10):
    im = PhotoImage(master=root, file="image-%i.png" % i)
    b = Button(root, text="Button %i" % i, image=im, compound="left",
               command=lambda x=i: print(x))
    b.grid(row=i)
    images.append(im)
    buttons.append(b)
root.mainloop()
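Applied to the code in the question, a minimal sketch of the same fix (names follow the question's code; untested against its layout logic):

def Buttons(self, stringa):
    # Keep one reference per image on the instance instead of
    # overwriting self.II on every iteration.
    self.images = []
    i = 0
    for nome in elenco:
        if stringa in nome.lower():
            j = int(i/10)
            im = PIL.ImageTk.PhotoImage(PIL.Image.open(nome + ".jpg"))
            self.images.append(im)  # prevents garbage collection
            Button(text=nome, compound='top', image=im,
                   command=secondWindow).grid(row=j, column=i - j*10)
        i += 1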

Related

Fine-tuning BERT for NER attempt on macOS

I'm using a MacBook Air with macOS Monterey 12.5 (updates to Ventura 13.1 are available), Python 3.10.8 (I also tried 3.11).
Pylance flagged that none of the imports I was trying to use were being resolved, so I changed the VS Code interpreter to Python 3.10.
Anyway, here's the code:
import pandas as pd
import torch
import numpy as np
from tqdm import tqdm
from transformers import BertTokenizerFast
from transformers import BertForTokenClassification
from torch.utils.data import Dataset, DataLoader

df = pd.read_csv('ner.csv')

labels = [i.split() for i in df['labels'].values.tolist()]
unique_labels = set()
for lb in labels:
    [unique_labels.add(i) for i in lb if i not in unique_labels]
# print(unique_labels)
labels_to_ids = {k: v for v, k in enumerate(sorted(unique_labels))}
ids_to_labels = {v: k for v, k in enumerate(sorted(unique_labels))}
# print(labels_to_ids)

text = df['text'].values.tolist()
example = text[36]
#print(example)

tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
text_tokenized = tokenizer(example, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
'''
print(text_tokenized)
print(tokenizer.decode(text_tokenized.input_ids[0]))
'''

def align_label_example(tokenized_input, labels):
    word_ids = tokenized_input.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except:
                label_ids.append(-100)
        else:
            label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
        previous_word_idx = word_idx
    return label_ids

label = labels[36]
label_all_tokens = False
new_label = align_label_example(text_tokenized, label)
'''
print(new_label)
print(tokenizer.convert_ids_to_tokens(text_tokenized['input_ids'][0]))
'''
def align_label(texts, labels):
    tokenized_inputs = tokenizer(texts, padding='max_length', max_length=512, truncation=True)
    word_ids = tokenized_inputs.word_ids()
    previous_word_idx = None
    label_ids = []
    for word_idx in word_ids:
        if word_idx is None:
            label_ids.append(-100)
        elif word_idx != previous_word_idx:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]])
            except:
                label_ids.append(-100)
        else:
            try:
                label_ids.append(labels_to_ids[labels[word_idx]] if label_all_tokens else -100)
            except:
                label_ids.append(-100)
        previous_word_idx = word_idx
    return label_ids

class DataSequence(torch.utils.data.Dataset):
    def __init__(self, df):
        lb = [i.split() for i in df['labels'].values.tolist()]
        txt = df['text'].values.tolist()
        self.texts = [tokenizer(str(i), padding='max_length', max_length=512,
                                truncation=True, return_tensors='pt') for i in txt]
        self.labels = [align_label(i, j) for i, j in zip(txt, lb)]

    def __len__(self):
        return len(self.labels)

    def get_batch_data(self, idx):
        return self.texts[idx]

    def get_batch_labels(self, idx):
        return torch.LongTensor(self.labels[idx])

    def __getitem__(self, idx):
        batch_data = self.get_batch_data(idx)
        batch_labels = self.get_batch_labels(idx)
        return batch_data, batch_labels

df = df[0:1000]
df_train, df_val, df_test = np.split(df.sample(frac=1, random_state=42),
                                     [int(.8 * len(df)), int(.9 * len(df))])
class BertModel(torch.nn.Module):
    def __init__(self):
        super(BertModel, self).__init__()
        self.bert = BertForTokenClassification.from_pretrained('bert-base-cased', num_labels=len(unique_labels))

    def forward(self, input_id, mask, label):
        output = self.bert(input_ids=input_id, attention_mask=mask, labels=label, return_dict=False)
        return output

def train_loop(model, df_train, df_val):
    train_dataset = DataSequence(df_train)
    val_dataset = DataSequence(df_val)
    train_dataloader = DataLoader(train_dataset, num_workers=4, batch_size=BATCH_SIZE, shuffle=True)
    val_dataloader = DataLoader(val_dataset, num_workers=4, batch_size=BATCH_SIZE)
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
    if use_cuda:
        model = model.cuda()
    best_acc = 0
    best_loss = 1000
    for epoch_num in range(EPOCHS):
        total_acc_train = 0
        total_loss_train = 0
        model.train()
        for train_data, train_label in tqdm(train_dataloader):
            train_label = train_label.to(device)
            mask = train_data['attention_mask'].squeeze(1).to(device)
            input_id = train_data['input_ids'].squeeze(1).to(device)
            optimizer.zero_grad()
            loss, logits = model(input_id, mask, train_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][train_label[i] != -100]
                label_clean = train_label[i][train_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_train += acc
            total_loss_train += loss.item()
            loss.backward()
            optimizer.step()
        model.eval()
        total_acc_val = 0
        total_loss_val = 0
        for val_data, val_label in val_dataloader:
            val_label = val_label.to(device)
            mask = val_data['attention_mask'].squeeze(1).to(device)
            input_id = val_data['input_ids'].squeeze(1).to(device)
            loss, logits = model(input_id, mask, val_label)
            for i in range(logits.shape[0]):
                logits_clean = logits[i][val_label[i] != -100]
                label_clean = val_label[i][val_label[i] != -100]
                predictions = logits_clean.argmax(dim=1)
                acc = (predictions == label_clean).float().mean()
                total_acc_val += acc
            total_loss_val += loss.item()
        val_accuracy = total_acc_val / len(df_val)
        val_loss = total_loss_val / len(df_val)
        print(
            f'Epochs: {epoch_num + 1} | Loss: {total_loss_train / len(df_train): .3f} | Accuracy: {total_acc_train / len(df_train): .3f} | Val_Loss: {total_loss_val / len(df_val): .3f} | Val_Accuracy: {total_acc_val / len(df_val): .3f}')

LEARNING_RATE = 5e-3
EPOCHS = 5
BATCH_SIZE = 2

model = BertModel()
train_loop(model, df_train, df_val)
And the debugger says:
Exception has occurred: RuntimeError (note: full exception trace is shown but execution is paused at: <module>)

        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.

  File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 141, in train_loop
    for train_data, train_label in tqdm(train_dataloader):
  File "/Users/filipedonatti/Projects/pyCodes/second_try.py", line 197, in <module>
    train_loop(model, df_train, df_val)
  File "<string>", line 1, in <module> (Current frame)
By the way: despite using a Mac I have downloaded Anaconda-Navigator, but I've been writing and running this code in VS Code. I installed numpy, torch, datasets and the other libraries with pip3 (via Homebrew's Python).
I'm at a loss: I can run the code in a Google Colab or Jupyter notebook, and I know training models on my humble Mac is not advisable, but I'm only exercising so that I can train and use the model on a much more powerful machine.
Please help me with this issue, I've been trying to find a solution for days.
Peace and happy holidays.
I've tried solving the issue by writing:

if __name__ == '__main__':
    freeze_support()

I've also tried using this:

import parallelTestModule

extractor = parallelTestModule.ParallelExtractor()
extractor.runInParallel(numProcesses=2, numThreads=4)
So...
It turns out the correct way to solve this is to wrap the training call in a function and guard it:

def run():
    model = BertModel()
    torch.multiprocessing.freeze_support()
    print('loop')
    train_loop(model, df_train, df_val)

if __name__ == '__main__':
    run()

This replaces the bare model = BertModel() and train_loop(...) lines at the end of the script. Issue solved. For more see this link: https://github.com/pytorch/pytorch/issues/5858
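A side note (not from the original answer): the guard is needed because on macOS the DataLoader workers are started with the spawn method, which re-imports the main module, so any unguarded module-level code runs again in each worker. While experimenting, an alternative sketch is to disable the worker processes entirely:

train_dataloader = DataLoader(train_dataset, num_workers=0,
                              batch_size=BATCH_SIZE, shuffle=True)  # no subprocesses
val_dataloader = DataLoader(val_dataset, num_workers=0, batch_size=BATCH_SIZE)

Data loading then happens in the main process, which is slower but avoids the bootstrapping error altogether.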

How to correctly use Distributed Data Parallel when customizing 'parameters' in the model?

I have customized a parameter list in my model:

self.params = list(self.backbone.parameters())
for head in self.headlist:
    self.params += list(head.parameters())

When I wrap my model with DDP, an error occurs when defining the optimizer:

optimizer = optim.SGD(model.params, lr=FLAGS.lr, momentum=FLAGS.momentum, weight_decay=FLAGS.weight_decay)

AttributeError: 'DistributedDataParallel' object has no attribute 'params'
I think the error is probably caused by my customized "self.params".
Is the following code correct:

model = torch.nn.parallel.DistributedDataParallel(model, device_ids=local_rank)
model_without_ddp = model.module
optimizer = optim.SGD(model_without_ddp.params, lr=FLAGS.lr, momentum=FLAGS.momentum, weight_decay=FLAGS.weight_decay)

Or is there any simpler code?
###################################
The detailed definition of the network is as follows:
class multiheadModel():
    def __init__(self, num_heads, device, model_name):
        self.device = device
        self.num_heads = num_heads  # global+K
        if model_name == 'fcn8s':
            self.backbone = VGG16_FCN8s(num_classes=19, backbone=1, head=0).to(device)
            self.headlist = [VGG16_FCN8s(num_classes=19, backbone=0, head=1).to(device) for i in range(num_heads)]
            self.model = VGG16_FCN8s(num_classes=19).to(device)
            for name, param in self.backbone.named_parameters():
                if ('conv3' in name) or ('conv4' in name):
                    param.requires_grad = True
                else:
                    param.requires_grad = False
        elif model_name == 'deeplab':
            self.backbone = Res_Deeplab(num_classes=19, backbone=1, head=0).to(device)
            self.headlist = [Res_Deeplab(num_classes=19, backbone=0, head=1).to(device) for i in range(num_heads)]
            self.model = Res_Deeplab(num_classes=19).to(device)
            for name, param in self.backbone.named_parameters():
                if 'layer3' in name:
                    param.requires_grad = True
                else:
                    param.requires_grad = False
        else:
            print('ERROR : wrong model name')
            sys.exit()
        self.params = list(self.backbone.parameters())
        for head in self.headlist:
            self.params += list(head.parameters())
        self.loss_fn = None
        #self.k2head = {0:2,1:1,2:0,3:0,4:0,5:4,6:4,7:5}
        #self.k2head = {0:2,1:1,2:0,3:0,4:0,5:3,6:3,7:4}
        self.k2head = {0:2,1:1,2:0,3:0,4:3,5:3,6:4}

    # set train and eval mode
    def train(self):
        self.backbone.train()
        for head in self.headlist:
            head.train()

    def eval(self):
        self.backbone.eval()
        for head in self.headlist:
            head.eval()

    def computePredLoss(self, rgb, lbl, k):
        x = self.backbone(rgb)
        head_id = list(range(self.num_heads))
        head_id.remove(self.k2head[k])
        input_size = rgb.size()[2:]
        loss = 0
        for i in head_id:
            pred = self.headlist[i](x)
            pred = F.interpolate(pred, size=input_size, mode='bilinear', align_corners=True)
            loss += self.loss_fn(pred, lbl)
        return pred, loss

    def forward(self, input):
        output = {}
        if "label" in input:
            self.train()
            pred, loss = self.computePredLoss(input['rgb'], input['label'], input['k'])
            output['pred'], output['loss'] = pred, loss
        else:
            self.eval()
            x = self.backbone(input['rgb'])
            k = -1
            if "k" in input:
                k = self.k2head[input['k']]
            pred = self.headlist[k](x)
            input_size = input['rgb'].size()[2:]
            pred = F.interpolate(pred, size=input_size, mode='bilinear', align_corners=True)
            output['pred'] = pred
        return output

    def validate(self, loader, k=-2):
        self.eval()
        if k != -2:
            val_metrics = StreamSegMetrics(19)
            val_metrics.reset()
            with torch.no_grad():
                for i, (batch, rgb_batch) in enumerate(loader):
                    rgb_batch = rgb_batch.to(device=self.device, dtype=torch.float)
                    batch = batch.to(device=self.device, dtype=torch.int64)
                    input_size = rgb_batch.size()[2:]
                    x = self.backbone(rgb_batch)
                    pred = self.headlist[k](x)
                    pred = F.interpolate(pred, size=input_size, mode='bilinear', align_corners=True)
                    preds = pred.detach().max(dim=1)[1].cpu().numpy()
                    targets = batch.cpu().numpy()
                    val_metrics.update(targets, preds)
            score = val_metrics.get_results()
        else:
            val_metrics = [StreamSegMetrics(19) for i in range(self.num_heads)]
            for metric in val_metrics:
                metric.reset()
            with torch.no_grad():
                for i, (batch, rgb_batch) in enumerate(loader):
                    rgb_batch = rgb_batch.to(device=self.device, dtype=torch.float)
                    batch = batch.to(device=self.device, dtype=torch.int64)
                    input_size = rgb_batch.size()[2:]
                    x = self.backbone(rgb_batch)
                    for k in range(self.num_heads):
                        pred = self.headlist[k](x)
                        pred = F.interpolate(pred, size=input_size, mode='bilinear', align_corners=True)
                        preds = pred.detach().max(dim=1)[1].cpu().numpy()
                        targets = batch.cpu().numpy()
                        val_metrics[k].update(targets, preds)
            score = [val_metrics[k].get_results() for k in range(self.num_heads)]
        return score

    def getHeadPaths(self, model_path, iteration=-1):
        head_paths = []
        if '_iter' in model_path:
            base_path = model_path.split('_iter')[0]
        else:
            base_path = model_path.split('.pth')[0]
        if iteration == -1:
            for i in range(self.num_heads-1):
                head_paths.append(base_path+'_except_g'+chr(97+i)+'.pth')
            head_paths.append(model_path)
        else:
            for i in range(self.num_heads-1):
                head_paths.append(base_path+'_except_g'+chr(97+i)+'_iter'+str(iteration)+'.pth')
            head_paths.append(base_path+'_iter'+str(iteration)+'.pth')
        return head_paths

    def save(self, model_path, iteration=-1):
        self.model.load_state_dict(self.backbone.state_dict(), strict=False)
        head_paths = self.getHeadPaths(model_path, iteration)
        for i in range(self.num_heads):
            self.model.load_state_dict(self.headlist[i].state_dict(), strict=False)
            torch.save(self.model.state_dict(), head_paths[i])

    def load(self, model_path):
        iteration = -1
        if '_iter' in model_path:
            iteration = int(model_path.split('_iter')[1].split('.pth')[0])
        self.model.load_state_dict(torch.load(model_path))
        self.backbone.load_state_dict(self.model.state_dict(), strict=False)
        head_paths = self.getHeadPaths(model_path, iteration)
        existance = 1
        for path in head_paths:
            if os.path.isfile(path) == False:
                existance = 0
        if existance == 1:
            print('loading from multiheads')
            for i in range(self.num_heads):
                self.model.load_state_dict(torch.load(head_paths[i]))
                self.headlist[i].load_state_dict(self.model.state_dict(), strict=False)
        else:
            print('loading from singlehead')
            for i in range(self.num_heads):
                self.model.load_state_dict(torch.load(head_paths[-1]))
                self.headlist[i].load_state_dict(self.model.state_dict(), strict=False)

    def __call__(self, input):
        return self.forward(input)
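(A sketch, not part of the original post: DistributedDataParallel does not forward arbitrary attributes to the wrapped module, which is exactly what produces the AttributeError; the custom list is still reachable through .module, assuming the wrapped model is an nn.Module.)

ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
# The wrapper only exposes its own attributes; the wrapped module keeps `params`.
optimizer = optim.SGD(ddp_model.module.params, lr=FLAGS.lr,
                      momentum=FLAGS.momentum, weight_decay=FLAGS.weight_decay)
# Equivalently, build the optimizer from the parameter list captured before
# wrapping; DDP reuses the same parameter tensors, so both point at the
# parameters that DDP synchronizes.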

tensorflow: After adding an RNN the whole network no longer works

I have downloaded an FCN implementation for image segmentation and it ran well. Now I want to add an RNN layer to try to refine the result, following the work "ReSeg: A Recurrent Neural Network-Based Model for Semantic Segmentation". My code is as follows:
This part is for the inference:
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    #model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
    model_data = scipy.io.loadmat("H:/Deep Learning/FCN.tensorflow-master/imagenet-vgg-verydeep-19.mat")
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    processed_image = utils.process_image(image, mean_pixel)
    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]
        pool5 = utils.max_pool_2x2(conv_final_layer)
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        #fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(conv_t1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        #fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(conv_t2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        # ---------- this is from where I added the RNN ----------
        shape_5 = tf.shape(image)
        W_a = 224
        H_a = 224
        p_size_a = NUM_OF_CLASSESS
        # x = tf.reshape(conv_t1, [shape_5[0], H_a, W_a, p_size_a])
        x = tf.transpose(conv_t3, perm=[0, 2, 1, 3])
        x = tf.reshape(x, [-1, H_a, p_size_a])
        mat = tf.unstack(x, H_a, 1)
        lstm_fw_cell = rnn.BasicLSTMCell(N_HIDDEN, forget_bias=1.0)
        lstm_bw_cell = rnn.BasicLSTMCell(N_HIDDEN, forget_bias=1.0)
        #with tf.variable_scope('rnn1_1'):
        try:
            outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, mat,
                                                         dtype=tf.float32, scope='rnn1_1')
        except Exception:  # Old TensorFlow version only returns outputs not states
            outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, mat,
                                                   dtype=tf.float32)
        outputs1 = tf.reshape(outputs, [H_a, shape_5[0], W_a, 2 * N_HIDDEN])
        outputs1 = tf.transpose(outputs1, (1, 0, 2, 3))
        x_1 = tf.reshape(outputs1, [-1, W_a, 2 * N_HIDDEN])
        mat_1 = tf.unstack(x_1, W_a, 1)
        lstm_lw_cell = rnn.BasicLSTMCell(N_HIDDEN, forget_bias=1.0)
        lstm_rw_cell = rnn.BasicLSTMCell(N_HIDDEN, forget_bias=1.0)
        #with tf.variable_scope('rnn1_2'):
        try:
            outputs2, _, _ = rnn.static_bidirectional_rnn(lstm_lw_cell, lstm_rw_cell, mat_1,
                                                          dtype=tf.float32, scope='rnn1_2')
        except Exception:  # Old TensorFlow version only returns outputs not states
            outputs2 = rnn.static_bidirectional_rnn(lstm_lw_cell, lstm_rw_cell, mat_1,
                                                    dtype=tf.float32)
        outputs2 = tf.reshape(outputs, [W_a, shape_5[0], H_a, 2 * N_HIDDEN])
        outputs2 = tf.transpose(outputs2, (1, 2, 0, 3))
        # ---------- till here ----------

        annotation_pred = tf.argmax(outputs2, dimension=3, name="prediction")
        return tf.expand_dims(annotation_pred, dim=3), outputs2
and this part is for the training:
def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)

def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
    pred_annotation, logits = inference(image, keep_probability)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                          labels=tf.squeeze(annotation, squeeze_dims=[3]),
                                                                          name="entropy")))
    tf.summary.scalar("entropy", loss)
    trainable_var = tf.trainable_variables()
    if FLAGS.debug:
        for var in trainable_var:
            utils.add_to_regularization_and_summary(var)
    train_op = train(loss, trainable_var)
    print("Setting up summary op...")
    summary_op = tf.summary.merge_all()
    print("Setting up image reader...")
    train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
    print(len(train_records))
    print(len(valid_records))
    print("Setting up dataset reader")
    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    if FLAGS.mode == 'train':
        train_dataset_reader = dataset.BatchDatset(train_records, image_options)
    validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
    sess = tf.Session()
    print("Setting up Saver...")
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    if FLAGS.mode == "train":
        for itr in xrange(MAX_ITERATION):
            train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
            feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85}
            sess.run(train_op, feed_dict=feed_dict)
            if itr % 10 == 0:
                train_loss, summary_str = sess.run([loss, summary_op], feed_dict=feed_dict)
                print("Step: %d, Train_loss:%g" % (itr, train_loss))
                summary_writer.add_summary(summary_str, itr)
            if itr % 500 == 0:
                valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
                valid_loss = sess.run(loss, feed_dict={image: valid_images, annotation: valid_annotations,
                                                       keep_probability: 1.0})
                print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)
    elif FLAGS.mode == "visualize":
        valid_images, valid_annotations = validation_dataset_reader.get_random_batch(FLAGS.batch_size)
        pred = sess.run(pred_annotation, feed_dict={image: valid_images, annotation: valid_annotations,
                                                    keep_probability: 1.0})
        valid_annotations = np.squeeze(valid_annotations, axis=3)
        pred = np.squeeze(pred, axis=3)
        for itr in range(FLAGS.batch_size):
            utils.save_image(valid_images[itr].astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
            utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
            utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="pred_" + str(5+itr))
            print("Saved image: %d" % itr)
The error reads:

Not found: Key inference/rnn1_2/fw/basic_lstm_cell/weights not found in checkpoint

So I think there must be something wrong with the variables.
I'd be very appreciative if someone could tell me how to fix it!
Looking forward to your help!
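(Not from the original post — a sketch of a common workaround, assuming TF1: the checkpoint in FLAGS.logs_dir was written before the RNN layers existed, so the default Saver tries to restore variables such as inference/rnn1_2/... that the file does not contain. Restoring only the variables actually present in the checkpoint sidesteps the error; the new LSTM weights then keep their fresh initialization.)

# Build a Saver restricted to variables that exist in the old checkpoint.
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
saved_shapes = reader.get_variable_to_shape_map()
restorable = [v for v in tf.global_variables()
              if v.name.split(':')[0] in saved_shapes]
tf.train.Saver(restorable).restore(sess, ckpt.model_checkpoint_path)
# Keep the full tf.train.Saver() for saving new checkpoints afterwards,
# so future restores include the RNN variables as well.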

Can I make a QPainter draw on top of other widgets in PyQt4?

I'm doing a basic node editor with PyQt4. However, I'm stuck on this: the lines linking nodes are drawn behind the widgets, when it should be the other way around.
Is there a way to force this, or should I start from scratch and try another approach?
The attached code should work and give you two nodes that you can move. The line linking them is drawn behind everything instead of on top.
Thank you
from PyQt4 import QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt
import sys

class Connector():
    def __init__(self, name="", id=id, data=None, connected=None, isInput=True, index=-1, parent=None):
        self.id = id
        self.name = name
        self.data = data
        self.parent = parent
        self.isConnected = not connected
        self.connectedTo = connected
        self.isInput = isInput
        self.index = index
        self.width = 16
        self.height = 16
        self.upperMargin = 30

    def getAbsPosition(self):
        return QPoint(self.parent.pos().x() + self.getPosition().x(), self.parent.pos().y() + self.getPosition().y())

    def getPosition(self):
        if self.parent:
            if self.isInput:
                deltaX = 0 + self.width / 2
            else:
                deltaX = self.parent.width() - self.width
            deltaY = self.upperMargin + (self.index * self.height * 2)
            pos = QPoint(deltaX, deltaY)
        else:
            pos = QPoint(0, 0)
        return pos

class Node(QLineEdit):
    def __init__(self, parent=None, name=""):
        QLineEdit.__init__(self, parent)
        self.size = QSize(200, 300)
        self.setText(name)
        self.resize(self.size.width(), self.size.height())
        self.connectors = []
        self.createInputConnector()
        self.createOutputConnector()

    def getConnectors(self):
        return self.connectors

    def getNewConnectorIndex(self):
        indexInputs = 0
        indexOutputs = 0
        for connector in self.connectors:
            if connector.isInput:
                indexInputs += 1
            else:
                indexOutputs += 1
        return indexInputs, indexOutputs

    def createConnector(self, name, isInput, index):
        newConnector = Connector(name=name, isInput=isInput, index=index, parent=self)
        self.connectors.append(newConnector)
        return newConnector

    def createInputConnector(self, name=""):
        index, ignore = self.getNewConnectorIndex()
        newConnector = self.createConnector(name, isInput=True, index=index)
        return newConnector

    def createOutputConnector(self, name=""):
        ignore, index = self.getNewConnectorIndex()
        newConnector = self.createConnector(name, isInput=False, index=index)
        return newConnector

    def paintEvent(self, event):
        painter = QPainter()
        painter.begin(self)
        painter.path = QPainterPath()
        # disabling next line "solves" the problem but not really.
        painter.setBrush(QColor(122, 163, 39))
        painter.path.addRoundRect(2, 2, self.size.width()-6, self.size.height()-6, 20, 15)
        painter.drawPath(painter.path)
        for connector in self.connectors:
            pos = connector.getPosition()
            cx = pos.x()
            cy = pos.y()
            cw = connector.width
            ch = connector.height
            painter.drawEllipse(cx, cy, cw, ch)
        painter.end()

    def mousePressEvent(self, event):
        self.__mousePressPos = None
        self.__mouseMovePos = None
        if event.button() == Qt.LeftButton:
            self.__mousePressPos = event.globalPos()
            self.__mouseMovePos = event.globalPos()
        super(Node, self).mousePressEvent(event)

    def mouseMoveEvent(self, event):
        if event.buttons() == Qt.LeftButton:
            currPos = self.mapToGlobal(self.pos())
            globalPos = event.globalPos()
            diff = globalPos - self.__mouseMovePos
            newPos = self.mapFromGlobal(currPos + diff)
            self.move(newPos)
            self.__mouseMovePos = globalPos
        super(Node, self).mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        if self.__mousePressPos is not None:
            moved = event.globalPos() - self.__mousePressPos
            if moved.manhattanLength() > 3:
                event.ignore()
                return
        super(Node, self).mouseReleaseEvent(event)

class Window(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        self.resize(640, 480)
        self.entry1 = Node(self, "NODE1")
        self.entry2 = Node(self, "NODE2")

    def paintEvent(self, e):
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setClipping(False)
        self.doDrawing(qp)
        qp.end()
        self.update()

    def doDrawing(self, qp):
        p1 = self.entry1.getConnectors()[0].getAbsPosition()
        p2 = self.entry2.getConnectors()[0].getAbsPosition()
        qp.drawLine(p1.x(), p1.y(), p2.x(), p2.y())

if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    sys.exit(app.exec_())
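(A sketch, not from the original post: child widgets are always painted after their parent, so anything drawn in Window.paintEvent ends up underneath the nodes. One common workaround is a transparent overlay child raised above the nodes; Overlay here is a hypothetical helper class.)

class Overlay(QWidget):
    def __init__(self, parent):
        QWidget.__init__(self, parent)
        # Let clicks fall through to the nodes underneath.
        self.setAttribute(Qt.WA_TransparentForMouseEvents)
        self.resize(parent.size())

    def paintEvent(self, event):
        # Painted after the nodes, so the line appears on top of them.
        qp = QPainter(self)
        p1 = self.parent().entry1.getConnectors()[0].getAbsPosition()
        p2 = self.parent().entry2.getConnectors()[0].getAbsPosition()
        qp.drawLine(p1.x(), p1.y(), p2.x(), p2.y())

# In Window.__init__, create the overlay after the nodes and raise it:
#     self.overlay = Overlay(self)
#     self.overlay.raise_()
# then call self.overlay.update() whenever a node moves, instead of
# drawing the line in Window.paintEvent.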

Add buttons to grid with either icons or text in VB6

I have an EXGrid in a form and want to add buttons to a cell that say either "Edit" or "Remove", or have icons for the two. Here is the code I have so far:
With grdGrade
    .BeginUpdate
    '.DataSource = objCo.DAL.Connection.Execute(sql)
    .DefaultItemHeight = 21
    .TreeColumnIndex = -1
    .MarkSearchColumn = False
    .FullRowSelect = exItemSel
    .DrawGridLines = exAllLines
    .SortOnClick = exNoSort
    .BackColorHeader = grdGrade.BackColor
    .Appearance = Flat
    .BackColorAlternate = Me.BackColor
    .SelBackColor = RGB(224, 224, 224)
    .SelForeColor = vbBlack
    .UseTabKey = True
    .Font = Me.Font
    .EndUpdate
End With

Set col = grdGrade.Columns.Add("Actions")
col.AllowDragging = False
col.Width = grdGrade.Width * 0.05
col.Editor.AddButton "Edit", , 0
col.Editor.AddButton "Remove", , 0
col.Editor.EditType = ButtonType
Thanks so much for all your help.
I finally got it. When loading the grid I used:
Me.grdGrade.Items.CellValue(h, "Actions") = " Edit "
Me.grdGrade.Items.CellHasButton(h, "Actions") = True
s = Me.grdGrade.Items.SplitCell(h, "Actions")
Me.grdGrade.Items.CellValue(0, s) = " Remove "
Me.grdGrade.Items.CellHasButton(0, s) = True
and it works like a charm.
