UnboundLocalError: local variable 'f' referenced before assignment ('f' is a variable defined inside the function) - curve fitting

I have calculating and fitting code (part of it is shown below).
def NFW(self, r, r_s, c):
    # (method excerpt; assumes numpy as np, log/atanh/atan from math,
    #  astropy.units as u, and the constants c_velocity, G, pi)
    x = r/r_s
    delta_c = (200/3) * (c**3 / (log(1+c) - (c/(1+c))))
    surface_mass_density = (c_velocity**2 / (4*pi*G)) * (self.D_s / (self.D_l * self.D_ls))
    rho_c = self.Cosmological_Model.critical_density(self.z_l).to(u.M_sun/(u.Mpc)**3).value
    Kk = ( 2*r_s*delta_c*rho_c ) / surface_mass_density

    # Kappa of NFW
    K_NFW = np.array([])
    for i in range(len(r)):
        if 0 <= x[i] < 1:
            f = ( 1 / (x[i]**2-1) ) * ( 1 - ( 2*atanh( np.sqrt( ( (1-x[i]) / (1+x[i]) ) ) ) / np.sqrt(1-x[i]**2) ) )
        elif x[i] == 1:
            f = 1/3
        elif 1 < x[i]:
            f = ( 1 / (x[i]**2-1) ) * ( 1 - ( 2*atan( np.sqrt( (x[i]-1) / (1+x[i]) ) ) / np.sqrt(x[i]**2-1) ) )
        else:
            print("x[i] can not have negative number!")  # note: f is not assigned on this path
        K_NFW = np.append(K_NFW, Kk * f)
And I tried to fit it with "scipy.curve_fit".
But the strange thing is that it sometimes works and sometimes does not, depending on "r", which is one of the input parameters of the NFW function.
Below is the fitting code.
ind_sub_R01 = np.where(self.obs_sub_R_LensPlane > 0.10)  # (<-- this)
# If I put '0.10' here it doesn't work, but if I put '0.20' it works.
# This number is the radius cut where the fitting starts.
def NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
    main_popt, main_pcov = curve_fit(self.NFWfunc, self.obs_main_R_LensPlane, self.obs_main_r_T_avg, p0=main_NFW_p0, bounds=main_NFW_bounds)
    sub_popt, sub_pcov = curve_fit(self.NFWfunc, self.obs_sub_R_LensPlane, self.obs_sub_r_T_avg, p0=sub_NFW_p0, bounds=sub_NFW_bounds)
    return main_popt[0], main_popt[1], main_pcov, sub_popt[0], sub_popt[1], sub_pcov
If I put '0.10' there, I get the error below.
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in <module>
198 drz_sci_606 = fits.open('bullet_f606w_drz_sci.fits')
199 FITandPLOT = profile_FITandPLOT(reduced_shear_main=reduced_shear_main, reduced_shear_sub=reduced_shear_sub, SISfunc=profile.SIS, NFWfunc=profile.NFW, data_for_WCS=drz_sci_606, D_l=profile.D_l)
--> 200 FITandPLOT.plotting(main_SIS_p0=v_disp_main, main_SIS_bounds=[v_disp_main-5,v_disp_main+5], sub_SIS_p0=v_disp_sub, sub_SIS_bounds=[v_disp_main-5,v_disp_main+5],
201 main_NFW_p0=(r_s_main,c_vir_main), main_NFW_bounds=([r_s_main-5,c_vir_main-5], [r_s_main+5,c_vir_main+5]), sub_NFW_p0=(r_s_sub,c_vir_sub), sub_NFW_bounds=([r_s_sub-5,c_vir_sub-5], [r_s_sub+5,c_vir_sub+5]))
202
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in plotting(self, main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
59 def plotting(self, main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
60 main_SIS_v_disp, main_SIS_err_v_disp, sub_SIS_v_disp, sub_SIS_err_v_disp = self.SIS_fitting(main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds)
---> 61 main_NFW_r_s, main_NFW_c, main_NFW_err_matrix, sub_NFW_r_s, sub_NFW_c, sub_NFW_err_matrix = self.NFW_fitting(main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
62
63 #----main
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
54 def NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
55 main_popt, main_pcov = curve_fit(self.NFWfunc, self.obs_main_R_LensPlane, self.obs_main_r_T_avg, p0=main_NFW_p0, bounds=main_NFW_bounds)
---> 56 sub_popt, sub_pcov = curve_fit(self.NFWfunc, self.obs_sub_R_LensPlane, self.obs_sub_r_T_avg, p0=sub_NFW_p0, bounds=sub_NFW_bounds)
57 return main_popt[0], main_popt[1], main_pcov, sub_popt[0], sub_popt[1], sub_pcov
58
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
798 kwargs['max_nfev'] = kwargs.pop('maxfev', None)
799
--> 800 res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
801 **kwargs)
802
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/least_squares.py in least_squares(fun, x0, jac, bounds, method, ftol, xtol, gtol, x_scale, loss, f_scale, diff_step, tr_solver, tr_options, jac_sparsity, max_nfev, verbose, args, kwargs)
926
927 elif method == 'trf':
--> 928 result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
929 gtol, max_nfev, x_scale, loss_function, tr_solver,
930 tr_options.copy(), verbose)
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/trf.py in trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose)
121 loss_function, tr_solver, tr_options, verbose)
122 else:
--> 123 return trf_bounds(
124 fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
125 loss_function, tr_solver, tr_options, verbose)
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/trf.py in trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose)
336
337 x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
--> 338 f_new = fun(x_new)
339 nfev += 1
340
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/least_squares.py in fun_wrapped(x)
813
814 def fun_wrapped(x):
--> 815 return np.atleast_1d(fun(x, *args, **kwargs))
816
817 if method == 'trf':
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/minpack.py in func_wrapped(params)
483 if transform is None:
484 def func_wrapped(params):
--> 485 return func(xdata, *params) - ydata
486 elif transform.ndim == 1:
487 def func_wrapped(params):
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/2654829019.py in NFW(self, r, r_s, c)
142 else:
143 print("x[i] can not have negative number!")
--> 144 K_NFW = np.append(K_NFW, Kk * f)
145
146 # Gamma of NFW
UnboundLocalError: local variable 'f' referenced before assignment
If I put '0.20', '0.09', or '0.25' there, I obtain the fitted parameters without any problem.
These numbers set the fitting start point: if I put '0.25', I fit the graph using only the region to the right of '0.25'.
I want to know why the error occurs.
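For reference, here is a minimal standalone sketch of the failure mode (hypothetical values, not the data above): f is assigned only inside the three conditional branches, so if some x[i] is NaN or negative, control falls into the else branch, which prints a message but never assigns f, and the following Kk * f raises the UnboundLocalError. Every comparison against NaN is False, so a NaN also ends up in the else branch. One plausible way to reach it, given bounds like r_s_main - 5: if the lower bound lets curve_fit probe a non-positive r_s, then x = r/r_s becomes negative (or NaN) for every r. That is an assumption, not something visible in the posted code.

import numpy as np

def branch(x):
    # same branch structure as the loop body in NFW()
    if 0 <= x < 1:
        f = "0 <= x < 1 branch"
    elif x == 1:
        f = "x == 1 branch"
    elif 1 < x:
        f = "x > 1 branch"
    else:
        print("x can not have negative number!")  # f is NOT assigned here
    return f  # UnboundLocalError whenever the else branch ran

for x in (0.5, 1.0, 2.0, -2.0, np.nan):
    try:
        print(x, "->", branch(x))
    except UnboundLocalError as err:
        print(x, "->", err)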


How do I convert the image to the required size in PyTorch?

I have been trying to use this image classification model I found online to classify traffic signs. The original code classifies image files saved on the device. I am trying to make it classify live video from my webcam, but I ran into this error. What changes should I make to the webcam image or the model?
test_transforms = transforms.Compose([
    transforms.Resize([112, 112]),
    transforms.ToTensor()
])

while True:
    with torch.no_grad():
        model.eval()
        isTrue, frame = capture.read()
        cv2.imshow('Video', frame)
        cv2.waitKey(16)
        PIL_image = Image.fromarray(np.uint8(frame)).convert('RGB')
        PIL_image = Image.fromarray(frame.astype('uint8'), 'RGB')
        img = test_transforms(PIL_image)
        print(img.shape)
        y_test_pred = model(img)
The error:
RuntimeError Traceback (most recent call last)
Cell In [8], line 40
27 print(img.shape)
28 # img.size
29 # numpyimg = asarray(frame)
30
(...)
38
39 # print(image)
---> 40 y_test_pred = model(img)
41 # print(y_test_pred)
42 # y_pred_softmax = torch.log_softmax(y_test_pred[0], dim=1)
43 # _, y_pred_tags = torch.max(y_pred_softmax, dim=1)
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
Cell In [5], line 42, in AlexnetTS.forward(self, x)
40 x = self.features(x)
41 h = x.view(x.shape[0], -1)
---> 42 x = self.classifier(h)
43 return x, h
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (256x196 and 12544x1000)
The neural network model:
class AlexnetTS(nn.Module):
    def __init__(self, output_dim):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=192, out_channels=384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(256*7*7, 1000),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(in_features=1000, out_features=256),
            nn.ReLU(inplace=True),
            nn.Linear(256, output_dim)
        )

    def forward(self, x):
        x = self.features(x)
        h = x.view(x.shape[0], -1)
        x = self.classifier(h)
        return x, h
I think you should pass the image as a batch of one image, with shape [batch_size, channels, height, width] where batch_size=1. So you need to add a batch dimension before passing it through the model, using the line img = img.unsqueeze(0), as follows:
test_transforms = transforms.Compose([
    transforms.Resize([112, 112]),
    transforms.ToTensor()
])

while True:
    with torch.no_grad():
        model.eval()
        isTrue, frame = capture.read()
        cv2.imshow('Video', frame)
        cv2.waitKey(16)
        PIL_image = Image.fromarray(np.uint8(frame)).convert('RGB')
        PIL_image = Image.fromarray(frame.astype('uint8'), 'RGB')
        img = test_transforms(PIL_image)
        print(img.shape)
        img = img.unsqueeze(0)
        y_test_pred = model(img)
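For reference, a quick standalone shape check (using a random tensor in place of a real webcam frame) of what unsqueeze(0) adds:

import torch

img = torch.randn(3, 112, 112)   # what test_transforms produces for one frame
print(img.shape)                 # torch.Size([3, 112, 112])
print(img.unsqueeze(0).shape)    # torch.Size([1, 3, 112, 112]) -- batch of one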

How do I solve the following error message: TypeError: fit() missing 1 required positional argument: 'y'

My code is:
# Drop the irrelevant variables from the train2 dataset
# Create the independent variable X_train and dependent variable y_train
X_train = train2.drop(['Item_Outlet_Sales', 'Outlet_Identifier', 'Item_Identifier'], axis=1)
y_train = train2.Item_Outlet_Sales

# Drop those irrelevant variables from the test2 dataset
X_test = test2.drop(['Outlet_Identifier', 'Item_Identifier'], axis=1)

# First import the sklearn library for model selection
from sklearn import model_selection
from sklearn.linear_model import LinearRegression

# Create a train and test split. Use X_train and y_train for linear regression.
xtrain, xtest, ytrain, ytest = model_selection.train_test_split(X_train, y_train, test_size=0.3, random_state=42)

# Fit the linear regression to the training dataset
lin = LinearRegression()
LinearRegression.fit(xtrain, ytrain)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/var/folders/mh/_vtvlkm54rn8_9pqdq1_7g9m0000gn/T/ipykernel_1637/3652998115.py in <module>
----> 1 LinearRegression.fit(xtrain, ytrain)
TypeError: fit() missing 1 required positional argument: 'y'
I first tried:
lin.fit(xtrain, ytrain)
Output:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/var/folders/mh/_vtvlkm54rn8_9pqdq1_7g9m0000gn/T/ipykernel_1637/2886984673.py in <module>
----> 1 lin.fit(xtrain, ytrain)
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/linear_model/_base.py in fit(self, X, y, sample_weight)
660 accept_sparse = False if self.positive else ["csr", "csc", "coo"]
661
--> 662 X, y = self._validate_data(
663 X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True
664 )
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/base.py in _validate_data(self, X, y, reset, validate_separately, **check_params)
579 y = check_array(y, **check_y_params)
580 else:
--> 581 X, y = check_X_y(X, y, **check_params)
582 out = X, y
583
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/utils/validation.py in check_X_y(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)
962 raise ValueError("y cannot be None")
963
--> 964 X = check_array(
965 X,
966 accept_sparse=accept_sparse,
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator)
744 array = array.astype(dtype, casting="unsafe", copy=False)
745 else:
--> 746 array = np.asarray(array, order=order, dtype=dtype)
747 except ComplexWarning as complex_warning:
748 raise ValueError(
~/opt/anaconda3/lib/python3.9/site-packages/pandas/core/generic.py in __array__(self, dtype)
2062
2063 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
-> 2064 return np.asarray(self._values, dtype=dtype)
2065
2066 def __array_wrap__(
ValueError: could not convert string to float: 'Grocery Store'
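The two tracebacks point at two separate problems. Below is a hedged sketch of both fixes, assuming X_train and y_train are the pandas objects built above; the column holding 'Grocery Store' is not shown in the post, so one-hot encoding all object columns is an assumption:

import pandas as pd
from sklearn import model_selection
from sklearn.linear_model import LinearRegression

# 1) LinearRegression.fit(...) calls fit on the class, so `xtrain` fills the
#    `self` slot and sklearn complains that `y` is missing; call fit on the
#    instance instead.
# 2) The second traceback shows a string value ('Grocery Store') reaching the
#    regression; LinearRegression needs numeric features, so encode string
#    columns first (pd.get_dummies is one option).
X_train_num = pd.get_dummies(X_train)  # one-hot encode any object columns
xtrain, xtest, ytrain, ytest = model_selection.train_test_split(
    X_train_num, y_train, test_size=0.3, random_state=42)

lin = LinearRegression()
lin.fit(xtrain, ytrain)  # instance call: `self` is `lin`, `y` is `ytrain`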

Fine-tuning with the Hugging Face Trainer when adding a layer to an Electra model

I'm trying to fine-tune my own model with the Hugging Face Trainer module. There was no problem when training a plain ElectraForQuestionAnswering, but when I added an additional layer on top of the model and tried the same process, I got this error:
from transformers import ElectraForQuestionAnswering
from transformers.modeling_outputs import QuestionAnsweringModelOutput
from torch import nn

class Jelectra(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = ElectraForQuestionAnswering.from_pretrained("google/electra-small-discriminator")
        self.sm = nn.Softmax(dim=1)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                start_positions=None,
                end_positions=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,):
        outputs = self.model(input_ids, token_type_ids, attention_mask, start_positions, end_positions)
        output_start = self.sm(outputs[0])
        output_end = self.sm(outputs[1])
        return QuestionAnsweringModelOutput(start_logits=output_start, end_logits=output_end)

model = Jelectra()

from transformers import TrainingArguments, Trainer

training_args = TrainingArguments(
    output_dir="./fine_tuned_electra",
    evaluation_strategy="epoch",
    learning_rate=5e-4,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    num_train_epochs=2,
    weight_decay=0.01,
    gradient_accumulation_steps=2,
    eval_accumulation_steps=1,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_squad["train"],
    eval_dataset=tokenized_squad["validation"],
    tokenizer=tokenizer,
    data_collator=data_collator,
)

trainer.train()
The error is...
RuntimeError Traceback (most recent call last)
Input In [12], in <module>
3 training_args = TrainingArguments(
4 output_dir="./fine_tuned_electra",
5 evaluation_strategy="epoch",
(...)
12 eval_accumulation_steps=1,
13 )
15 trainer = Trainer(
16 model=model,
17 args=training_args,
(...)
21 data_collator=data_collator,
22 )
---> 24 trainer.train()
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1365, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1363 tr_loss_step = self.training_step(model, inputs)
1364 else:
-> 1365 tr_loss_step = self.training_step(model, inputs)
1367 if (
1368 args.logging_nan_inf_filter
1369 and not is_torch_tpu_available()
1370 and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
1371 ):
1372 # if loss is nan or inf simply add the average of previous logged losses
1373 tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1940, in Trainer.training_step(self, model, inputs)
1937 return loss_mb.reduce_mean().detach().to(self.args.device)
1939 with self.autocast_smart_context_manager():
-> 1940 loss = self.compute_loss(model, inputs)
1942 if self.args.n_gpu > 1:
1943 loss = loss.mean() # mean() to average on multi-gpu parallel training
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1972, in Trainer.compute_loss(self, model, inputs, return_outputs)
1970 else:
1971 labels = None
-> 1972 outputs = model(**inputs)
1973 # Save past state if it exists
1974 # TODO: this needs to be fixed and made cleaner later.
1975 if self.args.past_index >= 0:
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
Input In [11], in Jelectra.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict)
9 def forward(self,
10 input_ids=None,
11 attention_mask=None,
(...)
19 output_hidden_states=None,
20 return_dict=None,):
---> 22 outputs = self.model(input_ids, token_type_ids, attention_mask, start_positions, end_positions)
23 output_start = self.sm(outputs[0])
24 output_end = self.sm(outputs[1])
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:1377, in ElectraForQuestionAnswering.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict)
1365 r"""
1366 start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1367 Labels for position (index) of the start of the labelled span for computing the token classification loss.
(...)
1373 are not taken into account for computing the loss.
1374 """
1375 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-> 1377 discriminator_hidden_states = self.electra(
1378 input_ids,
1379 attention_mask=attention_mask,
1380 token_type_ids=token_type_ids,
1381 position_ids=position_ids,
1382 head_mask=head_mask,
1383 inputs_embeds=inputs_embeds,
1384 output_attentions=output_attentions,
1385 output_hidden_states=output_hidden_states,
1386 )
1388 sequence_output = discriminator_hidden_states[0]
1390 logits = self.qa_outputs(sequence_output)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:905, in ElectraModel.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
901 encoder_extended_attention_mask = None
903 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
--> 905 hidden_states = self.embeddings(
906 input_ids=input_ids,
907 position_ids=position_ids,
908 token_type_ids=token_type_ids,
909 inputs_embeds=inputs_embeds,
910 past_key_values_length=past_key_values_length,
911 )
913 if hasattr(self, "embeddings_project"):
914 hidden_states = self.embeddings_project(hidden_states)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:212, in ElectraEmbeddings.forward(self, input_ids, token_type_ids, position_ids, inputs_embeds, past_key_values_length)
210 if self.position_embedding_type == "absolute":
211 position_embeddings = self.position_embeddings(position_ids)
--> 212 embeddings += position_embeddings
213 embeddings = self.LayerNorm(embeddings)
214 embeddings = self.dropout(embeddings)
RuntimeError: The size of tensor a (512) must match the size of tensor b (12) at non-singleton dimension 1
How can I solve this? I'm using SQuAD data.
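One plausible reading of the traceback: ElectraForQuestionAnswering.forward takes its arguments in the order (input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, start_positions, end_positions, ...), so the positional call inside Jelectra.forward sends start_positions, of shape (batch_size,) = 12, into the position_ids slot, where it collides with the sequence length 512 in the embedding addition shown at the bottom of the trace. A sketch of that inner call using keyword arguments, with the rest of forward unchanged:

# inside Jelectra.forward -- pass everything by keyword so no argument can
# slide into the wrong positional slot of the wrapped model
outputs = self.model(
    input_ids=input_ids,
    attention_mask=attention_mask,
    token_type_ids=token_type_ids,
    start_positions=start_positions,
    end_positions=end_positions,
)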

the pairplot data is not plotting correctly

Question:
I have this code and it gives me an error:
AttributeError: 'NoneType' object has no attribute 'update'
Any suggestions?
Thank you so much!
import seaborn as sns
features = ['rdeep','dtc','rhob', 'gr','npss']
feature_vectors=df[features]
feature_vectors = feature_vectors.apply(np.nan_to_num)
feature_vectors.reset_index(inplace=True, drop=True)
feature_vectors
sns.pairplot(feature_vectors, dropna=True )
Here is the full stack trace. Does this help?
AttributeError Traceback (most recent call last)
<ipython-input-59-bf2539b2257e> in <module>
6 feature_vectors.reset_index(inplace=True, drop=True)
7 feature_vectors
----> 8 sns.pairplot(feature_vectors, dropna=True )
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\seaborn\axisgrid.py
in pairplot(data, hue, hue_order, palette, vars, x_vars, y_vars, kind,
diag_kind, markers, height, aspect, dropna, plot_kws, diag_kws,
grid_kws, size)
2105 if grid.square_grid:
2106 if diag_kind == "hist":
-> 2107 grid.map_diag(plt.hist, **diag_kws)
2108 elif diag_kind == "kde":
2109 diag_kws.setdefault("shade", True)
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\seaborn\axisgrid.py
in map_diag(self, func, **kwargs)
1397 color = fixed_color
1398
-> 1399 func(data_k, label=label_k, color=color, **kwargs)
1400
1401 self._clean_axis(ax)
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\pyplot.py
in hist(x, bins, range, density, weights, cumulative, bottom,
histtype, align, orientation, rwidth, log, color, label, stacked,
normed, data, **kwargs)
2659 align=align, orientation=orientation, rwidth=rwidth, log=log,
2660 color=color, label=label, stacked=stacked, normed=normed,
-> 2661 **({"data": data} if data is not None else {}), **kwargs)
2662
2663
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\__init__.py
in inner(ax, data, *args, **kwargs)
1808 "the Matplotlib list!)" % (label_namer, func.name),
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
1811
1812 inner.doc = _add_data_doc(inner.doc,
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axes\_axes.py
in hist(self, x, bins, range, density, weights, cumulative, bottom,
histtype, align, orientation, rwidth, log, color, label, stacked,
normed, **kwargs)
6533 # Unit conversion is done individually on each dataset
6534 self._process_unit_info(xdata=x[0], kwargs=kwargs)
-> 6535 x = [self.convert_xunits(xi) for xi in x]
6536
6537 if bin_range is not None:
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axes\_axes.py
in (.0)
6533 # Unit conversion is done individually on each dataset
6534 self._process_unit_info(xdata=x[0], kwargs=kwargs)
-> 6535 x = [self.convert_xunits(xi) for xi in x]
6536
6537 if bin_range is not None:
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\artist.py
in convert_xunits(self, x)
184 if ax is None or ax.xaxis is None:
185 return x
--> 186 return ax.xaxis.convert_units(x)
187
188 def convert_yunits(self, y):
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axis.py
in convert_units(self, x)
1528 return x
1529
-> 1530 ret = self.converter.convert(x, self.units, self)
1531 return ret
1532
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\category.py
in convert(value, unit, axis)
51
52 # force an update so it also does type checking
---> 53 unit.update(values)
54
55 str2idx = np.vectorize(unit._mapping.getitem,
AttributeError: 'NoneType' object has no attribute 'update'
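A guess based on the trace: matplotlib's category converter (category.py) only runs when values reach plt.hist as strings, so at least one of the selected columns is probably of object dtype, and np.nan_to_num does not change that. A sketch that coerces everything to numeric first (non-numeric entries become NaN, then 0), under the assumption that df is the DataFrame from the question:

import pandas as pd
import seaborn as sns

features = ['rdeep', 'dtc', 'rhob', 'gr', 'npss']
feature_vectors = df[features].apply(pd.to_numeric, errors='coerce')
feature_vectors = feature_vectors.fillna(0)  # same effect as np.nan_to_num
feature_vectors.reset_index(inplace=True, drop=True)
sns.pairplot(feature_vectors)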

What is the best way to generate a random pattern inside of a table?

I've got a table (2d array), c x r. I need to generate a random pattern of connected cells inside of it, with no self-crossings and no diagonal moves. See the related picture for an example (ex. 1).
c = 6, r = 7; the pattern is shown in numbers.
I wrote a function for this and it works fine, but I'm looking for serious optimization. In the code below you can see that if the pattern gets into a dead end, it just rebuilds itself from the start. That is very inefficient when the pattern length is close to or equal to the number of cells, c*r (42 in the example). So some smarter solution is needed, like moving the whole pattern symmetrically when it runs out of possible moves, or adding some analysis to the function so it never gets caught in dead ends. Again, for low values of c, r, and patternLength my example works fine, but I'm looking for algorithmic perfection and high performance even at pretty high numbers.
function ClassLogic:generatePattern()
    --[[ subfunctions ]]
    -- choosing next point for the pattern
    local move = function( seq )
        -- getting the last sequence point
        local last = seq[#seq]
        -- checking the nearness of walls
        local wallLeft, wallRight, wallUp, wallDown =
            (last.c==1),
            (last.c==config.tableSize.c),
            (last.r==1),
            (last.r==config.tableSize.r)
        -- checking the nearness of already sequenced points
        local spLeft, spRight, spUp, spDown =
            (utilities.indexOfTable( seq, { c = last.c - 1, r = last.r } )~=-1),
            (utilities.indexOfTable( seq, { c = last.c + 1, r = last.r } )~=-1),
            (utilities.indexOfTable( seq, { c = last.c, r = last.r - 1 } )~=-1),
            (utilities.indexOfTable( seq, { c = last.c, r = last.r + 1 } )~=-1)
        local leftRestricted = (wallLeft or spLeft)
        local rightRestricted = (wallRight or spRight)
        local upRestricted = (wallUp or spUp)
        local downRestricted = (wallDown or spDown)
        if ( leftRestricted and rightRestricted and upRestricted and downRestricted ) then
            -- dead end
            print('d/e')
            return nil
        else
            -- go somewhere possible
            local possibleDirections = {}
            if (not leftRestricted) then possibleDirections[#possibleDirections+1] = 1 end
            if (not rightRestricted) then possibleDirections[#possibleDirections+1] = 2 end
            if (not upRestricted) then possibleDirections[#possibleDirections+1] = 3 end
            if (not downRestricted) then possibleDirections[#possibleDirections+1] = 4 end
            local direction = possibleDirections[math.random( 1, #possibleDirections )]
            if (direction==1) then
                -- next point is left
                return { c = last.c - 1, r = last.r }
            elseif (direction==2) then
                -- next point is right
                return { c = last.c + 1, r = last.r }
            elseif (direction==3) then
                -- next point is up
                return { c = last.c, r = last.r - 1 }
            elseif (direction==4) then
                -- next point is down
                return { c = last.c, r = last.r + 1 }
            end
        end
    end
    --[[ subfunctions end ]]

    -- choose random entry point
    local entry = { c = math.random( 1, config.tableSize.c ),
                    r = math.random( 1, config.tableSize.r ) }
    -- start points sequence
    local pointSequence = { [1] = entry }
    -- building the pattern
    local succeed = false
    while (not succeed) do
        for i = 2, self.patternLength do
            local nextPoint = move( pointSequence )
            if (nextPoint~=nil) then
                pointSequence[i] = nextPoint
                if (i==self.patternLength) then succeed = true end
            else
                pointSequence = { [1] = entry }
                break
            end
        end
    end
    return pointSequence
end
Any ideas or approaches on how this could be achieved would be highly appreciated. Maybe a recursive backtracker, a pathfinding algorithm, or a random-walk algorithm?
The snake-style growing is not enough for good performance.
The main idea is to randomly modify the path being generated by adding small detours like the following:
- - 6 - - - - 8 - -
- - 5 - - - 6 7 - -
- - 4 1 - ===> - 5 4 1 -
- - 3 2 - - - 3 2 -
- - - - - - - - - -
(note the two additional cells added to the left of the 4-5 segment)
Such an implementation works very fast for area fills below 95%:
local function generate_path(W, H, L)
    -- W = field width (number of columns)  -- c = 1..W
    -- H = field height (number of rows)    -- r = 1..H
    -- L = path length, must be within range 1..W*H
    assert(L >= 1 and L <= W * H, "Path length is greater than field area")

    local function get_idx(x, y)
        return x >= 1 and x <= W and y >= 1 and y <= H and (y - 1) * W + x
    end

    local function get_x_y(idx)
        local x = (idx - 1) % W + 1
        local y = (idx - x) / W + 1
        return x, y
    end

    local function random_sort(array)
        for last = #array, 2, -1 do
            local pos = math.random(last)
            array[pos], array[last] = array[last], array[pos]
        end
    end

    local path_sum_x = 0
    local path_sum_y = 0
    local path_ctr = 0
    local is_unused = {}  -- [idx] = true/nil (or idx recently swapped with)

    local function mark_as_unused(idx, value)
        local x, y = get_x_y(idx)
        path_sum_x = path_sum_x - x
        path_sum_y = path_sum_y - y
        path_ctr = path_ctr - 1
        is_unused[idx] = value or true
    end

    local function mark_as_path(idx)
        local x, y = get_x_y(idx)
        path_sum_x = path_sum_x + x
        path_sum_y = path_sum_y + y
        path_ctr = path_ctr + 1
        is_unused[idx] = nil
    end

    for x = 1, W do
        for y = 1, H do
            is_unused[get_idx(x, y)] = true
        end
    end

    -- create path of length 1 by selecting random cell
    local idx = get_idx(math.random(W), math.random(H))
    mark_as_path(idx)
    local path = {first = idx, last = idx, [idx] = {}}
    -- path[idx] == {next=next_idx/nil, prev=prev_idx/nil}

    local function grow()
        local variants = {
            {dx=-1, dy=0, origin="last"}, {dx=1, dy=0, origin="last"},
            {dx=0, dy=-1, origin="last"}, {dx=0, dy=1, origin="last"},
            {dx=-1, dy=0, origin="first"}, {dx=1, dy=0, origin="first"},
            {dx=0, dy=-1, origin="first"}, {dx=0, dy=1, origin="first"}
        }
        random_sort(variants)
        for _, vector in ipairs(variants) do
            local x, y = get_x_y(path[vector.origin])
            local idx = get_idx(vector.dx + x, vector.dy + y)
            if is_unused[idx] then
                if vector.origin == 'first' then
                    -- add new first cell of the path
                    local old_first = path.first
                    path[old_first].prev = idx
                    path[idx] = {next = old_first}
                    path.first = idx
                else
                    -- add new last cell of the path
                    local old_last = path.last
                    path[old_last].next = idx
                    path[idx] = {prev = old_last}
                    path.last = idx
                end
                mark_as_path(idx)
                return true
            end
        end
    end

    local function shrink()
        if math.random(2) == 2 then
            -- remove first cell of the path
            local old_first = path.first
            local new_first = assert(path[old_first].next)
            path[old_first] = nil
            path.first = new_first
            path[new_first].prev = nil
            mark_as_unused(old_first)
        else
            -- remove last cell of the path
            local old_last = path.last
            local new_last = assert(path[old_last].prev)
            path[old_last] = nil
            path.last = new_last
            path[new_last].next = nil
            mark_as_unused(old_last)
        end
    end

    local function inflate()
        local variants = {}
        local idx1 = path.first
        repeat
            local idx4 = path[idx1].next
            if idx4 then
                local x1, y1 = get_x_y(idx1)
                local x4, y4 = get_x_y(idx4)
                local dx14, dy14 = x4 - x1, y4 - y1
                local dx, dy = dy14, dx14
                for side = 1, 2 do
                    dx, dy = -dx, -dy
                    local x2, y2 = x1 + dx, y1 + dy
                    local idx2 = get_idx(x2, y2)
                    local idx3 = get_idx(x2 + dx14, y2 + dy14)
                    if is_unused[idx2] and is_unused[idx3] then
                        table.insert(variants, {idx1, idx2, idx3, idx4})
                    end
                end
            end
            idx1 = idx4
        until not idx4
        if #variants > 0 then
            local idx1, idx2, idx3, idx4 =
                (table.unpack or unpack)(variants[math.random(#variants)])
            -- insert idx2 and idx3 between idx1 and idx4
            path[idx1].next = idx2
            path[idx2] = {prev = idx1, next = idx3}
            path[idx3] = {prev = idx2, next = idx4}
            path[idx4].prev = idx3
            mark_as_path(idx2)
            mark_as_path(idx3)
            return true
        end
    end

    local function euclid(dx, dy)
        return dx*dx + dy*dy
    end

    local function swap()
        local variants = {}
        local path_center_x = path_sum_x / path_ctr
        local path_center_y = path_sum_y / path_ctr
        local idx1 = path.first
        repeat
            local idx2 = path[idx1].next
            local idx3 = idx2 and path[idx2].next
            if idx3 then
                local x1, y1 = get_x_y(idx1)
                local x2, y2 = get_x_y(idx2)
                local x3, y3 = get_x_y(idx3)
                local dx12, dy12 = x2 - x1, y2 - y1
                local dx23, dy23 = x3 - x2, y3 - y2
                if dx12 * dx23 + dy12 * dy23 == 0 then
                    local x, y = x1 + dx23, y1 + dy23
                    local idx = get_idx(x, y)
                    local dist2 = euclid(x2 - path_center_x, y2 - path_center_y)
                    local dist = euclid(x - path_center_x, y - path_center_y)
                    if is_unused[idx] and dist2 < dist and is_unused[idx] ~= idx2 then
                        table.insert(variants, {idx1, idx2, idx3, idx})
                    end
                end
            end
            idx1 = idx2
        until not idx3
        if #variants > 0 then
            local idx1, idx2, idx3, idx =
                (table.unpack or unpack)(variants[math.random(#variants)])
            -- swap idx2 and idx
            path[idx1].next = idx
            path[idx] = path[idx2]
            path[idx3].prev = idx
            path[idx2] = nil
            mark_as_unused(idx2, idx)
            mark_as_path(idx)
            return true
        end
    end

    local actions = {grow, inflate, swap}
    repeat
        random_sort(actions)
        local success
        for _, action in ipairs(actions) do
            success = action()
            if success then
                break
            end
        end
        if not success and path_ctr < L then
            -- erase and rewind
            while path_ctr > 1 do
                shrink()
            end
        end
    until path_ctr >= L

    while path_ctr > L do
        shrink()
    end

    local pointSequence = {}
    local idx = path.first
    local step = 0
    repeat
        step = step + 1
        path[idx].step = step
        local x, y = get_x_y(idx)
        pointSequence[step] = {c = x, r = y}
        idx = path[idx].next
    until not idx

    local field = 'W = '..W..', H = '..H..', L = '..L..'\n'
    for y = 1, H do
        for x = 1, W do
            local c = path[get_idx(x, y)]
            field = field..(' '..(c and c.step or '-')):sub(-4)
        end
        field = field..'\n'
    end
    print(field)

    return pointSequence
end
Usage example:
math.randomseed(os.time())
local pointSequence = generate_path(6, 7, 10)
-- pointSequence = {[1]={r=r1,c=c1}, [2]={r=r2,c=c2},...,[10]={r=r10,c=c10}}
Result examples:
W = 5, H = 5, L = 10
- - - 9 10
- 6 7 8 -
- 5 4 1 -
- - 3 2 -
- - - - -
W = 5, H = 5, L = 19
15 16 17 18 19
14 1 2 3 4
13 12 11 6 5
- - 10 7 -
- - 9 8 -
W = 6, H = 7, L = 35
- 35 34 25 24 23
- - 33 26 21 22
- 31 32 27 20 19
- 30 29 28 - 18
- 1 10 11 12 17
3 2 9 8 13 16
4 5 6 7 14 15
W = 19, H = 21, L = 394
77 78 79 84 85 118 119 120 121 122 123 124 125 126 127 128 129 254 255
76 75 80 83 86 117 116 115 114 141 140 139 138 135 134 131 130 253 256
73 74 81 82 87 88 89 112 113 142 145 146 137 136 133 132 - 252 257
72 69 68 67 92 91 90 111 - 143 144 147 148 149 150 151 152 251 258
71 70 65 66 93 108 109 110 163 162 161 160 159 158 157 156 153 250 259
58 59 64 63 94 107 166 165 164 191 192 193 196 197 - 155 154 249 260
57 60 61 62 95 106 167 168 189 190 - 194 195 198 241 242 243 248 261
56 55 54 53 96 105 170 169 188 203 202 201 200 199 240 239 244 247 262
47 48 51 52 97 104 171 172 187 204 205 206 231 232 237 238 245 246 263
46 49 50 99 98 103 174 173 186 209 208 207 230 233 236 267 266 265 264
45 42 41 100 101 102 175 184 185 210 211 228 229 234 235 268 269 270 271
44 43 40 39 38 177 176 183 214 213 212 227 226 225 276 275 274 273 272
33 34 35 36 37 178 179 182 215 216 217 218 223 224 277 278 279 280 281
32 29 28 23 22 - 180 181 12 11 10 219 222 287 286 285 284 283 282
31 30 27 24 21 18 17 14 13 8 9 220 221 288 289 290 291 292 293
380 381 26 25 20 19 16 15 394 7 4 3 304 303 300 299 296 295 294
379 382 383 384 387 388 391 392 393 6 5 2 305 302 301 298 297 312 313
378 371 370 385 386 389 390 347 346 343 342 1 306 307 308 309 310 311 314
377 372 369 364 363 350 349 348 345 344 341 340 333 332 319 318 317 316 315
376 373 368 365 362 351 352 353 354 355 338 339 334 331 320 321 322 323 324
375 374 367 366 361 360 359 358 357 356 337 336 335 330 329 328 327 326 325
