Question:
I have this code and it gives me an error:
AttributeError: 'NoneType' object has no attribute 'update'
Any suggestions?
Thank you so much!
import numpy as np
import seaborn as sns

features = ['rdeep', 'dtc', 'rhob', 'gr', 'npss']
feature_vectors = df[features]
feature_vectors = feature_vectors.apply(np.nan_to_num)
feature_vectors.reset_index(inplace=True, drop=True)
feature_vectors
sns.pairplot(feature_vectors, dropna=True)
Here is the full stack trace. Does this help?
AttributeError Traceback (most recent call last)
<ipython-input-59-bf2539b2257e> in <module>
6 feature_vectors.reset_index(inplace=True, drop=True)
7 feature_vectors
----> 8 sns.pairplot(feature_vectors, dropna=True )
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\seaborn\axisgrid.py
in pairplot(data, hue, hue_order, palette, vars, x_vars, y_vars, kind,
diag_kind, markers, height, aspect, dropna, plot_kws, diag_kws,
grid_kws, size)
2105 if grid.square_grid:
2106 if diag_kind == "hist":
-> 2107 grid.map_diag(plt.hist, **diag_kws)
2108 elif diag_kind == "kde":
2109 diag_kws.setdefault("shade", True)
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\seaborn\axisgrid.py
in map_diag(self, func, **kwargs)
1397 color = fixed_color
1398
-> 1399 func(data_k, label=label_k, color=color, **kwargs)
1400
1401 self._clean_axis(ax)
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\pyplot.py
in hist(x, bins, range, density, weights, cumulative, bottom,
histtype, align, orientation, rwidth, log, color, label, stacked,
normed, data, **kwargs)
2659 align=align, orientation=orientation, rwidth=rwidth, log=log,
2660 color=color, label=label, stacked=stacked, normed=normed,
-> 2661 **({"data": data} if data is not None else {}), **kwargs)
2662
2663
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\__init__.py
in inner(ax, data, *args, **kwargs)
1808 "the Matplotlib list!)" % (label_namer, func.name),
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
1811
1812 inner.__doc__ = _add_data_doc(inner.__doc__,
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axes\_axes.py
in hist(self, x, bins, range, density, weights, cumulative, bottom,
histtype, align, orientation, rwidth, log, color, label, stacked,
normed, **kwargs)
6533 # Unit conversion is done individually on each dataset
6534 self._process_unit_info(xdata=x[0], kwargs=kwargs)
-> 6535 x = [self.convert_xunits(xi) for xi in x]
6536
6537 if bin_range is not None:
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axes\_axes.py
in <listcomp>(.0)
6533 # Unit conversion is done individually on each dataset
6534 self._process_unit_info(xdata=x[0], kwargs=kwargs)
-> 6535 x = [self.convert_xunits(xi) for xi in x]
6536
6537 if bin_range is not None:
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\artist.py
in convert_xunits(self, x)
184 if ax is None or ax.xaxis is None:
185 return x
--> 186 return ax.xaxis.convert_units(x)
187
188 def convert_yunits(self, y):
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\axis.py
in convert_units(self, x)
1528 return x
1529
-> 1530 ret = self.converter.convert(x, self.units, self)
1531 return ret
1532
c:\program files\extensions\subpy_1.0.0.0\mc\lib\site-packages\matplotlib\category.py
in convert(value, unit, axis)
51
52 # force an update so it also does type checking
---> 53 unit.update(values)
54
55 str2idx = np.vectorize(unit._mapping.__getitem__,
AttributeError: 'NoneType' object has no attribute 'update'
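The traceback ends in matplotlib's category converter, which only gets involved when a column contains non-numeric (object-dtype) data, so a likely cause is that one of the selected columns still holds strings or mixed types; np.nan_to_num leaves object columns untouched. A minimal, hedged sketch of a check and fix, assuming df is the original DataFrame from the question:

import numpy as np
import pandas as pd
import seaborn as sns

features = ['rdeep', 'dtc', 'rhob', 'gr', 'npss']

# Coerce every column to a numeric dtype first; values that cannot be parsed
# become NaN instead of staying around as strings/objects.
feature_vectors = df[features].apply(pd.to_numeric, errors='coerce')
feature_vectors = feature_vectors.apply(np.nan_to_num)
feature_vectors.reset_index(inplace=True, drop=True)

print(feature_vectors.dtypes)  # every column should now be a float/int dtype
sns.pairplot(feature_vectors)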
Related
I have been trying to use this image classification model I found online to classify traffic signs. The original code classifies image files saved on the device. I am trying to make it classify live video from my webcam, but I ran into this error. What changes should I make to the webcam image or the model?
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

test_transforms = transforms.Compose([
    transforms.Resize([112, 112]),
    transforms.ToTensor()
])
while True:
    with torch.no_grad():
        model.eval()
        isTrue, frame = capture.read()
        cv2.imshow('Video', frame)
        cv2.waitKey(16)
        PIL_image = Image.fromarray(np.uint8(frame)).convert('RGB')
        PIL_image = Image.fromarray(frame.astype('uint8'), 'RGB')
        img = test_transforms(PIL_image)
        print(img.shape)
        y_test_pred = model(img)
The error:
RuntimeError Traceback (most recent call last)
Cell In [8], line 40
27 print(img.shape)
28 # img.size
29 # numpyimg = asarray(frame)
30
(...)
38
39 # print(image)
---> 40 y_test_pred = model(img)
41 # print(y_test_pred)
42 # y_pred_softmax = torch.log_softmax(y_test_pred[0], dim=1)
43 # _, y_pred_tags = torch.max(y_pred_softmax, dim=1)
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
Cell In [5], line 42, in AlexnetTS.forward(self, x)
40 x = self.features(x)
41 h = x.view(x.shape[0], -1)
---> 42 x = self.classifier(h)
43 return x, h
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File c:\Users\lenovo\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\modules\linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (256x196 and 12544x1000)
The neural network model:
class AlexnetTS(nn.Module):
    def __init__(self, output_dim):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=192, out_channels=384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(256*7*7, 1000),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(in_features=1000, out_features=256),
            nn.ReLU(inplace=True),
            nn.Linear(256, output_dim)
        )

    def forward(self, x):
        x = self.features(x)
        h = x.view(x.shape[0], -1)
        x = self.classifier(h)
        return x, h
I think you should pass the image as a batch of one image with shape [batch_size, channels, height, width], where batch_size=1. You need to add a batch dimension before passing it through the model, using the line img = img.unsqueeze(0), as follows:
test_transforms = transforms.Compose([
    transforms.Resize([112, 112]),
    transforms.ToTensor()
])
while True:
    with torch.no_grad():
        model.eval()
        isTrue, frame = capture.read()
        cv2.imshow('Video', frame)
        cv2.waitKey(16)
        PIL_image = Image.fromarray(np.uint8(frame)).convert('RGB')
        PIL_image = Image.fromarray(frame.astype('uint8'), 'RGB')
        img = test_transforms(PIL_image)
        print(img.shape)
        img = img.unsqueeze(0)
        y_test_pred = model(img)
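A small follow-up, read off the code above rather than taken from the original answer: with a 112x112 input, the convolutional stack (stride-2 first conv plus three 2x2 max-pools) produces a 256x7x7 feature map, i.e. exactly the 256*7*7 = 12544 features the first Linear layer expects, so the missing batch dimension really is the only change needed. And since AlexnetTS.forward returns the tuple (x, h), the prediction then has to be unpacked before use, roughly like this:

img = img.unsqueeze(0)                 # shape: [1, 3, 112, 112]

logits, features = model(img)          # forward() returns (classifier output, flattened features)
pred_class = torch.argmax(logits, dim=1)
print(pred_class.item())               # predicted traffic-sign class index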
My code is:
#Drop the irrelevant variables from the train2 dataset
#Create the independent variable X_train and the dependent variable y_train
X_train = train2.drop(['Item_Outlet_Sales', 'Outlet_Identifier', 'Item_Identifier'], axis=1)
y_train = train2.Item_Outlet_Sales
#Drop those irrelevant variables from test2 dataset
X_test = test2.drop(['Outlet_Identifier', 'Item_Identifier'], axis=1)
#Let's first import the sklearn library for model selection
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
#Create a train/test split. Use X_train and y_train for linear regression.
xtrain, xtest, ytrain, ytest = model_selection.train_test_split(X_train, y_train, test_size=0.3, random_state=42)
#Fit the linear regression to the training dataset
lin = LinearRegression()
LinearRegression.fit(xtrain, ytrain)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/var/folders/mh/_vtvlkm54rn8_9pqdq1_7g9m0000gn/T/ipykernel_1637/3652998115.py in <module>
----> 1 LinearRegression.fit(xtrain, ytrain)
TypeError: fit() missing 1 required positional argument: 'y'
I first tried:
lin.fit(xtrain, ytrain)
Output:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/var/folders/mh/_vtvlkm54rn8_9pqdq1_7g9m0000gn/T/ipykernel_1637/2886984673.py in <module>
----> 1 lin.fit(xtrain, ytrain)
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/linear_model/_base.py in fit(self, X, y, sample_weight)
660 accept_sparse = False if self.positive else ["csr", "csc", "coo"]
661
--> 662 X, y = self._validate_data(
663 X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True
664 )
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/base.py in _validate_data(self, X, y, reset, validate_separately, **check_params)
579 y = check_array(y, **check_y_params)
580 else:
--> 581 X, y = check_X_y(X, y, **check_params)
582 out = X, y
583
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/utils/validation.py in check_X_y(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)
962 raise ValueError("y cannot be None")
963
--> 964 X = check_array(
965 X,
966 accept_sparse=accept_sparse,
~/opt/anaconda3/lib/python3.9/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator)
744 array = array.astype(dtype, casting="unsafe", copy=False)
745 else:
--> 746 array = np.asarray(array, order=order, dtype=dtype)
747 except ComplexWarning as complex_warning:
748 raise ValueError(
~/opt/anaconda3/lib/python3.9/site-packages/pandas/core/generic.py in __array__(self, dtype)
2062
2063 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
-> 2064 return np.asarray(self._values, dtype=dtype)
2065
2066 def __array_wrap__(
ValueError: could not convert string to float: 'Grocery Store'
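The two tracebacks point at separate problems: the TypeError comes from calling fit on the class LinearRegression instead of the instance lin, so self is never bound and sklearn thinks y is missing; the ValueError comes from a text column (the one containing 'Grocery Store') still being present in X, and linear regression needs purely numeric features. A hedged sketch of how both are usually addressed, using one-hot encoding via pd.get_dummies (whether that is the right encoding for these columns is an assumption):

import pandas as pd
from sklearn import model_selection
from sklearn.linear_model import LinearRegression

# Encode the remaining text/categorical columns (e.g. the outlet type that
# holds values like 'Grocery Store') as numeric dummy columns.
X_train_encoded = pd.get_dummies(X_train)

xtrain, xtest, ytrain, ytest = model_selection.train_test_split(
    X_train_encoded, y_train, test_size=0.3, random_state=42)

lin = LinearRegression()
lin.fit(xtrain, ytrain)          # call fit on the instance, not on the class
print(lin.score(xtest, ytest))   # R^2 on the held-out split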
I tried DeiT classification using PyTorch; the code works. Even after changing my Colab account to Pro, I still get an error in the second epoch.
I execute this instruction:
model_ft = train_model(model, criterion, optimizer, exp_lr_scheduler) # now it is a lot faster
Training model function:
def train_model(model, criterion, optimizer, scheduler, num_epochs=10):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs - 1}')
        print("-"*10)
        for phase in ['train', 'val']:  # We do training and validation phase per epoch
            if phase == 'train':
                model.train()  # model to training mode
            else:
                model.eval()  # model to evaluate
            running_loss = 0.0
            running_corrects = 0.0
            for inputs, labels in tqdm(dataloaders[phase]):
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):  # no autograd makes validation go faster
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)  # used for accuracy
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()  # step at end of epoch
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print("{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc))
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())  # keep the best validation accuracy model
        print()
    time_elapsed = time.time() - since  # slight error
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print("Best Val Acc: {:.4f}".format(best_acc))
    model.load_state_dict(best_model_wts)
    return model
After the first epoch, I get this error:
Epoch 0/9
----------
100%|██████████| 175/175 [15:40<00:00, 5.37s/it]
train Loss: 6.2008 Acc: 0.0037
1%|▏ | 3/232 [00:11<14:36, 3.83s/it]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-23-c552e1f19194> in <module>
----> 1 model_ft = train_model(model, criterion, optimizer, exp_lr_scheduler) # now it is a lot faster
2 # I will come back after 10 epochs
2 frames
<ipython-input-22-58764f3d79c1> in train_model(model, criterion, optimizer, scheduler, num_epochs)
26 outputs = model(inputs)
27 _, preds = torch.max(outputs, 1) # used for accuracy
---> 28 loss = criterion(outputs, labels)
29
30 if phase == 'train':
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/timm/loss/cross_entropy.py in forward(self, x, target)
20 def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
21 logprobs = F.log_softmax(x, dim=-1)
---> 22 nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
23 nll_loss = nll_loss.squeeze(1)
24 smooth_loss = -logprobs.mean(dim=-1)
RuntimeError: CUDA error: device-side assert triggered
(My output layer matches the number of classes; my dataset has 37374 images and 488 classes:
Sequential(
  (0): Linear(in_features=192, out_features=150, bias=True)
  (1): ReLU()
  (2): Dropout(p=0.3, inplace=False)
  (3): Linear(in_features=150, out_features=488, bias=True)
)
I tried os.environ['CUDA_LAUNCH_BLOCKING'] = "1" and I still get the error.)
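A device-side assert raised inside logprobs.gather(dim=-1, index=target.unsqueeze(1)) usually means that some target label is outside the valid range 0..num_classes-1 (with 488 classes, anything below 0 or at/above 488). A hedged sketch of a sanity check, reusing the dataloaders, model, criterion, and device objects from the question; note that CUDA_LAUNCH_BLOCKING only takes effect if set before the first CUDA call, which may be why setting it later did not change the message:

num_classes = 488  # must match the out_features of the last Linear layer

# Check every label against the range expected by the classifier head.
for phase in ['train', 'val']:
    for _, labels in dataloaders[phase]:
        assert labels.min() >= 0 and labels.max() < num_classes, \
            f"{phase}: label out of range ({labels.min().item()}..{labels.max().item()})"

# Running one batch on the CPU often replaces the opaque CUDA assert with a
# readable IndexError that names the offending index.
inputs, labels = next(iter(dataloaders['val']))
loss = criterion(model.cpu()(inputs), labels)   # move the model back with model.to(device) afterwards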
I'm trying to fine-tune my own model with the Hugging Face Trainer module. There was no problem when I was just training ElectraForQuestionAnswering, but when I added an additional layer to the model and tried the same process, I got this error.
from transformers import ElectraForQuestionAnswering
from transformers.modeling_outputs import QuestionAnsweringModelOutput
from torch import nn

class Jelectra(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = ElectraForQuestionAnswering.from_pretrained("google/electra-small-discriminator")
        self.sm = nn.Softmax(dim=1)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                start_positions=None,
                end_positions=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,):
        outputs = self.model(input_ids, token_type_ids, attention_mask, start_positions, end_positions)
        output_start = self.sm(outputs[0])
        output_end = self.sm(outputs[1])
        return QuestionAnsweringModelOutput(start_logits=output_start, end_logits=output_end)
model = Jelectra()
from transformers import TrainingArguments, Trainer
training_args = TrainingArguments(
    output_dir="./fine_tuned_electra",
    evaluation_strategy="epoch",
    learning_rate=5e-4,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    num_train_epochs=2,
    weight_decay=0.01,
    gradient_accumulation_steps=2,
    eval_accumulation_steps=1,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_squad["train"],
    eval_dataset=tokenized_squad["validation"],
    tokenizer=tokenizer,
    data_collator=data_collator,
)
trainer.train()
The error is...
RuntimeError Traceback (most recent call last)
Input In [12], in <module>
3 training_args = TrainingArguments(
4 output_dir="./fine_tuned_electra",
5 evaluation_strategy="epoch",
(...)
12 eval_accumulation_steps=1,
13 )
15 trainer = Trainer(
16 model=model,
17 args=training_args,
(...)
21 data_collator=data_collator,
22 )
---> 24 trainer.train()
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1365, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1363 tr_loss_step = self.training_step(model, inputs)
1364 else:
-> 1365 tr_loss_step = self.training_step(model, inputs)
1367 if (
1368 args.logging_nan_inf_filter
1369 and not is_torch_tpu_available()
1370 and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
1371 ):
1372 # if loss is nan or inf simply add the average of previous logged losses
1373 tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1940, in Trainer.training_step(self, model, inputs)
1937 return loss_mb.reduce_mean().detach().to(self.args.device)
1939 with self.autocast_smart_context_manager():
-> 1940 loss = self.compute_loss(model, inputs)
1942 if self.args.n_gpu > 1:
1943 loss = loss.mean() # mean() to average on multi-gpu parallel training
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/trainer.py:1972, in Trainer.compute_loss(self, model, inputs, return_outputs)
1970 else:
1971 labels = None
-> 1972 outputs = model(**inputs)
1973 # Save past state if it exists
1974 # TODO: this needs to be fixed and made cleaner later.
1975 if self.args.past_index >= 0:
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
Input In [11], in Jelectra.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict)
9 def forward(self,
10 input_ids=None,
11 attention_mask=None,
(...)
19 output_hidden_states=None,
20 return_dict=None,):
---> 22 outputs = self.model(input_ids, token_type_ids, attention_mask, start_positions, end_positions)
23 output_start = self.sm(outputs[0])
24 output_end = self.sm(outputs[1])
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:1377, in ElectraForQuestionAnswering.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict)
1365 r"""
1366 start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1367 Labels for position (index) of the start of the labelled span for computing the token classification loss.
(...)
1373 are not taken into account for computing the loss.
1374 """
1375 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-> 1377 discriminator_hidden_states = self.electra(
1378 input_ids,
1379 attention_mask=attention_mask,
1380 token_type_ids=token_type_ids,
1381 position_ids=position_ids,
1382 head_mask=head_mask,
1383 inputs_embeds=inputs_embeds,
1384 output_attentions=output_attentions,
1385 output_hidden_states=output_hidden_states,
1386 )
1388 sequence_output = discriminator_hidden_states[0]
1390 logits = self.qa_outputs(sequence_output)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:905, in ElectraModel.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
901 encoder_extended_attention_mask = None
903 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
--> 905 hidden_states = self.embeddings(
906 input_ids=input_ids,
907 position_ids=position_ids,
908 token_type_ids=token_type_ids,
909 inputs_embeds=inputs_embeds,
910 past_key_values_length=past_key_values_length,
911 )
913 if hasattr(self, "embeddings_project"):
914 hidden_states = self.embeddings_project(hidden_states)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/torch/nn/modules/module.py:727, in Module._call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
730 self._forward_hooks.values()):
731 hook_result = hook(self, input, result)
File ~/anaconda3/envs/domain/lib/python3.8/site-packages/transformers/models/electra/modeling_electra.py:212, in ElectraEmbeddings.forward(self, input_ids, token_type_ids, position_ids, inputs_embeds, past_key_values_length)
210 if self.position_embedding_type == "absolute":
211 position_embeddings = self.position_embeddings(position_ids)
--> 212 embeddings += position_embeddings
213 embeddings = self.LayerNorm(embeddings)
214 embeddings = self.dropout(embeddings)
RuntimeError: The size of tensor a (512) must match the size of tensor b (12) at non-singleton dimension 1
How can I solve this? I'm using SQuAD data.
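One thing that stands out in the traceback: the failing call is outputs = self.model(input_ids, token_type_ids, attention_mask, start_positions, end_positions), which passes everything positionally, while ElectraForQuestionAnswering.forward expects (input_ids, attention_mask, token_type_ids, position_ids, ...). That means start_positions (shape [12], the batch size) lands in the position_ids slot and collides with the 512-token position embeddings, which matches the "512 vs 12" size error. A hedged sketch of a forward method that passes the arguments by keyword instead (keeping the softmax over dim=1 exactly as in the question):

from transformers.modeling_outputs import QuestionAnsweringModelOutput

def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
            start_positions=None, end_positions=None, **kwargs):
    # Keyword arguments keep each tensor in its intended slot.
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        start_positions=start_positions,
        end_positions=end_positions,
    )
    output_start = self.sm(outputs.start_logits)
    output_end = self.sm(outputs.end_logits)
    # Trainer expects a loss in the output when labels are provided.
    return QuestionAnsweringModelOutput(
        loss=outputs.loss,
        start_logits=output_start,
        end_logits=output_end,
    )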
I have some calculation and fitting code (part of it is shown below).
def NFW(self, r, r_s, c):
    x = r/r_s
    delta_c = (200/3) * (c**3 / (log(1+c) - (c/(1+c))))
    surface_mass_density = (c_velocity**2 / (4*pi*G)) * (self.D_s / (self.D_l * self.D_ls))
    rho_c = self.Cosmological_Model.critical_density(self.z_l).to(u.M_sun/(u.Mpc)**3).value
    Kk = ( 2*r_s*delta_c*rho_c ) / surface_mass_density

    # Kappa of NFW
    K_NFW = np.array([])
    for i in range(len(r)):
        if 0 <= x[i] < 1:
            f = ( 1 / (x[i]**2-1) ) * ( 1 - ( 2*atanh( np.sqrt( ( (1-x[i]) / (1+x[i]) ) ) ) / np.sqrt(1-x[i]**2) ) )
        elif x[i] == 1:
            f = 1/3
        elif 1 < x[i]:
            f = ( 1 / (x[i]**2-1) ) * ( 1 - ( 2*atan( np.sqrt( (x[i]-1) / (1+x[i]) ) ) / np.sqrt(x[i]**2-1) ) )
        else:
            print("x[i] can not have negative number!")
        K_NFW = np.append(K_NFW, Kk * f)
I tried to fit it with scipy.curve_fit. The strange thing is that sometimes it works and sometimes it does not, depending on "r", which is one of the input parameters of the NFW function.
Below is the fitting code.
ind_sub_R01 = np.where(self.obs_sub_R_LensPlane > 0.10)  # (<-- this)
# If I put '0.10' here it doesn't work, but if I put '0.20' it works.
# This number is the radius cut, i.e. the starting point of the fit.

def NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
    main_popt, main_pcov = curve_fit(self.NFWfunc, self.obs_main_R_LensPlane, self.obs_main_r_T_avg, p0=main_NFW_p0, bounds=main_NFW_bounds)
    sub_popt, sub_pcov = curve_fit(self.NFWfunc, self.obs_sub_R_LensPlane, self.obs_sub_r_T_avg, p0=sub_NFW_p0, bounds=sub_NFW_bounds)
    return main_popt[0], main_popt[1], main_pcov, sub_popt[0], sub_popt[1], sub_pcov
If I set 'this' to '0.1', I get the error below.
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in <module>
198 drz_sci_606 = fits.open('bullet_f606w_drz_sci.fits')
199 FITandPLOT = profile_FITandPLOT(reduced_shear_main=reduced_shear_main, reduced_shear_sub=reduced_shear_sub, SISfunc=profile.SIS, NFWfunc=profile.NFW, data_for_WCS=drz_sci_606, D_l=profile.D_l)
--> 200 FITandPLOT.plotting(main_SIS_p0=v_disp_main, main_SIS_bounds=[v_disp_main-5,v_disp_main+5], sub_SIS_p0=v_disp_sub, sub_SIS_bounds=[v_disp_main-5,v_disp_main+5],
201 main_NFW_p0=(r_s_main,c_vir_main), main_NFW_bounds=([r_s_main-5,c_vir_main-5], [r_s_main+5,c_vir_main+5]), sub_NFW_p0=(r_s_sub,c_vir_sub), sub_NFW_bounds=([r_s_sub-5,c_vir_sub-5], [r_s_sub+5,c_vir_sub+5]))
202
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in plotting(self, main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
59 def plotting(self, main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
60 main_SIS_v_disp, main_SIS_err_v_disp, sub_SIS_v_disp, sub_SIS_err_v_disp = self.SIS_fitting(main_SIS_p0, main_SIS_bounds, sub_SIS_p0, sub_SIS_bounds)
---> 61 main_NFW_r_s, main_NFW_c, main_NFW_err_matrix, sub_NFW_r_s, sub_NFW_c, sub_NFW_err_matrix = self.NFW_fitting(main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
62
63 #----main
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/3894437891.py in NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds)
54 def NFW_fitting(self, main_NFW_p0, main_NFW_bounds, sub_NFW_p0, sub_NFW_bounds):
55 main_popt, main_pcov = curve_fit(self.NFWfunc, self.obs_main_R_LensPlane, self.obs_main_r_T_avg, p0=main_NFW_p0, bounds=main_NFW_bounds)
---> 56 sub_popt, sub_pcov = curve_fit(self.NFWfunc, self.obs_sub_R_LensPlane, self.obs_sub_r_T_avg, p0=sub_NFW_p0, bounds=sub_NFW_bounds)
57 return main_popt[0], main_popt[1], main_pcov, sub_popt[0], sub_popt[1], sub_pcov
58
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
798 kwargs['max_nfev'] = kwargs.pop('maxfev', None)
799
--> 800 res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
801 **kwargs)
802
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/least_squares.py in least_squares(fun, x0, jac, bounds, method, ftol, xtol, gtol, x_scale, loss, f_scale, diff_step, tr_solver, tr_options, jac_sparsity, max_nfev, verbose, args, kwargs)
926
927 elif method == 'trf':
--> 928 result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
929 gtol, max_nfev, x_scale, loss_function, tr_solver,
930 tr_options.copy(), verbose)
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/trf.py in trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose)
121 loss_function, tr_solver, tr_options, verbose)
122 else:
--> 123 return trf_bounds(
124 fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
125 loss_function, tr_solver, tr_options, verbose)
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/trf.py in trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose)
336
337 x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
--> 338 f_new = fun(x_new)
339 nfev += 1
340
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/_lsq/least_squares.py in fun_wrapped(x)
813
814 def fun_wrapped(x):
--> 815 return np.atleast_1d(fun(x, *args, **kwargs))
816
817 if method == 'trf':
~/opt/anaconda3/envs/first_envs/lib/python3.9/site-packages/scipy/optimize/minpack.py in func_wrapped(params)
483 if transform is None:
484 def func_wrapped(params):
--> 485 return func(xdata, *params) - ydata
486 elif transform.ndim == 1:
487 def func_wrapped(params):
/var/folders/42/grbryvqx3vndy45_5d7lxyqr0000gn/T/ipykernel_34140/2654829019.py in NFW(self, r, r_s, c)
142 else:
143 print("x[i] can not have negative number!")
--> 144 K_NFW = np.append(K_NFW, Kk * f)
145
146 # Gamma of NFW
UnboundLocalError: local variable 'f' referenced before assignment
If I set 'this' to '0.20', '0.09', or '0.25', I obtain the fitted parameters and the graph without any problem.
These numbers set the starting point of the fit: for example, with '0.25' only the region to the right of '0.25' is fitted.
I want to know why the error occurs.
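The traceback itself shows the mechanism: when x[i] matches none of the branches (it is negative, or NaN, which fails every comparison), the else branch only prints a message and never assigns f, so the next line Kk * f raises UnboundLocalError. With the 0.10 cut, curve_fit evidently evaluates the model at parameter values where at least one x[i] ends up in that branch; with 0.20 it never does. A hedged sketch of a loop body that handles the case explicitly instead of leaving f undefined (returning np.nan for such points is an assumption about what you want):

for i in range(len(r)):
    if np.isnan(x[i]) or x[i] < 0:
        # Previously this case only printed a message and left f unset.
        print("x[i] can not be negative (or NaN)!")
        f = np.nan
    elif x[i] < 1:
        f = (1/(x[i]**2 - 1)) * (1 - (2*atanh(np.sqrt((1 - x[i])/(1 + x[i]))) / np.sqrt(1 - x[i]**2)))
    elif x[i] == 1:
        f = 1/3
    else:  # x[i] > 1
        f = (1/(x[i]**2 - 1)) * (1 - (2*atan(np.sqrt((x[i] - 1)/(1 + x[i]))) / np.sqrt(x[i]**2 - 1)))
    K_NFW = np.append(K_NFW, Kk * f)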