'Image' object has no attribute 'shape'; Pytorch Object Detection - image

I'm doing object detection with PyTorch/Torchvision, following this tutorial, which is very similar to the official PyTorch tutorial:
https://towardsdatascience.com/building-your-own-object-detector-pytorch-vs-tensorflow-and-how-to-even-get-started-1d314691d4ae
While training the model, execution stops after the set number of epochs and gives me this error:
/usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
26 def decorate_context(*args, **kwargs):
27 with self.__class__():
---> 28 return func(*args, **kwargs)
29 return cast(F, decorate_context)
30
/content/Pytorch_OD/My Drive/Pytorch_OD/vision/engine.py in evaluate(model, data_loader, device)
78 header = 'Test:'
79
---> 80 coco = get_coco_api_from_dataset(data_loader.dataset)
81 iou_types = _get_iou_types(model)
82 coco_evaluator = CocoEvaluator(coco, iou_types)
/content/Pytorch_OD/My Drive/Pytorch_OD/vision/coco_utils.py in get_coco_api_from_dataset(dataset)
204 if isinstance(dataset, torchvision.datasets.CocoDetection):
205 return dataset.coco
--> 206 return convert_to_coco_api(dataset)
207
208
/content/Pytorch_OD/My Drive/Pytorch_OD/vision/coco_utils.py in convert_to_coco_api(ds)
157 img_dict = {}
158 img_dict['id'] = image_id
--> 159 img_dict['height'] = img.shape[-2]
160 img_dict['width'] = img.shape[-1]
161 dataset['images'].append(img_dict)
AttributeError: 'Image' object has no attribute 'shape'
Image is imported via from PIL import Image. The attribute shape is not defined anywhere in the code in my notebook.
Could it be an error in the source file coco_utils.py? Or do I need to provide more information or parts of my code?
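For what it's worth, the traceback shows convert_to_coco_api reading img.shape, which exists on torch tensors but not on PIL images (those expose size instead), so the evaluation code apparently expects your dataset to return tensors. A minimal sketch of the usual fix, converting the image inside __getitem__ (the img_paths attribute and _load_target helper here are assumptions, not your actual code):

import torch
from PIL import Image
from torchvision.transforms import functional as F

class MyDetectionDataset(torch.utils.data.Dataset):
    def __getitem__(self, idx):
        img = Image.open(self.img_paths[idx]).convert("RGB")
        target = self._load_target(idx)  # hypothetical helper returning boxes/labels
        img = F.to_tensor(img)  # [C, H, W] float tensor, so img.shape[-2:] works
        return img, target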

Related

Keep getting this error message "AttributeError: can't set attribute"

The code below triggers an AttributeError: can't set attribute. I'm still new to programming, so I'm having a difficult time figuring out why this error occurs. Any help is appreciated.
import cimcb_lite as cb

cv = cb.cross_val.kfold(model=cb.model.PLS_SIMPLS,
                        X=XTknn,
                        Y=Ytrain,
                        param_dict={'n_components': [1, 2, 3, 4, 5]},
                        folds=5,
                        bootnum=100)
cv.run()
I'm seeing this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/var/folders/rs/f6nsd1894354_821jj157jnr0000gn/T/ipykernel_30013/1292624611.py in <module>
8
9 # run the cross validation
---> 10 cv.run()
11
/opt/anaconda3/lib/python3.9/site-packages/cimcb_lite/cross_val/kfold.py in run(self)
82 def run(self):
83 """Runs all functions prior to plot."""
---> 84 self.calc_ypred()
85 self.calc_stats()
86 if self.bootnum > 1:
/opt/anaconda3/lib/python3.9/site-packages/cimcb_lite/cross_val/kfold.py in calc_ypred(self)
55 model_i = self.model(**params_i)
56 # Full
---> 57 model_i.train(self.X, self.Y)
58 ypred_full_i = model_i.test(self.X)
59 self.ypred_full.append(ypred_full_i)
/opt/anaconda3/lib/python3.9/site-packages/cimcb_lite/model/PLS_SIMPLS.py in train(self, X, Y)
77 # Calculates and store attributes of PLS SIMPLS
78 Xscores, Yscores, Xloadings, Yloadings, Weights, Beta = self.pls_simpls(X, Y, ncomp=self.n_component)
---> 79 self.model.x_scores_ = Xscores
80 self.model.y_scores_ = Yscores
81 self.model.x_loadings_ = Xloadings
AttributeError: can't set attribute
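In Python, "can't set attribute" is what you get when assigning to a property that has no setter. The bottom frame shows cimcb_lite assigning x_scores_ on a scikit-learn PLS model, and in recent scikit-learn releases those score/loading attributes became read-only properties, which would explain the error (pinning an older scikit-learn is the usual workaround, though that's an assumption about your environment). A minimal sketch of the mechanism:

class ReadOnlyDemo:
    @property
    def x_scores_(self):  # property with a getter but no setter
        return getattr(self, "_x", None)

d = ReadOnlyDemo()
d.x_scores_ = [1, 2, 3]  # AttributeError: can't set attribute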

Cannot compile DQN agent: TypeError: ('Keyword argument not understood:', 'units')

I have this model:
poss_in = layers.Input((1,))
poss_lr = layers.Dense(8, activation='relu')(poss_in)
hist_in = layers.Input((100,))
hist_lr = layers.Reshape((100, 1))(hist_in)
hist_lr = layers.LSTM(32)(hist_lr)
hist_lr = layers.Dense(32, activation='relu')(hist_lr)
sent_in = layers.Input((10,))
sent_lr = layers.Reshape((10, 1))(sent_in)
sent_lr = layers.Conv1D(4, 3)(sent_lr)
sent_lr = layers.GRU(4)(sent_lr)
root_lr = layers.concatenate([poss_lr, hist_lr, sent_lr])
root_lr = layers.Reshape((44, 1))(root_lr)
root_lr = Attention(16)(root_lr)
root_lr = layers.Dense(16)(root_lr)
root_lr = layers.Dense(1)(root_lr)
model = Model([poss_in, hist_in, sent_in], root_lr)
and I'm trying to create a DQN agent with:
dqn = agents.DQNAgent(
model=model,
memory=memory.SequentialMemory(limit=50000, window_length=1),
policy=policy.BoltzmannQPolicy(),
nb_actions=1,
nb_steps_warmup=64,
target_model_update=1e-2
)
dqn.compile('Adam', metrics=['mae'])
but I receive this error:
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
super(Adam, self).__init__(name, **kwargs)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-18-3d71fb800af2> in <module>
7 target_model_update=1e-2
8 )
----> 9 dqn.compile(opt.Adam(lr=1e-3), metrics=['mae'])
17 frames
/usr/local/lib/python3.7/dist-packages/rl/agents/dqn.py in compile(self, optimizer, metrics)
165
166 # We never train the target model, hence we can set the optimizer and loss arbitrarily.
--> 167 self.target_model = clone_model(self.model, self.custom_model_objects)
168 self.target_model.compile(optimizer='sgd', loss='mse')
169 self.model.compile(optimizer='sgd', loss='mse')
/usr/local/lib/python3.7/dist-packages/rl/util.py in clone_model(model, custom_objects)
13 'config': model.get_config(),
14 }
---> 15 clone = model_from_config(config, custom_objects=custom_objects)
16 clone.set_weights(model.get_weights())
17 return clone
/usr/local/lib/python3.7/dist-packages/keras/saving/model_config.py in model_from_config(config, custom_objects)
50 '`Sequential.from_config(config)`?')
51 from keras.layers import deserialize # pylint: disable=g-import-not-at-top
---> 52 return deserialize(config, custom_objects=custom_objects)
53
54
/usr/local/lib/python3.7/dist-packages/keras/layers/serialization.py in deserialize(config, custom_objects)
209 module_objects=LOCAL.ALL_OBJECTS,
210 custom_objects=custom_objects,
--> 211 printable_module_name='layer')
212
213
/usr/local/lib/python3.7/dist-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
681 custom_objects=dict(
682 list(_GLOBAL_CUSTOM_OBJECTS.items()) +
--> 683 list(custom_objects.items())))
684 else:
685 with CustomObjectScope(custom_objects):
/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py in from_config(cls, config, custom_objects)
707 'name', 'layers', 'input_layers', 'output_layers']):
708 input_tensors, output_tensors, created_layers = reconstruct_from_config(
--> 709 config, custom_objects)
710 model = cls(
711 inputs=input_tensors,
/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py in reconstruct_from_config(config, custom_objects, created_layers)
1324 # First, we create all layers and enqueue nodes to be processed
1325 for layer_data in config['layers']:
-> 1326 process_layer(layer_data)
1327 # Then we process nodes in order of layer depth.
1328 # Nodes that cannot yet be processed (if the inbound node
/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py in process_layer(layer_data)
1306 from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
1307
-> 1308 layer = deserialize_layer(layer_data, custom_objects=custom_objects)
1309 created_layers[layer_name] = layer
1310
/usr/local/lib/python3.7/dist-packages/keras/layers/serialization.py in deserialize(config, custom_objects)
209 module_objects=LOCAL.ALL_OBJECTS,
210 custom_objects=custom_objects,
--> 211 printable_module_name='layer')
212
213
/usr/local/lib/python3.7/dist-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
684 else:
685 with CustomObjectScope(custom_objects):
--> 686 deserialized_obj = cls.from_config(cls_config)
687 else:
688 # Then `cls` may be a function returning a class.
/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer_v1.py in from_config(cls, config)
515 A layer instance.
516 """
--> 517 return cls(**config)
518
519 def compute_output_shape(self, input_shape):
/usr/local/lib/python3.7/dist-packages/keras/layers/dense_attention.py in __init__(self, use_scale, **kwargs)
321
322 def __init__(self, use_scale=False, **kwargs):
--> 323 super(Attention, self).__init__(**kwargs)
324 self.use_scale = use_scale
325
/usr/local/lib/python3.7/dist-packages/keras/layers/dense_attention.py in __init__(self, causal, dropout, **kwargs)
70
71 def __init__(self, causal=False, dropout=0.0, **kwargs):
---> 72 super(BaseDenseAttention, self).__init__(**kwargs)
73 self.causal = causal
74 self.dropout = dropout
/usr/local/lib/python3.7/dist-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
627 self._self_setattr_tracking = False # pylint: disable=protected-access
628 try:
--> 629 result = method(self, *args, **kwargs)
630 finally:
631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py in __init__(self, seed, force_generator, **kwargs)
3436 **kwargs: other keyword arguments that will be passed to the parent class
3437 """
-> 3438 super().__init__(**kwargs)
3439 self._random_generator = backend.RandomGenerator(
3440 seed, force_generator=force_generator)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
627 self._self_setattr_tracking = False # pylint: disable=protected-access
628 try:
--> 629 result = method(self, *args, **kwargs)
630 finally:
631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer_v1.py in __init__(self, trainable, name, dtype, dynamic, **kwargs)
138 }
139 # Validate optional keyword arguments.
--> 140 generic_utils.validate_kwargs(kwargs, allowed_kwargs)
141
142 # Mutable properties
/usr/local/lib/python3.7/dist-packages/keras/utils/generic_utils.py in validate_kwargs(kwargs, allowed_kwargs, error_message)
1172 for kwarg in kwargs:
1173 if kwarg not in allowed_kwargs:
-> 1174 raise TypeError(error_message, kwarg)
1175
1176
TypeError: ('Keyword argument not understood:', 'units')
I have tried replacing the DQN with SARSA and DDPG agents, but they all generated the same error.
I looked the problem up on the internet for a while and asked on r/tensorflow, but I haven't resolved anything yet.
For additional information, I'm using Google Colab.
Thanks for every reply!
UPDATE:
I tried simplifying the model to check whether the problem was in a specific layer, so I created this model:
poss_in = layers.Input((1,))
poss_lr = layers.Dense(1)(poss_in)
hist_in = layers.Input((100,))
hist_lr = layers.Dense(1)(hist_in)
sent_in = layers.Input((10,))
sent_lr = layers.Dense(1)(sent_in)
root_lr = layers.concatenate([poss_lr, hist_lr, sent_lr])
root_lr = layers.Dense(1)(root_lr)
model = Model([poss_in, hist_in, sent_in], root_lr)
With this model, the DQN agent compiled with no errors.
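That narrows it down: the simplified model compiles because it no longer contains the Attention layer. In the trace, keras-rl rebuilds the model via clone_model and model_from_config, and during deserialization the name Attention resolves to the built-in keras.layers.Attention, which accepts no units argument, hence 'Keyword argument not understood: units'. A hedged sketch of one way to route cloning through your own layer, using the agent's custom_model_objects argument (this assumes Attention is a custom class; the get_config shown is illustrative):

class Attention(layers.Layer):  # hypothetical custom layer; build()/call() omitted
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def get_config(self):  # serialize `units` so cloning can rebuild the layer
        config = super().get_config()
        config.update({"units": self.units})
        return config

dqn = agents.DQNAgent(
    model=model,
    custom_model_objects={"Attention": Attention},  # passed to clone_model
    memory=memory.SequentialMemory(limit=50000, window_length=1),
    policy=policy.BoltzmannQPolicy(),
    nb_actions=1,
    nb_steps_warmup=64,
    target_model_update=1e-2
)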

How to get the SHAP values for the masked language modeling task using a transformer?

I am trying to get the SHAP values for a masked language modeling task using a transformer. I get the error KeyError: 'label' in the code where I input a single data sample to get an explanation. My complete code and error trace are as follows:
import transformers
import shap
from transformers import RobertaTokenizer, RobertaForMaskedLM, pipeline
import torch
model = RobertaForMaskedLM.from_pretrained('microsoft/codebert-base-mlm')
tokenizer = RobertaTokenizer.from_pretrained('microsoft/codebert-base-mlm')
code_example = "if (x <mask> 10)"
fill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer)
explainer = shap.Explainer(fill_mask)
shap_values = explainer(['x {tokenizer.mask_token} 10'])
The following is the error trace:
KeyError Traceback (most recent call last)
<ipython-input-12-bb3832d1772d> in <module>
6 # explain the model on two sample inputs
7 explainer = shap.Explainer(fill_mask)
----> 8 shap_values = explainer(['x {tokenizer.mask_token} 10'])
9 print(shap_values)
10 # visualize the first prediction's explanation for the POSITIVE output class
5 frames
/usr/local/lib/python3.7/dist-packages/shap/explainers/_partition.py in __call__(self, max_evals, fixed_context, main_effects, error_bounds, batch_size, outputs, silent, *args)
136 return super().__call__(
137 *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
--> 138 outputs=outputs, silent=silent
139 )
140
/usr/local/lib/python3.7/dist-packages/shap/explainers/_explainer.py in __call__(self, max_evals, main_effects, error_bounds, batch_size, outputs, silent, *args, **kwargs)
266 row_result = self.explain_row(
267 *row_args, max_evals=max_evals, main_effects=main_effects, error_bounds=error_bounds,
--> 268 batch_size=batch_size, outputs=outputs, silent=silent, **kwargs
269 )
270 values.append(row_result.get("values", None))
/usr/local/lib/python3.7/dist-packages/shap/explainers/_partition.py in explain_row(self, max_evals, main_effects, error_bounds, batch_size, outputs, silent, fixed_context, *row_args)
159 # if not fixed background or no base value assigned then compute base value for a row
160 if self._curr_base_value is None or not getattr(self.masker, "fixed_background", False):
--> 161 self._curr_base_value = fm(m00.reshape(1, -1), zero_index=0)[0] # the zero index param tells the masked model what the baseline is
162 f11 = fm(~m00.reshape(1, -1))[0]
163
/usr/local/lib/python3.7/dist-packages/shap/utils/_masked_model.py in __call__(self, masks, zero_index, batch_size)
65
66 else:
---> 67 return self._full_masking_call(masks, batch_size=batch_size)
68
69 def _full_masking_call(self, masks, zero_index=None, batch_size=None):
/usr/local/lib/python3.7/dist-packages/shap/utils/_masked_model.py in _full_masking_call(self, masks, zero_index, batch_size)
142
143 joined_masked_inputs = tuple([np.concatenate(v) for v in all_masked_inputs])
--> 144 outputs = self.model(*joined_masked_inputs)
145 _assert_output_input_match(joined_masked_inputs, outputs)
146 all_outputs.append(outputs)
/usr/local/lib/python3.7/dist-packages/shap/models/_transformers_pipeline.py in __call__(self, strings)
33 val = [val]
34 for obj in val:
---> 35 output[i, self.label2id[obj["label"]]] = sp.special.logit(obj["score"]) if self.rescale_to_logits else obj["score"]
36 return output
KeyError: 'label'
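Two things stand out here. First, the string passed to the explainer is missing its f prefix, so the literal text {tokenizer.mask_token} is sent to the pipeline instead of an actual <mask> token. Second, and the direct cause of the KeyError: shap's transformers wrapper (the bottom frame) indexes obj["label"], a key that text-classification pipelines return; a fill-mask pipeline instead returns dicts with score, token, token_str and sequence, so there is no 'label' to look up, which suggests shap's pipeline wrapper does not handle fill-mask output as-is. A quick check of both points:

masked = f"x {tokenizer.mask_token} 10"  # note the f prefix
print(masked)  # "x <mask> 10"
for pred in fill_mask(masked):
    # standard fill-mask keys; there is no 'label' key for shap to read
    print(pred["token_str"], pred["score"])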

Kusto Ingest - KustoServiceError 'BadRequest_SyntaxError'

I have the following code for ingesting data into Azure Data Explorer using Python in Databricks:
df = pd.DataFrame({"StringCol": ["123ABC", 'B123', 'C123', 'D123'],
                   "NumberCol": [1, 2, 3, 4],
                   "DecimalCol": [1, 2.2, 3.3, 4.4],
                   "DateCol": ['1/1/20', '2/2/20', '3/3/30', '4/4/20']})
ingestion_props = IngestionProperties(database=db, table='TestTable_DeleteMe')
connWrite.ingest_from_dataframe(df, ingestion_properties=ingestion_props)
This gives me the error:
BadRequest_SyntaxError', 'message': 'Request is invalid and cannot be executed
Earlier in my code I created a table using the same data types as this dummy pandas dataframe. Now I'm trying to load the data into the table. Full stack trace:
KustoServiceError Traceback (most recent call last)
<command-3953651275234016> in <module>
1 df=pd.DataFrame({"StringCol": ["123ABC", 'B123', 'C123','D123'],"NumberCol": [1,2,3,4],"DecimalCol": [1,2.2,3.3,4.4],"DateCol": ['1/1/20','2/2/20','3/3/30','4/4/20']})
2 ingestion_props = IngestionProperties(database=db, table='TestTable_DeleteMe')
----> 3 connWrite.ingest_from_dataframe(df, ingestion_properties=ingestion_props)
4
5 #adx_loadIntoTable(connWrite,db,df,'TestTable_DeleteMe')
/databricks/python/lib/python3.7/site-packages/azure/kusto/ingest/ingest_client.py in ingest_from_dataframe(self, df, ingestion_properties)
52 ingestion_properties.format = DataFormat.CSV
53
---> 54 self.ingest_from_file(temp_file_path, ingestion_properties)
55
56 os.unlink(temp_file_path)
/databricks/python/lib/python3.7/site-packages/azure/kusto/ingest/ingest_client.py in ingest_from_file(self, file_descriptor, ingestion_properties)
64 :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
65 """
---> 66 containers = self._resource_manager.get_containers()
67
68 if isinstance(file_descriptor, FileDescriptor):
/databricks/python/lib/python3.7/site-packages/azure/kusto/ingest/_resource_manager.py in get_containers(self)
121
122 def get_containers(self) -> List[_ResourceUri]:
--> 123 self._refresh_ingest_client_resources()
124 return self._ingest_client_resources.containers
125
/databricks/python/lib/python3.7/site-packages/azure/kusto/ingest/_resource_manager.py in _refresh_ingest_client_resources(self)
79 or not self._ingest_client_resources.is_applicable()
80 ):
---> 81 self._ingest_client_resources = self._get_ingest_client_resources_from_service()
82 self._ingest_client_resources_last_update = datetime.utcnow()
83
/databricks/python/lib/python3.7/site-packages/azure/kusto/ingest/_resource_manager.py in _get_ingest_client_resources_from_service(self)
86
87 def _get_ingest_client_resources_from_service(self):
---> 88 table = self._kusto_client.execute("NetDefaultDB", ".get ingestion resources").primary_results[0]
89
90 secured_ready_for_aggregation_queues = self._get_resource_by_name(table, "SecuredReadyForAggregationQueue")
/databricks/python/lib/python3.7/site-packages/azure/kusto/data/client.py in execute(self, database, query, properties)
553 query = query.strip()
554 if query.startswith("."):
--> 555 return self.execute_mgmt(database, query, properties)
556 return self.execute_query(database, query, properties)
557
/databricks/python/lib/python3.7/site-packages/azure/kusto/data/client.py in execute_mgmt(self, database, query, properties)
578 :rtype: azure.kusto.data.response.KustoResponseDataSet
579 """
--> 580 return self._execute(self._mgmt_endpoint, database, query, None, KustoClient._mgmt_default_timeout, properties)
581
582 def execute_streaming_ingest(
/databricks/python/lib/python3.7/site-packages/azure/kusto/data/client.py in _execute(self, endpoint, database, query, payload, timeout, properties)
654 )
655
--> 656 raise KustoServiceError([response.json()], response)
KustoServiceError: (KustoServiceError(...), [{'error': {'code': 'BadRequest_SyntaxError', 'message': 'Request is invalid and cannot be executed.', '#type': 'Kusto.Data.Exceptions.SyntaxException', '#message': "Syntax error: Query could not be parsed: . Query: '.get ingestion resources'", '#context': {'timestamp': '2020-06-27T21:44:48.0697658Z', 'serviceAlias': 'USCPIRSTASADE01', 'machineName': 'KEngine000000', 'processName': 'Kusto.WinSvc.Svc', 'processId': 7124, 'threadId': 7240, 'appDomainName': 'Kusto.WinSvc.Svc.exe', 'clientRequestId': 'KPC.execute;0c2173bf-ea69-4253-bbaf-0203f3aa298c', 'activityId': 'cf41c806-8e15-458e-b388-386613f63952', 'subActivityId': 'df366667-ca8d-487b-a281-723f696a8f68', 'activityType': 'DN.FE.ExecuteControlCommand', 'parentActivityId': 'f8cd0bb8-04e9-48cf-8a84-8b16e1e24197', 'activityStack': '(Activity stack: CRID=KPC.execute;0c2173bf-ea69-4253-bbaf-0203f3aa298c ARID=cf41c806-8e15-458e-b388-386613f63952 > DN.Admin.Client.ExecuteControlCommand/7271d9ec-2adf-4714-b19e-69495ad80d65 > P.WCF.Service.ExecuteControlCommandInternal..IAdminClientServiceCommunicationContract/f8cd0bb8-04e9-48cf-8a84-8b16e1e24197 > DN.FE.ExecuteControlCommand/df366667-ca8d-487b-a281-723f696a8f68)'}, '#permanent': True}}])
It is likely that your connection uses the engine endpoint instead of the data management endpoint. Can you check that the connection to the cluster starts with "ingest-"? Here is an example:
client = KustoIngestClient("https://ingest-{cluster_name}.kusto.windows.net")
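A hedged sketch of the two endpoints side by side (the cluster name is a placeholder, and authentication is reduced to device login for brevity):

from azure.kusto.data import KustoClient, KustoConnectionStringBuilder
from azure.kusto.ingest import KustoIngestClient

# engine endpoint: queries and control commands (e.g. .create table)
engine_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(
    "https://mycluster.kusto.windows.net")
connRead = KustoClient(engine_kcsb)

# data management endpoint: queued ingestion; note the "ingest-" prefix
ingest_kcsb = KustoConnectionStringBuilder.with_aad_device_authentication(
    "https://ingest-mycluster.kusto.windows.net")
connWrite = KustoIngestClient(ingest_kcsb)

connWrite.ingest_from_dataframe(df, ingestion_properties=ingestion_props)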

KeyError: 'Entity c does not exist in dfs'

When I try to run this code:
ftr_mtrx_custmr, features_defs = ft.dfs(entities=entities,
relationships=relationship,
target_entity="transactions")
I get this error:
490 featuretools.entityset - WARNING index session_id not found in dataframe, creating new integer column
KeyError Traceback (most recent call last)
<ipython-input-82-d467a36d5254> in <module>()
1 ftr_mtrx_custmr, features_defs = ft.dfs(entities=entities,
2 relationships=relationshp,
----> 3 target_entity="transactions")
4 frames
/usr/local/lib/python3.6/dist-packages/featuretools/utils/entry_point.py in function_wrapper(*args, **kwargs)
38 ep.on_error(error=e,
39 runtime=runtime)
---> 40 raise e
41
42 # send return value
/usr/local/lib/python3.6/dist-packages/featuretools/utils/entry_point.py in function_wrapper(*args, **kwargs)
30 # call function
31 start = time.time()
---> 32 return_value = func(*args, **kwargs)
33 runtime = time.time() - start
34 except Exception as e:
/usr/local/lib/python3.6/dist-packages/featuretools/synthesis/dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, primitive_options, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types, progress_callback)
225 '''
226 if not isinstance(entityset, EntitySet):
--> 227 entityset = EntitySet("dfs", entities, relationships)
228
229 dfs_object = DeepFeatureSynthesis(target_entity, entityset,
/usr/local/lib/python3.6/dist-packages/featuretools/entityset/entityset.py in __init__(self, id, entities, relationships)
83
84 for relationship in relationships:
---> 85 parent_variable = self[relationship[0]][relationship[1]]
86 child_variable = self[relationship[2]][relationship[3]]
87 self.add_relationship(Relationship(parent_variable,
/usr/local/lib/python3.6/dist-packages/featuretools/entityset/entityset.py in __getitem__(self, entity_id)
124 return self.entity_dict[entity_id]
125 name = self.id or "entity set"
--> 126 raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
127
128 @property
However, this returned KeyError: 'Entity c does not exist in dfs'.
Any idea what's wrong with my code?
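The trace contains a strong hint: EntitySet does parent_variable = self[relationship[0]][relationship[1]], and the entity it cannot find is the single character 'c'. That is what happens when relationships is a bare tuple of strings instead of a list of tuples; iterating over it yields the strings themselves, and relationship[0] is then the first character. A sketch with assumed entity and column names:

# wrong: one bare tuple; iterating yields strings, so relationship[0] == 'c'
relationship = ("customers", "customer_id", "transactions", "customer_id")

# right: a list of (parent_entity, parent_column, child_entity, child_column)
relationship = [("customers", "customer_id", "transactions", "customer_id")]

ftr_mtrx_custmr, features_defs = ft.dfs(entities=entities,
                                        relationships=relationship,
                                        target_entity="transactions")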
