I am able to save an H2O model, load it again and then show it...
# save the model
model_path_2 = h2o.save_model(model=my_xgboost_2, path="tmp/mymodel", force=True)
print(model_path_2)
>>>/home/dell/Documents/Enigma/tmp/mymodel/XGBoost_model_python_1503367354328_27
# load the model
saved_model_2 = h2o.load_model(model_path_2)
but I cannot use it to predict.
saved_model_2.predict(test)
>>>xgboost prediction progress: | (failed)
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-100-fa76fd498ee6> in <module>()
----> 1 saved_model_2.predict(test)
/home/dell/anaconda3/lib/python3.6/site-packages/h2o/model/model_base.py in predict(self, test_data)
130 j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id)),
131 self._model_json["algo"] + " prediction")
--> 132 j.poll()
133 return h2o.get_frame(j.dest_key)
134
/home/dell/anaconda3/lib/python3.6/site-packages/h2o/job.py in poll(self, verbose_model_scoring_history)
75 if (isinstance(self.job, dict)) and ("stacktrace" in list(self.job)):
76 raise EnvironmentError("Job with key {} failed with an exception: {}\nstacktrace: "
---> 77 "\n{}".format(self.job_key, self.exception, self.job["stacktrace"]))
78 else:
79 raise EnvironmentError("Job with key %s failed with an exception: %s" % (self.job_key, self.exception))
OSError: Job with key $03017f00000132d4ffffffff$_927b7278904ecf169173d48a23de4c10 failed with an exception: java.lang.NullPointerException
stacktrace:
java.lang.NullPointerException
I can, however, predict on the model without saving it. I am using Python 3.6.1, and H2O 3.14.0.1 on Ubuntu 16.04.
This is a known issue with H2O's XGBoost implementation and should be fixed soon. It does not affect other H2O models.
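Until the fix lands, one workaround consistent with that note is to do the save/load/predict round trip with a non-XGBoost estimator. A minimal sketch with a GBM (the train/test file names are placeholders, not from the original post):

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()

# Hypothetical file names; substitute your own frames.
train = h2o.import_file("train.csv")
test = h2o.import_file("test.csv")

# Train a GBM, which per the note above is unaffected by the XGBoost bug.
gbm = H2OGradientBoostingEstimator(ntrees=50)
gbm.train(x=train.columns[:-1], y=train.columns[-1], training_frame=train)

# Save, reload, and predict with the reloaded model.
model_path = h2o.save_model(model=gbm, path="tmp/mymodel", force=True)
reloaded = h2o.load_model(model_path)
preds = reloaded.predict(test)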
I'm using the following code, taken straight from this URL: https://lightning-transformers.readthedocs.io/en/latest/tasks/nlp/question_answering.html
import pytorch_lightning as pl
from transformers import AutoTokenizer
from lightning_transformers.task.nlp.question_answering import (
    QuestionAnsweringTransformer,
    SquadDataModule,
)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="bert-base-uncased")
model = QuestionAnsweringTransformer(pretrained_model_name_or_path="bert-base-uncased")
dm = SquadDataModule(
    batch_size=1,
    dataset_config_name="plain_text",
    max_length=384,
    version_2_with_negative=False,
    null_score_diff_threshold=0.0,
    doc_stride=128,
    n_best_size=20,
    max_answer_length=30,
    tokenizer=tokenizer,
)
trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)
trainer.fit(model, dm)
which throws this error
AssertionError Traceback (most recent call last)
<ipython-input-2-0b608c02a52e> in <module>
14 trainer = pl.Trainer(accelerator="auto", devices="auto", max_epochs=1)
15
---> 16 trainer.fit(model, dm)
16 frames
/usr/local/lib/python3.8/dist-packages/lightning_transformers/task/nlp/question_answering/datasets/squad/processing.py in postprocess_qa_predictions(examples, features, predictions, version_2_with_negative, n_best_size, max_answer_length, null_score_diff_threshold, output_dir, prefix)
245 all_start_logits, all_end_logits, example_ids = predictions
246
--> 247 assert len(predictions[0]) == len(features), f"Got {len(predictions[0])} predictions and {len(features)} features."
248
249 # Build a map example to its corresponding features.
AssertionError: Got 2 predictions and 10784 features.
I was simply trying to get a single example from the documentation running in Google Colab before investigating further whether this would meet my use case, but the example fails as-is, which makes it disheartening to dig deeper. Nothing comes up when I google "AssertionError: Got 2 predictions and 10784 features."
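Since the snippet comes straight from the documentation, one low-effort check before debugging the assertion itself (my suggestion, not from the docs) is whether the installed versions of the three libraries match what the documentation was written against:

# Print the installed versions of the three packages involved; compare
# against the versions pinned in lightning-transformers' requirements.
from importlib.metadata import version

for pkg in ("pytorch-lightning", "transformers", "lightning-transformers"):
    print(pkg, version(pkg))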
In this notebook, we use the Explainable AI SDK from Google to load a model right after saving it. This fails with a message that the model is missing.
But note:
- there is an INFO message saying that the model was saved;
- checking working/model shows that the model is there.
However, working/model/assets is empty.
Why do we get this error message? How can we avoid it?
model_path = "working/model"
model.save(model_path)
builder = SavedModelMetadataBuilder(model_path)
builder.set_numeric_metadata(
    "numpy_inputs",
    input_baselines=[X_train.median().tolist()],  # attributions relative to the median of the target
    index_feature_mapping=X_train.columns.tolist(),  # the names of each feature
)
builder.save_metadata(model_path)
explainer = explainable_ai_sdk.load_model_from_local_path(
    model_path=model_path,
    config=configs.SampledShapleyConfig(path_count=20),
)
INFO:tensorflow:Assets written to: working/model/assets
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
/tmp/ipykernel_26061/1928503840.py in <module>
18 explainer = explainable_ai_sdk.load_model_from_local_path(
19 model_path=model_path,
---> 20 config=configs.SampledShapleyConfig(path_count=20),
21 )
22
/opt/conda/lib/python3.7/site-packages/explainable_ai_sdk/model/model_factory.py in load_model_from_local_path(model_path, config)
128 """
129 if _LOCAL_MODEL_KEY not in _MODEL_REGISTRY:
--> 130 raise NotImplementedError('There are no implementations of local model.')
131 return _MODEL_REGISTRY[_LOCAL_MODEL_KEY](model_path, config)
132
NotImplementedError: There are no implementations of local model.
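One way to probe why the local-model implementation is missing, relying only on the private names visible in the traceback (a debugging hack, not a supported API):

# _MODEL_REGISTRY and _LOCAL_MODEL_KEY are private names taken from the
# traceback above. An empty registry means no model implementation was
# registered when the SDK was imported.
from explainable_ai_sdk.model import model_factory

print(model_factory._MODEL_REGISTRY)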
Why does this work in Google Colab but not in Docker?
This is my Dockerfile.
FROM python:3.7
RUN pip install -q transformers tensorflow
RUN pip install ipython
ENTRYPOINT ["/bin/bash"]
And I'm executing this.
from transformers import *
nlp = pipeline(
    'question-answering',
    model='mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es',
    tokenizer=(
        'mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es',
        {"use_fast": False}
    )
)
But I get this error:
Downloading: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 465/465 [00:00<00:00, 325kB/s]
Downloading: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 242k/242k [00:00<00:00, 796kB/s]
Downloading: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 112/112 [00:00<00:00, 70.1kB/s]
Downloading: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 135/135 [00:00<00:00, 99.6kB/s]
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
/usr/local/lib/python3.7/site-packages/transformers/modeling_tf_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
461 if resolved_archive_file is None:
--> 462 raise EnvironmentError
463 except EnvironmentError:
OSError:
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-1-1f9fed95967a> in <module>
5 tokenizer=(
6 'mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es',
----> 7 {"use_fast": False}
8 )
9 )
/usr/local/lib/python3.7/site-packages/transformers/pipelines.py in pipeline(task, model, config, tokenizer, framework, **kwargs)
1882 "Trying to load the model with Tensorflow."
1883 )
-> 1884 model = model_class.from_pretrained(model, config=config, **model_kwargs)
1885
1886 return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
/usr/local/lib/python3.7/site-packages/transformers/modeling_tf_auto.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
1207 for config_class, model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
1208 if isinstance(config, config_class):
-> 1209 return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
1210 raise ValueError(
1211 "Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
/usr/local/lib/python3.7/site-packages/transformers/modeling_tf_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
467 f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
468 )
--> 469 raise EnvironmentError(msg)
470 if resolved_archive_file == archive_file:
471 logger.info("loading weights file {}".format(archive_file))
OSError: Can't load weights for 'mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es'. Make sure that:
- 'mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es' is a correct model identifier listed on 'https://huggingface.co/models'
- or 'mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es' is the correct path to a directory containing a file named one of tf_model.h5, pytorch_model.bin.
However, this works perfectly in Google Colab. The Colab notebook doesn't require a GPU to run, so why wouldn't it work in Docker? What dependencies could I be missing? The error message doesn't suggest missing dependencies so much as a missing model, but look:
And yes, the model "mrm8488/distill-bert-base-spanish-wwm-cased-finetuned-spa-squad2-es" does exist on huggingface.co.
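One difference the traceback itself hints at (pipelines.py logs "Trying to load the model with Tensorflow.") is the backend: the Dockerfile installs tensorflow but not torch, while Colab ships PyTorch preinstalled, and the error lists pytorch_model.bin among the acceptable weight files. A minimal probe of which backends each environment can see, assuming nothing beyond the standard library:

# Quick probe: which deep-learning backends can transformers detect?
# If torch is missing, transformers falls back to TF weights (tf_model.h5),
# which a PyTorch-only model repo would not provide.
import importlib.util

for pkg in ("torch", "tensorflow"):
    found = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'available' if found else 'missing'}")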
I'm trying to build a convolutional neural network for image classification in Python.
I run my code on CoLab and have loaded my data on Google Drive.
I can see all the files and folders in my google drive from python, but when I try to actually load an image it gives me the error in the title.
I'm using the skimage.io package. I'm actually just running a notebook I found on Kaggle, so the code should run fine; the only difference I noticed is that the Kaggle user was probably not working on Colab with their data in Google Drive, so maybe that's the problem. Anyway, here's my code:
from skimage.io import imread
img=imread('/content/drive/My Drive/CoLab/Data/chest_xray/train/PNEUMONIA/person53_bacteria_255.jpeg')
Which gives me the following error:
AttributeError: 'NoneType' object has no attribute 'ReadAsArray'
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-4a64aebb8504> in <module>()
----> 1 img=imread('/content/drive/My Drive/CoLab/Data/chest_xray/train/PNEUMONIA/person53_bacteria_255.jpeg')
4 frames
/usr/local/lib/python3.6/dist-packages/skimage/io/_io.py in imread(fname, as_gray, plugin, flatten, **plugin_args)
59
60 with file_or_url_context(fname) as fname:
---> 61 img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
62
63 if not hasattr(img, 'ndim'):
/usr/local/lib/python3.6/dist-packages/skimage/io/manage_plugins.py in call_plugin(kind, *args, **kwargs)
208 (plugin, kind))
209
--> 210 return func(*args, **kwargs)
211
212
/usr/local/lib/python3.6/dist-packages/imageio/core/functions.py in imread(uri, format, **kwargs)
221 reader = read(uri, format, "i", **kwargs)
222 with reader:
--> 223 return reader.get_data(0)
224
225
/usr/local/lib/python3.6/dist-packages/imageio/core/format.py in get_data(self, index, **kwargs)
345 self._checkClosed()
346 self._BaseReaderWriter_last_index = index
--> 347 im, meta = self._get_data(index, **kwargs)
348 return Array(im, meta) # Array tests im and meta
349
/usr/local/lib/python3.6/dist-packages/imageio/plugins/gdal.py in _get_data(self, index)
64 if index != 0:
65 raise IndexError("Gdal file contains only one dataset")
---> 66 return self._ds.ReadAsArray(), self._get_meta_data(index)
67
68 def _get_meta_data(self, index):
AttributeError: 'NoneType' object has no attribute 'ReadAsArray'
First, instead of My Drive it should be MyDrive (no space).
If it still doesn't work, you can try the following:
%cd /content/drive/MyDrive/CoLab/Data/chest_xray/train/PNEUMONIA
img=imread('person53_bacteria_255.jpeg')
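For completeness, the snippet above assumes Google Drive is already mounted in the Colab runtime; if it is not, mount it first with the standard Colab helper:

# Mount Google Drive into the Colab filesystem before reading images.
from google.colab import drive
drive.mount('/content/drive')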
I'm having apparent compatibility issues running H2O on MapR 6.0 via the 3.18.0.2 MapR 5.2 driver (trying the latest driver, 3.20.0.7, as recommended in another SO post, did not help).
While I am able to start an H2O cluster on MapR 6.0 (via something like hadoop jar h2odriver.jar -nodes 3 -mapperXmx 6g -output hdfsOutputDirName) and seem to be able to access the H2O Flow UI, I am having problems accessing the cluster via the Python API (pip show h2o confirms that the package version matches the driver being used).
Is the MapR 5.2 driver (currently the latest MapR driver version offered by H2O) incompatible with MapR 6.0? I would not be asking if not for the fact that I seem to be able to use the H2O Flow UI on a cluster instance started on MapR 6.0. Is there any workaround other than the standalone driver version? I would like to still be able to leverage YARN on the Hadoop cluster.
The code and error seen when trying to connect to the running H2O cluster via the Python API are shown below.
import h2o

# connect to h2o service
h2o.init(ip=h2o_cnxn_ip)
where h2o_cnxn_ip is the IP and port generated after starting the H2O cluster on the MapR 6.0 system. This produces the following error:
Checking whether there is an H2O instance running at http://172.18.0.123:54321...
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-5-1728877a03a2> in <module>()
1 # connect to h2o service
----> 2 h2o.init(ip=h2o_cnxn_ip)
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/h2o.pyc in init(url, ip, port, https, insecure, username, password, cookies, proxy, start_h2o, nthreads, ice_root, enable_assertions, max_mem_size, min_mem_size, strict_version_check, ignore_config, extra_classpath, **kwargs)
250 auth=auth, proxy=proxy,cookies=cookies, verbose=True,
251 _msgs=("Checking whether there is an H2O instance running at {url}",
--> 252 "connected.", "not found."))
253 except H2OConnectionError:
254 # Backward compatibility: in init() port parameter really meant "baseport" when starting a local server...
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/backend/connection.pyc in open(server, url, ip, port, https, auth, verify_ssl_certificates, proxy, cookies, verbose, _msgs)
316 conn._stage = 1
317 conn._timeout = 3.0
--> 318 conn._cluster = conn._test_connection(retries, messages=_msgs)
319 # If a server is unable to respond within 1s, it should be considered a bug. However we disable this
320 # setting for now, for no good reason other than to ignore all those bugs :(
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/backend/connection.pyc in _test_connection(self, max_retries, messages)
558 raise H2OServerError("Local server was unable to start")
559 try:
--> 560 cld = self.request("GET /3/Cloud")
561 if cld.consensus and cld.cloud_healthy:
562 self._print(" " + messages[1])
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/backend/connection.pyc in request(self, endpoint, data, json, filename, save_to)
400 auth=self._auth, verify=self._verify_ssl_cert, proxies=self._proxies)
401 self._log_end_transaction(start_time, resp)
--> 402 return self._process_response(resp, save_to)
403
404 except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/backend/connection.pyc in _process_response(response, save_to)
711 if content_type == "application/json":
712 try:
--> 713 data = response.json(object_pairs_hook=H2OResponse)
714 except (JSONDecodeError, requests.exceptions.ContentDecodingError) as e:
715 raise H2OServerError("Malformed JSON from server (%s):\n%s" % (str(e), response.text))
/home/me/projects/myproject/lib/python2.7/site-packages/requests/models.pyc in json(self, **kwargs)
882 try:
883 return complexjson.loads(
--> 884 self.content.decode(encoding), **kwargs
885 )
886 except UnicodeDecodeError:
/usr/lib64/python2.7/json/__init__.pyc in loads(s, encoding, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
349 if parse_constant is not None:
350 kw['parse_constant'] = parse_constant
--> 351 return cls(encoding=encoding, **kw).decode(s)
/usr/lib64/python2.7/json/decoder.pyc in decode(self, s, _w)
364
365 """
--> 366 obj, end = self.raw_decode(s, idx=_w(s, 0).end())
367 end = _w(s, end).end()
368 if end != len(s):
/usr/lib64/python2.7/json/decoder.pyc in raw_decode(self, s, idx)
380 """
381 try:
--> 382 obj, end = self.scan_once(s, idx)
383 except StopIteration:
384 raise ValueError("No JSON object could be decoded")
/home/me/projects/myproject/lib/python2.7/site-packages/h2o/backend/connection.pyc in __new__(cls, keyvals)
823 for k, v in keyvals:
824 if k == "__meta" and isinstance(v, dict):
--> 825 schema = v["schema_name"]
826 break
827 if k == "__schema" and is_type(v, str):
KeyError: u'schema_name'
MapR 6 is not currently supported by H2O. Currently H2O supports up to MapR 5.2.
Please see the downloads page for supported Hadoop versions.
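If YARN turns out to be a hard blocker, a fallback (explicitly not what the question hopes for, and only a sketch) is the local server bundled with the h2o Python package, which guarantees that the client and server versions match:

import h2o

# With no cluster reachable at the default address, h2o.init() starts a
# local single-node H2O instance using the jar bundled with the package.
# max_mem_size mirrors the -mapperXmx 6g used on the Hadoop cluster.
h2o.init(max_mem_size="6g")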