I was doing some testing with imports, and I wanted to test how fast certain packages get imported using function decorators. Here is my code:
import time

def timeit(func):
    def wrapper():
        start = time.time()
        func()
        end = time.time()
        print(f'{func.__name__} executed in {end - start} second(s)')
    return wrapper

@timeit
def import_matplotlib():
    import matplotlib.pyplot

@timeit
def import_numpy():
    import numpy

import_matplotlib()
import_numpy()
Output
import_matplotlib executed in 0.4385249614715576 second(s)
import_numpy executed in 0.0 second(s)
This is not the expected output given that numpy isn't imported in an instant. What is happening here, and how can this be fixed? Thank you.
Edit
If I make this change to import_numpy():
@timeit
def import_numpy():
    import numpy
    time.sleep(2)
The output becomes this:
import_matplotlib executed in 0.4556155204772949 second(s)
import_numpy executed in 2.0041260719299316 second(s)
This tells me that there isn't anything wrong with my decorator function. Why is this behavior occurring?
Try using the timeit module? It was built for this purpose and makes that code simpler.
>>> import timeit
>>> timeit.timeit(stmt='import numpy')
0.13844075199995132
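As for why import_numpy appears to take no time at all: matplotlib.pyplot itself imports numpy, and Python caches every imported module in sys.modules, so the second import is just a dictionary lookup rather than a fresh load. A minimal sketch to confirm this, run as a standalone script so each import happens exactly once:

import sys
import time

start = time.time()
import matplotlib.pyplot   # pulls in numpy as a dependency
print('matplotlib.pyplot:', time.time() - start, 'second(s)')
print('numpy already cached?', 'numpy' in sys.modules)   # True at this point

start = time.time()
import numpy               # already in sys.modules, so this is effectively free
print('numpy:', time.time() - start, 'second(s)')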
When I run this simple pipeline (in GCP's Vertex AI Workbench) I get an error:
ModuleNotFoundError: No module named 'sklearn'
Here is my code:
from kfp.v2 import compiler
from kfp.v2.dsl import pipeline, component
from google.cloud import aiplatform

@component(
    packages_to_install=["sklearn"],
    base_image="python:3.9",
)
def test_sklearn():
    import sklearn

@pipeline(
    pipeline_root=PIPELINE_ROOT,
    name="sklearn-pipeline",
)
def pipeline():
    test_sklearn()

compiler.Compiler().compile(pipeline_func=pipeline, package_path="sklearn_pipeline.json")

job = aiplatform.PipelineJob(
    display_name=PIPELINE_DISPLAY_NAME,
    template_path="sklearn_pipeline.json",
    pipeline_root=PIPELINE_ROOT,
    location=REGION
)

job.run(service_account=SERVICE_ACCOUNT)
What am I doing wrong? :)
It seems that the package name sklearn no longer works after a version upgrade. You need to change the value of packages_to_install from "sklearn" to "scikit-learn" in the @component block.
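Applied to the component from the question, only the package name changes; the import inside the function stays sklearn:

from kfp.v2.dsl import component

@component(
    packages_to_install=["scikit-learn"],  # distribution name on PyPI; the module is still imported as sklearn
    base_image="python:3.9",
)
def test_sklearn():
    import sklearn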
I am using IBM's quantum computing lab and was following IBM's getting-started tutorial, but my code is throwing errors even though I followed the tutorial exactly. Here is my code:
#-----------Cell 1:
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit.providers.aer import QasmSimulator
# Loading your IBM Quantum account(s)
provider = IBMQ.load_account()
#-----------Cell 2:
# Build
#------
# Create a Quantum Circuit acting on the q register
circuit = QuantumCircuit(2, 2)
# Add a H gate on qubit 0
circuit.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circuit.cx(0, 1)
# Map the quantum measurement to the classical bits
circuit.measure([0,1], [0,1])
# END
# Execute
#--------
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator
job = execute(circuit, simulator, shots=1000)
# Grab results from the job
result = job.result()
# Return counts
counts = result.get_counts(circuit)
print("\nTotal count for 00 and 11 are:",counts)
# END
# Visualize
#----------
# Import draw_circuit, then use it to draw the circuit
from ibm_quantum_widgets import draw_circuit
draw_circuit(circuit)
# Analyze
#--------
# Plot a histogram
plot_histogram(counts)
# END
This code throws this error:
Traceback (most recent call last):
File "/tmp/ipykernel_59/1801586149.py", line 26, in <module>
job = execute(circuit, simulator, shots=1000)
NameError: name 'execute' is not defined
Use %tb to get the full traceback.
I am new to IBM and quantum computing, how do I fix this error?
Here is the tutorial I was following if you need it: https://quantum-computing.ibm.com/lab/docs/iql/first-circuit
You did not import execute from qiskit.
Change
from qiskit import QuantumCircuit, transpile, Aer, IBMQ
to
from qiskit import QuantumCircuit, transpile, Aer, IBMQ, execute
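Alternatively (a sketch, not taken from the tutorial), since transpile is already imported, you can avoid execute entirely and replace the failing line in Cell 2 with the backend's own run method:

# Transpile for the simulator, run it, and read the counts back
compiled = transpile(circuit, simulator)
job = simulator.run(compiled, shots=1000)
result = job.result()
counts = result.get_counts()
print("\nTotal count for 00 and 11 are:", counts)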
This is simple to test, to see if you get the error on your side:
import geopandas as gpd
gdf = gpd.read_file('https://hepgis.fhwa.dot.gov/fhwagis/AltFuels_Rounds1-5_2021-05-25.zip')
File "fiona/ogrext.pyx", line 540, in fiona.ogrext.Session.start
File "fiona/_shim.pyx", line 90, in fiona._shim.gdal_open_vector
fiona.errors.DriverError: '/vsimem/6101ab5f23764c15b5fe47aa52a049d6' not recognized as a supported file format.
Interestingly, I have received this error for other URLs recently and thought there was something wrong with the URL. But, now I suspect that something else is going on since it is happening with more than one URL. On the other hand, some URLs don't have this issue. One other interesting thing, this error only occurs sometimes. For instance, if I rerun that command it will work maybe 1 out of 20 times.
My Fiona version:
fiona 1.8.20 py39hea8b339_1 conda-forge
Any help would be much appreciated.
Investigating, the URL does not return a zip file. See the code below; it actually returns an HTML input page...
import geopandas as gpd
import requests, io
from pathlib import Path
from zipfile import ZipFile, BadZipFile
import urllib
import fiona

url = "https://hepgis.fhwa.dot.gov/fhwagis/AltFuels_Rounds1-5_2021-05-25.zip"

try:
    gdf = gpd.read_file(url)
except Exception:
    f = Path.cwd().joinpath(urllib.parse.urlparse(url).path.split("/")[-1])
    r = requests.get(url, stream=True, headers={"User-Agent": "XY"})
    with open(f, "wb") as fd:
        for chunk in r.iter_content(chunk_size=128):
            fd.write(chunk)
    try:
        zfile = ZipFile(f)
        zfile.extractall(f.stem)
    except BadZipFile:
        with open(f) as fh:
            print(fh.read())
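If the server does hand back a valid zip (the question notes the read succeeds occasionally), a workaround along the same lines is to keep the explicit download above and then read the shapefile from the local zip. This is a sketch, assuming the file name written by the snippet above:

import geopandas as gpd

local_zip = "AltFuels_Rounds1-5_2021-05-25.zip"   # file written by the download loop above
try:
    # fiona/GDAL can read directly from a zip on disk via the zip:// scheme
    gdf = gpd.read_file(f"zip://{local_zip}")
    print(gdf.head())
except Exception as exc:
    print(f"Still not a valid zip/shapefile: {exc}")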
When I use the st.cache decorator to cache a Hugging Face transformer model, I get an UnhashableTypeError.
This is the code:
from transformers import pipeline
import streamlit as st
from io import StringIO

@st.cache(hash_funcs={StringIO: StringIO.getvalue})
def model():
    return pipeline("sentiment-analysis", model='akhooli/xlm-r-large-arabic-sent')
After searching the issues section of the Streamlit repo, I found that the hash_funcs argument is not required; you just need to pass allow_output_mutation=True.
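Applied to the function from the question, that would look like this (a minimal sketch, same model name as above):

from transformers import pipeline
import streamlit as st

@st.cache(allow_output_mutation=True)
def model():
    return pipeline("sentiment-analysis", model='akhooli/xlm-r-large-arabic-sent')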
This worked for me:
from transformers import pipeline
import tokenizers
import streamlit as st
import copy

@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
def get_model():
    return pipeline("sentiment-analysis", model='akhooli/xlm-r-large-arabic-sent')

input = st.text_input('Text')
bt = st.button("Get Sentiment Analysis")

if bt and input:
    model = copy.deepcopy(get_model())
    st.write(model(input))
Note 1:
Calling the pipeline with the input, model(input), mutates the model, and we shouldn't change a cached value, so we copy the model and run the copy.
Note 2:
The first run loads the model using the get_model function; subsequent runs use the cache.
Note 3:
You can read more about advanced caching in Streamlit in their documentation.
Output examples:
I'm trying to call a unittest from another Python file and evaluate the exit code. I was able to use unittest.TestLoader().loadTestsFromModule and unittest.TextTestRunner.run to call the unittest from another Python file, but that prints the entire results to the console. I would like to simply set a variable equal to the status code so I can evaluate it. I found the method unittest.TestResult.wasSuccessful, but I'm having trouble implementing it. When I add it to the use case, I get the following error: AttributeError: 'ConnectionTest' object has no attribute 'failures'
I've included some code samples below and a mockup of the desired result as an illustration of what I'm trying to achieve. Thank you in advance.
""" Tests/ConnectionTest.py """
import unittest
from Connection import Connection
class ConnectionTest(unittest.TestCase):
def test_connection(self):
#my tests
def test_pass(self):
return unittest.TestResult.wasSuccessful(self)
if __name__ == '__main__':
unittest.main()
""" StatusTest.py """
import unittest
import Tests.ConnectionTest as test
#import Tests.Test2 as test2
#import Tests.Test3 as test3
#import other unit tests ...
suite = unittest.TestLoader().loadTestsFromModule(test)
unittest.TextTestRunner(verbosity=2).run(suite)
""" Return True if unit test passed
"""
def test_passed(test):
if test.test_pass() == 0:
return True
else:
return False
""" Run unittest for each module before using it in code
"""
def main():
tests = "test test2 test3".split()
for test in tests:
if test_passed(test):
# do something
else:
# log failure
pass
Update
To put the question more simply, I need to capture the test suite's pass/fail result in a variable (as a status code I can evaluate) rather than only printing it.
You mentioned you tried implementing result.wasSuccessful, but would something like the following work:
result = unittest.TextTestRunner(verbosity=2).run(suite)
test_exit_code = int(not result.wasSuccessful())
The value of test_exit_code would then be either 0 when the test suite ran successfully or 1 otherwise.
If you want to disable the output of the TextTestRunner you can specify your own stream, such as:
from io import StringIO
result = unittest.TextTestRunner(stream=StringIO(), verbosity=2).run(suite)
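Putting both pieces together, a minimal sketch of what StatusTest.py might look like under these assumptions (one loaded test module per file, runner output discarded via the StringIO stream, wasSuccessful() giving the pass/fail status directly):

""" StatusTest.py """
import unittest
from io import StringIO

import Tests.ConnectionTest as connection_test
# import Tests.Test2 as test2 ...

def test_passed(module):
    """Return True if every test in the given module passed."""
    suite = unittest.TestLoader().loadTestsFromModule(module)
    result = unittest.TextTestRunner(stream=StringIO(), verbosity=2).run(suite)
    return result.wasSuccessful()

def main():
    for module in (connection_test,):   # add test2, test3, ... here
        if test_passed(module):
            pass  # do something
        else:
            pass  # log failure

if __name__ == '__main__':
    main()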