Error running healpy.sphtfunc.alm2map with alm size different from l_max required - healpy

I run healpy.sphtfunc.alm2map, passing alm as an array with l_max = 256 and requesting an output map with l_max = 191, but the function does not seem to accept the new l_max correctly.
Here is example code of what I am doing, with artificially generated alms:
import healpy as hp
import numpy as np
#nside and lmax
nside_high = 128
lmax_high= 2*nside_high
nside_low = 64
lmax_low= 3*nside_low-1
#Cl
Cl = np.ones(lmax_high)
#Alm
Alm = hp.synalm(Cl, lmax = lmax_high)
#Map
Map = hp.alm2map(Alm, nside = nside_low, lmax = lmax_low)
I get this error:
ValueError Traceback (most recent call last)
/var/folders/k_/2j08yy711tb17zmmsznfx1zh0000gn/T/ipykernel_9189/2207532576.py in <cell line: 18>()
16
17 #Map
---> 18 Map = hp.alm2map(Alm, nside = nside_l, lmax = lmax_l)
/opt/anaconda3/lib/python3.8/site-packages/astropy/utils/decorators.py in wrapper(*args, **kwargs)
552 warnings.warn(msg, warning_type, stacklevel=2)
553
--> 554 return function(*args, **kwargs)
555
556 return wrapper
/opt/anaconda3/lib/python3.8/site-packages/healpy/sphtfunc.py in alm2map(alms, nside, lmax, mmax, pixwin, fwhm, sigma, pol, inplace, verbose)
502 mmax = -1
503 if pol:
--> 504 output = sphtlib._alm2map(
505 alms_new[0] if lonely else tuple(alms_new), nside, lmax=lmax, mmax=mmax
506 )
ValueError: Wrong alm size.
Can anyone help?

The purpose of the lmax argument in alm2map is just to handle input alms which have mmax != lmax; it cannot be used to clip alms.
Currently the easiest way to clip alms is:
alm_clipped = hp.almxfl(Alm, np.ones(lmax_low+1))
Map = hp.alm2map(alm_clipped, nside = nside_low)
The next version of healpy will have a dedicated function resize_alm:
https://github.com/healpy/healpy/pull/803
In the future it would be nice to use that function to handle this automatically; I opened an issue to track it:
https://github.com/healpy/healpy/issues/817
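Putting the question's setup together with the suggested clipping, a minimal end-to-end sketch (the only change to the question's setup is that the C_l array is sized lmax_high + 1 so it covers l = 0..lmax_high):
import healpy as hp
import numpy as np

nside_high, nside_low = 128, 64
lmax_high = 2 * nside_high
lmax_low = 3 * nside_low - 1

# Flat spectrum up to lmax_high, alms synthesized at the higher band limit
Cl = np.ones(lmax_high + 1)
Alm = hp.synalm(Cl, lmax=lmax_high)

# Clip the alms to lmax_low as suggested above, then synthesize the map at the lower resolution
alm_clipped = hp.almxfl(Alm, np.ones(lmax_low + 1))
Map = hp.alm2map(alm_clipped, nside=nside_low)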

Related

Fitting Lightgbm distributed with lgb.train hangs

I'm trying to learn how to use lightgbm distributed.
I wrote a simple hello-world kind of code where I use the iris dataset with 150 rows, split into train (100 rows) and test (50 rows). The train and test sets are then each further split into two parts, and each part is fed to one of the two machines with the appropriate rank.
The problem I see is that lgb.train hangs.
Here is the code:
import argparse
import logging
import lightgbm as lgb
import pandas as pd
from sklearn import datasets
import socket
print('lightgbm', lgb.__version__)
HOST = socket.gethostname()
ip_address = socket.gethostbyname(HOST)
print("IP=", ip_address)
# looks like lightgbm operates only with ip addresses
IPS = ['10.121.22.166', '10.121.22.83']
assert ip_address in IPS
logger = logging.getLogger(__name__)
pd.set_option('display.max_rows', 4)
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 10000)
pd.set_option('max_colwidth', 100)
pd.set_option('precision', 5)
def read_train_data(rank):
    iris = datasets.load_iris()
    iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
    partition = rank
    assert partition < 2
    separate = 100
    train_df = iris_df.iloc[:separate]
    test_df = iris_df.iloc[separate:]
    separate_train = 60
    separate_test = 30
    if partition == 0:
        train_df = train_df.iloc[:separate_train]
        test_df = test_df.iloc[:separate_test]
    else:
        train_df = train_df.iloc[separate_train:]
        test_df = test_df.iloc[separate_test:]

    def get_lgb_dataset(df):
        target_column = df.columns[-1]
        columns = df.columns[:-1]
        assert target_column not in columns
        print('Target column', target_column)
        x = df[columns]
        y = df[target_column]
        print(x)
        ds = lgb.Dataset(free_raw_data=False, data=x, label=y, params={
            "enable_bundle": False
        })
        ds.construct()
        return ds

    dtrain = get_lgb_dataset(train_df)
    dtest = get_lgb_dataset(test_df)
    return dtrain, dtest
def train(args):
    port0 = 56456
    rank = IPS.index(ip_address)
    print("Rank=", rank, HOST)
    print("RR", rank)
    dtrain, dtest = read_train_data(rank=rank)
    params = {'boosting_type': 'gbdt',
              'class_weight': None,
              'colsample_bytree': 1.0,
              'importance_type': 'split',
              'learning_rate': 0.1,
              'max_depth': 2,
              'min_child_samples': 20,
              'min_child_weight': 0.001,
              'min_split_gain': 0.0,
              'n_estimators': 1,
              'num_leaves': 31,
              'objective': 'regression',
              'metric': 'rmse',
              'random_state': None,
              'reg_alpha': 0.0,
              'reg_lambda': 0.0,
              'silent': False,
              'subsample': 1.0,
              'subsample_for_bin': 200000,
              'subsample_freq': 0,
              'tree_learner': 'data_parallel',
              'num_threads': 48,
              'machines': ','.join([f'{machine}:{port0}' for i, machine in enumerate(IPS)]),
              'local_listen_port': port0,
              'time_out': 120,
              'num_machines': len(IPS)
              }
    print(params)
    logging.info("starting to train lgb at node with rank %d", rank)
    evals_result = {}
    if args.scikit == 1:
        print("Using scikit learn")
        bst = lgb.sklearn.LGBMRegressor(**params)
        bst.fit(
            dtrain.data,
            dtrain.label,
            eval_set=[(dtest.data, dtest.label)],
        )
    else:
        print("Using regular LGB")
        bst = lgb.train(params,
                        dtrain,
                        valid_sets=[dtest],
                        evals_result=evals_result)
        print(evals_result)
    logging.info("finish xgboost training at node with rank %d", rank)
    return bst
def main(args):
    logging.info("starting the train job")
    model = train(args)
    pd.set_option('display.max_rows', 500)
    print("OUT", model.__class__)
    try:
        print(model.trees_to_dataframe())
    except:
        print(model.booster_.trees_to_dataframe())


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--scikit',
        help='scikit',
        default=0,
        type=int,
    )
    main(parser.parse_args())
I can run it with the scikit fit interface on the two machines by running: python simple_distributed_lgb_test.py --scikit 1
It produces a reasonable result.
However, when I use --scikit 0 (which uses lgb.train), fitting just hangs on both nodes. Last messages before it hangs:
[LightGBM] [Info] Total Bins 22
[LightGBM] [Info] Number of data points in the train set: 40, number of used features: 2
[LightGBM] [Warning] Found whitespace in feature_names, replace with underlines
[LightGBM] [Info] Start training from score 0.873750
Is that a bug or expected behavior? dask.py in lightgbm does use the scikit-learn fit interface.
I use an overnight master version 3.2.1.99 (commit 5b7a6f3e7150aeb704d1dd2b852d246af3e913a3 from Jul 12, to be exact).
UPDATE 1
I'm trying to dig into the code. So far I see a few things:
The scikit-learn fit interface appears to have an extra synchronization step before fitting the first tree; lgb.train doesn't have it. I don't know yet where it comes from (I see some Network::Allreduce operations).
With the scikit-learn interface the workers appear to be synchronized: each worker knows the correct sizes of the blocks to send and receive during the reducescatter operations. For example, on the first allreduce, worker1 sends 208 blocks and receives 368 blocks of data (in Linkers::SendRecv), while worker2 is reversed, sending 368 and receiving 208, so the allreduce completes fine.
On the contrary, with lgb.train the workers are not synchronized: each worker has its own numbers of send and receive blocks for the reducescatter at the first DataParallelTreeLearner::FindBestSplits encounter, but they don't match. Worker1 sends 208 and wants to receive 400; worker2 sends 192 and wants to receive 176. So the worker that wants to receive more just hangs, and the other worker eventually hangs too.
Possibly it has something to do with lgb.Dataset; the workers may need to have the same bins or something. I tried to force that with the forcedbins_filename parameter, but it doesn't seem to help with lgb.train.
UPDATE 2
Success. If I remove the following line from the example:
ds.construct()
Everything works. So I guess we can't use construct on Dataset when using distributed training.
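For reference, a hedged sketch of the dataset helper with that call removed (the same get_lgb_dataset from the question, just without the eager construct()); the guess is that letting lgb.train construct the Dataset after the network is set up keeps the workers' bin/block sizes consistent:
def get_lgb_dataset(df):
    target_column = df.columns[-1]
    x = df[df.columns[:-1]]
    y = df[target_column]
    # No ds.construct() here; lgb.train constructs the Dataset itself during fitting
    return lgb.Dataset(free_raw_data=False, data=x, label=y,
                       params={"enable_bundle": False})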

How to get the highest predicted value in multiclass classification problem using H2O AI?

When predicting values in a multiclass classification problem, I would like to get the probability of the predicted value.
I tried to solve this by using H2O's apply function:
predicted_df = modelo_assessor.predict(to_predict_h2o_frame)
predicted_df.apply((lambda x: x.max()), axis=1)
But it does not work:
'ValueError: unimpl bytecode instr: CALL_METHOD'
Maybe it doesn't work because h2o.max does not have an axis parameter like h2o.mean does?
I couldn't find documentation of which operations are supported in the apply function.
I would like to solve the problem using H2O data manipulation, similarly to this pandas code:
predicted_df = modelo_assessor.predict(to_predict_h2o_frame).as_data_frame()
predicted_df['PROB_PREDICTED']=predicted_df.iloc[:,1:].max(axis=1)
This happens whenever apply is used; even the example from the H2O documentation (http://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/frame.html#h2oframe) fails with the same error. I was able to solve the problem by downgrading to Python 3.6.x.
python_lists = [[1,2,3,4], [1,2,3,4]]
h2oframe = h2o.H2OFrame(python_obj=python_lists,
                        na_strings=['NA'])
colMean = h2oframe.apply(lambda x: x.mean(), axis=0)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-8da6b76c71bd> in <module>
2 h2oframe = h2o.H2OFrame(python_obj=python_lists,
3 na_strings=['NA'])
----> 4 colMean = h2oframe.apply(lambda x: x.mean(), axis=0)
~/anaconda3/envs/h2o1/lib/python3.7/site-packages/h2o/frame.py in apply(self, fun, axis)
4910 assert_is_type(fun, FunctionType)
4911 assert_satisfies(fun, fun.__name__ == "<lambda>")
-> 4912 res = lambda_to_expr(fun)
4913 return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res))
4914
~/anaconda3/envs/h2o1/lib/python3.7/site-packages/h2o/astfun.py in lambda_to_expr(fun)
133 code = fun.__code__
134 lambda_dis = _disassemble_lambda(code)
--> 135 return _lambda_bytecode_to_ast(code, lambda_dis)
136
137 def _lambda_bytecode_to_ast(co, ops):
~/anaconda3/envs/h2o1/lib/python3.7/site-packages/h2o/astfun.py in _lambda_bytecode_to_ast(co, ops)
147 body, s = _opcode_read_arg(s, ops, keys)
148 else:
--> 149 raise ValueError("unimpl bytecode instr: " + instr)
150 if s > 0:
151 print("Dumping disassembled code: ")
ValueError: unimpl bytecode instr: CALL_METHOD
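In the meantime, the pandas-side approach from the question works as a workaround. A minimal sketch (assuming H2O's usual prediction-frame layout, i.e. a predict column with the predicted class followed by one probability column per class):
# Uses modelo_assessor and to_predict_h2o_frame from the question
predicted_df = modelo_assessor.predict(to_predict_h2o_frame).as_data_frame()
# Column 0 is the predicted class; the remaining columns are per-class probabilities
predicted_df['PROB_PREDICTED'] = predicted_df.iloc[:, 1:].max(axis=1)
print(predicted_df.head())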

RuntimeError: module must have its parameters and buffers on device cuda:1 (device_ids[0]) but found one of them on device: cuda:2

I have 4 GPUs (0,1,2,3) and I want to run one Jupyter notebook on GPU 2 and another one on GPU 0. Thus, after executing,
export CUDA_VISIBLE_DEVICES=0,1,2,3
for the GPU 2 notebook I do,
device = torch.device( f'cuda:{2}' if torch.cuda.is_available() else 'cpu')
device, torch.cuda.device_count(), torch.cuda.is_available(), torch.cuda.current_device(), torch.cuda.get_device_properties(1)
and after creating a new model or loading one,
model = nn.DataParallel( model, device_ids = [ 0, 1, 2, 3])
model = model.to( device)
Then, when I start training the model, I get,
RuntimeError Traceback (most recent call last)
<ipython-input-18-849ffcb53e16> in <module>
46 with torch.set_grad_enabled( phase == 'train'):
47 # [N, Nclass, H, W]
---> 48 prediction = model(X)
49 # print( prediction.shape, y.shape)
50 loss_matrix = criterion( prediction, y)
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
~/.local/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py in forward(self, *inputs, **kwargs)
144 raise RuntimeError("module must have its parameters and buffers "
145 "on device {} (device_ids[0]) but found one of "
--> 146 "them on device: {}".format(self.src_device_obj, t.device))
147
148 inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
RuntimeError: module must have its parameters and buffers on device cuda:0 (device_ids[0]) but found one of them on device: cuda:2
DataParallel requires every input tensor be provided on the first device in its device_ids list.
It basically uses that device as a staging area before scattering to the other GPUs, and it's the device where final outputs are gathered before returning from forward. If you want device 2 to be the primary device, you just need to put it at the front of the list as follows:
model = nn.DataParallel(model, device_ids = [2, 0, 1, 3])
model.to(f'cuda:{model.device_ids[0]}')
After that, all tensors provided to the model should be on the first device as well.
x = ... # input tensor
x = x.to(f'cuda:{model.device_ids[0]}')
y = model(x)
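Pulling that together, a minimal self-contained sketch (it assumes 4 visible GPUs and uses a hypothetical toy model; the point is only the device handling):
import torch
import torch.nn as nn

model = nn.Linear(16, 4)                                  # hypothetical toy model
model = nn.DataParallel(model, device_ids=[2, 0, 1, 3])   # cuda:2 is the primary device
model.to(f'cuda:{model.device_ids[0]}')                   # parameters/buffers live on cuda:2

x = torch.randn(8, 16).to(f'cuda:{model.device_ids[0]}')  # inputs must start on cuda:2 too
y = model(x)                                              # scattered to the other GPUs internally
print(y.device)                                           # outputs are gathered back on cuda:2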
This error can also happen when using torch and the model and the data are not both on CUDA. Try something like this to put the model and the data on CUDA:
model = model.to('cuda')
images = images.to('cuda')
For me even the following works:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    network = nn.DataParallel(network)
network.to(device)
tnsr = tnsr.to(device)

InvalidArgumentError: input_14:0 is both fed and fetched

I want to visualize my CNN filters on every layer. I wrote code for this but it is giving me an error. I want to see the filter images of every layer and also the heat maps of the areas which my neural net uses the most to predict the particular label. By doing this I can understand how my CNN works and do further work on my model for better results.
I searched on Google but mostly found sites with theory; I need to see code for the solution.
x = Conv2D(64,(3,3),strides = (1,1),name='layer_conv1',padding='same')(input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2),name='maxPool1')(x)
x = Conv2D(64,(3,3),strides = (1,1),name='layer_conv2',padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2),name='maxPool2')(x)
x = Conv2D(32,(3,3),strides = (1,1),name='conv3',padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D((2,2),name='maxPool3')(x)
x = Flatten()(x)
x = Dense(64,activation = 'relu',name='fc0')(x)
x = Dropout(0.25)(x)
x = Dense(32,activation = 'relu',name='fc1')(x)
x = Dropout(0.25)(x)
x = Dense(2,activation = 'softmax',name='fc2')(x)
model = Model(inputs = input,outputs = x,name='Predict')
a=np.expand_dims( X_train[10],axis=0)
a.shape
from keras.models import Model
layer_outputs = [layer.output for layer in model.layers]
activation_model = Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(a)
I am getting this error
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-249-119bf7ea835a> in <module>()
2 layer_outputs = [layer.output for layer in model.layers]
3 activation_model = Model(inputs=model.input, outputs=layer_outputs)
----> 4 activations = activation_model.predict(a)
5
6
/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/engine/training.py in predict(self, x, batch_size, verbose, steps, callbacks)
1185 verbose=verbose,
1186 steps=steps,
-> 1187 callbacks=callbacks)
1188
1189 def train_on_batch(self, x, y,
/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/engine/training_arrays.py in predict_loop(model, f, ins, batch_size, verbose, steps, callbacks)
320 batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
321 callbacks._call_batch_hook('predict', 'begin', batch_index, batch_logs)
--> 322 batch_outs = f(ins_batch)
323 batch_outs = to_list(batch_outs)
324 if batch_index == 0:
/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2919 return self._legacy_call(inputs)
2920
-> 2921 return self._call(inputs)
2922 else:
2923 if py_any(is_tensor(x) for x in inputs):
/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in _call(self, inputs)
2873 feed_symbols,
2874 symbol_vals,
-> 2875 session)
2876 if self.run_metadata:
2877 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
/opt/conda/lib/python3.6/site-packages/Keras-2.2.4-py3.6.egg/keras/backend/tensorflow_backend.py in _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session)
2825 callable_opts.run_options.CopyFrom(self.run_options)
2826 # Create callable.
-> 2827 callable_fn = session._make_callable_from_options(callable_opts)
2828 # Cache parameters corresponding to the generated callable, so that
2829 # we can detect future mismatches and refresh the callable.
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _make_callable_from_options(self, callable_options)
1469 """
1470 self._extend_graph()
-> 1471 return BaseSession._Callable(self, callable_options)
1472
1473
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in __init__(self, session, callable_options)
1423 with errors.raise_exception_on_not_ok_status() as status:
1424 self._handle = tf_session.TF_SessionMakeCallable(
-> 1425 session._session, options_ptr, status)
1426 finally:
1427 tf_session.TF_DeleteBuffer(options_ptr)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
526 None, None,
527 compat.as_text(c_api.TF_Message(self.status.status)),
--> 528 c_api.TF_GetCode(self.status.status))
529 # Delete the underlying status object from memory otherwise it stays alive
530 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: input_14:0 is both fed and fetched.
I tried removing some layers and adding others, but it didn't help. I found very little code on Google.
To access any layer's output, you can use a backend function in Keras, something like this:
from keras import backend as K
last_layer_output = K.function([model.layers[0].input],
                               [model.layers[-1].output])
layer_output = last_layer_output([x])[0]
So to access all layer's output, you can create as many such function as follows:
outputs = [layer.output for layer in model.layers]
functors = [K.function([model.input, K.learning_phase()], [out]) for out in outputs]
layer_outs = [func([x_test[:4], 1.]) for func in functors]
Note: each Keras function produces the output for one layer.
You can read more about it here.
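For completeness, a commonly suggested workaround for the original "fed and fetched" error itself (a hedged sketch, not verified here) is to exclude the InputLayer from the fetched outputs, since its tensor is exactly the tensor being fed:
# Skip model.layers[0] (the InputLayer) so the input tensor is not both fed and fetched
layer_outputs = [layer.output for layer in model.layers[1:]]
activation_model = Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(a)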
With this model my problem was not solved, so I made a simpler model and used Keras functions to get the layer outputs; this was easier compared to my previous model.
model = Sequential()
model.add(Conv2D(16,kernel_size = (5,5),activation = 'relu', activity_regularizer=regularizers.l2(1e-8)))
model.add(Conv2D(32,kernel_size = (5,5),activation = 'relu', activity_regularizer = regularizers.l2(1e-8)))
model.add(MaxPooling2D(3,3))
model.add(Conv2D(64,kernel_size = (5,5),activation = 'relu', activity_regularizer = regularizers.l2(1e-8)))
model.add(MaxPooling2D(3,3))
model.add(Conv2D(128,activation = 'relu',kernel_size = (3,3),activity_regularizer = regularizers.l2(1e-8)))
model.add(Flatten())
model.add(Dropout(0.8))
model.add(Dense(64,activation = 'relu',activity_regularizer = regularizers.l2(1e-8)))
model.add(Dropout(0.8))
model.add(Dense(64,activation = 'relu',activity_regularizer = regularizers.l2(1e-8)))
model.add(Dropout(0.8))
model.add(Dense(2,activation = 'softmax'))
model.compile(loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.SGD(lr = 0.001,clipnorm = 1,momentum= 0.9), metrics=["accuracy"])
model.fit(X_train,y_train, epochs = 10 ,batch_size = 16,validation_data=(X_test,y_test_Categorical))
model.summary()
#a is my one example from test set
a=np.expand_dims( X_train[10],axis=0)
a.shape
layer_outputs = [layer.output for layer in model.layers]
activation_model = Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(a)
import matplotlib.pyplot as plt

def display_activation(activations, col_size, row_size, act_index):
    # Plot the feature maps of the layer at act_index on a row_size x col_size grid
    activation = activations[act_index]
    activation_index = 0
    fig, ax = plt.subplots(row_size, col_size, figsize=(row_size*2.5, col_size*1.5))
    for row in range(0, row_size):
        for col in range(0, col_size):
            ax[row][col].imshow(activation[0, :, :, activation_index])
            activation_index += 1
display_activation(activations, 4, 4,0)
By doing this I am able to get my output.

Dask multiprocessing fails with embarrassingly parallel for loop including call to MongoDB when number of iterations is high enough

I'm trying to run a kind of simulation in Python for loop in parallel using Dask multiprocessing. Parallelization works fine when number of iterations is fairly low but fails when the amount increases. The issue occurs on Win7 (4 cores, 10 Gb RAM), Win10 (8 cores, 8 Gb RAM) and Azure VM running Windows Server 2016 (16 cores, 32 Gb RAM). The slowest one, Win7, can go through most iterations before failing. The issue can be mitigated by adding long enough sleep time at the end of each function included in the process, but the required amount of sleeping results in very low performance, similar to running sequentially.
I hope someone will be able to help me out here. Thanks in advance for comments and answers!
The following simple code contains some phases of the for loop and reproduces the error.
import json
import numpy as np
import pandas as pd
from pymongo import MongoClient
# Create random DataFrame
df = pd.DataFrame(np.random.randint(0,100,size=(100,11)), columns=list('ABCDEFGHIJK'))
# Save to Mongo
client = MongoClient()
db = client.errordemo
res = db.errordemo.insert_many(json.loads(df.to_json(orient='records')))
db.client.close()
class ToBeRunParallel:

    def __init__(self):
        pass

    def functionToBeRunParallel(self, i):
        # Read data from mongo
        with MongoClient() as client:
            db = client.errordemo
            dataFromMongo = pd.DataFrame.from_records(db.errordemo.find({}, {'_id': 0}))
        # Randomize data (rand is a user-defined randomization helper, not shown here)
        dataRand = dataFromMongo.apply(pd.to_numeric).apply(rand, volatility=0.1)
        # Sum rows
        dataSum = dataRand.sum(axis=1)
        # Select randomly one of the resulting values and return
        return dataSum.sample().values[0]
Call the function functionToBeRunParallel either in the console or in Jupyter (both fail). 'errordemo' is a local module containing the class ToBeRunParallel. When running on the Azure VM, the code succeeds with 500 loops and fails with 5,000.
import errordemo
from dask import delayed, compute, multiprocessing
# Determine how many times to loop
rng = range(15000)
# Define empty result lists
resList = []
# Create instance
err = errordemo.ToBeRunParallel()
# Loop in parallel using Dask
for i in rng:
sampleValue = delayed(err.functionToBeRunParallel)(i)
resList.append(sampleValue)
# Compute in parallel
result = compute(*resList, get=multiprocessing.get)
The error stack in Jupyter is as follows.
---------------------------------------------------------------------------
AutoReconnect Traceback (most recent call last)
<ipython-input-3-9f535dd4c621> in <module>()
----> 1 get_ipython().run_cell_magic('time', '', '# Determine how many times to loop\nrng = range(50000)\n\n# Define empty result lists\nresList = []\n\n# Create instance\nerr = errordemo.ToBeRunParallel()\n\n# Loop in parallel using Dask\nfor i in rng:\n sampleValue = delayed(err.functionToBeRunParallel)(i)\n resList.append(sampleValue)\n \n# Compute in parallel \nresult = compute(*resList, get=dask.multiprocessing.get)')
C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2113 magic_arg_s = self.var_expand(line, stack_depth)
2114 with self.builtin_trap:
-> 2115 result = fn(magic_arg_s, cell)
2116 return result
2117
<decorator-gen-60> in time(self, line, cell, local_ns)
C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\magic.py in <lambda>(f, *a, **k)
186 # but it's overkill for just that one bit of state.
187 def magic_deco(arg):
--> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):
C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\magics\execution.py in time(self, line, cell, local_ns)
1178 else:
1179 st = clock2()
-> 1180 exec(code, glob, local_ns)
1181 end = clock2()
1182 out = None
<timed exec> in <module>()
C:\ProgramData\Anaconda3\lib\site-packages\dask\base.py in compute(*args, **kwargs)
200 dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
201 keys = [var._keys() for var in variables]
--> 202 results = get(dsk, keys, **kwargs)
203
204 results_iter = iter(results)
C:\ProgramData\Anaconda3\lib\site-packages\dask\multiprocessing.py in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, **kwargs)
85 result = get_async(pool.apply_async, len(pool._pool), dsk3, keys,
86 get_id=_process_get_id,
---> 87 dumps=dumps, loads=loads, **kwargs)
88 finally:
89 if cleanup:
C:\ProgramData\Anaconda3\lib\site-packages\dask\async.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, raise_on_exception, rerun_exceptions_locally, callbacks, dumps, loads, **kwargs)
498 _execute_task(task, data) # Re-execute locally
499 else:
--> 500 raise(remote_exception(res, tb))
501 state['cache'][key] = res
502 finish_task(dsk, key, state, results, keyorder.get)
AutoReconnect: localhost:27017: [WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted
Traceback
---------
File "C:\ProgramData\Anaconda3\lib\site-packages\dask\async.py", line 266, in execute_task
result = _execute_task(task, data)
File "C:\ProgramData\Anaconda3\lib\site-packages\dask\async.py", line 247, in _execute_task
return func(*args2)
File "C:\Git_repository\footie\Pipeline\errordemo.py", line 20, in functionToBeRunParallel
dataFromMongo = pd.DataFrame.from_records(db.errordemo.find({}, {'_id': 0}))
File "C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py", line 981, in from_records
first_row = next(data)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\cursor.py", line 1090, in next
if len(self.__data) or self._refresh():
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\cursor.py", line 1012, in _refresh
self.__read_concern))
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\cursor.py", line 850, in __send_message
**kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py", line 844, in _send_message_with_response
exhaust)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py", line 855, in _reset_on_error
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\server.py", line 99, in send_message_with_response
with self.get_socket(all_credentials, exhaust) as sock_info:
File "C:\ProgramData\Anaconda3\lib\contextlib.py", line 82, in __enter__
return next(self.gen)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\server.py", line 163, in get_socket
with self.pool.get_socket(all_credentials, checkout) as sock_info:
File "C:\ProgramData\Anaconda3\lib\contextlib.py", line 82, in __enter__
return next(self.gen)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\pool.py", line 582, in get_socket
sock_info = self._get_socket_no_auth()
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\pool.py", line 618, in _get_socket_no_auth
sock_info, from_pool = self.connect(), False
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\pool.py", line 555, in connect
_raise_connection_failure(self.address, error)
File "C:\ProgramData\Anaconda3\lib\site-packages\pymongo\pool.py", line 65, in _raise_connection_failure
raise AutoReconnect(msg)
UPDATE:
Following this post, I created a decorator to catch the AutoReconnect exception, as shown below. Together with the extra parameters for MongoClient, the looping works, but it's still very slow, double the time it should take (timing on the Azure VM):
500 iterations: 3.74s
50,000 iterations: 12min 12s
import random
from time import sleep

from pymongo import errors

def safe_mongocall(call):
    def _safe_mongocall(*args, **kwargs):
        for i in range(5):
            try:
                return call(*args, **kwargs)
            except errors.AutoReconnect:
                sleep(random.random() / 100)
        print('Error: Failed operation!')
    return _safe_mongocall
@safe_mongocall
def functionToBeRunParallel(self, i):
    # Read data from mongo
    with MongoClient(connect=False, maxPoolSize=None, maxIdleTimeMS=100) as client:
        db = client.errordemo
        dataFromMongo = pd.DataFrame.from_records(db.errordemo.find({}, {'_id': 0}))
    # Randomize data
    dataRand = dataFromMongo.apply(pd.to_numeric).apply(rand, volatility=0.1)
    # Sum rows
    dataSum = dataRand.sum(axis=1)
    # Select randomly one of the resulting values and return
    return dataSum.sample().values[0]
The actual issue is exhaustion of TCP/IP ports, hence the solution is to avoid exhausting them. Following an article by Microsoft, I added the following registry values under HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters:
MaxUserPort: 65534
TcpTimedWaitDelay: 30
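For reference, a hedged sketch of setting those values with Python's standard winreg module (assumes an elevated/Administrator session on Windows; changes to these TCP parameters generally require a reboot to take effect):
import winreg

key_path = r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
with winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE, key_path, 0, winreg.KEY_SET_VALUE) as key:
    # Values taken from the answer above
    winreg.SetValueEx(key, "MaxUserPort", 0, winreg.REG_DWORD, 65534)
    winreg.SetValueEx(key, "TcpTimedWaitDelay", 0, winreg.REG_DWORD, 30)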
