Python: inverse of len(set) is 0 - python-2.6

I have a non-empty set, let's say myset. I am trying to get the inverse of its length with 1/len(myset), but I am getting 0, even though the length of the set is not large enough for its inverse to round down to zero; len(myset) is on the order of 50.
Does anyone know how to fix this?
Many thanks!
EDIT/UPDATE:
The code is very long, but here is the portion where I am using it:
from random import randint,random,uniform
import os
import math
import cplex
from datetime import datetime
from scipy.stats import norm
class Solving:
    def __init__(self, init):
        self.init = init

    def run(self):
        model = cplex.Cplex()
        for i in self.init.getGreenNodes() - self.init.getRedNodes():
            for j in self.init.getDiscretValuesNb():
                model.variables.add(names=["X" + str((i, j))])
                model.variables.set_types("X" + str((i, j)), "B")
            model.variables.add(names=["Da" + str(i)])
            model.variables.set_types("Da" + str(i), "C")
        model.objective.set_sense(model.objective.sense.minimize)
        model.objective.set_linear("TCost", 1)
        for i in self.init.getGreenNodes() - self.init.getRedNodes():
            aa = 1 / len(self.init.getGreenNodes() - self.init.getRedNodes())
            print aa
            model.linear_constraints.set_coefficients("Cstr1", "Da" + str(i), aa)
This code works perfectly when using Python 3.5.

Stop performing integer division.
1. / foo
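Under Python 2, / between two ints performs floor division, so 1/50 evaluates to 0; Python 3 uses true division, which is why the same code works on 3.5. Here is a minimal sketch of the usual fixes, with a stand-in myset of size 50:
# Python 2.6: int / int is floor division, so 1/len(myset) == 0
myset = set(range(50))
aa = 1.0 / len(myset)        # a float literal forces true division -> 0.02
aa = 1 / float(len(myset))   # explicit conversion works too
# Alternatively, put this at the very top of the module to get
# Python-3-style true division everywhere:
# from __future__ import division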

Related

Two StatsModels modules have totally different 'end-runs'

I'm running StatsModels to estimate parameters of a multiple regression model, using county-level data for 3085 counties. When I use statsmodels.formula.api, and drop a few rows from the data, I get the desired results. All seems well enough.
import pandas as pd
import numpy as np
import statsmodels.formula.api as sm
%matplotlib inline
from statsmodels.compat import lzip
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
eg=pd.read_csv(r'C:/Users/user/anaconda3/une_edu_pipc_06.csv')
pd.options.display.precision = 3
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=14)
sm_col = eg["lt_hsd_17"] + eg["hsd_17"]
eg["ut_hsd_17"] = sm_col
sm_col2 = eg["sm_col_17"] + eg["col_17"]
eg["bnd_hsd_17"] = sm_col2
eg["d_09"]= eg["Rate_09"]-eg["Rate_06"]
eg["d_10"]= eg["Rate_10"]-eg["Rate_06"]
inc_2=eg["p_c_inc_18"]*eg["p_c_inc_18"]
res = sm.ols(formula = "Rate_18 ~ p_c_inc_18 + ut_hsd_17 + d_10 + inc_2",
data=eg, missing='drop').fit()
print(res.summary())
(BTW, eg["p_c_inc_18"] is per-capita income, and inc_2 is p_c_inc_18 squared.)
But when I instead use import statsmodels.api as sm as the module, with everything else staying pretty much the same, and run the following code after all the appropriate preliminaries,
inc_2=eg["p_c_inc_18"]*eg["p_c_inc_18"]
X = eg[["p_c_inc_18","ut_hsd_17","d_10","inc_2"]]
y = eg["Rate_18"]
X = sm.add_constant(X)
mod = sm.OLS(y, X)
res = mod.fit()
print(res.summary())
then things fall apart, and the Python interpreter throws an error, as follows:
[......]
KeyError: "['inc_2'] not in index"
BTW, the only difference between the two 'runs' is that 15 rows are dropped during the first, successful, model run, while I don't as yet know how to drop missing rows from the second model formulation. Could that difference be responsible for why the second run fails? (I chose to omit large parts of the error message, to reduce clutter.)
You need to assign inc_2 as a column in your DataFrame.
inc_2=eg["p_c_inc_18"]*eg["p_c_inc_18"]
should be
eg["inc_2"] = eg["p_c_inc_18"]*eg["p_c_inc_18"]

I can't load my nn model that I've trained and saved

I used transfer learning to train the model. The base model was EfficientNet.
You can read more about it here
from tensorflow import keras
from keras.models import Sequential, Model
from keras.layers import (Dense, Dropout, Conv2D, MaxPooling2D,
                          Flatten, BatchNormalization, Activation)
from keras.optimizers import RMSprop, Adam, SGD
from keras.backend import sigmoid
Activation function
class SwishActivation(Activation):
    def __init__(self, activation, **kwargs):
        super(SwishActivation, self).__init__(activation, **kwargs)
        self.__name__ = 'swish_act'

def swish_act(x, beta=1):
    return x * sigmoid(beta * x)

from keras.utils.generic_utils import get_custom_objects
from keras.layers import Activation
get_custom_objects().update({'swish_act': SwishActivation(swish_act)})
Model Definition
# enet comes from the EfficientNet package: import efficientnet.keras as enet
model = enet.EfficientNetB0(include_top=False, input_shape=(150,50,3), pooling='avg', weights='imagenet')
Adding 2 fully-connected layers to B0.
x = model.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
x = Dropout(0.5)(x)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = Activation(swish_act)(x)
x = Dense(64)(x)
x = Dense(32)(x)
x = Dense(16)(x)
# Output layer
predictions = Dense(1, activation="sigmoid")(x)
model_final = Model(inputs = model.input, outputs = predictions)
model_final.summary()
I saved it using:
model.save('model.h5')
I get the following error trying to load it:
model=tf.keras.models.load_model('model.h5')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-12-e3bef1680e4f> in <module>()
1 # Recreate the exact same model, including its weights and the optimizer
----> 2 model = tf.keras.models.load_model('PhoneDetection-CNN_29_July.h5')
3
4 # Show the model architecture
5 model.summary()
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/generic_utils.py in class_and_config_for_serialized_keras_object(config, module_objects, custom_objects, printable_module_name)
319 cls = get_registered_object(class_name, custom_objects, module_objects)
320 if cls is None:
--> 321 raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
322
323 cls_config = config['config']
ValueError: Unknown layer: FixedDropout
I was getting the same error while trying to run inference by loading my saved model.
Then I just imported the efficientnet library in my inference notebook as well, and the error was gone.
My import command looked like:
import efficientnet.keras as efn
(Note that if you haven't installed efficientnet already (which is unlikely), you can do so with the !pip install efficientnet command.)
I had this same issue with a recent model. Researching the source code, you can find the FixedDropout class. I added it to my inference code along with imports of backend and layers. The rate should also match the rate from your EfficientNet model; for EfficientNetB0 the rate is 0.2 (others differ).
from tensorflow.keras import backend, layers

class FixedDropout(layers.Dropout):
    def _get_noise_shape(self, inputs):
        if self.noise_shape is None:
            return self.noise_shape
        symbolic_shape = backend.shape(inputs)
        noise_shape = [symbolic_shape[axis] if shape is None else shape
                       for axis, shape in enumerate(self.noise_shape)]
        return tuple(noise_shape)

model = keras.models.load_model('model.h5',
                                custom_objects={'FixedDropout': FixedDropout(rate=0.2)})
I was getting the same error. After adding the imports below, it worked properly:
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import itertools
import os, glob
from tqdm import tqdm
from efficientnet.tfkeras import EfficientNetB4
If you don't have it installed, run !pip install efficientnet. If you have any problems, post them here.
In my case, I had two files train.py and test.py.
I was saving my .h5 model inside train.py and was attempting to load it inside test.py and got the same error. To fix it, you need to add the import statements for your efficientnet models inside the file that is attempting to load it as well (in my case, test.py).
from efficientnet.tfkeras import EfficientNetB0
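Putting these answers together, here is a minimal loading sketch, assuming the model was saved with the question's swish_act activation (redefined below so the script is self-contained):
import efficientnet.tfkeras  # importing registers EfficientNet's custom layers (e.g. FixedDropout)
import tensorflow as tf
from tensorflow.keras import backend as K

# the custom activation from the question
def swish_act(x, beta=1):
    return x * K.sigmoid(beta * x)

model = tf.keras.models.load_model('model.h5',
                                   custom_objects={'swish_act': swish_act})
model.summary()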

Not able to fit a function with scipy.optimize.curve_fit()

I would like to fit the following function:
def invlaplace_stehfest2(time, EL, tau):
    upsilon = 0.25
    pmax = 6.9
    E0 = 0.0154
    M = 8
    results = []
    for t in time:
        func = 0
        for k in range(1, 2*M+1):
            SUM = 0
            for j in range(int(math.floor((k+1)/2)), min(k, M)+1):
                dummy = j**(M+1)*scipy.special.binom(M,j)*scipy.special.binom(2*j,j)*scipy.special.binom(j,k-j)/scipy.math.factorial(M)
                SUM += dummy
            s = k*math.log(2)/t
            func += (-1)**(M+k)*SUM*pmax*EL/(mp.exp(tau*s)*mp.expint(1,tau*s)*E0+EL)/s
        func = func*math.log(2)/t
        results.append(func)
    return [float(i) for i in results]
To do so I use the following data:
data_time=np.array([69.0,99.0,139.0,179.0,219.0,259.0,295.5,299.03])
data_relax=np.array([6.2536,6.1652,6.0844,6.0253,5.9782,5.9404,5.9104,5.9066])
With the following guess:
guess=np.array([0.1,0.05])
And scipy.optimize.curve_fit() as follows:
Parameter,Covariance=scipy.optimize.curve_fit(invlaplace_stehfest2,data_time,data_relax,guess)
For a reason that I don't understand, I am not able to fit the data correctly. I get the following graph.
Bad fitting
My function is undoubtedly correct since when I use the correct guess:
guess=np.array([0.33226685047281592707364253044085038793404361200072,8.6682623502960394383501102909774397295654841654769])
I am able to fit my data correctly.
Expected fitting
Any hint on why I am not able to fit correctly? Should I use another method?
Here is the whole program:
##############################################
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as mplab
import math
from math import *
import numpy as np
import scipy
from scipy.optimize import curve_fit
import mpmath as mp
############################################################################################
def invlaplace_stehfest2(time, EL, tau):
    upsilon = 0.25
    pmax = 6.9
    E0 = 0.0154
    M = 8
    results = []
    for t in time:
        func = 0
        for k in range(1, 2*M+1):
            SUM = 0
            for j in range(int(math.floor((k+1)/2)), min(k, M)+1):
                dummy = j**(M+1)*scipy.special.binom(M,j)*scipy.special.binom(2*j,j)*scipy.special.binom(j,k-j)/scipy.math.factorial(M)
                SUM += dummy
            s = k*math.log(2)/t
            func += (-1)**(M+k)*SUM*pmax*EL/(mp.exp(tau*s)*mp.expint(1,tau*s)*E0+EL)/s
        func = func*math.log(2)/t
        results.append(func)
    return [float(i) for i in results]
############################################################################################
###Constant###
####Value####
data_time=np.array([69.0,99.0,139.0,179.0,219.0,259.0,295.5,299.03])
data_relax=np.array([6.2536,6.1652,6.0844,6.0253,5.9782,5.9404,5.9104,5.9066])
###Fitting###
guess=np.array([0.33226685047281592707364253044085038793404361200072,8.6682623502960394383501102909774397295654841654769])
#guess=np.array([0.1,0.05])
Parameter,Covariance=scipy.optimize.curve_fit(invlaplace_stehfest2,data_time,data_relax,guess)
print Parameter
residu=sum(data_relax-invlaplace_stehfest2(data_time,Parameter[0],Parameter[1]))
Graph_Curves=plt.figure()
ax = Graph_Curves.add_subplot(111)
ax.plot(data_time,invlaplace_stehfest2(data_time,Parameter[0],Parameter[1]),"-")
ax.plot(data_time,data_relax,"o")
plt.show()
Non-linear fitters such as the default Levenberg-Marquardt solver used in scipy.optimize.curve_fit(), like most iterative solvers, can stop in a local minimum in error space. If error space is "bumpy" then initial parameter estimates become very important, as in this case.
Scipy has added the Differential Evolution genetic algorithm to the optimize module, which can be used to determine initial parameter estimates for curve_fit(). Scipy's implementation uses the Latin Hypercube algorithm to ensure a thorough search of parameter space, and it requires upper and lower bounds within which to search. As you can see below, I have used this scipy module to replace the hard-coded values of the variable named "guess" in your code. This does not run quickly, but a somewhat slower correct result is much better than a fast wrong result. Try this code:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as mplab
import math
from math import *
import numpy as np
import scipy
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import mpmath as mp
############################################################################################
def invlaplace_stehfest2(time, EL, tau):
    upsilon = 0.25
    pmax = 6.9
    E0 = 0.0154
    M = 8
    results = []
    for t in time:
        func = 0
        for k in range(1, 2*M+1):
            SUM = 0
            for j in range(int(math.floor((k+1)/2)), min(k, M)+1):
                dummy = j**(M+1)*scipy.special.binom(M,j)*scipy.special.binom(2*j,j)*scipy.special.binom(j,k-j)/scipy.math.factorial(M)
                SUM += dummy
            s = k*math.log(2)/t
            func += (-1)**(M+k)*SUM*pmax*EL/(mp.exp(tau*s)*mp.expint(1,tau*s)*E0+EL)/s
        func = func*math.log(2)/t
        results.append(func)
    return [float(i) for i in results]
############################################################################################
###Constant###
####Value####
data_time=np.array([69.0,99.0,139.0,179.0,219.0,259.0,295.5,299.03])
data_relax=np.array([6.2536,6.1652,6.0844,6.0253,5.9782,5.9404,5.9104,5.9066])
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    return np.sum((data_relax - invlaplace_stehfest2(data_time, *parameterTuple)) ** 2)
###Fitting###
#guess=np.array([0.33226685047281592707364253044085038793404361200072,8.6682623502960394383501102909774397295654841654769])
#guess=np.array([0.1,0.05])
parameterBounds = [[0.0, 1.0], [0.0, 10.0]]
# "seed" the numpy random number generator for repeatable results
# note the ".x" here to return only the parameter estimates in this example
guess = differential_evolution(sumOfSquaredError, parameterBounds, seed=3).x
Parameter,Covariance=scipy.optimize.curve_fit(invlaplace_stehfest2,data_time,data_relax,guess)
print Parameter
residu=sum(data_relax-invlaplace_stehfest2(data_time,Parameter[0],Parameter[1]))
Graph_Curves=plt.figure()
ax = Graph_Curves.add_subplot(111)
ax.plot(data_time,invlaplace_stehfest2(data_time,Parameter[0],Parameter[1]),"-")
ax.plot(data_time,data_relax,"o")
plt.show()
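One caveat on the residu line above: it sums signed residuals, which can cancel each other out. If you want a single goodness-of-fit number, the sum of squared errors (the quantity the genetic algorithm already minimizes) is more informative; for example, appended to the script above:
# sum of squared errors at the fitted parameters
ssq = sumOfSquaredError(Parameter)
print ssq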

how to sample multiple chains in PyMC3

I'm trying to sample multiple chains in PyMC3. In PyMC2 I would do something like this:
for i in range(N):
    model.sample(iter=iter, burn=burn, thin=thin)
How should I do the same thing in PyMC3? I saw there is an 'njobs' argument in the 'sample' method, but it throws an error when I set a value for it. I want to use the sampled chains to get 'pymc.gelman_rubin' output.
It is better to use njobs to run chains in parallel:
#!/usr/bin/env python3
import pymc3 as pm
import numpy as np
from pymc3.backends.base import merge_traces
xobs = 4 + np.random.randn(20)
model = pm.Model()
with model:
    mu = pm.Normal('mu', mu=0, sd=20)
    x = pm.Normal('x', mu=mu, sd=1., observed=xobs)
    step = pm.NUTS()
with model:
    trace = pm.sample(1000, step, njobs=2)
To run them serially, you can use a similar approach to your PyMC 2
example. The main difference is that each call to sample returns a
multi-chain trace instance (containing just a single chain in this
case). merge_traces will take a list of multi-chain instances and
create a single instance with all the chains.
#!/usr/bin/env python3
import pymc3 as pm
import numpy as np
from pymc3.backends.base import merge_traces
xobs = 4 + np.random.randn(20)
model = pm.Model()
with model:
    mu = pm.Normal('mu', mu=0, sd=20)
    x = pm.Normal('x', mu=mu, sd=1., observed=xobs)
    step = pm.NUTS()
with model:
    trace = merge_traces([pm.sample(1000, step, chain=i)
                          for i in range(2)])
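Since the goal was the Gelman-Rubin diagnostic, here is a short sketch of computing it on the multi-chain trace (pm.gelman_rubin is the PyMC3 spelling; its exact location can vary by version):
# requires a trace containing at least two chains
rhat = pm.gelman_rubin(trace)
print(rhat)  # dict mapping variable names to R-hat values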

Porting algorithm from Python to Go

I am trying to port this Python code to Go, but there is no beta() in the math package. Where can I find beta and the other functions required for this?
from numpy import *
from scipy.stats import beta
class BetaBandit(object):
    def __init__(self, num_options=2, prior=(1.0, 1.0)):
        self.trials = zeros(shape=(num_options,), dtype=int)
        self.successes = zeros(shape=(num_options,), dtype=int)
        self.num_options = num_options
        self.prior = prior

    def add_result(self, trial_id, success):
        self.trials[trial_id] = self.trials[trial_id] + 1
        if success:
            self.successes[trial_id] = self.successes[trial_id] + 1

    def get_recommendation(self):
        sampled_theta = []
        for i in range(self.num_options):
            # construct beta distribution for the posterior
            dist = beta(self.prior[0] + self.successes[i],
                        self.prior[1] + self.trials[i] - self.successes[i])
            # draw a sample from the beta distribution
            sampled_theta += [dist.rvs()]
        # return the index of the sample with the largest value
        return sampled_theta.index(max(sampled_theta))
If you are talking about numpy.random.beta, i.e. the Beta distribution (a special case of the Dirichlet distribution, related to the Gamma distribution: if X ~ Gamma(a, 1) and Y ~ Gamma(b, 1) are independent, then X/(X+Y) ~ Beta(a, b)), you can check the gostat project.
It has a beta.go source file which implements that function.
