I'm trying to develop a regression model with constraints on the effect of the independent variables. My model equation is y = a0 + a1*x1 + a2*x2 with 200 data points. What I want to achieve is that sum(a1*x1) over the 200 data points should fall in a certain range, i.e. lb1 < sum(a1*x1) < ub1. I am using Gekko for the optimization part and got stuck while applying this condition.
I am using the following code where ubdict is the dictionary for the boundaries:
import numpy as np
import gekko as gk

m = gk.GEKKO(remote=False)
m.options.IMODE = 2    # regression mode
y = np.array(df['y'])  # dependent var for optimization
x = np.array(df[X])    # array of independent vars for optimization
n = x.shape[1]         # number of variables
c = m.Array(m.FV, n+1) # array of parameters and intercept
for ci in c:
    ci.STATUS = 1      # calculate fixed parameter
xp = [None]*n
# load data
xd = m.Array(m.Param, n)
yd = m.Param(value=y)
for i in range(n):
    xd[i].value = x[:, i]
    if ubdict[i] >= 0:
        xp[i] = m.Var(lb=0, ub=ubdict[i])
    elif ubdict[i] < 0:
        xp[i] = m.Var(lb=ubdict[i], ub=0)
    m.Equation(xp[i] == c[i]*xd[i])
yp = m.Var()
m.Equation(yp == m.sum([xp[i] for i in range(n)] + [c[n]]))
# minimize difference between actual and predicted y
m.Minimize((yd-yp)**2)
# APOPT solver
m.options.SOLVER = 1
# solve
m.solve(disp=True)
# retrieve parameter values
a = [i.value[0] for i in c]
print(a)
But this applies the constraint row-wise (per observation). What I want is something like:
xp[i] = m.Var(lb=0, ub=ubdict[i])
m.Equation(xp[i]==sum(c[i]*xd[i]) over observations)
Any suggestion would be of great help!
Below is a similar problem with sample data.
Regression Mode with IMODE=2
Use the m.vsum() object in Gekko with IMODE=2. Gekko lets you write the equations once and then applies the data to each equation. This is more efficient for large-scale data sets.
import numpy as np
from gekko import GEKKO
# load data
x1 = np.array([1,2,5,3,2,5,2])
x2 = np.array([5,6,7,2,1,3,2])
ym = np.array([3,2,3,5,6,7,8])
# model
m = GEKKO()
c = m.Array(m.FV,3)
for ci in c:
    ci.STATUS=1
x1 = m.Param(value=x1)
x2 = m.Param(value=x2)
ymeas = m.Param(value=ym)
ypred = m.Var()
m.Equation(ypred == c[0] + c[1]*x1 + c[2]*x2)
# add constraint on sum(c[1]*x1) with vsum
v1 = m.Var(); m.Equation(v1==c[1]*x1)
con = m.Var(lb=0,ub=10); m.Equation(con==m.vsum(v1))
m.Minimize((ypred-ymeas)**2)
m.options.IMODE = 2
m.solve()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
for i,ci in enumerate(c):
    print(i,ci.value[0])
# plot solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
plt.plot(ymeas,ypred,'ro')
plt.plot([0,10],[0,10],'k-')
plt.xlabel('Meas')
plt.ylabel('Pred')
plt.savefig('results.png',dpi=300)
plt.show()
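To match the original requirement lb1 < sum(a1*x1) < ub1, the range goes on the bounds of the summation variable. A minimal variation of the code above, with hypothetical values for lb1 and ub1:

# hypothetical bounds for illustration
lb1, ub1 = 2.0, 10.0
con = m.Var(lb=lb1, ub=ub1)      # constrained sum over all observations
m.Equation(con == m.vsum(v1))    # v1 = c[1]*x1 is defined row-wise above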
Optimization Mode (IMODE=3)
Optimization mode (IMODE=3) allows you to write each equation and objective term individually. Both approaches give the same solution.
import numpy as np
from gekko import GEKKO
# load data
x1 = np.array([1,2,5,3,2,5,2])
x2 = np.array([5,6,7,2,1,3,2])
ym = np.array([3,2,3,5,6,7,8])
n = len(ym)
# model
m = GEKKO()
c = m.Array(m.FV,3)
for ci in c:
    ci.STATUS=1
yp = m.Array(m.Var,n)
for i in range(n):
    m.Equation(yp[i]==c[0]+c[1]*x1[i]+c[2]*x2[i])
    m.Minimize((yp[i]-ym[i])**2)
# add constraint on sum(c[1]*x1)
s = m.Var(lb=0,ub=10); m.Equation(s==c[1]*sum(x1))
m.options.IMODE = 3
m.solve()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
for i,ci in enumerate(c):
    print(i,ci.value[0])
# plot solution
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
ypv = [yp[i].value[0] for i in range(n)]
plt.plot(ym,ypv,'ro')
plt.plot([0,10],[0,10],'k-')
plt.xlabel('Meas')
plt.ylabel('Pred')
plt.savefig('results.png',dpi=300)
plt.show()
For future questions, please create a simple and complete example that demonstrates the issue.
I have this code which models an imbalanced-class problem with a decision tree, but somehow the wrong ccp_alpha is picked in the end. The ccp_alpha should be around 0.005, yet the code picks 0.020.
I am not sure why it ends up with "ccp_alpha=0.02044841897041862" instead of 0.005, as suggested by the graph of
"Recall vs alpha for training and testing sets"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import recall_score

class_weight_t = {0: 0.07, 1: 0.89}
clf = DecisionTreeClassifier(random_state=1, class_weight=class_weight_t)
path = clf.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
pd.DataFrame(path)
clfs = []
for ccp_alpha in ccp_alphas:
    clf = DecisionTreeClassifier(
        random_state=1, ccp_alpha=ccp_alpha, class_weight=class_weight_t
    )
    clf.fit(X_train, y_train)
    clfs.append(clf)
    #print(str(clf)+","+str(ccp_alpha)+","+str(clfs[-1].tree_.node_count))
print(
    "Number of nodes in the last tree is: {} with ccp_alpha: {}".format(
        clfs[-1].tree_.node_count, ccp_alphas[-1]
    )
)
Number of nodes in the last tree is: 1 with ccp_alpha: 0.29696815935983295
recall_train = []
for clf in clfs:
    pred_train = clf.predict(X_train)
    values_train = recall_score(y_train, pred_train)
    recall_train.append(values_train)
recall_test = []
for clf in clfs:
    pred_test = clf.predict(X_test)
    values_test = recall_score(y_test, pred_test)
    recall_test.append(values_test)
fig, ax = plt.subplots(figsize=(15, 5))
ax.set_xlabel("alpha")
ax.set_ylabel("Recall")
ax.set_title("Recall vs alpha for training and testing sets")
ax.plot(
ccp_alphas, recall_train, marker="o", label="train", drawstyle="steps-post",
)
ax.plot(ccp_alphas, recall_test, marker="o", label="test", drawstyle="steps-post")
#ax.plot(
# ccp_alphas, train_scores, marker="o", label="train", drawstyle="steps-post",
#)
#ax.plot(ccp_alphas, test_scores, marker="o", label="test", drawstyle="steps-post")
ax.legend()
plt.show()
https://i.stack.imgur.com/0imAq.png
index_best_model = np.argmax(recall_test)
best_model = clfs[index_best_model]
print(best_model)
DecisionTreeClassifier(ccp_alpha=0.02044841897041862,class_weight={0: 0.07, 1: 0.89}, random_state=1)
I wish to model the biweekly HFMD (hand, foot, and mouth disease) cases in Malaysia.
I want to show that a model using the squared-error objective or the L1-norm objective can fit the biweekly HFMD cases better than a model without these objectives.
My question is: is it possible to model the biweekly HFMD cases without using the squared-error and L1-norm objectives?
I have attached the code below:
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
m1 = GEKKO(remote=False)
m2 = GEKKO(remote=False)
m = m1
# Known parameters
nb = 26 # Number of biweeks in a year
ny = 3 # Number of years
biweeks = np.zeros((nb,ny*nb+1))
biweeks[0][0] = 1
for i in range(nb):
    for j in range(ny):
        biweeks[i][j*nb+i+1] = 1
# Write csv data file
tm = np.linspace(0,78,79)
# case data
# Malaysia weekly HFMD data from the year 2013 - 2015
cases = np.array([506,506,700,890,1158,1605,1694,1311,1490,1310,1368,\
1009,1097,934,866,670,408,481,637,749,700,648,710,\
740,627,507,516,548,636,750,1066,1339,1565,\
1464,1575,1759,1631,1601,1227,794,774,623,411,\
750,1017,976,1258,1290,1546,1662,1720,1553,1787,1291,1712,2227,2132,\
2550,2140,1645,1743,1296,1153,871,621,570,388,\
347,391,446,442,390,399,421,398,452,470,437,411])
data = np.vstack((tm,cases))
data = data.T
# np.savetxt('measles_biweek_2.csv',data,delimiter=',',header='time,cases')
np.savetxt('hfmd_biweek_2.csv',data,delimiter=',',header='time,cases')
# Load data from csv
# m.time, cases_meas = np.loadtxt('measles_biweek_2.csv', \
m.time, cases_hfmd = np.loadtxt('hfmd_biweek_2.csv', \
delimiter=',',skiprows=1,unpack=True)
# m.Vr = m.Param(value = 0)
# Variables
# m.N = m.FV(value = 3.2e6)
# m.mu = m.FV(value = 7.8e-4)
# m.N = m.FV(value = 3.11861e7)
# m.mu = m.FV(value = 6.42712e-4)
m.N = m.FV(value = 3.16141e7) # Malaysia average total population (2015 - 2017)
m.mu = m.FV(value = 6.237171519e-4) # Malaysia scaled birth rate (births/biweek/total population)
m.rep_frac = m.FV(value = 0.45) # Percentage of underreporting
# Beta values (unknown parameters in the model)
m.beta = [m.FV(value=1, lb=0.1, ub=5) for i in range(nb)]
# Predicted values
m.S = m.SV(value = 0.162492875*m.N.value, lb=0,ub=m.N) # Susceptibles (Kids from 0 - 9 YO: 5137066 people) - Average of 94.88% from total reported cases
m.I = m.SV(value = 7.907863896e-5*m.N.value, lb=0,ub=m.N) #
# m.V = m.Var(value = 2e5)
# measured values
m.cases = m.CV(value = cases_hfmd, lb=0)
# turn on feedback status for CASES
m.cases.FSTATUS = 1
# weight on prior model predictions
m.cases.WMODEL = 0
# meas_gap = deadband that represents level of
# accuracy / measurement noise
db = 100
m.cases.MEAS_GAP = db
for i in range(nb):
    m.beta[i].STATUS = 1
#m.gamma = m.FV(value=0.07)
m.gamma = m.FV(value=0.07)
m.gamma.STATUS = 1
m.gamma.LOWER = 0.05
m.gamma.UPPER = 0.5
m.biweek=[None]*nb
for i in range(nb):
    m.biweek[i] = m.Param(value=biweeks[i])
# Intermediate
m.Rs = m.Intermediate(m.S*m.I/m.N)
# Equations
sum_biweek = sum([m.biweek[i]*m.beta[i]*m.Rs for i in range(nb)])
# m.Equation(m.S.dt()== -sum_biweek + m.mu*m.N - m.Vr)
m.Equation(m.S.dt()== -sum_biweek + m.mu*m.N)
m.Equation(m.I.dt()== sum_biweek - m.gamma*m.I)
m.Equation(m.cases == m.rep_frac*sum_biweek)
# m.Equation(m.V.dt()==-m.Vr)
# options
m.options.SOLVER = 1
m.options.NODES=3
# imode = 5, dynamic estimation
m.options.IMODE = 5
# ev_type = 1 (L1-norm) or 2 (squared error)
m.options.EV_TYPE = 2
# solve model and print solver output
m.solve()
[print('beta['+str(i+1)+'] = '+str(m.beta[i][0])) \
for i in range(nb)]
print('gamma = '+str(m.gamma.value[0]))
# export data
# stack time and avg as column vectors
my_data = np.vstack((m.time,np.asarray(m.beta),m.gamma))
# transpose data
my_data = my_data.T
# save text file with comma delimiter
beta_str = ''
for i in range(nb):
    beta_str = beta_str + ',beta[' + str(i+1) + ']'
header_name = 'time,gamma' + beta_str
##np.savetxt('solution_data.csv',my_data,delimiter=',',\
## header = header_name, comments='')
np.savetxt('solution_data_EVTYPE_'+str(m.options.EV_TYPE)+\
'_gamma'+str(m.gamma.STATUS)+'.csv',\
my_data,delimiter=',',header = header_name)
plt.figure(num=1, figsize=(16,8))
plt.suptitle('Estimation')
plt.subplot(2,2,1)
plt.plot(m.time,m.cases, label='Cases (model)')
plt.plot(m.time,cases_hfmd, label='Cases (measured)')
if m.options.EV_TYPE==2:
    plt.plot(m.time,cases_hfmd+db/2, 'k-.',\
             lw=0.5, label=r'$Cases_{db-hi}$')
    plt.plot(m.time,cases_hfmd-db/2, 'k-.',\
             lw=0.5, label=r'$Cases_{db-lo}$')
    plt.fill_between(m.time,cases_hfmd-db/2,\
                     cases_hfmd+db/2,color='gold',alpha=.5)
plt.legend(loc='best')
plt.ylabel('Cases')
plt.subplot(2,2,2)
plt.plot(m.time,m.S,'r--')
plt.ylabel('S')
plt.subplot(2,2,3)
[plt.plot(m.time,m.beta[i], label='_nolegend_')\
for i in range(nb)]
plt.plot(m.time,m.gamma,'c--', label=r'$\gamma$')
plt.legend(loc='best')
plt.ylabel(r'$\beta, \gamma$')
plt.xlabel('Time')
plt.subplot(2,2,4)
plt.plot(m.time,m.I,'g--')
plt.xlabel('Time')
plt.ylabel('I')
plt.subplots_adjust(hspace=0.2,wspace=0.4)
name = 'cases_EVTYPE_'+ str(m.options.EV_TYPE) +\
'_gamma' + str(m.gamma.STATUS) + '.png'
plt.savefig(name)
plt.show()
To define a custom objective, use the m.Minimize() or m.Maximize() functions instead of the squared-error or L1-norm objectives that are built into m.CV() objects, and use m.Var() instead of m.CV(), such as:
from gekko import GEKKO
import numpy as np
m = GEKKO()
x = m.Array(m.Var,4,value=1,lb=1,ub=5)
x1,x2,x3,x4 = x
# change initial values
x2.value = 5; x3.value = 5
m.Equation(x1*x2*x3*x4>=25)
m.Equation(x1**2+x2**2+x3**2+x4**2==40)
m.Minimize(x1*x4*(x1+x2+x3)+x3)
m.solve()
print('x: ', x)
print('Objective: ',m.options.OBJFCNVAL)
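For the estimation case in the question, here is a minimal sketch of how an explicit L1-norm objective can be written with m.Var() and slack variables instead of relying on the built-in EV_TYPE option of m.CV(). The linear-growth model and the measurements ym are made up only to keep the example short:

from gekko import GEKKO
import numpy as np

# hypothetical measurements for illustration
ym = np.array([1.0, 2.1, 2.9, 4.2, 5.0])

m = GEKKO(remote=False)
m.time = np.arange(len(ym))
k = m.FV(value=1.0); k.STATUS = 1      # parameter to estimate
yp = m.Var(value=ym[0])                # prediction as a plain Var, not a CV
m.Equation(yp.dt() == k)               # simple linear-growth model (assumption)
ymeas = m.Param(value=ym)              # measured data

# custom L1-norm objective: minimize sum of absolute errors via slack variables
ehi = m.Var(value=0, lb=0)
elo = m.Var(value=0, lb=0)
m.Equation(ehi >= yp - ymeas)
m.Equation(elo >= ymeas - yp)
m.Minimize(ehi + elo)

m.options.IMODE = 5                    # dynamic estimation
m.solve(disp=False)
print('estimated slope k =', k.value[0])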
Here is a similar problem with disease prediction (Measles) that uses m.CV().
import numpy as np
from gekko import GEKKO
import matplotlib.pyplot as plt
# Import Data
# write csv data file
t_s = np.linspace(0,78,79)
# case data
cases_s = np.array([180,180,271,423,465,523,649,624,556,420,\
423,488,441,268,260,163,83,60,41,48,65,82,\
145,122,194,237,318,450,671,1387,1617,2058,\
3099,3340,2965,1873,1641,1122,884,591,427,282,\
174,127,84,97,68,88,79,58,85,75,121,174,209,458,\
742,929,1027,1411,1885,2110,1764,2001,2154,1843,\
1427,970,726,416,218,160,160,188,224,298,436,482,468])
# Initialize gekko model
m = GEKKO()
# Number of collocation nodes
nodes = 4
# Number of phases (years in this case)
n = 3
#Biweek periods per year
bi = 26
# Time horizon (for all 3 phases)
m.time = np.linspace(0,1,bi+1)
# Parameters that will repeat each year
N = m.Param(3.2e6)
mu = m.Param(7.8e-4)
rep_frac = m.Param(0.45)
Vr = m.Param(0)
beta = m.MV(2,lb = 0.1)
beta.STATUS = 1
gamma = m.FV(value=0.07)
gamma.STATUS = 1
gamma.LOWER = 0.05
gamma.UPPER = 0.5
# Data used to control objective function
casesobj1 = m.Param(cases_s[0:(bi+1)])
casesobj2 = m.Param(cases_s[bi:(2*bi+1)])
casesobj3 = m.Param(cases_s[2*bi:(3*bi+1)])
# Variables that vary between years, one version for each year
cases = [m.CV(value = cases_s[(i*bi):(i+1)*(bi+1)-i],lb=0) for i in range(n)]
for i in cases:
    i.FSTATUS = 1
    i.WMODEL = 0
    i.MEAS_GAP = 100
S = [m.Var(0.06*N,lb = 0,ub = N) for i in range(n)]
I = [m.Var(0.001*N, lb = 0,ub = N) for i in range(n)]
V = [m.Var(2e5) for i in range(n)]
# Equations (created for each year)
for i in range(n):
    R = m.Intermediate(beta*S[i]*I[i]/N)
    m.Equation(S[i].dt() == -R + mu*N - Vr)
    m.Equation(I[i].dt() == R - gamma*I[i])
    m.Equation(cases[i] == rep_frac*R)
    m.Equation(V[i].dt() == -Vr)
# Connect years together at endpoints
for i in range(n-1):
    m.Connection(cases[i+1],cases[i],1,bi,1,nodes)#,1,nodes)
    m.Connection(cases[i+1],'CALCULATED',pos1=1,node1=1)
    m.Connection(S[i+1],S[i],1,bi,1,nodes)
    m.Connection(S[i+1],'CALCULATED',pos1=1,node1=1)
    m.Connection(I[i+1],I[i],1,bi,1,nodes)
    m.Connection(I[i+1],'CALCULATED',pos1=1, node1=1)
# Solver options
m.options.IMODE = 5
m.options.NODES = nodes
m.options.EV_TYPE = 1
m.options.SOLVER = 1
# Solve
m.Obj(2*(casesobj1-cases[0])**2+(casesobj3-cases[2])**2)
m.solve()
# Calculate the start time of each phase
ts = np.linspace(1,n,n)
# Plot
plt.figure()
plt.subplot(4,1,1)
tm = np.empty(len(m.time))
for i in range(n):
    tm = m.time + ts[i]
    plt.plot(tm,cases[i].value,label='Cases Year %s'%(i+1))
    plt.plot(tm,cases_s[(i*bi):(i+1)*(bi+1)-i],'.')
plt.legend()
plt.ylabel('Cases')
plt.subplot(4,1,2)
for i in range(n):
    tm = m.time + ts[i]
    plt.plot(tm,beta.value,label='Beta Year %s'%(i+1))
plt.legend()
plt.ylabel('Contact Rate')
plt.subplot(4,1,3)
for i in range(n):
    tm = m.time + ts[i]
    plt.plot(tm,I[i].value,label='I Year %s'%(i+1))
plt.legend()
plt.ylabel('Infectives')
plt.subplot(4,1,4)
for i in range(n):
    tm = m.time + ts[i]
    plt.plot(tm,S[i].value,label='S Year %s'%(i+1))
plt.legend()
plt.ylabel('Susceptibles')
plt.xlabel('Time (yr)')
plt.show()
This question is related to my previous question: how to formulate the problem of finding the optimal PID parameters in Gekko?
I have successfully estimated the optimal PID parameters by minimizing the IAE, based on the answer to the above question. However, I would like to add a constraint on the MV overshoot. Below is the pseudo-code for the constraint that the MV overshoot should be less than 10%:
MV_Overshoot = Max_MV - SteadyState_MV
MV_Overshoot < 0.1*SteadyState_MV  # constraint
However, I am stuck on the two questions below:
How do I find the steady-state MV?
How do I find the maximum MV from the MV array?
Or is there a better way to calculate the MV overshoot?
Solve a first step with a steady state calculation to determine the OP (MV) and PV values.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
# Process model
Kp = 0.5 # process gain
tauP = 10.0 # process time constant
# pre-solve for steady state values
m = GEKKO()
OP = m.Var(value=0.0) # controller output (MV)
PV = m.Var(value=0.0) # process variable
SP = m.Param(value=5.0) # final set point
m.Equation(SP==PV) # assume PV reaches set point
m.Equation(tauP*PV.dt() + PV == Kp*OP)
m.options.IMODE = 1
m.solve(disp=False)
print('Steady state target values')
print('OP: ' + str(OP.value[0]))
print('PV: ' + str(PV.value[0]))
print('SP: ' + str(SP.value[0]))
PV_ss = PV.value[0]
OP_ss = OP.value[0]
Solve a second step to optimize the tuning parameters to stay under the requested overshoot targets. Overshoot is typically defined in terms of the distance above the setpoint, not the amount over the MV target. Both are included in the example.
# dynamic optimization of controller tuning
m = GEKKO()
tf = 40
m.time = np.linspace(0,tf,2*tf+1)
step = np.zeros(2*tf+1)
step[3:40] = 0.0
step[40:] = 5.0
# Controller model
Kc = m.FV(15.0,lb=0,ub=100) # controller gain
tauI = m.FV(100.0,lb=0.01,ub=100) # controller reset time
tauD = m.FV(0.0,lb=0,ub=100) # derivative constant
Kc.STATUS = 1; tauI.STATUS = 1; tauD.STATUS = 1
OP_0 = m.Const(value=0.0) # OP bias
OP = m.Var(value=0.0,ub=OP_ss*1.1) # controller output
# with overshoot bound on OP (MV)
PV = m.Var(value=0.0,ub=PV_ss*1.1) # process variable
# with overshoot bound on PV
SP = m.Param(value=step) # set point
Intgl = m.Var(value=0.0) # integral of the error
err = m.Intermediate(SP-PV) # set point error
m.Equation(Intgl.dt()==err) # integral of the error
m.Equation(OP == OP_0 + Kc*err + (Kc/tauI)*Intgl - PV.dt())
m.Equation(tauP*PV.dt() + PV == Kp*OP)
m.Minimize((SP-PV)**2)
m.options.IMODE=6
m.solve(disp=False)
print('Tuning values')
print('Kc: ' + str(Kc.value[0]))
print('tauI: ' + str(tauI.value[0]))
print('tauD: ' + str(tauD.value[0]))
plt.figure()
plt.subplot(2,1,1)
plt.plot([0,max(m.time)],[1.1*OP_ss,1.1*OP_ss],\
'b-',label='Upper bound')
plt.plot(m.time,OP.value,'b:',label='OP')
plt.ylabel('Output')
plt.legend()
plt.subplot(2,1,2)
plt.plot(m.time,SP.value,'k-',label='SP')
plt.plot(m.time,PV.value,'r--',label='PV')
plt.plot([0,max(m.time)],[1.1*PV_ss,1.1*PV_ss],\
'b-',label='Upper bound')
plt.xlabel('Time (sec)')
plt.ylabel('Process')
plt.legend()
plt.show()
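To check the result against the overshoot definition from the question, a short post-processing sketch (reusing the OP, PV, OP_ss, and PV_ss values computed above) can report the realized overshoot directly:

# realized overshoot relative to the steady-state targets
OP_overshoot = (max(OP.value) - OP_ss) / OP_ss * 100.0
PV_overshoot = (max(PV.value) - PV_ss) / PV_ss * 100.0
print('OP overshoot: %.2f%%' % OP_overshoot)
print('PV overshoot: %.2f%%' % PV_overshoot)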
I built a 5-layer neural network using TensorFlow.
I have a problem getting reproducible (stable) results.
I found similar questions regarding TensorFlow reproducibility and the corresponding answers, such as How to get stable results with TensorFlow, setting random seed
But the problem is still not solved.
I also set the random seed like the following:
tf.set_random_seed(1)
Furthermore, I added a seed option to every random function, such as:
b1 = tf.Variable(tf.random_normal([nHidden1], seed=1234))
I confirmed that the first epoch gives identical results, but from the second epoch onward the results gradually diverge.
How can I get the reproducible results?
Am I missing something?
Here is a code block I use.
def xavier_init(n_inputs, n_outputs, uniform=True):
    if uniform:
        init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range, seed=1234)
    else:
        stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev, seed=1234)
import numpy as np
import tensorflow as tf
import dataSetup
from scipy.stats.stats import pearsonr
tf.set_random_seed(1)
x_train, y_train, x_test, y_test = dataSetup.input_data()
# Parameters
learningRate = 0.01
trainingEpochs = 1000000
batchSize = 64
displayStep = 100
thresholdReduce = 1e-6
thresholdNow = 0.6
#dropoutRate = tf.constant(0.7)
# Network Parameter
nHidden1 = 128 # number of 1st layer nodes
nHidden2 = 64 # number of 2nd layer nodes
nInput = 24 #
nOutput = 1 # Predicted score: 1 output for regression
# save parameter
modelPath = 'model/model_layer5_%d_%d_mini%d_lr%.3f_noDrop_rollBack.ckpt' %(nHidden1, nHidden2, batchSize, learningRate)
# tf Graph input
X = tf.placeholder("float", [None, nInput])
Y = tf.placeholder("float", [None, nOutput])
# Weight
W1 = tf.get_variable("W1", shape=[nInput, nHidden1], initializer=xavier_init(nInput, nHidden1))
W2 = tf.get_variable("W2", shape=[nHidden1, nHidden2], initializer=xavier_init(nHidden1, nHidden2))
W3 = tf.get_variable("W3", shape=[nHidden2, nHidden2], initializer=xavier_init(nHidden2, nHidden2))
W4 = tf.get_variable("W4", shape=[nHidden2, nHidden2], initializer=xavier_init(nHidden2, nHidden2))
WFinal = tf.get_variable("WFinal", shape=[nHidden2, nOutput], initializer=xavier_init(nHidden2, nOutput))
# biases
b1 = tf.Variable(tf.random_normal([nHidden1], seed=1234))
b2 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
b3 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
b4 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
bFinal = tf.Variable(tf.random_normal([nOutput], seed=1234))
# Layers for dropout
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), b1))
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), b2))
L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), b3))
L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), b4))
hypothesis = tf.add(tf.matmul(L4, WFinal), bFinal)
print "Layer setting DONE..."
# define loss and optimizer
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cost)
# Initialize the variable
init = tf.initialize_all_variables()
# save op to save and restore all the variables
saver = tf.train.Saver()
with tf.Session() as sess:
    # initialize
    sess.run(init)
    print "Initialize DONE..."
    # Training
    costPrevious = 100000000000000.0
    best = float("INF")
    totalBatch = int(len(x_train)/batchSize)
    print "Total Batch: %d" %totalBatch
    for epoch in range(trainingEpochs):
        #print "EPOCH: %04d" %epoch
        avgCost = 0.
        for i in range(totalBatch):
            np.random.seed(i+epoch)
            randidx = np.random.randint(len(x_train), size=batchSize)
            batch_xs = x_train[randidx,:]
            batch_ys = y_train[randidx,:]
            # Fit training using batch data
            sess.run(optimizer, feed_dict={X:batch_xs, Y:batch_ys})
            # compute average loss
            avgCost += sess.run(cost, feed_dict={X:batch_xs, Y:batch_ys})/totalBatch
        # compare the current cost and the previous
        # if current cost > the previous
        # just continue and make the learning rate half
        #print "Cost: %1.8f --> %1.8f at epoch %05d" %(costPrevious, avgCost, epoch+1)
        if avgCost > costPrevious + .5:
            #sess.run(init)
            load_path = saver.restore(sess, modelPath)
            print "Cost increases at the epoch %05d" %(epoch+1)
            print "Cost: %1.8f --> %1.8f" %(costPrevious, avgCost)
            continue
        costNow = avgCost
        reduceCost = abs(costPrevious - costNow)
        costPrevious = costNow
        # Display logs per epoch step
        if costNow < best:
            best = costNow
            bestMatch = sess.run(hypothesis, feed_dict={X:x_test})
            # model save
            save_path = saver.save(sess, modelPath)
        if epoch % displayStep == 0:
            print "step {}".format(epoch)
            pearson = np.corrcoef(bestMatch.flatten(), y_test.flatten())
            print 'train loss = {}, current loss = {}, test corrcoef={}'.format(best, costNow, pearson[0][1])
        if reduceCost < thresholdReduce or costNow < thresholdNow:
            print "Epoch: %04d, Cost: %.9f, Prev: %.9f, Reduce: %.9f" %(epoch+1, costNow, costPrevious, reduceCost)
            break
    print "Optimization Finished"
It seems that your results are perhaps not reproducible because you are using Saver to write and restore a checkpoint each time: the second time you run the code, the variable values aren't initialized from your random seed; they are restored from your previous checkpoint.
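A minimal sketch of one way to rule this out (assuming the TF 1.x graph API; the checkpoint prefix and the example variable are hypothetical): delete any stale checkpoint before a fresh run so initialization always comes from the seeded initializers, and only call saver.restore() when you deliberately want to resume.

import glob, os
import numpy as np
import tensorflow as tf

SEED = 1234
np.random.seed(SEED)
tf.set_random_seed(SEED)                 # graph-level seed, set before building ops

modelPath = 'example.ckpt'               # hypothetical checkpoint prefix
resume = False                           # set True only to continue a previous run

if not resume:
    # remove stale checkpoint files so the run starts from the seeded init
    for f in glob.glob(modelPath + '*'):
        os.remove(f)

W = tf.Variable(tf.random_normal([4, 2], seed=SEED), name='W')  # example variable
saver = tf.train.Saver()

with tf.Session() as sess:
    if resume and glob.glob(modelPath + '*'):
        saver.restore(sess, modelPath)                  # deliberate resume
    else:
        sess.run(tf.global_variables_initializer())     # fresh seeded start
    # ... training loop goes here ...
    saver.save(sess, modelPath)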
Please trim your code example down to just the code necessary to reproduce the irreproducibility.