# [makes] bvsm_red.dat
# [depends] lc.dat flow.dat lcsim.dat
# This py-file loads data file 'lc.dat'.
# This py-file loads data file 'flow.dat'.
# This py-file loads data file 'lcsim.dat'.
#
# Estimate the reduced-model parameters (nA0, k) from LC measurements:
# first with the late-time LC data alone, then again with the simulated
# early-time LC data prepended.  The fitted tables, confidence ellipses,
# and optimal-parameter markers are written to 'bvsm_red.dat'.
from paresto import Paresto
from misc import ellipse, octave_save
import numpy as np
from scipy.interpolate import interp1d
import scipy.stats as stats
import matplotlib.pyplot as plt
import casadi
from scipy.stats import f as finv

# Feed concentration and density used to convert the raw flow data.
teaf = 0.00721
teaden = 0.728

# Initial reactor volume and initial parameter guesses ("optimal" values).
VR0 = 2370
vrdiclo = 2.3497
k1k2ratio = 2


def _prepare_data(include_early=False):
    """Load flow/LC data and place both on one merged time grid.

    Returns (tout, Qf, lc_m, lc_ind, tlc, lc, N) where
      tout    -- merged, sorted time grid (union of flow and LC times)
      Qf      -- feed flowrate on tout, right-shifted (see below)
      lc_m    -- measured LC held (zero-order) onto tout
      lc_ind  -- indices of the LC measurement times within tout
      tlc, lc -- raw LC measurement times and values
      N       -- len(tout)

    With include_early=True, the simulated early-time LC data from
    'lcsim.dat' is prepended to the measured LC data.
    """
    flow = np.loadtxt('flow.dat')
    lcdat = np.loadtxt('lc.dat')
    tQf = np.concatenate(([0.0], flow[:, 0]))
    Qf = np.concatenate(([0.0], flow[:, 1] / teaden))
    if include_early:
        lcsim = np.loadtxt('lcsim.dat')
        tlc = np.concatenate((lcsim[:, 0], lcdat[:, 0]))
        lc = np.concatenate((lcsim[:, 2], lcdat[:, 1]))
    else:
        tlc = lcdat[:, 0]
        lc = lcdat[:, 1]

    # All time points occurring in either tlc or tQf; ic maps each original
    # time point to its position in the merged grid, so the tail of ic gives
    # the positions of the LC measurements.
    tout, ic = np.unique(np.concatenate([tQf, tlc]), return_inverse=True)
    lc_ind = ic[len(tQf):]

    # Zero-order-hold ('previous') interpolation of Qf and lc onto tout.
    Qf = interp1d(tQf, Qf, kind='previous', fill_value=0,
                  bounds_error=False)(tout)
    lc_m = interp1d(tlc, lc, kind='previous', fill_value=0,
                    bounds_error=False)(tout)
    N = len(Qf)

    # Replace NaNs (points outside the data range) with zeros.
    Qf[np.isnan(Qf)] = 0.0
    lc_m[np.isnan(lc_m)] = 0.0

    # Right-shift Qf so that index k+1 carries the Qf value active during
    # interval [t[k], t[k+1]].  paresto's shooting transcription passes the
    # data symbol bound to t[k+1] (i.e. d[:, k+1]) into the integrator for
    # interval k; without this shift the integrator runs each interval with
    # the *next* flowrate, biasing every parameter estimate.  The first
    # index is not used by any interval, so its value is arbitrary.
    Qf = np.concatenate([[Qf[0]], Qf[:-1]])

    return tout, Qf, lc_m, lc_ind, tlc, lc, N


def lcmeas(t, v, p):
    """Measurement function: model LC signal nC / (nC + 2*nD)."""
    Badded = (v.VR - p.VR0) * p.cBf  # moles of B fed so far
    nD = v.eps2
    nC = Badded - 2 * nD
    # fmax guards the division when nC is near zero (start of the batch).
    return [1 / (1 + 2 * nD / casadi.fmax(nC, 1e-6))]


def reduced_model(t, v, p):
    """Reduced ODE model: d(VR)/dt = Qf, d(eps2)/dt from the rate ratio k."""
    # fmax keeps Badded positive at the very start, where VR == VR0.
    Badded = casadi.fmax((v.VR - p.VR0) * p.cBf, 1e-6)
    deps2dt = v.Qf * p.cBf / (
        1.0 + p.k * (p.nA0 - Badded + v.eps2) / (Badded - 2 * v.eps2))
    return [v.Qf, deps2dt]


def lsq_func(t, y, p):
    """Relative least-squares residual between measured and model LC."""
    return [y.lc_m / y.lc - 1]


def _make_estimator(tout, lc_ind):
    """Build and initialize a Paresto instance on the given time grid.

    The estimator uses shooting transcription with states (VR, eps2),
    parameters (nA0, k, cBf, VR0), and measured data (Qf, lc_m).  Only
    nA0 and k are free to vary, within +/-50% of the initial guess.
    """
    pe = Paresto()
    pe.transcription = 'shooting'
    pe.nlp_solver_options = {'ipopt': {'mumps_scaling': 0}}
    pe.nlp_solver_options['sens_linsol_options'] = {'eps': 0}
    pe.x = ['VR', 'eps2']
    pe.p = ['nA0', 'k', 'cBf', 'VR0']
    pe.y = ['lc']
    pe.h = lcmeas
    pe.d = ['Qf', 'lc_m']
    pe.ode = reduced_model
    pe.lsq = lsq_func
    pe.tout = tout
    pe.lsq_ind = lc_ind  # only include lc_ind in objective
    pe.ndata = len(pe.lsq_ind)
    pe.initialize()

    # Initial guess for parameters and states.
    pe.ic.nA0 = vrdiclo
    pe.ic.k = k1k2ratio
    pe.ic.cBf = teaf
    pe.ic.VR0 = VR0
    pe.ic.VR = VR0
    pe.ic.eps2 = 0
    pe.lb_ub()
    pe.lb.nA0 = 0.5 * pe.ic.nA0
    pe.lb.k = 0.5 * pe.ic.k
    pe.ub.nA0 = 1.5 * pe.ic.nA0
    pe.ub.k = 1.5 * pe.ic.k
    return pe


def _confidence_ellipse(est_, conf_, ndata, np_, alpha):
    """Return (ellipse points (2, 100), bbox) for the (nA0, k) region.

    Uses the F-statistic level a = 2*f/(ndata - np_) * np_*F(alpha; np_,
    ndata - np_) on the Hessian from the confidence calculation.
    """
    Fstat = np_ * stats.f.ppf(alpha, np_, ndata - np_)
    a = np.asarray(2 * est_.f / (ndata - np_) * Fstat).item()
    xx, yy, major, minor, bbox_ = ellipse(conf_.H, a, 100,
                                          [est_.theta.nA0, est_.theta.k])
    return np.array([xx, yy]), bbox_


# --- First estimation: late-time LC data only --------------------------------
tout, Qf, lc_m, lc_ind, tlc, lc, N = _prepare_data(include_early=False)
pe = _make_estimator(tout, lc_ind)

# Estimate parameters.
est, v, p = pe.optimize(np.vstack([Qf, lc_m]).reshape(2, N, 1))

# Calculate confidence intervals with 95% confidence.
conf = pe.confidence(est, 0.95)

# Display results.
print('Estimated parameters')
print('\n'.join(f'{k}: {float(v)}' for k, v in est.theta.items()))
print('Bounding box intervals')
print('\n'.join(f'{k}: {float(v)}' for k, v in conf.bbox.items()))

# Confidence ellipse and plot tables for the first estimation.
np_ = len(est.conf_ind)
alpha = 0.95
tmp, bbox = _confidence_ellipse(est, conf, len(tlc), np_, alpha)
table1 = np.column_stack((pe.tout, v['lc'].flatten()))  # model LC vs time
table2 = np.column_stack((tlc, lc))                     # measured LC vs time

# --- Second estimation: prepend the early-time (simulated) LC data -----------
tout, Qf, lc_m, lc_ind, tlc, lc, N = _prepare_data(include_early=True)
pesim = _make_estimator(tout, lc_ind)

# Estimate parameters.
estsim, vsim, psim = pesim.optimize(np.vstack([Qf, lc_m]).reshape(2, N, 1))

# Calculate confidence intervals with 95% confidence.
confsim = pesim.confidence(estsim, 0.95)

# Display results.
print('Estimated parameters')
print(estsim['theta'])
print('Bounding box intervals')
print(confsim['bbox'])

# Confidence ellipse and plot tables for the second estimation.
tmpex, bboxex = _confidence_ellipse(estsim, confsim, len(tlc), np_, alpha)
table1ex = np.column_stack((pesim.tout, vsim['lc'].flatten()))
table2ex = np.column_stack((tlc, lc))

# Optimal-parameter point (nA0, k) for each estimation, written as 1x2
# arrays so the .gp can plot them with `w p` instead of hard-coding the
# marker positions.
opt = np.array([[float(est.theta.nA0), float(est.theta.k)]])
optex = np.array([[float(estsim.theta.nA0), float(estsim.theta.k)]])

# Save all datasets (order matches Octave: table1 table2 bbox tmp table1ex
# table2ex bboxex tmpex; opt and optex appended for the markers).
octave_save(
    'bvsm_red.dat',
    ('table1', table1),
    ('table2', table2),
    ('bbox', bbox),
    ('tmp', tmp.T),
    ('table1ex', table1ex),
    ('table2ex', table2ex),
    ('bboxex', bboxex),
    ('tmpex', tmpex.T),
    ('opt', opt),
    ('optex', optex),
)