
Develop #3

Merged
merged 13 commits on Aug 4, 2018
Update with new model
Helge-Andre Langåker committed Jun 20, 2018
commit fd4d077489abdd5ef6dfae635411b0e7000ca2fb
86 changes: 31 additions & 55 deletions examples/car.py
@@ -58,8 +58,8 @@ def ode(x, u, z, p):
lr = l - lf # Distance from CG to the rear tyre
Fzf = lr * m * g / (2 * l) # Vertical load on front wheels
Fzr = lf * m * g / (2 * l) # Vertical load on rear wheels
eps = 1e-10 # Small epsilon to avoid dividing by zero
# x[0] = ca.fmax(x[0], 1e-10)
eps = 1e-20 # Small epsilon to avoid dividing by zero

dxdt = [
1/m * (m*x[1]*x[2] + 2*mu*Fzf*u[0] + 2*Cf*u[1]**2
- 2*Cf*u[1] * (x[1] + lf*x[2]) / (x[0] + eps) + 2*mu*Fzr*u[0]),
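The eps guard above keeps the tyre-slip terms finite when the longitudinal velocity x[0] passes through zero. A minimal standalone sketch of the pattern (illustrative only, assuming casadi is importable as ca):

import casadi as ca

dx = ca.MX.sym('dx')          # longitudinal velocity, may reach zero
eps = 1e-20                   # tiny offset keeps the division well-defined
slip_term = 1.0 / (dx + eps)  # same pattern as (x[1] + lf*x[2]) / (x[0] + eps)
f = ca.Function('f', [dx], [slip_term])
print(f(0.0))                 # finite (1e20) instead of a division by zero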
@@ -98,35 +98,15 @@ def ode_gp(x, u, z, p):
+ 2*Cr*(x[1] - lf*x[2]) / (x[0] + eps)),
1/Iz * (2*lf*mu*Fzf*u[0]*u[1] + 2*lf*Cf*(x[1] + lf*x[2]) / (x[0] + eps)
- 2*lf*Cf*u[1] - 2*lr*Cr*(x[1] - lf*x[2]) / (x[0] + eps)),
# x[2],
# x[0]*ca.cos(x[3]) - x[1]*ca.sin(x[3]),
# x[0]*ca.sin(x[3]) + x[1]*ca.cos(x[3])
]
return ca.vertcat(*dxdt)


def ode_hybrid(x, u, z, p):
# Model Parameters (Gao et al., 2014)
g = 9.81 # Gravity [m/s^2]
m = 2050 # Vehicle mass [kg]
Iz = 3344 # Yaw inertia [kg*m^2]
Cr = 65000 # Tyre cornering stiffness [N/rad]
Cf = 65000 # Tyre cornering stiffness [N/rad]
mu = 0.5 # Tyre friction coefficient
l = 4.0 # Vehicle length
lf = 2.0 # Distance from CG to the front tyre
lr = l - lf # Distance from CG to the rear tyre
Fzf = lr * m * g / (2 * l) # Vertical load on front wheels
Fzr = lf * m * g / (2 * l) # Vertical load on rear wheels
eps = 1e-10 # Small epsilon to avoid dividing by zero

dxdt = [
0,
0,
0,
x[2],
x[0]*ca.cos(x[3]) - x[1]*ca.sin(x[3]),
x[0]*ca.sin(x[3]) + x[1]*ca.cos(x[3])
u[2],
u[0]*ca.cos(x[0]) - u[1]*ca.sin(x[0]),
u[0]*ca.sin(x[0]) + u[1]*ca.cos(x[0])
]
return ca.vertcat(*dxdt)
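Read together, the added lines reduce ode_hybrid to a purely kinematic model: the GP-modelled body velocities enter through u, and only the yaw angle and global position are integrated. A hedged standalone sketch of the new function (the z and p arguments stay unused, as in the diff):

import casadi as ca
import numpy as np

def ode_hybrid(x, u, z, p):
    # x = (psi, X, Y): yaw angle and global position
    # u = (dx, dy, dpsi): body-frame velocities from the GP-modelled states
    dxdt = [
        u[2],                                    # yaw rate
        u[0]*ca.cos(x[0]) - u[1]*ca.sin(x[0]),   # global X velocity
        u[0]*ca.sin(x[0]) + u[1]*ca.cos(x[0]),   # global Y velocity
    ]
    return ca.vertcat(*dxdt)

x_s = ca.MX.sym('x', 3); u_s = ca.MX.sym('u', 3)
f = ca.Function('f', [x_s, u_s], [ode_hybrid(x_s, u_s, [], [])])
print(f(np.zeros(3), [13.89, 0.0, 0.0]))   # straight-line motion: [0, 13.89, 0]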

@@ -159,24 +139,24 @@ def inequality_constraints(x, covar, u, eps, par):
slip_r = ca.Function('slip_r', [dx_s, dy_s, dpsi_s],
[(dy_s - lr*dpsi_s)/(dx_s + 1e-6)])

con_ineq.append(slip_f(x[0], x[1], x[2], u[1]) - slip_max - eps)
con_ineq.append(slip_f(x[0], x[1], x[2], u[1]) - slip_max - eps[0])
con_ineq_ub.append(0)
con_ineq_lb.append(-np.inf)

con_ineq.append(slip_min - slip_f(x[0], x[1], x[2], u[1]) - eps)
con_ineq.append(slip_min - slip_f(x[0], x[1], x[2], u[1]) - eps[0])
con_ineq_ub.append(0)
con_ineq_lb.append(-np.inf)

con_ineq.append(slip_r(x[0], x[1], x[2]) - slip_max - eps)
con_ineq.append(slip_r(x[0], x[1], x[2]) - slip_max - eps[0])
con_ineq_ub.append(0)
con_ineq_lb.append(-np.inf)

con_ineq.append(slip_min - slip_r(x[0], x[1], x[2]) - eps)
con_ineq.append(slip_min - slip_r(x[0], x[1], x[2]) - eps[0])
con_ineq_ub.append(0)
con_ineq_lb.append(-np.inf)

""" Add road boundry constraints """
con_ineq.append(x[5])
con_ineq.append(x[5] - eps[1])
con_ineq_ub.append(road_bound)
con_ineq_lb.append(-road_bound)

@@ -187,7 +167,7 @@ def inequality_constraints(x, covar, u, eps, par):
ellipse = ca.Function('ellipse', [Xcg_s, Ycg_s, obs_s],
[ ((Xcg_s - obs_s[0]) / (obs_s[2] + car_length))**2
+ ((Ycg_s - obs_s[1]) / (obs_s[3] + car_width))**2] )
con_ineq.append(1 - ellipse(x[4], x[5], par) - eps)
con_ineq.append(1 - ellipse(x[4], x[5], par) - eps[2])

con_ineq_ub.append(0)
con_ineq_lb.append(-np.inf)
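With this change the slack eps becomes a 3-vector with one entry per constraint group: eps[0] softens the slip-angle limits, eps[1] the road boundary, and eps[2] the obstacle ellipses. A small numpy sketch of the soft-constraint idea, mirroring the lam * eps.T @ eps penalty added to the objective in mpc_class.py further down (names illustrative):

import numpy as np

# A hard constraint g(x) <= 0 is softened to g(x) - eps_i <= 0; the slack
# is then discouraged through a quadratic penalty in the stage cost.
eps = np.array([0.0, 0.01, 0.0])   # slip, road-boundary, obstacle slacks
lam = 100                          # penalty weight, as set in the MPC setup
penalty = lam * eps @ eps          # term added to the objective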
@@ -201,9 +181,6 @@ def inequality_constraints(x, covar, u, eps, par):






""" Model options """
dt = 0.05
Nx = 3
@@ -275,8 +252,8 @@ def inequality_constraints(x, covar, u, eps, par):
gp.save_model('gp_car_' + str((n_train + n_update)*N_new + N) + '_dt_' + str(dt) + '_normalize_' + str(normalize) + '_reduced_latin')
else:
gp = GP.load_model('gp_car_' + str((n_train + n_update)*N_new + N) + '_dt_' + str(dt) + '_normalize_' + str(normalize) + '_reduced_latin')
X_test, Y_test = model_gp.generate_training_data(N_test, uub, ulb, xub, xlb, noise=False)
gp.validate(X_test, Y_test)
# X_test, Y_test = model_gp.generate_training_data(N_test, uub, ulb, xub, xlb, noise=False)
# gp.validate(X_test, Y_test)


""" Estimation of model errors using GP and RK4
@@ -316,19 +293,19 @@ def inequality_constraints(x, covar, u, eps, par):
x0 = np.array([13.89, 0.0, 0.0])
x_sp = np.array([13.89, 0., 0.001])
u0 = [0.0, 0.0]

cov0 = np.eye(Nx+Nu)
t = np.linspace(0,20*dt, 20)
#u_test = np.ones((20, 2))* np.array([1e-1, 0.0])
u_i = np.sin(0.01*t) * 0
u_test = np.vstack([0.5*u_i, 0.02*u_i]).T

# Penalty values for LQR
Q = np.diag([.1, 10., 50.])
R = np.diag([.1, 1])

xnames = [r'$\dot{x}$', r'$\dot{y}$', r'$\dot{\psi}$']

gp.predict_compare(x0, u_test, model_gp, feedback=False, x_ref=x_sp, Q=Q, R=R,
methods = ['TA','ME'], num_cols=1, xnames=xnames)
#gp.predict_compare(x0, u_test, model_gp, feedback=True, x_ref=x_sp, Q=Q, R=R,
@@ -342,17 +319,17 @@ def inequality_constraints(x, covar, u, eps, par):

# Create hybrid model with state integrator
Nx = 6
R_n = np.diag([1e-5, 1e-7, 1e-7, 1e-7, 1e-5, 1e-5])
model_hybrid = Model(Nx=Nx, Nu=Nu, ode=ode_hybrid, dt=dt, R=R_n)
R_n = np.diag([1e-5, 1e-8, 1e-8, 1e-8, 1e-5, 1e-5])
model_hybrid = Model(Nx=3, Nu=3, ode=ode_hybrid, dt=dt, R=R_n)
model = Model(Nx=Nx, Nu=Nu, ode=ode, dt=dt, R=R_n)

solver_opts = {}
#solver_opts['ipopt.linear_solver'] = 'ma27'
solver_opts['ipopt.max_cpu_time'] = 10
solver_opts['ipopt.max_cpu_time'] = 2
#solver_opts['ipopt.max_iter'] = 75
solver_opts['expand']= True
solver_opts['ipopt.expect_infeasible_problem'] = 'yes'

# Constraint parameters
slip_min = -4.0 * np.pi / 180
slip_max = 4.0 * np.pi / 180
@@ -363,39 +340,38 @@ def inequality_constraints(x, covar, u, eps, par):
[60, -0.3, .01, .01],
[100, 0.3, .01, .01],
])

# obs = np.array([[-100,2.0,0.01,0.01]
# ])

# Limits in the MPC problem
ulb = [-.5, -.034]
uub = [.5, .034]
xlb = [10.0, -.5, -.15, -.3, .0, -10]
xub = [30.0, .5, .15, .3, 500, 10]

# Penalty values
P = np.diag([.0, 50., 10, .1, 0, 10])
#Q = np.diag([.0, 5., 1., .01, 0, .1])
Q = np.diag([.001, 5., 1., .1, 1e-20, 1])
Q = np.diag([.001, 5., 1., .1, 1e-10, 1])
R = np.diag([.1, 1])
S = np.diag([1, 10])
lam = 100
Af = ca.diag([0,0,0,1,1,1])
Bd = np.vstack([np.eye(3), np.zeros((3,3))])


x0 = np.array([13.89, 0.0, 0.0, 0.0,.0 , 0.0])
x_sp = np.array([13.89, 0., 0., 0., 50., 0. ])
x_sp = np.array([13.89, 0., 0., 0., 100., 0. ])

mpc = MPC(horizon=20*dt, model=model, gp=gp, hybrid=None, Bd=Bd, Af=Af,
mpc = MPC(horizon=20*dt, model=model, gp=gp, hybrid=model_hybrid,
discrete_method='rk4', gp_method='ME',
ulb=ulb, uub=uub, xlb=xlb, xub=xub, Q=Q, P=P, R=R, S=S, lam=lam,
terminal_constraint=None, costFunc='quad', feedback=False,
terminal_constraint=None, costFunc='quad', feedback=True,
solver_opts=solver_opts,
inequality_constraints=inequality_constraints, num_con_par=4
)


x, u = mpc.solve(x0, sim_time=200*dt, x_sp=x_sp, debug=False, noise=False,
x, u = mpc.solve(x0, sim_time=50*dt, x_sp=x_sp, debug=False, noise=False,
con_par_func=constraint_parameters)
mpc.plot()
plot_car(x[:, 4], x[:, 5])
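For reference, discrete_method='rk4' advances the continuous ODE with a classic fourth-order Runge-Kutta step. A hypothetical standalone version of such a one-step map (the repo wraps this inside Model.rk4):

def rk4_step(f, x, u, dt):
    # classic RK4: four slope evaluations combined in a weighted average
    k1 = f(x, u)
    k2 = f(x + dt/2 * k1, u)
    k3 = f(x + dt/2 * k2, u)
    k4 = f(x + dt * k3, u)
    return x + dt/6 * (k1 + 2*k2 + 2*k3 + k4)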
21 changes: 17 additions & 4 deletions gp_mpc/model_class.py
@@ -147,17 +147,30 @@ def discrete_rk4_linearize(self, x0, u0, p0=[]):
Ad = np.array(self.__discrete_rk4_jac_x(x0, u0, p0))
Bd = np.array(self.__discrete_rk4_jac_u(x0, u0, p0))
return Ad, Bd

def rk4_jacobian(self, x0, u0, p0=[]):
""" Linearize the discrete rk4 system around the operating point


def rk4_jacobian_x(self, x0, u0, p0=[]):
""" Return state jacobian evaluated at the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
return self.__discrete_rk4_jac_x(x0, u0, p0)



def rk4_jacobian_u(self, x0, u0, p0=[]):
""" Return input jacobian evaluated at the operating point
x[k+1] = Ax[k] + Bu[k]
# Arguments:
x0: State vector
u0: Input vector
p0: Parameter vector (optional)
"""
return self.__discrete_rk4_jac_u(x0, u0, p0)
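Both accessors simply evaluate precompiled CasADi Jacobian functions of the discrete RK4 map. A minimal sketch of how such functions can be built (illustrative stand-in dynamics, not the repo's exact code):

import casadi as ca

x = ca.MX.sym('x', 3); u = ca.MX.sym('u', 2)
x_next = x + 0.05 * ca.vertcat(u[0], u[1], 0)   # stand-in for the RK4 one-step map

jac_x = ca.Function('jac_x', [x, u], [ca.jacobian(x_next, x)])
jac_u = ca.Function('jac_u', [x, u], [ca.jacobian(x_next, u)])
print(jac_x([13.89, 0, 0], [0, 0]))   # A = d x[k+1] / d x[k]
print(jac_u([13.89, 0, 0], [0, 0]))   # B = d x[k+1] / d u[k]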


def check_rk4_stability(self, x0, u0, d=.1, plot=False):
""" Check if Runga Kutta 4 method is stable around operating point

169 changes: 106 additions & 63 deletions gp_mpc/mpc_class.py
@@ -22,7 +22,7 @@ def __init__(self, horizon, model, gp=None,
ulb=None, uub=None, xlb=None, xub=None, terminal_constraint=None,
feedback=True, gp_method='TA', costFunc='quad', solver_opts=None,
discrete_method='gp', inequality_constraints=None, num_con_par=0,
hybrid=None, Bd=None, Af=None
hybrid=None, Bd=None, Bf=None
):

""" Initialize and build the MPC solver
@@ -83,6 +83,7 @@ def __init__(self, horizon, model, gp=None,
self.__Nu = Nu
self.__num_con_par = num_con_par
self.__model = model
self.__hybrid = hybrid
self.__gp = gp
self.__feedback = feedback
self.__discrete_method = discrete_method
@@ -105,6 +106,7 @@ def __init__(self, horizon, model, gp=None,
self.__R = R
self.__S = S
self.__Bd = Bd
self.__Bf = Bf

if xub is None:
xub = np.full((Ny), np.inf)
@@ -147,10 +149,10 @@ def __init__(self, horizon, model, gp=None,
""" Initialize state variance with the GP noise variance """
if gp is not None:
#TODO: Cannot use gp variance with hybrid model
self.__variance_0 = np.full((Ny), 1e-8) #gp.noise_variance()
self.__variance_0 = np.full((Ny), 1e-10) #gp.noise_variance()
else:
self.__variance_0 = np.full((Ny), 0)
print('var_0')
self.__variance_0 = np.full((Ny), 1e-10)


""" Define which cost function to use """
self.__set_cost_function(costFunc, mean_ref_s, P_s.reshape((Ny, Ny)))
@@ -170,10 +172,9 @@ def __init__(self, horizon, model, gp=None,
""" Create variables struct """
var = ctools.struct_symMX([(
ctools.entry('mean', shape=(Ny,), repeat=Nt + 1),
ctools.entry('sparse_dummy', shape=(Ny*Ny,), repeat=Nt + 1),
ctools.entry('L', shape=(int((Ny**2 - Ny)/2 + Ny),), repeat=Nt + 1),
ctools.entry('v', shape=(Nu,), repeat=Nt),
ctools.entry('eps', repeat=Nt + 1),
ctools.entry('eps', shape=(3,), repeat=Nt + 1),
)])
self.__var = var
self.__num_var = var.size
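Each stage of the NLP now carries the state mean, the packed lower-triangular Cholesky factor L of its covariance, the feedback offset v, and a 3-vector of constraint slacks. A sketch of the resulting struct with illustrative sizes (assumes casadi.tools as ctools):

import casadi.tools as ctools

Ny, Nu, Nt = 6, 2, 20
var = ctools.struct_symMX([(
    ctools.entry('mean', shape=(Ny,), repeat=Nt + 1),
    ctools.entry('L', shape=(int((Ny**2 - Ny)/2 + Ny),), repeat=Nt + 1),
    ctools.entry('v', shape=(Nu,), repeat=Nt),
    ctools.entry('eps', shape=(3,), repeat=Nt + 1),
)])
print(var.size)   # total number of NLP decision variables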
@@ -199,29 +200,50 @@ def __init__(self, horizon, model, gp=None,


""" Input covariance matrix """
covar_x_s = ca.MX.sym('covar_x', Ny, Ny)
covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
K_sx = ca.SX.sym('K', Nu, Ny)

covar_u_func = ca.Function('cov_u', [covar_x_sx, K_sx],
[K_sx @ covar_x_sx @ K_sx.T])

cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
[covar_x_sx @ K_sx.T])
if hybrid is None:
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
Nz_gp = Ny_gp + Nu_gp
covar_x_s = ca.MX.sym('covar_x', Nz_gp, Nz_gp)
covar_x_sx = ca.SX.sym('cov_x', Nz_gp, Nz_gp)
K_sx = ca.SX.sym('K', Nu, Ny)

covar_u_func = ca.Function('cov_u', [covar_x_sx, K_sx],
[K_sx @ covar_x_sx @ K_sx.T])

cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
[covar_x_sx @ K_sx.T])

cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
cov_u = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)

covar_s = ca.MX(Nx, Nx)
cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
covar_s[:Ny, :Ny] = covar_x_s
covar_s[Ny:, Ny:] = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
covar_s[Ny:, :Ny] = cov_xu.T
covar_s[:Ny, Ny:] = cov_xu
covar_func = ca.Function('covar', [covar_x_s], [covar_s])
covar_func = ca.Function('covar', [covar_x_s], [covar_s])


else:
covar_x_s = ca.MX.sym('covar_x', Ny, Ny)
covar_x_sx = ca.SX.sym('cov_x', Ny, Ny)
K_sx = ca.SX.sym('K', Nu, Ny)

covar_u_func = ca.Function('cov_u', [covar_x_sx, K_sx],
[K_sx @ covar_x_sx @ K_sx.T])

cov_xu_func = ca.Function('cov_xu', [covar_x_sx, K_sx],
[covar_x_sx @ K_sx.T])

# covar_s = ca.MX(Nx, Nx)
cov_xu = cov_xu_func(covar_x_s, K_s.reshape((Nu, Ny)))
cov_u = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
covar_s = ca.blockcat(covar_x_s, cov_xu, cov_xu.T, cov_u)
# covar_s[:Ny, :Ny] = covar_x_s
# covar_s[Ny:, Ny:] = covar_u_func(covar_x_s, K_s.reshape((Nu, Ny)))
# covar_s[Ny:, :Ny] = cov_xu.T
# covar_s[:Ny, Ny:] = cov_xu
covar_func = ca.Function('covar', [covar_x_s], [covar_s])
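In both branches the joint covariance of state and input under the linear feedback u = K x is assembled as a 2x2 block matrix. A compact sketch with illustrative sizes (assumes casadi as ca):

import casadi as ca

Ny, Nu = 6, 2
cov_x = ca.SX.sym('cov_x', Ny, Ny)
K = ca.SX.sym('K', Nu, Ny)

cov_xu = cov_x @ K.T                                   # cross term cov(x, u)
cov_u = K @ cov_x @ K.T                                # input covariance under u = K x
covar = ca.blockcat(cov_x, cov_xu, cov_xu.T, cov_u)    # [[S_x, S_x K^T], [K S_x, K S_x K^T]]
covar_func = ca.Function('covar', [cov_x, K], [covar])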


""" Hybrid output covariance matrix """
if hybrid is not None:
if Af is None:
Af = ca.diag([0,0,0,1,1,1])
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
# Nz_gp = Ny_gp + Nu_gp
covar_d_s = ca.SX.sym('covar_d', Ny_gp, Ny_gp)
@@ -230,11 +252,19 @@ def __init__(self, horizon, model, gp=None,
# L_d = ca.SX.sym('L', ca.Sparsity.lower(3))
mean_s = ca.SX.sym('mean', Ny)
u_s = ca.SX.sym('u', Nu)
jac_f = hybrid.rk4_jacobian(mean_s, u_s)
jac_mean = ca.SX(Ny_gp, Ny)
jac_mean[:, :Ny_gp] = self.__gp.jacobian(mean_s[:Ny_gp], u_s, 0)

A_f = hybrid.rk4_jacobian_x(mean_s[Ny_gp:], mean_s[:Ny_gp])
B_f = hybrid.rk4_jacobian_u(mean_s[Ny_gp:], mean_s[:Ny_gp])
C = ca.horzcat(A_f, B_f)
cov = ca.blocksplit(covar_x_s, Ny_gp, Ny_gp)
cov[-1][-1] = covar_d_s
cov_i = ca.blockcat(cov)
cov_f = C @ cov_i @ C.T
cov[0][0] = cov_f
# jac_mean = ca.SX(Ny_gp, Ny)
# jac_mean = self.__gp.jacobian(mean_s[:Ny_gp], u_s, 0)
# A = ca.horzcat(jac_f, Bd)
jac = Af @ jac_f + Bd @ jac_mean
# jac = Bf @ jac_f @ Bf.T + Bd @ jac_mean @ Bd.T

# temp = jac_mean @ covar_x_s
# temp = jac_mean @ L_s
@@ -247,7 +277,8 @@ def __init__(self, horizon, model, gp=None,
covar_x_next_func = ca.Function( 'cov', [mean_s, u_s, covar_d_s, covar_x_s],
#TODO: Clean up
# [A @ cov_i @ A.T])
[Bd @ covar_d_s @ Bd.T + jac @ covar_x_s @ jac.T])
# [Bd @ covar_d_s @ Bd.T + jac @ covar_x_s @ jac.T])
[ca.blockcat(cov)])
# [covar_x_s])
# Cholesky factorization of covariance function
# S_x_next_func = ca.Function( 'S_x', [mean_s, u_s, covar_d_s, covar_x_s],
@@ -256,6 +287,7 @@ def __init__(self, horizon, model, gp=None,

L_s = ca.SX.sym('L', ca.Sparsity.lower(Ny))
L_to_cov_func = ca.Function('cov', [L_s], [L_s @ L_s.T])
cholesky = ca.Function('cholesky', [covar_x_sx], [ca.chol(covar_x_sx).T])

""" Set initial values """
obj = ca.MX(0)
@@ -265,13 +297,11 @@ def __init__(self, horizon, model, gp=None,
con_ineq_ub = []
con_eq.append(var['mean', 0] - mean_0_s)
L_0_s = ca.MX(ca.Sparsity.lower(Ny), var['L', 0])
con_eq.append(L_to_cov_func(L_0_s).reshape((Ny * Ny,1)) - covariance_0_s)
#TODO: Fix initialization of covariance
# con_eq.append(var['L', 0] - covariance_0_s)
L_init = cholesky(covariance_0_s.reshape((Ny,Ny)))
con_eq.append(L_0_s.nz[:]- L_init.nz[:])

u_past = u_0_s

cholesky = ca.Function('cholesky', [covar_x_sx], [ca.chol(covar_x_sx).T])

#TODO: Fix hybrid model
f = hybrid
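The covariance is threaded through the NLP via its lower Cholesky factor: each stage stores the Ny*(Ny+1)/2 nonzeros of L, and equality constraints tie it to the factor of the propagated covariance. A minimal sketch of the two helpers involved (assumes casadi as ca):

import casadi as ca

Ny = 6
L_s = ca.SX.sym('L', ca.Sparsity.lower(Ny))          # packed lower-triangular factor
L_to_cov = ca.Function('cov', [L_s], [L_s @ L_s.T])  # rebuild the full covariance

cov_s = ca.SX.sym('cov', Ny, Ny)
cholesky = ca.Function('chol', [cov_s], [ca.chol(cov_s).T])  # ca.chol is upper-triangular; transpose to lower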

@@ -286,8 +316,10 @@ def __init__(self, horizon, model, gp=None,
# covar_x_t = var['covariance', t].reshape((Ny, Ny))
covar_x_t = L_to_cov_func(L_x)
covar_t = covar_func(covar_x_t)
if hybrid is not None:
covar_t = ca.MX(Nx, Nx)

# Choose which integrator to use
""" Choose which integrator to use """
if discrete_method == 'rk4':
mean_next_pred = model.rk4(mean_t, u_t,[])
covar_x_next_pred = ca.MX(Ny, Ny)
@@ -298,38 +330,37 @@ def __init__(self, horizon, model, gp=None,
# Deterministic hybrid GP model
#TODO: Clean up this...
mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t, covar_t[:5,:5])
mean_next_pred = Af @ f.rk4(mean_t, u_t, []) + Bd @ mean_d
covar_x_next_pred = ca.MX(Ny, Ny)
# mean_next_pred = Bf @ f.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []) + Bd @ mean_d
mean_next_pred = ca.vertcat(mean_d, f.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []))
covar_x_next_pred = ca.MX(Ny, Ny)
elif discrete_method == 'hybrid':
#TODO: Clean up this...
mean_d, covar_d = self.__gp.predict(mean_t[:Ny_gp], u_t, covar_t[:5,:5])
mean_next_pred = Af @ f.rk4(mean_t, u_t, []) + Bd @ mean_d
# mean_next_pred = Bf @ f.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []) + Bd @ mean_d
mean_next_pred = ca.vertcat(mean_d, f.rk4(mean_t[Ny_gp:], mean_t[:Ny_gp], []))
covar_x_next_pred = covar_x_next_func(mean_t, u_t, covar_d, covar_x_t)
else: # Use GP as default
mean_next_pred, covar_x_next_pred = self.__gp.predict(mean_t, u_t, covar_t)

# S = cholesky(covar_x_next_pred)

# Continuity constraints
""" Continuity constraints """
mean_next = var['mean', t + 1]
# covar_x_next = var['covariance', t + 1]
con_eq.append(mean_next_pred - mean_next )

L_x_next = ca.MX(ca.Sparsity.lower(Ny), var['L', t + 1])
covar_x_next = L_to_cov_func(L_x_next).reshape((Ny*Ny,1))

con_eq.append(mean_next_pred - mean_next )
con_eq.append(covar_x_next_pred.reshape((Ny * Ny, 1)) - covar_x_next)
# con_eq.append(S.reshape((Ny * Ny, 1)) - covar_x_next)
# con_eq.append(covar_x_next_pred.reshape((Ny * Ny, 1)) - covar_x_next.reshape((Ny*Ny,1)))
L_x_next_pred = cholesky(covar_x_next_pred)
con_eq.append(L_x_next_pred.nz[:]- L_x_next.nz[:])

# Chance state constraints
# cons = self.__constraint(mean_next_pred, covar_x_next_pred, Hx, quantile_x, xub, xlb)

""" Chance state constraints """
cons = self.__constraint(mean_next, L_x_next, Hx, quantile_x, xub, xlb)
## cons = self.__constraint(mean_next, covar_x_next, Hx, quantile_x, xub, xlb)
con_ineq.extend(cons['con'])
con_ineq_lb.extend(cons['con_lb'])
con_ineq_ub.extend(cons['con_ub'])

# Input constraints
""" Input constraints """
cov_u = covar_u_func(covar_x_t, K_s.reshape((Nu, Ny)))
# cons = self.__constraint(u_t, cov_u, Hu, quantile_u, uub, ulb)
# con_ineq.extend(cons['con'])
@@ -344,24 +375,24 @@ def __init__(self, horizon, model, gp=None,
con_ineq_ub.append(np.full((Nu,), ca.inf))
con_ineq_lb.append(ulb)

# Add extra constraints
""" Add extra constraints """
if inequality_constraints is not None:
cons = inequality_constraints(var['mean', t + 1],
# var['covariance', t + 1],
covar_x_next,
u_t, var['eps', t], con_par)
con_ineq.extend(cons['con_ineq'])
con_ineq_lb.extend(cons['con_ineq_lb'])
con_ineq_ub.extend(cons['con_ineq_ub'])

# Objective function

""" Objective function """
u_delta = u_t - u_past
obj += self.__l_func(var['mean', t], covar_x_t, u_t, cov_u, u_delta) + lam * var['eps', t]
obj += self.__l_func(var['mean', t], covar_x_t, u_t, cov_u, u_delta) + lam * var['eps', t].T @ var['eps', t]
u_past = u_t
L_x = ca.MX(ca.Sparsity.lower(Ny), var['L', Nt])
covar_x_t = L_to_cov_func(L_x)
obj += self.__lf_func(var['mean', Nt], covar_x_t, P_s.reshape((Ny, Ny)))
# obj += self.__lf_func(var['mean', Nt], covar_x_next)


num_eq_con = ca.vertcat(*con_eq).size1()
num_ineq_con = ca.vertcat(*con_ineq).size1()
@@ -478,19 +509,33 @@ def solve(self, x0, sim_time, x_sp=None, u0=None, debug=False, noise=False,
elif self.__discrete_method == 'rk4':
A, B = self.__model.discrete_rk4_linearize(x0, u0)
elif self.__discrete_method == 'hybrid':
A_f, B_f = self.__model.discrete_rk4_linearize(x0, u0)
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
A_f, B_f = self.__hybrid.discrete_rk4_linearize(x0[Ny_gp:], x0[:Ny_gp])
A_gp, B_gp = self.__gp.discrete_linearize(x0[:Ny_gp],
u0, np.eye(Ny_gp+Nu_gp)*1e-8)
A = A_f + self.__Bd @ A_gp @ self.__Bd.T
B = B_f + self.__Bd @ B_gp
A = np.zeros((Ny, Ny))
B = np.zeros((Ny, Nu))
A[:Ny_gp, :Ny_gp] = A_gp
A[Ny_gp:, Ny_gp:] = A_f
A[Ny_gp:, :Ny_gp] = B_f
B[:Ny_gp, :] = B_gp
elif self.__discrete_method == 'd_hybrid':
A_f, B_f = self.__model.discrete_rk4_linearize(x0, u0)
# N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
# A_f, B_f = self.__hybrid.discrete_rk4_linearize(x0[Ny_gp:], x0[:Ny_gp])
# A_gp, B_gp = self.__gp.discrete_linearize(x0[:Ny_gp],
# u0, np.eye(Ny_gp+Nu_gp)*1e-8)
# A = self.__Bf @ A_f @ self.__Bf.T + self.__Bd @ A_gp @ self.__Bd.T
# B = self.__Bf @ B_f + self.__Bd @ B_gp
N_gp, Ny_gp, Nu_gp = self.__gp.get_size()
A_f, B_f = self.__hybrid.discrete_rk4_linearize(x0[Ny_gp:], x0[:Ny_gp])
A_gp, B_gp = self.__gp.discrete_linearize(x0[:Ny_gp],
u0, np.eye(Ny_gp+Nu_gp)*1e-8)
A = A_f + self.__Bd @ A_gp @ self.__Bd.T
B = B_f + self.__Bd @ B_gp
A = np.zeros((Ny, Ny))
B = np.zeros((Ny, Nu))
A[:Ny_gp, :Ny_gp] = A_gp
A[Ny_gp:, Ny_gp:] = A_f
A[Ny_gp:, :Ny_gp] = B_f
B[:Ny_gp, :] = B_gp
elif self.__discrete_method == 'gp':
A, B = self.__gp.discrete_linearize(x0, u0, np.eye(self.__Nx)*1e-8)
K, P, E = lqr(A, B, self.__Q, self.__R)
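The hybrid branches assemble the one-step linearization block-wise: the GP covers the velocity states, while the kinematic model sits below and is driven by them. A numpy sketch of the block layout with stand-in matrices (illustrative only):

import numpy as np

Ny_gp, Nf, Nu = 3, 3, 2                # GP states, kinematic states, inputs
A_gp = np.eye(Ny_gp)                   # stand-in GP linearization d(vel)/d(vel)
B_gp = np.zeros((Ny_gp, Nu))           # stand-in GP linearization d(vel)/d(input)
A_f = np.eye(Nf)                       # stand-in kinematic block d(psi,X,Y)/d(psi,X,Y)
B_f = np.zeros((Nf, Ny_gp))            # stand-in kinematic block driven by the velocities

Ny = Ny_gp + Nf
A = np.zeros((Ny, Ny)); B = np.zeros((Ny, Nu))
A[:Ny_gp, :Ny_gp] = A_gp               # GP block
A[Ny_gp:, Ny_gp:] = A_f                # kinematic block
A[Ny_gp:, :Ny_gp] = B_f                # coupling: velocities drive the kinematics
B[:Ny_gp, :] = B_gp                    # inputs act through the GP states only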
@@ -510,8 +555,6 @@ def solve(self, x0, sim_time, x_sp=None, u0=None, debug=False, noise=False,

""" Update Initial values with measurment"""
self.__var_init['mean', 0] = self.__mean[t]
self.__varlb['mean', 0] = self.__mean[t]
self.__varub['mean', 0] = self.__mean[t]

# Get constraint parameters
if con_par_func is not None:
@@ -524,7 +567,7 @@ def solve(self, x0, sim_time, x_sp=None, u0=None, debug=False, noise=False,
).format(x=self.__num_con_par))

param = ca.vertcat(self.__mean[t, :], self.__x_sp,
self.__covariance[t, :].flatten(), u0, K.flatten(),
cov0.flatten(), u0, K.flatten(),
P.flatten(), con_par)
args = dict(x0=self.__var_init,
lbx=self.__varlb,