Commit 8adad8af authored by CAI, JIONGJIAN

Replace GPs_in_gurobi_multidimension.py

parent 3d04b929
@@ -2,6 +2,16 @@
"""
Optimization of GPs_from_GPFlow in Gurobi
minimize
    f(x, m, v) = 0*x + m - v
subject to
    m - K_{x,X} (K_{X,X})^{-1} y = 0
    v - K_{x,x} + K_{x,X} (K_{X,X})^{-1} K_{X,x} = 0
x in R^d, m in R, v in R, y in R^n (constant vector)
where:
    x = [x1, x2], x1, x2 in [-1, 1] (rescaled to [0, 1] by MinMaxScaler below)
    y = x1^2 + x2^2
Author: JIONGJIAN CAI
"""
import gpflow
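# A minimal NumPy sketch of the two posterior identities the Gurobi
# constraints below encode (standard GP regression algebra; K_XX, K_xX, K_xx
# and y are hypothetical precomputed arrays, not names defined at this point):
# m = K_xX @ np.linalg.inv(K_XX) @ y             # posterior mean at x
# v = K_xx - K_xX @ np.linalg.inv(K_XX) @ K_xX   # posterior variance at x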
@@ -34,33 +44,33 @@ from gurobipy import GRB
data = np.genfromtxt("data/regression_2D.csv", delimiter=",")
# use the MinMaxScaler() to normalize the dataset into [0,1]
#scaler = MinMaxScaler()
#scaler.fit(data)
##print(scaler.data_max_)
#data = scaler.transform(data)
scaler = MinMaxScaler()
scaler.fit(data)
#print(scaler.data_max_)
data = scaler.transform(data)
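# A hedged usage note: MinMaxScaler rescales each column to [0, 1] via
# (value - data_min_) / (data_max_ - data_min_), so the optimum found below
# lives in the scaled space; it can be mapped back with, e.g.,
# scaler.inverse_transform(np.array([[x1_s, x2_s, y_s]])), where the *_s
# values are hypothetical scaled coordinates.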
X = data[:, :-1].reshape(-1, 2)
Y = data[:, 2].reshape(-1, 1)
#ax.scatter(X[:,0], X[:,1], Y)
#plt.show()
# Use GPflow to model the dataset and plot the predicted mean and variance
## Use GPflow to model the dataset and plot the predicted mean and variance
#k = gpflow.kernels.Matern32()
#
#print_summary(k)
#
##
#m = gpflow.models.GPR(data=(X, Y), kernel=k, mean_function=None)
#
##
#print_summary(m)
#
#opt = gpflow.optimizers.Scipy()
#opt_logs = opt.minimize(m.training_loss, m.trainable_variables, options=dict(maxiter=100))
#print_summary(m)
#
### generate test points for prediction
## test points must be of shape (N, D)
#XX = np.linspace(-1, 1, 100).reshape(100, 1)
#YY = np.linspace(-1, 1, 100).reshape(100, 1)
##generate test points for prediction
##test points must be of shape (N, D)
#XX = np.linspace(0, 1, 100).reshape(100, 1)
#YY = np.linspace(0, 1, 100).reshape(100, 1)
#mesh_grid = np.meshgrid(XX, YY)
#xx, yy = mesh_grid
#xx, yy = xx.flatten(), yy.flatten()
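## data_points is referenced below but defined in lines collapsed out of this
## diff; a hedged guess at its construction from the flattened mesh, shaped
## (N, 2) as predict_f expects:
#data_points = np.column_stack([xx, yy])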
@@ -80,6 +90,13 @@ Y = data[:, 2].reshape(-1, 1)
#surf = ax.plot_surface(mesh_grid[0], mesh_grid[1], upper)
#surf = ax.plot_surface(mesh_grid[0], mesh_grid[1], lower)
##contour plot
#plt.contourf(mesh_grid[0], mesh_grid[1], mean)
#print(data_points[np.argmin(mean)])
#print(np.min(mean))
#opt_m, opt_v = m.predict_f(data_points[np.argmin(mean)])
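## Where the constants used inside the model below come from (a hedged sketch,
## assuming GPs is the trained gpflow GPR with a Matern32 kernel; the actual
## definitions sit in lines collapsed out of this diff):
# sigma = GPs.kernel.variance.numpy()
# lengthscales = GPs.kernel.lengthscales.numpy()
# noise = GPs.likelihood.variance.numpy()
# K_XX = GPs.kernel(X).numpy() + noise*np.eye(len(X))
# K_XX_inv = np.linalg.inv(K_XX)
# product = K_XX_inv @ Y.flatten()  # so that m == K_xX @ product below
# K_xx = sigma  # Matern32 prior variance at a single point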
try:
# Create a full-space formulation model for the Lower Confidence Bound acquisition function
@@ -94,9 +111,9 @@ try:
# Create a multi-dimensional variable - belonging to R^D
# Use MVar - model.addMVar((D,), vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, name="x")
x = LCB.addMVar(2, vtype=GRB.CONTINUOUS, lb=-1, ub=1, name="x")
x = LCB.addMVar(2, vtype=GRB.CONTINUOUS, lb=0, ub=1, name="x")
m = LCB.addMVar(1, vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, name="m")
v = LCB.addMVar(1, vtype=GRB.CONTINUOUS, name="v")
# v = LCB.addMVar(1, vtype=GRB.CONTINUOUS, name="v")
# the variable bounds matter: if they are not tight, many more nodes may need to be explored
K_xX = LCB.addMVar(len(X), vtype=GRB.CONTINUOUS, name="K_xX")
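# For reference, the Matern 3/2 kernel that the constraints below decompose
# into auxiliary variables (a plain NumPy restatement, a sketch rather than
# original code; r is the Euclidean distance already divided by the lengthscale):
def matern32_ref(r, sigma):
    # k(r) = sigma * (1 + sqrt(3)*r) * exp(-sqrt(3)*r)
    return sigma * (1 + np.sqrt(3)*r) * np.exp(-np.sqrt(3)*r)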
@@ -151,20 +168,20 @@ try:
LCB.addConstr(K_xX @ product == m)
# LCB.addConstr(K_xx - K_xX @ K_XX_inv @ K_xX == v)
## generate list of x and e^x for using addGenConstrPWL() to model y = e^x
# x_min = -np.sqrt(X.shape[1])*np.sqrt(3)
# x_max = 0
# size = 1e-3
# generate list of x and e^x for using addGenConstrPWL() to model y = e^x
x_min = -np.sqrt(3)*np.sqrt(X.shape[1])/lengthscales
x_max = 0
size = 1e-3
# x_pw = np.arange(x_min, x_max, size)
# e_x_pw = np.exp(x_pw)
x_pw = np.arange(x_min, x_max, size)
e_x_pw = np.exp(x_pw)
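# A sanity note on why [x_min, 0] covers the whole exponent range: with x and
# every X[i] in [0, 1]^d, the distance ||x - X[i]|| is at most sqrt(d), so the
# exponent -sqrt(3)*r_xX[i] lies in [-sqrt(3)*sqrt(d)/lengthscales, 0],
# exactly the interval sampled by x_pw above.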
for i in range(len(X)):
    LCB.addConstr(-np.sqrt(3)*r_xX[i] == e_xX_x[i])
    # LCB.addGenConstrPWL(e_xX_x_list[i], e_xX_list[i], x_pw, e_x_pw)
    LCB.addGenConstrPWL(e_xX_x_list[i], e_xX_list[i], x_pw, e_x_pw)
    # LCB.addGenConstrExp(e_xX_x_list[i], e_xX_list[i], options="FuncPieces=1 FuncPieceLength=0.1")
    LCB.addGenConstrExp(e_xX_x_list[i], e_xX_list[i])
    # LCB.addGenConstrExp(e_xX_x_list[i], e_xX_list[i])
    LCB.addConstr(K_xX_list[i] == sigma*(1 + np.sqrt(3)*r_xX_list[i])*e_xX_list[i])
    LCB.addConstr(x-X[i,:] == drift_xX[i,:])
    LCB.addConstr(drift_xX[i,:] @ drift_xX[i,:] == dot_xX[i])
@@ -174,63 +191,69 @@ try:
# LCB.addConstr(r_xX_list[i] == sqrt_r_xX_list[i]/lengthscales)
# Optimize model
LCB.params.FuncPieces = -2
LCB.params.FuncPieceError = 1e-6
# LCB.params.FuncPieces = -2
# LCB.params.FuncPieceError = 1e-3
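# (Per Gurobi's documentation: FuncPieces = -2 makes the solver choose the
# number of pieces per general constraint so that the *relative* approximation
# error stays within FuncPieceError; -1 would bound the absolute error instead)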
LCB.Params.TimeLimit = 300
LCB.optimize()
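# Report the incumbent if one was found within the time limit (a small added
# convenience check; SolCount, X and ObjVal are standard Gurobi attributes)
if LCB.SolCount > 0:
    print("best x found:", x.X)
    print("objective (LCB value):", LCB.ObjVal)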
## reduce the ranges and use a smaller FuncPieceLength to optimize the model
## print(x.getAttr(GRB.Attr.X))
## print(x[0].x)
#
# reduce the ranges and use a smaller FuncPieceLength to optimize the model
# print(x.getAttr(GRB.Attr.X))
# print(x[0].x)
# for i in range(X.shape[1]):
# x[i].lb = max(x[i].lb, x[i].x-0.01)
# x[i].ub = min(x[i].ub, x[i].x+0.01)
# x[i].lb = np.min(x.x)
# x[i].ub = np.max(x.x)
#
# LCB.update()
# LCB.reset()
#
## LCB.params.FuncPieces = 1
# LCB.params.FuncPieceLength = 1e-5
## LCB.params.FuncPieces = -2
## LCB.params.FuncPieceError = 1e-6
#
## LCB.params.Presolve = 2
#
# LCB.optimize()
#
# for i in range(len(X)):
# print(e_xX[i].x)
#
# for i in range(len(X)):
# print(e_xX_x[i].x)
#
## Compute every term in the constraints above to check if they are correct
# opt = np.array([0.694103, -1])
## Use GPflow to compute the predicted mean and variance
# mean_min, var_min = GPs.predict_f(opt.reshape(-1,2))
# print(mean_min)
# print(var_min)
# drift = np.zeros((X.shape[0], X.shape[1]))
# dot = np.zeros(len(X))
# sqrt_r = np.zeros(len(X))
# r = np.zeros(len(X))
# e_x = np.zeros(len(X))
# e = np.zeros(len(X))
# Kx = np.zeros(len(X))
# for i in range(len(X)):
# drift[i,:] = opt - X[i,:]
# dot[i] = np.dot(drift[i,:], drift[i,:])
# sqrt_r[i] = np.sqrt(dot[i])
# r[i] = sqrt_r[i]/lengthscales
# e_x[i] = -np.sqrt(3)*r[i]
## addGenConstrExp()'s error is too large by default
# e[i] = np.exp(e_x[i])
# Kx[i] = sigma*(1 + np.sqrt(3)*r[i])*np.exp(e_x[i])
## Use the constraints above to compute the predicted mean and variance
# opt_m = np.dot(Kx, product)
# opt_v = K_xx - np.dot(Kx, np.dot(K_XX_inv, Kx))
##
### print(x.x)
### print(m.x)
###
### for i in range(len(X)):
### print(e_xX[i].x)
###
### for i in range(len(X)):
### print(e_xX_x[i].x)
###
##
### Compute every term in the constraints above to check if they are correct
## opt = np.array([0.00991187, 0.01])
### Use GPflow to compute the predicted mean and variance
## mean_min, var_min = GPs.predict_f(opt.reshape(-1,2))
## print(mean_min)
## print(var_min)
## drift = np.zeros((X.shape[0], X.shape[1]))
## dot = np.zeros(len(X))
## sqrt_r = np.zeros(len(X))
## r = np.zeros(len(X))
## e_x = np.zeros(len(X))
## e = np.zeros(len(X))
## Kx = np.zeros(len(X))
## for i in range(len(X)):
## drift[i,:] = opt - X[i,:]
## dot[i] = np.dot(drift[i,:], drift[i,:])
## sqrt_r[i] = np.sqrt(dot[i])
## r[i] = sqrt_r[i]/lengthscales
## e_x[i] = -np.sqrt(3)*r[i]
### addGenConstrExp()'s error is too large by default
## e[i] = np.exp(e_x[i])
## Kx[i] = sigma*(1 + np.sqrt(3)*r[i])*np.exp(e_x[i])
### Use the constraints above to compute the predicted mean and variance
## opt_m = np.dot(Kx, product)
## opt_v = K_xx - np.dot(Kx, np.dot(K_XX_inv, Kx))
#
### print(drift_xX[0,:].x)
## print(np.exp(-0.334917))
#
for v in LCB.getVars():
    print('%s %g' % (v.varName, v.x))
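## A hedged cross-check of the minimizer against GPflow's own prediction,
## assuming GPs is the trained GPR model referenced earlier in this file:
# opt = np.array(x.X).reshape(1, -1)
# mean_chk, var_chk = GPs.predict_f(opt)
# print("GPflow mean/var at x*:", mean_chk.numpy(), var_chk.numpy())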