This article collects typical usage examples of the cvxopt.solvers.options method in Python. If you have been wondering what exactly solvers.options does, how to use it, or where to find it used in real code, the hand-picked examples below may help. You can also explore further usage examples from the module it belongs to, cvxopt.solvers.
The section below shows 15 code examples of the solvers.options method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
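Before the individual examples, here is a minimal sketch (written for this article, not taken from any of the projects below) of what solvers.options actually is: a module-level dictionary that every cvxopt solver reads on each call, so a key set once affects all later calls. The tiny QP is invented purely for illustration.
from cvxopt import matrix, solvers

# solvers.options is a plain dict; these settings apply to all subsequent solver calls
solvers.options['show_progress'] = False   # silence the iteration log
solvers.options['maxiters'] = 200          # cap interior-point iterations

# Tiny QP: minimize x^2 + y^2  subject to  x + y = 1,  x >= 0,  y >= 0
P = matrix([[2.0, 0.0], [0.0, 2.0]])
q = matrix([0.0, 0.0])
G = matrix([[-1.0, 0.0], [0.0, -1.0]])
h = matrix([0.0, 0.0])
A = matrix([[1.0], [1.0]])
b = matrix([1.0])
sol = solvers.qp(P, q, G, h, A, b)
print(sol['x'])                            # roughly [0.5, 0.5]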
Example 1: radius
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def radius(K):
    """Evaluate the radius of the MEB (Minimum Enclosing Ball) of examples
    in feature space.

    Parameters
    ----------
    K : (n, n) ndarray,
        the kernel that represents the data.

    Returns
    -------
    r : np.float64,
        the radius of the minimum enclosing ball of examples in feature space.
    """
    K = validation.check_K(K).numpy()
    n = K.shape[0]
    P = 2 * matrix(K)
    p = -matrix(K.diagonal())
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([1.0] * n).T
    b = matrix([1.0])
    solvers.options['show_progress'] = False
    sol = solvers.qp(P, p, G, h, A, b)
    return abs(sol['primal objective']) ** .5
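To make the QP above concrete, the following is a hypothetical, self-contained run of the same minimum-enclosing-ball program on a synthetic linear kernel; the random data X is an assumption and MKLpy's validation.check_K step is bypassed.
import numpy as np
from cvxopt import matrix, spdiag, solvers

X = np.random.randn(30, 4)
K = X @ X.T                                     # (n, n) linear kernel
n = K.shape[0]
P = 2 * matrix(K)
p = -matrix([float(K[i, i]) for i in range(n)]) # diagonal of K
G = -spdiag([1.0] * n)                          # alpha >= 0
h = matrix([0.0] * n)
A = matrix([1.0] * n).T                         # sum(alpha) == 1
b = matrix([1.0])
solvers.options['show_progress'] = False
sol = solvers.qp(P, p, G, h, A, b)
print(abs(sol['primal objective']) ** 0.5)      # radius of the enclosing ball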
Example 2: __init__
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def __init__(self, transitions, reward, discount, skip_check=False):
    # Initialise a linear programming MDP.
    # import some functions from cvxopt and set them as object methods
    try:
        from cvxopt import matrix, solvers
        self._linprog = solvers.lp
        self._cvxmat = matrix
    except ImportError:
        raise ImportError("The python module cvxopt is required to use "
                          "linear programming functionality.")
    # initialise the MDP. epsilon and max_iter are not needed
    MDP.__init__(self, transitions, reward, discount, None, None,
                 skip_check=skip_check)
    # Set the cvxopt solver to be quiet by default, but ...
    # this doesn't do what I want it to do c.f. issue #3
    if not self.verbose:
        solvers.options['show_progress'] = False
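A brief usage sketch, assuming the snippet above is the LP class from pymdptoolbox (mdptoolbox.mdp.LP); the small forest MDP used as input is generated by the library's own example module.
import mdptoolbox.example
import mdptoolbox.mdp

P, R = mdptoolbox.example.forest()   # small 3-state, 2-action test MDP
lp = mdptoolbox.mdp.LP(P, R, 0.9)    # cvxopt progress output is silenced here
lp.run()
print(lp.policy)                     # optimal policy, e.g. (0, 0, 0)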
Example 3: default_psd_opts
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def default_psd_opts():
    """
    Return default options for the psd method.

    Returns
    -------
    dict : dictionary
        Default options for the psd method.
    """
    return {  # Default option values
        'method': 'cvx',            # solution method (no other currently supported)
        'bas_nonneg': True,         # baseline strictly non-negative
        'noise_range': (.25, .5),   # frequency range for averaging noise PSD
        'noise_method': 'logmexp',  # method of averaging noise PSD
        'lags': 5,                  # number of lags for estimating time constants
        'resparse': 0,              # times to resparse original solution (not supported)
        'fudge_factor': 1,          # fudge factor for reducing time constant bias
        'verbosity': False,         # display optimization details
    }
Example 4: margin
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def margin(K, Y):
    """Evaluate the margin in a classification problem of examples in feature
    space. If the classes are not linearly separable in feature space, then
    the margin obtained is 0.

    Note that it works only for binary tasks.

    Parameters
    ----------
    K : (n, n) ndarray,
        the kernel that represents the data.
    Y : (n) array_like,
        the labels vector.
    """
    K, Y = validation.check_K_Y(K, Y, binary=True)
    n = Y.size()[0]
    Y = [1 if y == Y[0] else -1 for y in Y]
    YY = spdiag(Y)
    P = 2 * (YY * matrix(K.numpy()) * YY)
    p = matrix([0.0] * n)
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([[1.0 if Y[i] == +1 else 0 for i in range(n)],
                [1.0 if Y[j] == -1 else 0 for j in range(n)]]).T
    b = matrix([[1.0], [1.0]], (2, 1))
    solvers.options['show_progress'] = False
    sol = solvers.qp(P, p, G, h, A, b)
    return sol['primal objective'] ** .5
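For illustration, here is a hypothetical, self-contained run of the same margin QP on synthetic, linearly separable data; the arrays X and Y and the linear kernel are assumptions, and MKLpy's validation.check_K_Y step is skipped.
import numpy as np
from cvxopt import matrix, spdiag, solvers

rng = np.random.RandomState(0)
X = np.vstack((rng.randn(10, 2) + 3, rng.randn(10, 2) - 3))
Y = [1] * 10 + [-1] * 10
K = X @ X.T                                   # linear kernel, (n, n)
n = len(Y)
YY = spdiag([float(y) for y in Y])
P = 2 * (YY * matrix(K) * YY)
p = matrix([0.0] * n)
G = -spdiag([1.0] * n)                        # gamma >= 0
h = matrix([0.0] * n)
A = matrix([[1.0 if Y[i] == +1 else 0.0 for i in range(n)],
            [1.0 if Y[j] == -1 else 0.0 for j in range(n)]]).T
b = matrix([1.0, 1.0], (2, 1))                # each class sums to 1
solvers.options['show_progress'] = False
sol = solvers.qp(P, p, G, h, A, b)
print(sol['primal objective'] ** 0.5)         # > 0 since the classes are separable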
Example 5: opt_radius
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def opt_radius(K, init_sol=None):
    n = K.shape[0]
    K = matrix(K.numpy())
    P = 2 * K
    p = -matrix([K[i, i] for i in range(n)])
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([1.0] * n).T
    b = matrix([1.0])
    solvers.options['show_progress'] = False
    sol = solvers.qp(P, p, G, h, A, b, initvals=init_sol)
    radius2 = (-p.T * sol['x'])[0] - (sol['x'].T * K * sol['x'])[0]
    return sol, radius2
Example 6: opt_margin
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def opt_margin(K, YY, init_sol=None):
    '''Optimized margin evaluation.'''
    n = K.shape[0]
    P = 2 * (YY * matrix(K.numpy()) * YY)
    p = matrix([0.0] * n)
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([[1.0 if YY[i, i] == +1 else 0 for i in range(n)],
                [1.0 if YY[j, j] == -1 else 0 for j in range(n)]]).T
    b = matrix([[1.0], [1.0]], (2, 1))
    solvers.options['show_progress'] = False
    sol = solvers.qp(P, p, G, h, A, b, initvals=init_sol)
    margin2 = sol['primal objective']
    return sol, margin2
Example 7: opt_margin
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def opt_margin(K, YY, init_sol=None):
    '''Optimized margin evaluation.'''
    n = K.shape[0]
    P = 2 * (YY * matrix(K) * YY)
    p = matrix([0.0] * n)
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([[1.0 if YY[i, i] == +1 else 0 for i in range(n)],
                [1.0 if YY[j, j] == -1 else 0 for j in range(n)]]).T
    b = matrix([[1.0], [1.0]], (2, 1))
    solvers.options['show_progress'] = False
    sol = solvers.qp(P, p, G, h, A, b, initvals=init_sol)
    margin2 = sol['primal objective']
    return margin2, sol['x'], sol
Example 8: _fit
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def _fit(self, X, Y):
    self.X = X
    values = np.unique(Y)
    Y = [1 if l == values[1] else -1 for l in Y]
    self.Y = Y
    npos = len([1.0 for l in Y if l == 1])
    nneg = len([1.0 for l in Y if l == -1])
    gamma_unif = matrix([1.0 / npos if l == 1 else 1.0 / nneg for l in Y])
    YY = matrix(np.diag(list(matrix(Y))))
    Kf = self.__kernel_definition__()
    ker_matrix = matrix(Kf(X, X).astype(np.double))
    # KLL = (1.0 / (gamma_unif.T * YY * ker_matrix * YY * gamma_unif)[0]) * (1.0 - self.lam) * YY * ker_matrix * YY
    KLL = (1.0 - self.lam) * YY * ker_matrix * YY
    LID = matrix(np.diag([self.lam * (npos * nneg / (npos + nneg))] * len(Y)))
    Q = 2 * (KLL + LID)
    p = matrix([0.0] * len(Y))
    G = -matrix(np.diag([1.0] * len(Y)))
    h = matrix([0.0] * len(Y), (len(Y), 1))
    A = matrix([[1.0 if lab == +1 else 0 for lab in Y],
                [1.0 if lab2 == -1 else 0 for lab2 in Y]]).T
    b = matrix([[1.0], [1.0]], (2, 1))
    solvers.options['show_progress'] = False
    solvers.options['maxiters'] = self.max_iter
    sol = solvers.qp(Q, p, G, h, A, b)
    self.gamma = sol['x']
    if self.verbose:
        print('[KOMD]')
        print('optimization finished, #iter = %d' % sol['iterations'])
        print('status of the solution: %s' % sol['status'])
        print('objval: %.5f' % sol['primal objective'])
    bias = 0.5 * self.gamma.T * ker_matrix * YY * self.gamma
    self.bias = bias
    self.is_fitted = True
    self.ker_matrix = ker_matrix
    return self
Example 9: update
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def update(self, b, x, x2):
    # Update portfolio with no regret
    last_x = x[-1, :]
    leader = np.zeros_like(last_x)
    leader[np.argmax(last_x)] = -1
    b = simplex_proj(self.opt.optimize(leader, b))
    # Manage allocation risk
    b = minimize(
        self.loss,
        b,
        args=(*risk.polar_returns(x2, self.k), last_x),
        constraints=self.cons,
        options={'maxiter': 300},
        tol=1e-6,
        bounds=tuple((0, 1) for _ in range(b.shape[0]))
    )
    # Log variables
    self.log['lr'] = "%.4f" % self.opt.lr
    self.log['mpc'] = "%.4f" % self.mpc
    self.log['risk'] = "%.6f" % b['fun']
    # Return best portfolio
    return b['x']
Example 10: loss
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def loss(self, w, alpha, Z, x):
    # minimize allocation risk
    gamma = self.estimate_gamma(alpha, Z, w)
    # if the experts' mean returns are low and there are no better options, fiat can be chosen
    return self.rc * gamma + w[-1] * ((x.mean()) * x.var()) ** 2
Example 11: fit
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def fit(self, x, y):
    from cvxopt import matrix, solvers
    solvers.options['show_progress'] = False
    check_classification_targets(y)
    x, y = check_X_y(x, y)
    x_s, x_u = x[y == +1, :], x[y == 0, :]
    n_s, n_u = len(x_s), len(x_u)
    p_p = self.prior
    p_n = 1 - self.prior
    p_s = p_p ** 2 + p_n ** 2
    k_s = self._basis(x_s)
    k_u = self._basis(x_u)
    d = k_u.shape[1]
    P = np.zeros((d + 2 * n_u, d + 2 * n_u))
    P[:d, :d] = self.lam * np.eye(d)
    q = np.vstack((
        -p_s / (n_s * (p_p - p_n)) * k_s.T.dot(np.ones((n_s, 1))),
        -p_n / (n_u * (p_p - p_n)) * np.ones((n_u, 1)),
        -p_p / (n_u * (p_p - p_n)) * np.ones((n_u, 1))
    ))
    G = np.vstack((
        np.hstack((np.zeros((n_u, d)), -np.eye(n_u), np.zeros((n_u, n_u)))),
        np.hstack((0.5 * k_u, -np.eye(n_u), np.zeros((n_u, n_u)))),
        np.hstack((k_u, -np.eye(n_u), np.zeros((n_u, n_u)))),
        np.hstack((np.zeros((n_u, d)), np.zeros((n_u, n_u)), -np.eye(n_u))),
        np.hstack((-0.5 * k_u, np.zeros((n_u, n_u)), -np.eye(n_u))),
        np.hstack((-k_u, np.zeros((n_u, n_u)), -np.eye(n_u)))
    ))
    h = np.vstack((
        np.zeros((n_u, 1)),
        -0.5 * np.ones((n_u, 1)),
        np.zeros((n_u, 1)),
        np.zeros((n_u, 1)),
        -0.5 * np.ones((n_u, 1)),
        np.zeros((n_u, 1))
    ))
    sol = solvers.qp(matrix(P), matrix(q), matrix(G), matrix(h))
    self.coef_ = np.array(sol['x'])[:d]
Example 12: find_nearest_valid_distribution
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def find_nearest_valid_distribution(u_alpha, kernel, initial=None, reg=0):
    """(solution, distance_sqd) = find_nearest_valid_distribution(u_alpha, kernel):
    Given an n-vector u_alpha summing to 1, with negative terms,
    finds the distance (squared) to the nearest n-vector summing to 1
    with non-negative terms. The distance is calculated using the n x n
    matrix kernel. Regularization parameter reg --
    min_v (u_alpha - v)^T K (u_alpha - v) + reg * v^T v"""
    P = matrix(2 * kernel)
    n = kernel.shape[0]
    q = matrix(np.dot(-2 * kernel, u_alpha))
    A = matrix(np.ones((1, n)))
    b = matrix(1.)
    G = spmatrix(-1., range(n), range(n))
    h = matrix(np.zeros(n))
    dims = {'l': n, 'q': [], 's': []}
    solvers.options['show_progress'] = False
    solution = solvers.coneqp(
        P,
        q,
        G,
        h,
        dims,
        A,
        b,
        initvals=initial
    )
    distance_sqd = solution['primal objective'] + np.dot(u_alpha.T,
                                                         np.dot(kernel, u_alpha))[0, 0]
    return (solution, distance_sqd)
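A hypothetical call, assuming the snippet's own imports (numpy as np and matrix, spmatrix, solvers from cvxopt) are in scope: with an identity kernel the problem reduces to the Euclidean projection of u_alpha onto the probability simplex, so the expected output can be checked by hand.
import numpy as np

u_alpha = np.array([[0.7], [0.6], [-0.3]])   # sums to 1 but has a negative entry
kernel = np.eye(3)
sol, dist_sqd = find_nearest_valid_distribution(u_alpha, kernel)
print(np.array(sol['x']).ravel())            # roughly [0.55, 0.45, 0.0]
print(dist_sqd)                              # roughly 0.135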
Example 13: calculate
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def calculate(self, x_fit: np.ndarray) -> np.ndarray:
    if x_fit.ndim == 1:
        x_fit = x_fit.reshape(x_fit.shape[0], 1)
    solvers.options['show_progress'] = False
    M = self.X.collapse()
    N, p1 = M.shape
    nvars, p2 = x_fit.T.shape
    C = _numpy_to_cvxopt_matrix(x_fit)
    Q = C.T * C
    lb_A = -np.eye(nvars)
    lb = np.repeat(0, nvars)
    A = _numpy_None_vstack(None, lb_A)
    b = _numpy_None_concatenate(None, -lb)
    A = _numpy_to_cvxopt_matrix(A)
    b = _numpy_to_cvxopt_matrix(b)
    Aeq = _numpy_to_cvxopt_matrix(np.ones((1, nvars)))
    beq = _numpy_to_cvxopt_matrix(np.ones(1))
    M = np.array(M, dtype=np.float64)
    self.map = np.zeros((N, nvars), dtype=np.float32)
    for n1 in range(N):
        d = matrix(M[n1], (p1, 1), 'd')
        q = -d.T * C
        sol = solvers.qp(Q, q.T, A, b, Aeq, beq, None, None)['x']
        self.map[n1] = np.array(sol).squeeze()
    self.map = self.map.reshape(self.X.shape[:-1] + (x_fit.shape[-1],))
    return self.map
Example 14: _design_knockoff_sdp
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def _design_knockoff_sdp(exog):
    """
    Use semidefinite programming to construct a knockoff design
    matrix.

    Requires cvxopt to be installed.
    """
    try:
        from cvxopt import solvers, matrix
    except ImportError:
        raise ValueError("SDP knockoff designs require installation of cvxopt")
    nobs, nvar = exog.shape
    # Standardize exog
    xnm = np.sum(exog**2, 0)
    xnm = np.sqrt(xnm)
    exog = exog / xnm
    Sigma = np.dot(exog.T, exog)
    c = matrix(-np.ones(nvar))
    h0 = np.concatenate((np.zeros(nvar), np.ones(nvar)))
    h0 = matrix(h0)
    G0 = np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0)
    G0 = matrix(G0)
    h1 = 2 * Sigma
    h1 = matrix(h1)
    i, j = np.diag_indices(nvar)
    G1 = np.zeros((nvar * nvar, nvar))
    G1[i * nvar + j, i] = 1
    G1 = matrix(G1)
    solvers.options['show_progress'] = False
    sol = solvers.sdp(c, G0, h0, [G1], [h1])
    sl = np.asarray(sol['x']).ravel()
    xcov = np.dot(exog.T, exog)
    exogn = _get_knmat(exog, xcov, sl)
    return exog, exogn, sl
Example 15: _design_knockoff_sdp
# Required import: from cvxopt import solvers [as alias]
# Or: from cvxopt.solvers import options [as alias]
def _design_knockoff_sdp(exog):
    """
    Use semidefinite programming to construct a knockoff design
    matrix.

    Requires cvxopt to be installed.
    """
    try:
        from cvxopt import solvers, matrix
    except ImportError:
        raise ValueError("SDP knockoff designs require installation of cvxopt")
    nobs, nvar = exog.shape
    # Standardize exog
    xnm = np.sum(exog**2, 0)
    xnm = np.sqrt(xnm)
    exog /= xnm
    Sigma = np.dot(exog.T, exog)
    c = matrix(-np.ones(nvar))
    h0 = np.concatenate((np.zeros(nvar), np.ones(nvar)))
    h0 = matrix(h0)
    G0 = np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0)
    G0 = matrix(G0)
    h1 = 2 * Sigma
    h1 = matrix(h1)
    i, j = np.diag_indices(nvar)
    G1 = np.zeros((nvar * nvar, nvar))
    G1[i * nvar + j, i] = 1
    G1 = matrix(G1)
    solvers.options['show_progress'] = False
    sol = solvers.sdp(c, G0, h0, [G1], [h1])
    sl = np.asarray(sol['x']).ravel()
    xcov = np.dot(exog.T, exog)
    exogn = _get_knmat(exog, xcov, sl)
    return exog, exogn, sl