This article collects typical usage examples of the cvxpy.SCS attribute in Python. If you are wondering what cvxpy.SCS is for, how to use it, or want to see it in context, the curated attribute examples below may help. You can also explore further usage examples from the cvxpy module itself.
The following presents 15 code examples of the cvxpy.SCS attribute, sorted by popularity by default.
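As a quick orientation, here is a minimal, self-contained sketch (not drawn from the examples below; it only assumes that cvxpy and the SCS solver are installed) showing how cvxpy.SCS is passed to Problem.solve() to select the SCS solver explicitly:

import cvxpy as cp

# A small least-squares problem with simple constraints.
x = cp.Variable(3)
objective = cp.Minimize(cp.sum_squares(x - 1))
constraints = [x >= 0, cp.sum(x) <= 2]
prob = cp.Problem(objective, constraints)

# Passing solver=cp.SCS forces the SCS solver; additional keyword
# arguments are forwarded to it as solver options.
prob.solve(solver=cp.SCS, verbose=False)
print(prob.status, x.value)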
Example 1: fit
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def fit(self, max_iters=100, eps=1e-2, use_indirect=False, warm_start=False):
    Xv, Yp, pX = self.probX
    Xp, Yv, pY = self.probY
    self.converge.reset()

    # alternating minimization
    while not self.converge.d():
        objX = pX.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
                        use_indirect=use_indirect, warm_start=warm_start)
        Xp.value[:, :-1] = copy(Xv.value)

        # can parallelize this
        for ypj, yvj, pyj in zip(Yp, Yv, pY):
            objY = pyj.solve(solver=cp.SCS, eps=eps, max_iters=max_iters,
                             use_indirect=use_indirect, warm_start=warm_start)
            ypj.value = copy(yvj.value)

        self.converge.obj.append(objX)

    self._finalize_XY(Xv, Yv)
    return self.X, self.Y
Example 2: solve
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def solve(self, X, missing_mask):
    X = check_array(X, force_all_finite=False)

    m, n = X.shape
    S, objective = self._create_objective(m, n)
    constraints = self._constraints(
        X=X,
        missing_mask=missing_mask,
        S=S,
        error_tolerance=self.error_tolerance)
    problem = cvxpy.Problem(objective, constraints)
    problem.solve(
        verbose=self.verbose,
        solver=cvxpy.SCS,
        max_iters=self.max_iters,
        # use_indirect, see: https://github.com/cvxgrp/cvxpy/issues/547
        use_indirect=False)
    return S.value
Example 3: get_inpaint_func_tv
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def get_inpaint_func_tv():
    def inpaint_func(image, mask):
        """Total variation inpainting"""
        inpainted = np.zeros_like(image)
        for c in range(image.shape[2]):
            image_c = image[:, :, c]
            mask_c = mask[:, :, c]
            if np.min(mask_c) > 0:
                # if mask is all ones, no need to inpaint
                inpainted[:, :, c] = image_c
            else:
                h, w = image_c.shape
                inpainted_c_var = cvxpy.Variable(h, w)
                obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
                constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) == cvxpy.mul_elemwise(mask_c, image_c)]
                prob = cvxpy.Problem(obj, constraints)
                # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2)  # SCS solver
                prob.solve()  # default solver
                inpainted[:, :, c] = inpainted_c_var.value
        return inpainted
    return inpaint_func
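The returned closure operates channel by channel on an H x W x C image together with a mask of the same shape (1 = observed pixel, 0 = missing). Below is a hedged usage sketch with synthetic data; note that the snippet above uses the pre-1.0 cvxpy API (cvxpy.Variable(h, w), cvxpy.mul_elemwise), which newer cvxpy releases spell as cvxpy.Variable((h, w)) and cvxpy.multiply:

import numpy as np

np.random.seed(0)
image = np.random.rand(16, 16, 3)                        # synthetic RGB image
mask = (np.random.rand(16, 16, 3) > 0.3).astype(float)   # 1 = keep, 0 = inpaint

inpaint_func = get_inpaint_func_tv()
restored = inpaint_func(image, mask)
print(restored.shape)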
Example 4: ball_con
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def ball_con():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('ball con')
    npr.seed(0)

    n = 2

    A = cp.Parameter((n, n))
    z = cp.Parameter(n)
    p = cp.Parameter(n)
    x = cp.Variable(n)
    t = cp.Variable(n)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - p))
    # TODO automate introduction of variables.
    cons = [0.5 * cp.sum_squares(A * t) <= 1, t == (x - z)]
    prob = cp.Problem(obj, cons)

    L = npr.randn(n, n)
    A.value = L.T
    z.value = npr.randn(n)
    p.value = npr.randn(n)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example 5: relu
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def relu():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('relu')
    npr.seed(0)

    n = 4

    _x = cp.Parameter(n)
    _y = cp.Variable(n)
    obj = cp.Minimize(cp.sum_squares(_y - _x))
    cons = [_y >= 0]
    prob = cp.Problem(obj, cons)

    _x.value = npr.randn(n)
    prob.solve(solver=cp.SCS)
    print(_y.value)
Example 6: test_lml
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def test_lml(self):
    tf.random.set_seed(0)
    k = 2
    x = cp.Parameter(4)
    y = cp.Variable(4)
    obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
    cons = [cp.sum(y) == k]
    problem = cp.Problem(cp.Minimize(obj), cons)
    lml = CvxpyLayer(problem, [x], [y])
    x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)

    with tf.GradientTape() as tape:
        y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
        loss = -tf.math.log(y_opt[1])

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return -np.log(y.value[1])

    grad = tape.gradient(loss, [x_tf])
    numgrad = numerical_grad(f, [x], [x_tf])
    np.testing.assert_almost_equal(grad, numgrad, decimal=3)
Example 7: tune_temp
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
    logits = np.array(logits)

    if binary_search:
        import torch
        import torch.nn.functional as F

        logits = torch.FloatTensor(logits)
        labels = torch.LongTensor(labels)
        t_guess = torch.FloatTensor([0.5 * (lower + upper)]).requires_grad_()

        while upper - lower > eps:
            if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
                upper = 0.5 * (lower + upper)
            else:
                lower = 0.5 * (lower + upper)
            t_guess = t_guess * 0 + 0.5 * (lower + upper)

        t = min([lower, 0.5 * (lower + upper), upper], key=lambda x: float(F.cross_entropy(logits / x, labels)))
    else:
        import cvxpy as cx

        set_size = np.array(logits).shape[0]

        t = cx.Variable()

        expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
                    for i in range(set_size)))
        p = cx.Problem(expr, [lower <= t, t <= upper])
        p.solve()  # p.solve(solver=cx.SCS)

        t = 1 / t.value

    return t
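A quick way to exercise tune_temp is shown below, a sketch only with made-up data (the binary-search branch additionally requires PyTorch; the labels and logit shapes here are arbitrary):

import numpy as np

np.random.seed(0)
logits = np.random.randn(50, 10)             # 50 samples, 10 classes (synthetic)
labels = np.random.randint(0, 10, size=50)   # synthetic integer class labels

temperature = tune_temp(logits, labels, binary_search=True)
print('calibrated temperature:', temperature)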
Example 8: forward_single_np
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def forward_single_np(Q, p, G, h, A, b):
    nz, neq, nineq = p.shape[0], A.shape[0] if A is not None else 0, G.shape[0]

    z_ = cp.Variable(nz)

    obj = cp.Minimize(0.5 * cp.quad_form(z_, Q) + p.T * z_)
    eqCon = A * z_ == b if neq > 0 else None
    if nineq > 0:
        slacks = cp.Variable(nineq)
        ineqCon = G * z_ + slacks == h
        slacksCon = slacks >= 0
    else:
        ineqCon = slacks = slacksCon = None

    cons = [x for x in [eqCon, ineqCon, slacksCon] if x is not None]
    prob = cp.Problem(obj, cons)
    prob.solve()  # solver=cp.SCS, max_iters=5000, verbose=False)
    # prob.solve(solver=cp.SCS, max_iters=10000, verbose=True)
    assert('optimal' in prob.status)
    zhat = np.array(z_.value).ravel()
    nu = np.array(eqCon.dual_value).ravel() if eqCon is not None else None
    if ineqCon is not None:
        lam = np.array(ineqCon.dual_value).ravel()
        slacks = np.array(slacks.value).ravel()
    else:
        lam = slacks = None

    return prob.value, zhat, nu, lam, slacks
Example 9: simple_qp
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def simple_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('simple qp')
    npr.seed(0)

    nx, ncon = 2, 3

    G = cp.Parameter((ncon, nx))
    h = cp.Parameter(ncon)
    x = cp.Variable(nx)
    obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
    cons = [G * x <= h]
    prob = cp.Problem(obj, cons)

    data, chain, inv_data = prob.get_problem_data(solver=cp.SCS)
    param_prob = data[cp.settings.PARAM_PROB]
    print(param_prob.A.A)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon)
    G.value = npr.randn(ncon, nx)
    h.value = G.value.dot(x0) + s0

    prob.solve(solver=cp.SCS)

    delC = npr.randn(param_prob.c.shape[0])[:-1]
    delA = npr.randn(param_prob.A.shape[0])
    num_con = delA.size // (param_prob.x.size + 1)
    delb = delA[-num_con:]
    delA = delA[:-num_con]
    delA = sp.csc_matrix(np.reshape(delA, (num_con, param_prob.x.size)))
    del_param_dict = param_prob.apply_param_jac(delC, delA, delb)
    print(del_param_dict)

    var_map = param_prob.split_solution(npr.randn(param_prob.x.size))
    print(var_map)
    print(param_prob.split_adjoint(var_map))

    print(x.value)
Example 10: full_qp
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def full_qp():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('full qp')
    npr.seed(0)

    nx, ncon_eq, ncon_ineq = 5, 2, 3

    Q = cp.Parameter((nx, nx))
    p = cp.Parameter((nx, 1))
    A = cp.Parameter((ncon_eq, nx))
    b = cp.Parameter(ncon_eq)
    G = cp.Parameter((ncon_ineq, nx))
    h = cp.Parameter(ncon_ineq)
    x = cp.Variable(nx)
    # obj = cp.Minimize(0.5*cp.quad_form(x, Q) + p.T * x)
    obj = cp.Minimize(0.5 * cp.sum_squares(Q @ x) + p.T * x)
    cons = [A * x == b, G * x <= h]
    prob = cp.Problem(obj, cons)

    x0 = npr.randn(nx)
    s0 = npr.randn(ncon_ineq)
    G.value = npr.randn(ncon_ineq, nx)
    h.value = G.value.dot(x0) + s0
    A.value = npr.randn(ncon_eq, nx)
    b.value = A.value.dot(x0)
    L = npr.randn(nx, nx)
    Q.value = L.T  # L.dot(L.T)
    p.value = npr.randn(nx, 1)

    prob.solve(solver=cp.SCS)
    print(x.value)
Example 11: sigmoid
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def sigmoid():
    # print(f'--- {sys._getframe().f_code.co_name} ---')
    print('sigmoid')
    npr.seed(0)

    n = 4

    _x = cp.Parameter((n, 1))
    _y = cp.Variable(n)
    obj = cp.Minimize(-_x.T * _y - cp.sum(cp.entr(_y) + cp.entr(1. - _y)))
    prob = cp.Problem(obj)

    _x.value = npr.randn(n, 1)
    prob.solve(solver=cp.SCS)
    print(_y.value)
Example 12: sdp
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def sdp():
    print('sdp')
    npr.seed(0)

    d = 2
    X = cp.Variable((d, d), PSD=True)
    Y = cp.Parameter((d, d))
    obj = cp.Minimize(cp.trace(Y * X))
    prob = cp.Problem(obj, [X >= 1])

    Y.value = np.abs(npr.randn(d, d))
    print(Y.value.sum())

    prob.solve(solver=cp.SCS, verbose=True)
    print(X.value)
Example 13: test_docstring_example
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def test_docstring_example(self):
    np.random.seed(0)
    tf.random.set_seed(0)

    n, m = 2, 3
    x = cp.Variable(n)
    A = cp.Parameter((m, n))
    b = cp.Parameter(m)
    constraints = [x >= 0]
    objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
    problem = cp.Problem(objective, constraints)
    assert problem.is_dpp()

    cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
    A_tf = tf.Variable(tf.random.normal((m, n)))
    b_tf = tf.Variable(tf.random.normal((m,)))

    with tf.GradientTape() as tape:
        # solve the problem, setting the values of A and b to A_tf and b_tf
        solution, = cvxpylayer(A_tf, b_tf)
        summed_solution = tf.math.reduce_sum(solution)
    gradA, gradb = tape.gradient(summed_solution, [A_tf, b_tf])

    def f():
        problem.solve(solver=cp.SCS, eps=1e-10)
        return np.sum(x.value)

    numgradA, numgradb = numerical_grad(f, [A, b], [A_tf, b_tf])
    np.testing.assert_almost_equal(gradA, numgradA, decimal=4)
    np.testing.assert_almost_equal(gradb, numgradb, decimal=4)
Example 14: test_logistic_regression
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def test_logistic_regression(self):
    np.random.seed(243)
    N, n = 10, 2

    def sigmoid(z):
        return 1 / (1 + np.exp(-z))

    X_np = np.random.randn(N, n)
    a_true = np.random.randn(n, 1)
    y_np = np.round(sigmoid(X_np @ a_true + np.random.randn(N, 1) * 0.5))

    X_tf = tf.Variable(X_np)
    lam_tf = tf.Variable(1.0 * tf.ones(1))

    a = cp.Variable((n, 1))
    X = cp.Parameter((N, n))
    lam = cp.Parameter(1, nonneg=True)
    y = y_np

    log_likelihood = cp.sum(
        cp.multiply(y, X @ a) -
        cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T, axis=0,
                       keepdims=True).T
    )
    prob = cp.Problem(
        cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))
    fit_logreg = CvxpyLayer(prob, [X, lam], [a])

    with tf.GradientTape(persistent=True) as tape:
        weights = fit_logreg(X_tf, lam_tf, solver_args={'eps': 1e-8})[0]
        summed = tf.math.reduce_sum(weights)
    grad_X_tf, grad_lam_tf = tape.gradient(summed, [X_tf, lam_tf])

    def f_train():
        prob.solve(solver=cp.SCS, eps=1e-8)
        return np.sum(a.value)

    numgrad_X_tf, numgrad_lam_tf = numerical_grad(
        f_train, [X, lam], [X_tf, lam_tf], delta=1e-6)
    np.testing.assert_allclose(grad_X_tf, numgrad_X_tf, atol=1e-2)
    np.testing.assert_allclose(grad_lam_tf, numgrad_lam_tf, atol=1e-2)
Example 15: test_sdp
# Required import: import cvxpy [as alias]
# Or: from cvxpy import SCS [as alias]
def test_sdp(self):
    tf.random.set_seed(5)

    n = 3
    p = 3
    C = cp.Parameter((n, n))
    A = [cp.Parameter((n, n)) for _ in range(p)]
    b = [cp.Parameter((1, 1)) for _ in range(p)]

    C_tf = tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
    A_tf = [tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
            for _ in range(p)]
    b_tf = [tf.Variable(tf.random.normal((1, 1), dtype=tf.float64))
            for _ in range(p)]

    X = cp.Variable((n, n), symmetric=True)
    constraints = [X >> 0]
    constraints += [
        cp.trace(A[i] @ X) == b[i] for i in range(p)
    ]
    problem = cp.Problem(cp.Minimize(
        cp.trace(C @ X) - cp.log_det(X) + cp.sum_squares(X)),
        constraints)
    layer = CvxpyLayer(problem, [C] + A + b, [X])
    values = [C_tf] + A_tf + b_tf

    with tf.GradientTape() as tape:
        soln = layer(*values,
                     solver_args={'eps': 1e-10, 'max_iters': 10000})[0]
        summed = tf.math.reduce_sum(soln)
    grads = tape.gradient(summed, values)

    def f():
        problem.solve(cp.SCS, eps=1e-10, max_iters=10000)
        return np.sum(X.value)

    numgrads = numerical_grad(f, [C] + A + b, values, delta=1e-4)
    for g, ng in zip(grads, numgrads):
        np.testing.assert_allclose(g, ng, atol=1e-1)