This article collects typical usage examples of the numpy.prod method from Python's autograd.numpy module. If you have been wondering what numpy.prod does, how to call it, or how it is used in practice, the curated code examples here should help. You can also read more about the enclosing module, autograd.numpy.
Below are 15 code examples of numpy.prod, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
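As a baseline before the project-specific examples: autograd treats numpy.prod as a differentiable primitive, so gradients of products work out of the box. A minimal sketch (the inputs are illustrative):

import autograd.numpy as np
from autograd import grad

f = lambda x: np.prod(x)
x = np.array([2.0, 3.0, 5.0])
# d/dx_i prod(x) = prod(x) / x_i for nonzero entries
print(grad(f)(x))  # [15. 10.  6.]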
Example 1: _evaluate
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def _evaluate(self, x, out, *args, **kwargs):
    # weighted squared sum: sum_j (j + 1) * x_j^2
    l = []
    for j in range(self.n_var):
        l.append((j + 1) * x[:, j] ** 2)
    sum_jx = anp.sum(anp.column_stack(l), axis=1)

    a = anp.sum(anp.cos(x) ** 4, axis=1)
    b = 2 * anp.prod(anp.cos(x) ** 2, axis=1)
    c = anp.sqrt(sum_jx).flatten()
    c = c + (c == 0) * 1e-20  # avoid division by zero at the origin

    f = -anp.absolute((a - b) / c)

    # constraints
    g1 = -anp.prod(x, axis=1) + 0.75
    g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

    out["F"] = f
    out["G"] = anp.column_stack([g1, g2])
Example 2: log_norm
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def log_norm(self):
    try:
        return self._log_norm
    except AttributeError:
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        # normalization of the single-pixel likelihood:
        # 1 / [(2pi)^1/2 (sigma^2)^1/2]
        # with inverse variance weights: sigma^2 = 1/weight
        # full likelihood is sum over all data samples: pixel in images
        # NOTE: this assumes that all pixels are used in likelihood!
        log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        self._log_norm = (
            np.prod(images_.shape) / 2 * np.log(2 * np.pi)
            + np.sum(log_sigma) / 2
        )
        return self._log_norm
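The cached constant is the Gaussian normalization summed over pixels: 1/2 * log(2*pi*sigma^2) per pixel, with sigma^2 = 1/weight. A one-pixel sanity check (assumed, not from the source):

import numpy as np

w = 4.0                                   # inverse variance, so sigma^2 = 0.25
log_norm = 0.5 * np.log(2 * np.pi) + 0.5 * np.log(1 / w)
# equals -log of the Gaussian pdf's peak height, 1/sqrt(2*pi*sigma^2)
assert np.isclose(log_norm, -np.log(1 / np.sqrt(2 * np.pi * (1 / w))))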
Example 3: get_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def get_loss(self, model):
    """Computes the loss/fidelity of a given model w.r.t. the observation

    Parameters
    ----------
    model: array
        A model from `Blend`

    Returns
    -------
    loss: float
        Loss of the model
    """
    model_ = self.render(model)
    images_ = self.images
    weights_ = self.weights

    # properly normalized likelihood
    log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
    cuts = weights_ > 0
    log_sigma[cuts] = np.log(1 / weights_[cuts])
    log_norm = (
        np.prod(images_.shape) / 2 * np.log(2 * np.pi)
        + np.sum(log_sigma) / 2
    )
    return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2)
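With the quadratic term added, this is the full Gaussian negative log-likelihood, so for all-positive weights it should agree with summed scipy.stats.norm.logpdf values. A sketch under that assumption (data here is random and illustrative):

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
images = rng.normal(size=(3, 4))
model = rng.normal(size=(3, 4))
weights = np.full_like(images, 2.0)       # inverse variances

log_norm = (np.prod(images.shape) / 2 * np.log(2 * np.pi)
            + np.sum(np.log(1 / weights)) / 2)
loss = log_norm + 0.5 * np.sum(weights * (model - images) ** 2)
nll = -norm.logpdf(images, loc=model, scale=np.sqrt(1 / weights)).sum()
assert np.isclose(loss, nll)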
Example 4: _do_optim
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def _do_optim(self, p, optim_x0, gn, data, entries='all'):
    # one bound per flattened entry of the parameter being optimized
    optim_bounds = [self.wrt_bounds[p]
                    for k in range(np.prod(self.wrt_dims[p]))]
    result = minimize(fun=self._optim_wrap, jac=True,
                      x0=np.array(optim_x0).reshape(-1),
                      args=(p,
                            {'wrt': p,
                             'p': self.precision_,
                             'm': self.mu_,
                             'a': self.alpha_,
                             'xn': data['obs'],
                             'xln': data['lagged'],
                             'gn': gn,  # post. uni. concat.
                             'entries': entries}),
                      bounds=optim_bounds,
                      method='TNC')
    new_value = result.x.reshape(self.wrt_dims[p])
    return new_value
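jac=True tells scipy.optimize.minimize that the objective returns a (value, gradient) pair, which is exactly what autograd's value_and_grad produces. A minimal sketch of the same pattern (the quadratic objective is illustrative):

import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize

objective = lambda x: np.sum((x - 3.0) ** 2)
result = minimize(value_and_grad(objective), x0=np.zeros(4),
                  jac=True, method='TNC', bounds=[(0.0, 10.0)] * 4)
print(result.x)  # approximately [3. 3. 3. 3.]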
Example 5: obj_func
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def obj_func(self, X_, g, alpha=1):
    f = []
    for i in range(0, self.n_obj):
        _f = (1 + g)
        _f *= anp.prod(anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)
        if i > 0:
            _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)
        f.append(_f)
    f = anp.column_stack(f)
    return f
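With alpha=1 this is the DTLZ2-style spherical construction: when g = 0, the objectives satisfy sum_i f_i^2 = 1. A standalone property check (the function is reimplemented here for illustration and is not the original class method):

import numpy as np
import autograd.numpy as anp

def spherical_objectives(X_, g=0.0, alpha=1):
    f = []
    for i in range(X_.shape[1] + 1):        # n_obj = n_var + 1
        _f = (1 + g) * anp.prod(
            anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)
        if i > 0:
            _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)
        f.append(_f)
    return anp.column_stack(f)

F = spherical_objectives(np.random.rand(5, 2))
print(np.sum(F ** 2, axis=1))               # ~[1. 1. 1. 1. 1.]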
Example 6: _evaluate
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def _evaluate(self, x, out, *args, **kwargs):
    # DTLZ1-style linear construction
    X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]
    g = self.g1(X_M)

    f = []
    for i in range(0, self.n_obj):
        _f = 0.5 * (1 + g)
        _f *= anp.prod(X_[:, :X_.shape[1] - i], axis=1)
        if i > 0:
            _f *= 1 - X_[:, X_.shape[1] - i]
        f.append(_f)

    out["F"] = anp.column_stack(f)
Example 7: _evaluate
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def _evaluate(self, x, out, *args, **kwargs):
    # Griewank function
    out["F"] = 1 + 1 / 4000 * np.sum(np.power(x, 2), axis=1) \
        - np.prod(np.cos(x / np.sqrt(np.arange(1, x.shape[1] + 1))), axis=1)
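Example 7 is the Griewank benchmark. Because every operation here is an autograd primitive, the same expression is directly differentiable; a scalar sketch (the input is illustrative):

import autograd.numpy as np
from autograd import grad

def griewank(x):
    i = np.arange(1, len(x) + 1)
    return 1 + np.sum(x ** 2) / 4000 - np.prod(np.cos(x / np.sqrt(i)))

x = np.ones(5)
print(griewank(x), grad(griewank)(x))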
Example 8: matmul_last_axis
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def matmul_last_axis(self, mat, axes=1):
    # contract the last `axes` axes of self.liks with the first `axes` axes of mat
    reshaped_liks = np.reshape(self.liks,
                               [-1] + [np.prod(self.liks.shape[-axes:])])
    reshaped_mat = np.reshape(mat,
                              [np.prod(mat.shape[:axes], dtype=int)] + [-1])
    reshaped_liks = np.dot(reshaped_liks, reshaped_mat)
    self.liks = np.reshape(reshaped_liks,
                           list(self.liks.shape[:-axes]) + list(mat.shape[axes:]))
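The reshape-dot-reshape here is the standard flatten trick for contracting trailing axes against leading ones; in plain NumPy it should agree with np.tensordot. A quick check (not from the original source):

import numpy as np

liks = np.random.rand(2, 3, 4)
mat = np.random.rand(3, 4, 5)
axes = 2

flat = np.reshape(liks, (-1, np.prod(liks.shape[-axes:])))
out = flat.dot(np.reshape(mat, (np.prod(mat.shape[:axes], dtype=int), -1)))
out = np.reshape(out, list(liks.shape[:-axes]) + list(mat.shape[axes:]))
assert np.allclose(out, np.tensordot(liks, mat, axes=axes))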
Example 9: _reshape
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def _reshape(in_arr, in_sublist, *out_sublists):
    assert len(out_sublists) == 3
    old_sublist = in_sublist
    in_sublist = sum(out_sublists, [])
    in_arr = _transpose(in_arr, old_sublist, in_sublist)
    # in_arr.shape breaks in autograd if it has no dimension
    if in_sublist:
        shapes = {s: i for i, s in zip(in_arr.shape, in_sublist)}
    else:
        shapes = {}
    return np.reshape(in_arr, [np.prod([shapes[s] for s in out_subs], dtype=int)
                               for out_subs in out_sublists])
Example 10: add_shape
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def add_shape(self, name, shape):
    start = self.num_weights
    self.num_weights += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
Example 11: add_weights
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def add_weights(self, name, shape):
    start = self.N
    self.N += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.N), shape)
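Examples 10 and 11 are the same flat-parameter-vector pattern under two attribute names; the matching get method (not shown on this page) slices the flat vector and restores the shape. A minimal self-contained version, modeled on autograd's neural-net examples:

import autograd.numpy as np

class WeightsParser:
    def __init__(self):
        self.idxs_and_shapes = {}
        self.N = 0

    def add_weights(self, name, shape):
        start = self.N
        self.N += np.prod(shape)
        self.idxs_and_shapes[name] = (slice(start, self.N), shape)

    def get(self, vect, name):
        # slice the flat vector and restore the tensor shape
        idxs, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[idxs], shape)

parser = WeightsParser()
parser.add_weights('params', (784, 10))
parser.add_weights('biases', (10,))
params = parser.get(np.zeros(parser.N), 'params')   # shape (784, 10)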
Example 12: build_weights_dict
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def build_weights_dict(self, input_shape):
    # Input shape is anything (all flattened)
    input_size = np.prod(input_shape, dtype=int)
    self.parser = WeightsParser()
    self.parser.add_weights('params', (input_size, self.size))
    self.parser.add_weights('biases', (self.size,))
    return self.parser.N, (self.size,)
Example 13: forward_pass
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def forward_pass(self, inputs, param_vector):
    params = self.parser.get(param_vector, 'params')
    biases = self.parser.get(param_vector, 'biases')
    if inputs.ndim > 2:
        # flatten everything but the batch axis
        inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
    return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
Example 14: load_mnist
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def load_mnist():
    partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:, None] == np.arange(k)[None, :], dtype=int)
    train_images, train_labels, test_images, test_labels = data_mnist.mnist()
    train_images = partial_flatten(train_images) / 255.0
    test_images = partial_flatten(test_images) / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]
    return N_data, train_images, train_labels, test_images, test_labels
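partial_flatten keeps the leading batch axis and collapses the rest, which is what np.prod(x.shape[1:]) computes; it is equivalent to x.reshape(x.shape[0], -1). A one-line check (the shapes are illustrative):

import numpy as np

x = np.zeros((32, 28, 28))
assert np.reshape(x, (x.shape[0], np.prod(x.shape[1:]))).shape == (32, 784)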
Example 15: test_jacobian_against_stacked_grads
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import prod [as alias]
def test_jacobian_against_stacked_grads():
    scalar_funs = [
        lambda x: np.sum(x ** 3),
        lambda x: np.prod(np.sin(x) + np.sin(x)),
        lambda x: grad(lambda y: np.exp(y) * np.tanh(x[0]))(x[1]),
    ]
    vector_fun = lambda x: np.array([f(x) for f in scalar_funs])

    x = npr.randn(5)
    jac = jacobian(vector_fun)(x)
    grads = [grad(f)(x) for f in scalar_funs]
    # each row of the Jacobian equals the gradient of one scalar component
    assert np.allclose(jac, np.vstack(grads))