This article collects typical usage examples of the numpy.full method from Python's autograd.numpy module. If you are unsure what numpy.full does or how to use it, the curated code examples below may help. You can also explore further usage examples from the autograd.numpy module.
The following presents 14 code examples of numpy.full, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
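Before the examples, a minimal sketch of what numpy.full does (an illustration of my own, not taken from the examples below): it creates an array of a given shape filled with a constant value, and autograd's wrapper behaves like plain NumPy while remaining traceable for differentiation.

import autograd.numpy as np  # autograd's thin wrapper around NumPy

a = np.full(3, 0.5)     # array([0.5, 0.5, 0.5])
b = np.full((2, 2), 7)  # 2x2 array of 7s; dtype is inferred from the fill value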
Example 1: plot_images
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax
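A minimal usage sketch for Example 1; the data and figure setup below are illustrative assumptions, not part of the original snippet:

import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as np

images = np.random.rand(10, 784)  # hypothetical batch: 10 flattened 28x28 images
fig, ax = plt.subplots()
plot_images(images, ax)           # tiles the batch into a 2-row, 5-column grid
plt.show()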
Example 2: __init__
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def __init__(self, sigma2s, wts=None):
    """
    Mixture of isotropic Gaussian kernels:
        sum wts[i] * exp(- ||x - y||^2 / (2 * sigma2s[i]))

    sigma2s: a list/array of squared bandwidths
    wts: a list/array of weights. Defaults to equal weights summing to 1.
    """
    self.sigma2s = sigma2s = np.asarray(sigma2s)
    assert len(sigma2s) > 0
    if wts is None:
        self.wts = wts = np.full(len(sigma2s), 1 / len(sigma2s))
    else:
        self.wts = wts = np.asarray(wts)
        assert len(wts) == len(sigma2s)
    assert all(w >= 0 for w in wts)
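A quick check of the default-weight branch, where np.full appears: with wts=None the constructor gives each of the K bandwidths weight 1/K, so the weights sum to 1. The class name is not shown in the snippet, so the sketch below exercises only that line:

import autograd.numpy as np

sigma2s = np.asarray([0.5, 1.0, 2.0])
wts = np.full(len(sigma2s), 1 / len(sigma2s))  # array([1/3, 1/3, 1/3])
assert np.isclose(wts.sum(), 1.0)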
Example 3: setup
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def setup(self, x_shape):
    """
    Parameters
    ----------
    x_shape : np.array(batch size, time steps, input shape)
    """
    self.input_dim = x_shape[2]
    # Input -> Hidden
    self._params["W"] = self._params.init((self.input_dim, self.hidden_dim))
    # Bias
    self._params["b"] = np.full((self.hidden_dim,), self._params.initial_bias)
    # Hidden -> Hidden layer
    self._params["U"] = self.inner_init((self.hidden_dim, self.hidden_dim))
    # Init gradient arrays
    self._params.init_grad()
    self.hprev = np.zeros((x_shape[0], self.hidden_dim))
Example 4: get_scale
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_scale(n, scale_factor):
    return anp.power(anp.full(n, scale_factor), anp.arange(n))
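Here anp.full builds a constant base vector so that the elementwise anp.power yields the geometric progression [1, s, s^2, ..., s^(n-1)]. An illustrative call:

get_scale(4, 10.0)  # -> array([1., 10., 100., 1000.])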
Example 5: get_power
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_power(self, n):
    p = anp.full(n, 4.0)
    p[-1] = 2.0
    return p
Example 6: _calc_pareto_set
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
    return np.full(self.n_var, 0)
Example 7: _calc_pareto_set
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
    return anp.full(self.n_var, 0)
Example 8: _calc_pareto_set
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
    return anp.full(self.n_var, 0.5)
Example 9: _calc_pareto_set
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
    return np.full(self.n_var, 420.9687)
Example 10: make_grad_softplus
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def make_grad_softplus(ans, x):
    x = np.asarray(x)
    def gradient_product(g):
        return np.full(x.shape, g) * np.exp(x - ans)
    return gradient_product
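This function follows Autograd's VJP-maker convention: given the primitive's output ans and input x, it returns a closure mapping the incoming gradient g to g * exp(x - ans), which equals g * sigmoid(x) since ans = softplus(x). A sketch of how such a maker is typically registered; the softplus definition below is an assumption, not part of the snippet:

from autograd.extend import primitive, defvjp
import autograd.numpy as np

@primitive
def softplus(x):
    return np.log1p(np.exp(x))  # assumed definition of the primitive

defvjp(softplus, make_grad_softplus)  # register the VJP maker for argument 0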
Example 11: logsumexp_vjp
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def logsumexp_vjp(ans, x):
    # If you want to be able to take higher-order derivatives, then all the
    # code inside this function must be itself differentiable by Autograd.
    # This closure multiplies g with the Jacobian of logsumexp (d_ans/d_x).
    # Because Autograd uses reverse-mode differentiation, g contains
    # the gradient of the objective w.r.t. ans, the output of logsumexp.
    # This returned VJP function doesn't close over `x`, so Python can
    # garbage-collect `x` if there are no references to it elsewhere.
    x_shape = x.shape
    return lambda g: np.full(x_shape, g) * np.exp(x - np.full(x_shape, ans))
# Now we tell Autograd that logsumexp has a gradient-making function.
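The snippet stops just before the registration itself. A sketch of how that registration typically looks; the numerically stable logsumexp definition below is assumed rather than shown in the snippet:

from autograd.extend import primitive, defvjp
import autograd.numpy as np

@primitive
def logsumexp(x):
    # Numerically stable log(sum(exp(x)))
    max_x = np.max(x)
    return max_x + np.log(np.sum(np.exp(x - max_x)))

defvjp(logsumexp, logsumexp_vjp)  # tell Autograd how to differentiate logsumexp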
Example 12: uniform_reference_directions
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def uniform_reference_directions(self, n_partitions, n_dim):
    ref_dirs = []
    ref_dir = anp.full(n_dim, anp.inf)
    self.__uniform_reference_directions(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
    return anp.concatenate(ref_dirs, axis=0)
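The private recursive helper __uniform_reference_directions is not included in the snippet. Below is a reconstruction sketch of a Das-Dennis-style recursion that would fit this call site; it must live inside the same class because of name mangling, and its exact body is my assumption, not the original code:

def __uniform_reference_directions(self, ref_dirs, ref_dir, n_partitions, beta, depth):
    # beta = remaining partition units to distribute across dimensions
    if depth == len(ref_dir) - 1:
        ref_dir[depth] = beta / n_partitions
        ref_dirs.append(ref_dir[None, :].copy())
    else:
        for i in range(beta + 1):
            ref_dir[depth] = i / n_partitions
            self.__uniform_reference_directions(
                ref_dirs, ref_dir.copy(), n_partitions, beta - i, depth + 1)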
Example 13: get_power
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_power(n):
    p = anp.full(n, 4.0)
    p[-1] = 2.0
    return p
Example 14: setup
# Required module import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def setup(self, x_shape):
    """
    Naming convention:
        i : input gate
        f : forget gate
        c : cell
        o : output gate

    Parameters
    ----------
    x_shape : np.array(batch size, time steps, input shape)
    """
    self.input_dim = x_shape[2]
    # Input -> Hidden
    W_params = ["W_i", "W_f", "W_o", "W_c"]
    # Hidden -> Hidden
    U_params = ["U_i", "U_f", "U_o", "U_c"]
    # Bias terms
    b_params = ["b_i", "b_f", "b_o", "b_c"]
    # Initialize params
    for param in W_params:
        self._params[param] = self._params.init((self.input_dim, self.hidden_dim))
    for param in U_params:
        self._params[param] = self.inner_init((self.hidden_dim, self.hidden_dim))
    for param in b_params:
        self._params[param] = np.full((self.hidden_dim,), self._params.initial_bias)
    # Combine weights for simplicity
    self.W = [self._params[param] for param in W_params]
    self.U = [self._params[param] for param in U_params]
    # Init gradient arrays for all weights
    self._params.init_grad()
    self.hprev = np.zeros((x_shape[0], self.hidden_dim))
    self.oprev = np.zeros((x_shape[0], self.hidden_dim))