This article collects typical usage examples of the numpy.reshape method from Python's autograd.numpy module. If you are wondering what autograd.numpy.reshape does, or how and where to use it, the hand-picked examples below may help. You can also explore the other methods provided by the autograd.numpy module.

The following presents 15 code examples of numpy.reshape, sorted by popularity by default.
Example 1: callback

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def callback(X, y, predict_func, acquisition_function, next_point, new_value):
    plt.cla()

    # Show posterior marginals.
    plot_xs = np.reshape(np.linspace(domain_min, domain_max, 300), (300, 1))
    pred_mean, pred_std = predict_func(plot_xs)
    ax.plot(plot_xs, pred_mean, 'b')
    ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
            np.concatenate([pred_mean - 1.96 * pred_std,
                            (pred_mean + 1.96 * pred_std)[::-1]]),
            alpha=.15, fc='Blue', ec='None')
    ax.plot(X, y, 'kx')
    ax.plot(next_point, new_value, 'ro')

    alphas = acquisition_function(plot_xs)
    ax.plot(plot_xs, alphas, 'r')
    ax.set_ylim([-1.5, 1.5])
    ax.set_xticks([])
    ax.set_yticks([])
    plt.draw()
    plt.pause(1)
Example 2: plot_images

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding),
                            pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax
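A hedged usage sketch for this helper; the data and figure setup here are illustrative, not from the source:

import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as np

fig, ax = plt.subplots()
digits = np.random.rand(10, 28 * 28)  # ten flattened 28x28 "images"
plot_images(digits, ax)
plt.show()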
Example 3: advect

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
       using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(int)
    top_ix = np.floor(center_xs).astype(int)
    rw = center_ys - left_ix               # Relative weight of right-hand cells.
    bw = center_xs - top_ix                # Relative weight of bottom cells.
    left_ix = np.mod(left_ix, rows)        # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix = np.mod(top_ix, cols)
    bot_ix = np.mod(top_ix + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))
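A minimal sketch of calling advect on a toy field (values assumed for illustration). A uniform rightward velocity of half a cell splits each cell's mass between itself and its right-hand neighbour, with wraparound at the edges:

import autograd.numpy as np

f = np.zeros((4, 4))
f[1, 1] = 1.0               # one bright cell
vx = np.full((4, 4), 0.5)   # move half a cell in +x
vy = np.zeros((4, 4))
f_new = advect(f, vx, vy)   # f_new[1, 1] == f_new[1, 2] == 0.5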
Example 4: callback

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def callback(params):
    print("Log marginal likelihood {}".format(log_marginal_likelihood(params)))

    # Show posterior marginals.
    plot_xs = np.reshape(np.linspace(-5, 5, 300), (300, 1))
    pred_mean, pred_cov = combined_predict_fun(params, X, y, plot_xs)
    plot_gp(ax_end_to_end, X, y, pred_mean, pred_cov, plot_xs)
    ax_end_to_end.set_title("X to y")

    layer1_params, layer2_params, hiddens = unpack_all_params(params)
    h_star_mean, h_star_cov = predict_layer_funcs[0](layer1_params, X, hiddens, plot_xs)
    y_star_mean, y_star_cov = predict_layer_funcs[1](layer2_params, np.atleast_2d(hiddens).T, y, plot_xs)
    plot_gp(ax_x_to_h, X, hiddens, h_star_mean, h_star_cov, plot_xs)
    ax_x_to_h.set_title("X to hiddens")

    plot_gp(ax_h_to_y, np.atleast_2d(hiddens).T, y, y_star_mean, y_star_cov, plot_xs)
    ax_h_to_y.set_title("hiddens to y")

    plt.draw()
    plt.pause(1.0/60.0)
Example 5: jacobian_and_value

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def jacobian_and_value(fun, x):
    """
    Makes a function that returns both the Jacobian and value of a function.

    Assumes that the function `fun` broadcasts along the first dimension of
    the input being differentiated with respect to, such that a batch of
    outputs can be computed concurrently for a batch of inputs.
    """
    val = fun(x)
    v_vspace = vspace(val)
    x_vspace = vspace(x)
    x_rep = np.tile(x, (v_vspace.size,) + (1,) * x_vspace.ndim)
    vjp_rep, _ = make_vjp(fun, x_rep)
    jacobian_shape = v_vspace.shape + x_vspace.shape
    basis_vectors = np.array([b for b in v_vspace.standard_basis()])
    jacobian = vjp_rep(basis_vectors)
    return np.reshape(jacobian, jacobian_shape), val
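A short usage sketch, assuming `make_vjp` and `vspace` are in scope (autograd exposes them as `from autograd import make_vjp` and `from autograd.core import vspace`); the test function here is an assumption for illustration:

import autograd.numpy as np

fun = lambda x: np.sin(x) * x  # elementwise, so it broadcasts over a batch axis
jac, val = jacobian_and_value(fun, np.array([0.5, 2.0]))
# jac has shape (2, 2); it is diagonal because fun acts elementwise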
Example 6: hessian_grad_and_value

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def hessian_grad_and_value(fun, x):
    """
    Makes a function that returns the Hessian, gradient & value of a function.

    Assumes that the function `fun` broadcasts along the first dimension of
    the input being differentiated with respect to, such that a batch of
    outputs can be computed concurrently for a batch of inputs.
    """
    def grad_fun(x):
        vjp, val = make_vjp(fun, x)
        return vjp(vspace(val).ones()), val

    x_vspace = vspace(x)
    x_rep = np.tile(x, (x_vspace.size,) + (1,) * x_vspace.ndim)
    vjp_grad, (grad, val) = make_vjp(lambda x: atuple(grad_fun(x)), x_rep)
    hessian_shape = x_vspace.shape + x_vspace.shape
    basis_vectors = np.array([b for b in x_vspace.standard_basis()])
    hessian = vjp_grad((basis_vectors, vspace(val).zeros()))
    return np.reshape(hessian, hessian_shape), grad[0], val[0]
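As with the Jacobian helper above, a minimal sketch; this one also needs `atuple` (i.e. `from autograd.builtins import tuple as atuple`) plus `make_vjp` and `vspace`, and the test function is assumed:

import autograd.numpy as np

fun = lambda x: np.sum(x**3, axis=-1)  # reduces the last axis, broadcasts over a batch axis
hess, grad, val = hessian_grad_and_value(fun, np.array([1.0, 2.0]))
# hess == diag([6., 12.]), grad == [3., 12.], val == 9.0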
Example 7: jacobian_numerical

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def jacobian_numerical(fn, x, step_size=1e-7):
    """Numerically differentiate `fn` w.r.t. its argument `x`."""
    in_array = float_2_array(x).flatten()
    out_array = float_2_array(fn(x)).flatten()

    m = in_array.size
    n = out_array.size
    shape = (n, m)
    jacobian = npa.zeros(shape)

    for i in range(m):
        input_i = in_array.copy()
        input_i[i] += step_size
        arg_i = input_i.reshape(in_array.shape)
        output_i = fn(arg_i).flatten()
        grad_i = (output_i - out_array) / step_size
        # Convert both the grad_i array and its contents to plain data.
        jacobian[:, i] = get_value_arr(get_value(grad_i))

    return jacobian
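For context, the same forward-difference idea without the package-specific helpers (`float_2_array`, `get_value`, `get_value_arr`) can be written as a self-contained sketch in plain NumPy:

import numpy as onp

def jacobian_fd(fn, x, step_size=1e-7):
    # Forward-difference Jacobian of fn at the 1-D array x.
    x = onp.asarray(x, dtype=float)
    f0 = onp.asarray(fn(x), dtype=float).ravel()
    J = onp.zeros((f0.size, x.size))
    for i in range(x.size):
        xi = x.copy()
        xi[i] += step_size                    # perturb one input coordinate
        J[:, i] = (onp.asarray(fn(xi), dtype=float).ravel() - f0) / step_size
    return J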
Example 8: outer_rows

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def outer_rows(X, Y):
    """
    Compute the outer product of each row in X, and Y.

    X: n x dx numpy array
    Y: n x dy numpy array

    Return an n x dx x dy numpy array.
    """
    # The Matlab-style way to do this. According to Jonathan Huggins, it is
    # not efficient. Use einsum instead (see below).
    #n, dx = X.shape
    #dy = Y.shape[1]
    #X_col_rep = X[:, np.tile(range(dx), (dy, 1)).T.reshape(-1)]
    #Y_tile = np.tile(Y, (1, dx))
    #Z = X_col_rep*Y_tile
    #return np.reshape(Z, (n, dx, dy))
    return np.einsum('ij,ik->ijk', X, Y)
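A quick shape check for outer_rows; the arrays are assumed for illustration:

import autograd.numpy as np

X = np.arange(6.0).reshape(3, 2)    # n=3, dx=2
Y = np.arange(12.0).reshape(3, 4)   # n=3, dy=4
Z = outer_rows(X, Y)
print(Z.shape)                      # (3, 2, 4)
assert np.allclose(Z[0], np.outer(X[0], Y[0]))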
Example 9: gradX_y

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def gradX_y(self, X, y):
    """
    Compute the gradient with respect to X (the first argument of the
    kernel). The base class provides a default autograd implementation for
    convenience. Subclasses should override this if it does not work.

    X: nx x d numpy array.
    y: numpy array of length d.

    Return a numpy array G of size nx x d, the derivative of k(X, y) with
    respect to X.
    """
    yrow = np.reshape(y, (1, -1))
    f = lambda X: self.eval(X, yrow)
    g = autograd.elementwise_grad(f)
    G = g(X)
    assert G.shape[0] == X.shape[0]
    assert G.shape[1] == X.shape[1]
    return G

# end class KSTKernel
Example 10: eval

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def eval(self, X, Y):
    """
    Evaluate the Gaussian kernel on the two 2d numpy arrays.

    Parameters
    ----------
    X : n1 x d numpy array
    Y : n2 x d numpy array

    Return
    ------
    K : an n1 x n2 Gram matrix.
    """
    #(n1, d1) = X.shape
    #(n2, d2) = Y.shape
    #assert d1 == d2, 'Dimensions of the two inputs must be the same'
    sumx2 = np.reshape(np.sum(X**2, 1), (-1, 1))
    sumy2 = np.reshape(np.sum(Y**2, 1), (1, -1))
    D2 = sumx2 - 2*np.dot(X, Y.T) + sumy2
    K = np.exp(old_div(-D2, (2.0*self.sigma2)))
    return K
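The two reshape calls implement the standard expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 via broadcasting: sumx2 is a column, sumy2 a row, so D2 holds all pairwise squared distances. A self-contained sanity check of that expansion (arrays assumed; old_div is just division here):

import autograd.numpy as np

X = np.array([[0., 1., 2.], [1., 0., 1.]])   # n1=2, d=3
Y = np.array([[2., 2., 2.], [0., 0., 0.]])   # n2=2, d=3
sumx2 = np.reshape(np.sum(X**2, 1), (-1, 1))
sumy2 = np.reshape(np.sum(Y**2, 1), (1, -1))
D2 = sumx2 - 2*np.dot(X, Y.T) + sumy2
assert np.allclose(D2[0, 1], np.sum((X[0] - Y[1])**2))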
Example 11: forward_pass

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def forward_pass(self, X, hyp):
    Q = self.hidden_dim
    H = np.zeros((X.shape[1], Q))

    # Unpack the recurrent weights U, b, W from the flat hyp vector.
    idx_1 = 0
    idx_2 = idx_1 + self.X_dim*Q
    idx_3 = idx_2 + Q
    idx_4 = idx_3 + Q*Q
    U = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q))
    W = np.reshape(hyp[idx_3:idx_4], (Q, Q))

    # Run the recurrence over the lagged inputs.
    for i in range(0, self.lags):
        H = activation(np.matmul(H, W) + np.matmul(X[i, :, :], U) + b)

    # Unpack the output weights V, c and map the hidden state to outputs.
    idx_1 = idx_4
    idx_2 = idx_1 + Q*self.Y_dim
    idx_3 = idx_2 + self.Y_dim
    V = np.reshape(hyp[idx_1:idx_2], (Q, self.Y_dim))
    c = np.reshape(hyp[idx_2:idx_3], (1, self.Y_dim))
    Y = np.matmul(H, V) + c
    return Y
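The flat hyp vector is laid out as [U, b, W, V, c], so its required length follows directly from the index arithmetic above (dimensions assumed for illustration):

X_dim, Q, Y_dim = 3, 20, 1
n_hyp = X_dim*Q + Q + Q*Q + Q*Y_dim + Y_dim   # U, b, W, V, c in order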
Example 12: forward_pass

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def forward_pass(self, X, Q, hyp):
    H = X
    idx_3 = 0
    layers = Q.shape[0]
    # Hidden layers: slice each weight matrix A and bias b out of hyp.
    for layer in range(0, layers-2):
        idx_1 = idx_3
        idx_2 = idx_1 + Q[layer]*Q[layer+1]
        idx_3 = idx_2 + Q[layer+1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[layer], Q[layer+1]))
        b = np.reshape(hyp[idx_2:idx_3], (1, Q[layer+1]))
        H = activation(np.matmul(H, A) + b)
    # Linear output layer.
    idx_1 = idx_3
    idx_2 = idx_1 + Q[-2]*Q[-1]
    idx_3 = idx_2 + Q[-1]
    A = np.reshape(hyp[idx_1:idx_2], (Q[-2], Q[-1]))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q[-1]))
    mu = np.matmul(H, A) + b
    return mu
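Here Q lists the layer widths and hyp is one flat parameter vector. A sketch of sizing and initializing it (names and values assumed; activation is left undefined in the snippet, np.tanh being a common choice):

import autograd.numpy as np

Q = np.array([1, 20, 20, 1])   # input, two hidden layers, output
n_hyp = sum(int(Q[l]*Q[l+1] + Q[l+1]) for l in range(len(Q) - 1))
hyp = 0.1 * np.random.randn(n_hyp)   # flat parameter vector, laid out layer by layer
# mu = model.forward_pass(X, Q, hyp)   # X of shape (N, Q[0]) -> mu of shape (N, Q[-1])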
Example 13: _do_optim

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def _do_optim(self, p, optim_x0, gn, data, entries='all'):
    optim_bounds = [self.wrt_bounds[p] for k in
                    range(np.prod(self.wrt_dims[p]))]
    result = minimize(fun=self._optim_wrap, jac=True,
                      x0=np.array(optim_x0).reshape(-1),
                      args=(p,
                            {'wrt': p,
                             'p': self.precision_,
                             'm': self.mu_,
                             'a': self.alpha_,
                             'xn': data['obs'],
                             'xln': data['lagged'],
                             'gn': gn,  # post. uni. concat.
                             'entries': entries}),
                      bounds=optim_bounds,
                      method='TNC')
    new_value = result.x.reshape(self.wrt_dims[p])
    return new_value
Example 14: iuwt

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def iuwt(starlet):
    """ Inverse starlet transform.

    Parameters
    ----------
    starlet: Shapelet object
        Starlet to be inverted

    Returns
    -------
    cJ: array
        a 2D image that corresponds to the inverse transform of starlet.
    """
    lvl, n1, n2 = np.shape(starlet)
    n = np.size(h)
    # Coarse scale
    cJ = fft.Fourier(starlet[-1, :, :])
    for i in np.arange(1, lvl):
        # Upsample the filter h by zero-insertion for this scale.
        newh = np.zeros((n + (n - 1) * (2 ** (lvl - i - 1) - 1), 1))
        newh[0::2 ** (lvl - i - 1), 0] = h
        newhT = fft.Fourier(newh.T)
        newh = fft.Fourier(newh)

        # Line convolution
        cnew = fft.convolve(cJ, newh, axes=[0])
        # Column convolution
        cnew = fft.convolve(cnew, newhT, axes=[1])

        cJ = fft.Fourier(cnew.image + starlet[lvl - 1 - i, :, :])
    return np.reshape(cJ.image, (n1, n2))
Example 15: sum_trailing_antidiagonals

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import reshape [as alias]
def sum_trailing_antidiagonals(self):
    trailing_shape = list(self.liks.shape[-2:])
    lik = np.reshape(self.liks, [-1] + trailing_shape)
    # Calls the module-level helper of the same name on the flattened batch.
    lik = sum_trailing_antidiagonals(lik)
    self.liks = np.reshape(lik, [-1] + list(self.liks.shape[1:-2]) +
                           [sum(trailing_shape) - 1])
    self.pop_labels.pop()