This page collects typical usage examples of the numpy.broadcast_to function in Python. If you have been wondering what exactly numpy.broadcast_to does and how to use it, the hand-picked code examples below should help.
A total of 15 broadcast_to code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
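As a quick primer before the examples (an illustrative sketch, not taken from any of the snippets below): numpy.broadcast_to returns a read-only view in which the broadcast axes have stride 0, so no data is copied.

import numpy as np

row = np.array([1, 2, 3])
tiled = np.broadcast_to(row, (4, 3))     # broadcast view, shape (4, 3)
print(tiled.shape)                       # (4, 3)
print(tiled.strides)                     # (0, 8) for int64: the new axis has stride 0
print(np.shares_memory(tiled, row))      # True: no data was copied
# tiled[0, 0] = 9  would raise ValueError because the view is read-only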
Example 1: get_input_features
def get_input_features(self, mol):
    """Get input features.

    Args:
        mol (Mol): molecule to featurize

    Returns:
        atom_array, adj_array, super_node_x
    """
    type_check_num_atoms(mol, self.max_atoms)
    num_atoms = mol.GetNumAtoms()

    # Construct the atom array and adjacency matrix.
    atom_array = construct_atomic_number_array(mol, out_size=self.out_size)
    adj_array = construct_adj_matrix(mol, out_size=self.out_size)

    # Adjust the adjacency matrix: symmetric normalization by node degree.
    degree_vec = numpy.sum(adj_array[:num_atoms], axis=1)
    degree_sqrt_inv = 1. / numpy.sqrt(degree_vec)
    adj_array[:num_atoms, :num_atoms] *= numpy.broadcast_to(
        degree_sqrt_inv[:, None], (num_atoms, num_atoms))
    adj_array[:num_atoms, :num_atoms] *= numpy.broadcast_to(
        degree_sqrt_inv[None, :], (num_atoms, num_atoms))

    super_node_x = construct_supernode_feature(
        mol, atom_array, adj_array, out_size=self.out_size_super)

    return atom_array, adj_array, super_node_x
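For reference, the two broadcast_to calls above implement the symmetric degree normalization D^{-1/2} A D^{-1/2} of the adjacency matrix. A minimal, self-contained sketch of the same pattern on a hypothetical 3-atom molecule (plain NumPy only, not part of the original preprocessor):

import numpy

adj = numpy.array([[0., 1., 1.],
                   [1., 0., 0.],
                   [1., 0., 0.]])
deg_sqrt_inv = 1. / numpy.sqrt(adj.sum(axis=1))                     # D^{-1/2} as a vector
norm = adj * numpy.broadcast_to(deg_sqrt_inv[:, None], adj.shape)   # scale rows
norm = norm * numpy.broadcast_to(deg_sqrt_inv[None, :], adj.shape)  # scale columns
# norm is now the symmetrically normalized adjacency matrix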
Example 2: texture_along_ray
def texture_along_ray(myradar, var, wind_size=7):
    """
    Compute the field texture along each ray using a user-specified
    window size.

    Parameters
    ----------
    myradar : radar object
        The radar object containing the field.
    var : str
        Name of the field whose texture has to be computed.
    wind_size : int
        Optional. Size of the rolling window used.

    Returns
    -------
    tex : radar field
        The texture of the specified field.

    """
    half_wind = int(wind_size / 2)
    fld = myradar.fields[var]['data']
    tex = np.ma.zeros(fld.shape)
    tex[:] = np.ma.masked
    tex.set_fill_value(get_fillvalue())

    tex_aux = np.ma.std(rolling_window(fld, wind_size), -1)
    tex[:, half_wind:-half_wind] = tex_aux
    # Pad the edges by replicating the first and last valid columns.
    tex[:, 0:half_wind] = np.broadcast_to(
        tex_aux[:, 0].reshape(tex.shape[0], 1), (tex.shape[0], half_wind))
    tex[:, -half_wind:] = np.broadcast_to(
        tex_aux[:, -1].reshape(tex.shape[0], 1), (tex.shape[0], half_wind))

    return tex
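The last two assignments above pad the texture field by replicating its first and last valid columns. A standalone sketch of that edge-padding trick with a toy array (half_wind = 2 assumed for illustration):

import numpy as np

tex_aux = np.arange(12.).reshape(3, 4)   # stand-in for the valid texture values
half_wind = 2
left = np.broadcast_to(tex_aux[:, 0].reshape(3, 1), (3, half_wind))
right = np.broadcast_to(tex_aux[:, -1].reshape(3, 1), (3, half_wind))
padded = np.hstack([left, tex_aux, right])   # shape (3, 8), edge values repeated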
Example 3: _initialize_updated_shapes
def _initialize_updated_shapes(self, session):
    shapes = array_ops.shape_n(self._vars)
    var_shapes = list(map(tuple, session.run(shapes)))

    if self._var_shapes is not None:
        new_old_shapes = zip(self._var_shapes, var_shapes)
        if all([old == new for old, new in new_old_shapes]):
            return

    self._var_shapes = var_shapes
    vars_and_shapes = list(zip(self._vars, self._var_shapes))
    vars_and_shapes_dict = dict(vars_and_shapes)

    packed_bounds = None
    if self._var_to_bounds is not None:
        left_packed_bounds = []
        right_packed_bounds = []
        for var, var_shape in vars_and_shapes:
            shape = list(var_shape)
            bounds = (-np.infty, np.infty)
            if var in self._var_to_bounds:
                bounds = self._var_to_bounds[var]
            # Broadcast scalar or partial bounds to the full variable shape.
            left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
            right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
        packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
    self._packed_bounds = packed_bounds

    self._update_placeholders = [
        array_ops.placeholder(var.dtype) for var in self._vars
    ]
    self._var_updates = [
        var.assign(array_ops.reshape(placeholder, vars_and_shapes_dict[var]))
        for var, placeholder in zip(self._vars, self._update_placeholders)
    ]

    loss_grads = _compute_gradients(self._loss, self._vars)
    equalities_grads = [
        _compute_gradients(equality, self._vars)
        for equality in self._equalities
    ]
    inequalities_grads = [
        _compute_gradients(inequality, self._vars)
        for inequality in self._inequalities
    ]

    self._packed_var = self._pack(self._vars)
    self._packed_loss_grad = self._pack(loss_grads)
    self._packed_equality_grads = [
        self._pack(equality_grads) for equality_grads in equalities_grads
    ]
    self._packed_inequality_grads = [
        self._pack(inequality_grads) for inequality_grads in inequalities_grads
    ]

    dims = [_prod(vars_and_shapes_dict[var]) for var in self._vars]
    accumulated_dims = list(_accumulate(dims))
    self._packing_slices = [
        slice(start, end)
        for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
    ]
Example 4: multinomial
def multinomial(n, p, size=None):
    plates_n = np.shape(n)
    plates_p = np.shape(p)[:-1]
    k = np.shape(p)[-1]

    if size is None:
        size = misc.broadcasted_shape(plates_n, plates_p)

    if not misc.is_shape_subset(plates_n, size):
        raise ValueError("Shape of n does not broadcast to the given size")

    if not misc.is_shape_subset(plates_p, size):
        raise ValueError("Shape of p does not broadcast to the given size")

    # This isn't a very efficient implementation. One could use NumPy's
    # multinomial once for all those plates for which n and p is the same.
    n = np.broadcast_to(n, size)
    p = np.broadcast_to(p, size + (k,))

    x = np.empty(size + (k,))
    for i in misc.nested_iterator(size):
        x[i] = np.random.multinomial(n[i], p[i])

    return x.astype(int)
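The same broadcasting idea in plain NumPy, without the bayespy misc helpers: n and p are expanded to a common plate shape and one multinomial draw is made per plate (toy shapes, illustrative only):

import numpy as np

n = np.array([10, 20, 30])              # plate shape (3,)
p = np.array([0.2, 0.3, 0.5])           # shared event probabilities, k = 3
size = (3,)
n_b = np.broadcast_to(n, size)
p_b = np.broadcast_to(p, size + (3,))   # every plate gets the same p row
x = np.empty(size + (3,), dtype=int)
for i in np.ndindex(size):
    x[i] = np.random.multinomial(n_b[i], p_b[i])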
Example 5: roll_dist_1d
def roll_dist_1d(y, kernel):
    n = kernel.size
    samples = rolling_window_1d(y, n)
    a = samples.sum(axis=1).reshape((-1, 1))
    samples = samples / np.broadcast_to(a, samples.shape)
    K = np.broadcast_to(kernel, samples.shape)
    return np.sum(np.square(samples - K), axis=1)
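As an aside, the explicit broadcast_to calls in this example are not strictly required; ordinary arithmetic broadcasts the (N, 1) row sums and the (n,) kernel automatically. A small equivalence check on random toy data:

import numpy as np

samples = np.random.rand(5, 4)
kernel = np.random.rand(4)
a = samples.sum(axis=1).reshape((-1, 1))
explicit = np.sum(np.square(samples / np.broadcast_to(a, samples.shape)
                            - np.broadcast_to(kernel, samples.shape)), axis=1)
implicit = np.sum(np.square(samples / a - kernel), axis=1)
assert np.allclose(explicit, implicit)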
Example 6: test_integer_split_2D_rows_greater_max_int32
def test_integer_split_2D_rows_greater_max_int32(self):
    a = np.broadcast_to([0], (1 << 32, 2))
    res = array_split(a, 4)
    chunk = np.broadcast_to([0], (1 << 30, 2))
    tgt = [chunk] * 4
    for i in range(len(tgt)):
        assert_equal(res[i].shape, tgt[i].shape)
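This test works because broadcast_to creates a zero-stride view, so even the (2**32, 2) array above costs essentially no memory. A smaller sketch of the same property:

import numpy as np

zero = np.array([0])
chunk = np.broadcast_to(zero, (1 << 20, 2))
print(chunk.shape)      # (1048576, 2)
print(chunk.strides)    # (0, 0): every element aliases the single stored zero
print(zero.nbytes)      # only the original element is allocated (8 bytes for int64)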
Example 7: _assign_to_class
def _assign_to_class(zh, zdr, kdp, rhohv, relh, mass_centers,
                     weights=np.array([1., 1., 1., 0.75, 0.5])):
    """
    Assigns a hydrometeor class to each radar range bin by computing
    the distance between the radar variables and the class centroids.

    Parameters
    ----------
    zh, zdr, kdp, rhohv, relh : radar field
        Variables used for the assignment, normalized to [-1, 1] values.
    mass_centers : matrix
        Centroids normalized to [-1, 1] values.
    weights : array
        Optional. The weight given to each variable.

    Returns
    -------
    hydroclass : int array
        The index corresponding to the assigned class.
    min_dist : float array
        The minimum distance to the centroids.

    """
    # prepare data
    nrays = zh.shape[0]
    nbins = zdr.shape[1]
    nclasses = mass_centers.shape[0]
    nvariables = mass_centers.shape[1]

    data = np.ma.array([zh, zdr, kdp, rhohv, relh])
    weights_mat = np.broadcast_to(
        weights.reshape(nvariables, 1, 1),
        (nvariables, nrays, nbins))
    dist = np.ma.zeros((nclasses, nrays, nbins), dtype='float64')

    # compute distance: masked entries will not contribute to the distance
    for i in range(nclasses):
        centroids_class = mass_centers[i, :]
        centroids_class = np.broadcast_to(
            centroids_class.reshape(nvariables, 1, 1),
            (nvariables, nrays, nbins))
        dist[i, :, :] = np.ma.sqrt(np.ma.sum(
            ((centroids_class - data)**2.) * weights_mat, axis=0))

    # use very large fill_value so that masked entries will be sorted at the
    # end. There should not be any masked entry anyway
    class_vec = dist.argsort(axis=0, fill_value=10e40)

    # get minimum distance. Acts as a confidence value
    dist.sort(axis=0, fill_value=10e40)  # in-place sort; sort() returns None
    min_dist = dist[0, :, :]

    # Entries with non-valid reflectivity values are set to 0 (No class)
    mask = np.ma.getmaskarray(zh)
    hydroclass = class_vec[0, :, :] + 1
    hydroclass[mask] = 0

    return hydroclass, min_dist
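A compact, self-contained illustration of the weighted-distance computation above, with made-up shapes (2 classes, 3 variables, a 2x2 ray/bin grid) and plain ndarrays instead of masked arrays:

import numpy as np

nclasses, nvariables, nrays, nbins = 2, 3, 2, 2
data = np.random.rand(nvariables, nrays, nbins)
weights = np.array([1., 1., 0.5])
mass_centers = np.random.rand(nclasses, nvariables)

weights_mat = np.broadcast_to(weights.reshape(nvariables, 1, 1),
                              (nvariables, nrays, nbins))
dist = np.zeros((nclasses, nrays, nbins))
for i in range(nclasses):
    centroid = np.broadcast_to(mass_centers[i].reshape(nvariables, 1, 1),
                               (nvariables, nrays, nbins))
    dist[i] = np.sqrt(np.sum(((centroid - data) ** 2.) * weights_mat, axis=0))
hydroclass = dist.argmin(axis=0) + 1   # closest centroid per bin, 1-based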
Example 8: setup_node_coords
def setup_node_coords(shape, spacing=1., origin=0.):
    spacing = np.broadcast_to(spacing, 2)
    origin = np.broadcast_to(origin, 2)

    rows = np.arange(shape[0], dtype=float) * spacing[0] + origin[0]
    cols = np.arange(shape[1], dtype=float) * spacing[1] + origin[1]

    return setup_node_coords_rectilinear((rows, cols))
Example 9: __init__
def __init__(self, shape, spacing=(1., 1.), origin=(0., 0.)):
    spacing = np.broadcast_to(spacing, 2)
    origin = np.broadcast_to(origin, 2)

    node_y_and_x = (np.arange(shape[0]) * spacing[0] + origin[0],
                    np.arange(shape[1]) * spacing[1] + origin[1])

    super(DualUniformRectilinearGraph, self).__init__(node_y_and_x)
Example 10: analytic_dipole_setup
def analytic_dipole_setup(nside, nfreq, sigma=0.4, z0_cza=None):
    def transform_basis(nside, jones, z0_cza, R_z0):
        npix = hp.nside2npix(nside)
        hpxidx = np.arange(npix)
        cza, ra = hp.pix2ang(nside, hpxidx)

        fR = R_z0
        tb, pb = rotate_sphr_coords(fR, cza, ra)

        cza_v = t_hat_cart(cza, ra)
        ra_v = p_hat_cart(cza, ra)
        tb_v = t_hat_cart(tb, pb)

        fRcza_v = np.einsum('ab...,b...->a...', fR, cza_v)
        fRra_v = np.einsum('ab...,b...->a...', fR, ra_v)

        cosX = np.einsum('a...,a...', fRcza_v, tb_v)
        sinX = np.einsum('a...,a...', fRra_v, tb_v)

        basis_rot = np.array([[cosX, sinX], [-sinX, cosX]])
        basis_rot = np.transpose(basis_rot, (2, 0, 1))

        return np.einsum('...ab,...bc->...ac', jones, basis_rot)

    if z0_cza is None:
        z0_cza = np.radians(120.72)

    npix = hp.nside2npix(nside)
    hpxidx = np.arange(npix)
    th, phi = hp.pix2ang(nside, hpxidx)

    R_z0 = hp.rotator.Rotator(rot=[0, -np.degrees(z0_cza)])

    th_l, phi_l = R_z0(th, phi)
    phi_l[phi_l < 0] += 2. * np.pi

    ct, st = np.cos(th_l), np.sin(th_l)
    cp, sp = np.cos(phi_l), np.sin(phi_l)

    jones_dipole = np.array([
        [ct * cp, -sp],
        [ct * sp, cp]
    ], dtype=np.complex128).transpose(2, 0, 1)

    jones_c = transform_basis(nside, jones_dipole, z0_cza, np.array(R_z0.mat))

    G = np.exp(-(th_l / sigma)**2. / 2.)
    G = np.broadcast_to(G, (2, 2, npix)).T

    jones_c *= G

    jones_out = np.broadcast_to(jones_c, (nfreq, npix, 2, 2))

    return jones_out
Example 11: scalar_broadcast_match
def scalar_broadcast_match(a, b):
    """Returns arguments as np.array; if one is a scalar it is broadcast to the other one's shape.
    """
    a, b = np.atleast_1d(a, b)
    if a.size == 1 and b.size != 1:
        a = np.broadcast_to(a, b.shape)
    elif b.size == 1 and a.size != 1:
        b = np.broadcast_to(b, a.shape)
    return a, b
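A quick usage example for this helper (assuming it has been defined or imported as above):

import numpy as np

a, b = scalar_broadcast_match(2.5, np.array([1., 2., 3.]))
print(a)   # [2.5 2.5 2.5]  (a read-only broadcast view)
print(b)   # [1. 2. 3.]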
Example 12: dist2D
def dist2D(dist: pd.DataFrame,
           ranges: pd.DataFrame,
           nlevels: int = 16,
           nx: int = 2,
           size: int = 6,
           colorbar: bool = True,
           name: str = 'dist') -> plt.Figure:
    """
    Plot 2D probability distributions.

    Parameters
    ----------
    dist : Multiindexed dataframe with force field as primary
        index and distributions as created by dist2D().
    ranges : Multiindexed dataframe with force field as primary
        index and edges as created by dist1D().
    nlevels : Number of contour levels to use.
    nx : Number of plots per row.
    size : Relative size of each plot.
    colorbar : If true, will plot a colorbar.
    name : Name of the distribution.

    Returns
    -------
    fig : matplotlib figure.

    """
    # Setup plotting parameters
    nplots = dist.shape[1]
    xsize, ysize = nx, (nplots // nx) + 1
    cmap = plt.get_cmap('viridis')
    fig = plt.figure(figsize=(xsize * size, ysize * size))

    for i, k in enumerate(dist.keys()):
        # Get keys for both CVs
        kx, ky = k.split('.')

        # Prepare plotting grid (np.meshgrid doesn't work)
        X = np.broadcast_to(ranges[kx], dist[k].unstack().shape)
        Y = np.broadcast_to(ranges[ky], dist[k].unstack().shape).T
        Z = dist[k].unstack().values.T

        # Contour levels taking inf into account
        levels = np.linspace(np.amin(Z[~np.isinf(Z)]),
                             np.amax(Z[~np.isinf(Z)]), nlevels)

        ax = fig.add_subplot(ysize, xsize, i + 1)
        cm = ax.contourf(X, Y, Z, cmap=cmap, levels=levels)
        ax.set_xlabel(kx)
        ax.set_ylabel(ky)
        ax.set_title(name)
        if colorbar:
            fig.colorbar(cm)

    return fig
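The grid construction in this example appears to assume that both CVs use the same number of bins, so the unstacked distribution is square. A standalone sketch of how broadcast_to builds the coordinate matrices (toy edges, hypothetical names):

import numpy as np

nbins = 4
x_edges = np.linspace(0., 3., nbins)     # stand-in for ranges[kx]
y_edges = np.linspace(-1., 1., nbins)    # stand-in for ranges[ky]
X = np.broadcast_to(x_edges, (nbins, nbins))      # X[i, j] == x_edges[j]
Y = np.broadcast_to(y_edges, (nbins, nbins)).T    # Y[i, j] == y_edges[i]
# X, Y, and a matching (nbins, nbins) Z array can be passed straight to plt.contourf.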
Example 13: __init__
def __init__(self, shape, spacing=(1., 1.), origin=(0., 0.)):
    spacing = np.broadcast_to(spacing, 2)
    origin = np.broadcast_to(origin, 2)

    rows = np.arange(shape[0], dtype=float) * spacing[0] + origin[0]
    cols = np.arange(shape[1], dtype=float) * spacing[1] + origin[1]

    super(UniformRectilinearGraph, self).__init__((rows, cols))
Example 14: _get_merged_embeddings
def _get_merged_embeddings(data_dict, mapping_fn, out_prefix):
    region_names = data_dict['region_names']
    region_weights = data_dict['region_weights']

    squeezed = region_weights.ndim == 1
    if squeezed:
        region_weights = region_weights[:, np.newaxis]
    n_subsets = region_weights.shape[1]

    mapped_names = [mapping_fn(r) for r in region_names]
    m_names = sorted(set(mapped_names))
    m_names_lookup = {n: i for i, n in enumerate(m_names)}

    transform = np.zeros(
        (len(m_names), len(region_names), n_subsets))
    for r_i, (m, w) in enumerate(zip(mapped_names, region_weights)):
        transform[m_names_lookup[m], r_i, :] = w

    m_weights = transform.sum(axis=1)

    # normalize transform so that its sum along axis 1 is 1
    # this is kind of gross to allow for zero sums...maybe there's a better way
    nz = np.broadcast_to((m_weights != 0)[:, np.newaxis, :], transform.shape)
    transform[nz] /= \
        np.broadcast_to(m_weights[:, np.newaxis, :], transform.shape)[nz]

    ret = {'{}_names'.format(out_prefix): m_names,
           '{}_weights'.format(out_prefix): m_weights}
    for k in data_dict:
        if k.startswith('emb_'):
            print("Mapping {}...".format(k), end='', file=sys.stderr)
            emb = data_dict[k]
            if squeezed:
                emb = emb[:, :, np.newaxis]

            # need to do a matrix multiply for each subset:
            #  - np.einsum('grs,rfs->gfs') would do this, but doesn't call BLAS
            #  - rolling the subset axis to the front and calling np.matmul
            #    would do this, but it just calls einsum anyway:
            #    https://github.com/numpy/numpy/issues/7569
            out = np.empty((n_subsets, len(m_names), emb.shape[1]))
            for i in range(n_subsets):
                np.dot(transform[:, :, i], emb[:, :, i], out=out[i])
            ret[k] = np.rollaxis(out, 0, 3)

            if squeezed:
                ret[k] = ret[k][:, :, 0]
            print("done", file=sys.stderr)
        elif k in {'region_names', 'region_weights'}:
            pass
        else:
            ret[k] = data_dict[k]
    return ret
Example 15: mfunc
def mfunc(self, x):
    N, n = x.shape
    if n != self._n:
        raise Exception("Input dimension mismatch")

    p = np.broadcast_to(self._p, (N, self._m, self._n))
    q = np.broadcast_to(self._q, (N, self._m, self._n))
    r = np.broadcast_to(self._r, (N, self._m, self._n))

    X = np.broadcast_to(x, (self._m, N, self._n))
    X = np.swapaxes(X, 0, 1)

    self._M = self._mfunc(X, p, q, r)
    return self._M
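A short standalone sketch of the broadcasting pattern used here: per-rule parameter arrays of shape (m, n) and a batch of N inputs are expanded to a common (N, m, n) shape so that any elementwise membership function can be evaluated in one shot (toy shapes; the surrounding class attributes are assumptions):

import numpy as np

N, m, n = 5, 4, 3
x = np.random.rand(N, n)                 # batch of N input vectors
p = np.random.rand(m, n)                 # one parameter row per rule
P = np.broadcast_to(p, (N, m, n))        # parameters replicated over the batch
X = np.swapaxes(np.broadcast_to(x, (m, N, n)), 0, 1)   # inputs replicated over rules
assert P.shape == X.shape == (N, m, n)   # X[k, j] == x[k] for every rule j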