This article collects typical usage examples of the Python method tensorflow.compat.v1.newaxis. If you are wondering what v1.newaxis does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its containing module, tensorflow.compat.v1.
The following shows 15 code examples of v1.newaxis, sorted by popularity by default.
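Before turning to the examples, here is a minimal, self-contained sketch of what indexing with tf.newaxis does. It is not one of the 15 examples below, and it assumes TensorFlow 2.x with the tensorflow.compat.v1 compatibility API in graph mode; tf.newaxis simply inserts a size-1 dimension (equivalent to tf.expand_dims) and is usually used to set up broadcasting.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])                      # shape [2, 3]
row = x[tf.newaxis, ...]                                # shape [1, 2, 3]
col = x[..., tf.newaxis]                                # shape [2, 3, 1]
# Broadcast a per-row scale of shape [2, 1] against x of shape [2, 3].
scaled = x * tf.constant([10.0, 20.0])[:, tf.newaxis]

with tf.Session() as sess:
  print(sess.run([tf.shape(row), tf.shape(col)]))       # [1 2 3], [2 3 1]
  print(sess.run(scaled))                               # [[10. 20. 30.], [80. 100. 120.]]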
Example 1: sample_q
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def sample_q(
    self, targets, targets_mask, decoder_self_attention_bias, n_samples,
    temp, **kwargs):
  hparams = self._hparams
  batch_size, targets_max_length = common_layers.shape_list(targets_mask)[:2]
  q_params = ops.posterior("posterior", hparams, targets, targets_mask,
                           decoder_self_attention_bias, **kwargs)
  q_dist = gops.diagonal_normal(q_params, "posterior")
  loc, scale = q_dist.loc, q_dist.scale
  z_shape = [batch_size, targets_max_length, hparams.latent_size]
  iw_z_shape = [n_samples*batch_size, targets_max_length, hparams.latent_size]
  if n_samples == 1:
    noise = tf.random_normal(z_shape, stddev=temp)
    z_q = loc + scale * noise
    log_q_z = q_dist.log_prob(z_q)  # [B, L, C]
  else:
    noise = tf.random_normal([n_samples] + z_shape, stddev=temp)
    z_q = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] * noise
    log_q_z = q_dist.log_prob(z_q)  # [K, B, L, C]
    z_q = tf.reshape(z_q, iw_z_shape)
    log_q_z = tf.reshape(log_q_z, iw_z_shape)
  return z_q, log_q_z, q_dist
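The tf.newaxis trick in sample_q is prepending a sample axis to loc and scale so they broadcast against K independent noise draws. A stripped-down sketch of just that step, with made-up shapes rather than the hparams of the original model:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

n_samples, batch_size, length, latent_size = 4, 2, 5, 8
loc = tf.zeros([batch_size, length, latent_size])          # [B, L, C]
scale = tf.ones([batch_size, length, latent_size])         # [B, L, C]
noise = tf.random_normal([n_samples, batch_size, length, latent_size])
# loc/scale gain a leading size-1 sample axis and broadcast against noise.
z = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] * noise  # [K, B, L, C]
z = tf.reshape(z, [n_samples * batch_size, length, latent_size])

with tf.Session() as sess:
  print(sess.run(tf.shape(z)))  # [8 5 8]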
Example 2: reduce_sum_over_lc
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def reduce_sum_over_lc(x, x_mask):
  """Returns sum of x (over L and C) given the actual length and pad.

  Args:
    x: input. (B,L,C)
    x_mask: binary padding mask. (B,L)

  Returns:
    sum of x. (B)
  """
  if x.shape.rank == 3 and x_mask.shape.rank == 2:
    x_mask = x_mask[..., tf.newaxis]
  else:
    tf.logging.info("x: {}, x_mask: {}".format(x.shape.rank, x_mask.shape.rank))
    raise ValueError("Dimension not supported.")
  mean = x * x_mask
  return tf.reduce_sum(mean, axis=[1, 2])  # sum over L, C
Example 3: reduce_mean_over_bl
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def reduce_mean_over_bl(x, x_mask):
  """Returns average of x (over B and L) given the actual length and pad.

  Args:
    x: input. (B,L,C)
    x_mask: binary padding mask. (B,L)

  Returns:
    mean of x. (C)
  """
  if x.shape.rank == 3 and x_mask.shape.rank == 2:
    x_mask = x_mask[..., tf.newaxis]
  else:
    tf.logging.info("x: {}, x_mask: {}".format(x.shape.rank, x_mask.shape.rank))
    raise ValueError("Dimension not supported.")
  mean = x * x_mask
  mean = tf.reduce_sum(mean, axis=[0, 1])  # sum over B, L
  return mean / tf.reduce_sum(x_mask)
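A usage sketch for the two reduction helpers above, with a toy batch whose second sequence is half padding (this assumes the functions are in scope and that the inputs have known static ranks, since both helpers branch on x.shape.rank):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.ones([2, 4, 3])                            # (B, L, C)
x_mask = tf.constant([[1., 1., 1., 1.],
                      [1., 1., 0., 0.]])          # (B, L); 0 marks padding
per_example_sum = reduce_sum_over_lc(x, x_mask)   # (B,)  -> [12., 6.]
channel_mean = reduce_mean_over_bl(x, x_mask)     # (C,)  -> [1., 1., 1.]

with tf.Session() as sess:
  print(sess.run([per_example_sum, channel_mean]))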
Example 4: call
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def call(self, inputs):
  batch_shape = tf.shape(inputs)[:-1]
  length = tf.shape(inputs)[-1]
  ngram_range_counts = []
  for n in range(self.minval, self.maxval):
    # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping
    # remainder elements. Each n-vector is an ngram.
    reshaped_inputs = tf.reshape(
        inputs[..., :(n * (length // n))],
        tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0))
    # Count the number of times each ngram appears in the input. We do so by
    # checking whether each n-vector in the input is equal to each n-vector
    # in a Tensor of all possible ngrams. The comparison is batched between
    # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor
    # of shape [..., input_dim**n, 1, n].
    ngrams = tf.reshape(
        list(np.ndindex((self.input_dim,) * n)),
        [1] * (len(inputs.shape)-1) + [self.input_dim**n, 1, n])
    is_ngram = tf.equal(
        tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32),
                      axis=-1),
        n)
    ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1)
    ngram_range_counts.append(ngram_counts)
  return tf.concat(ngram_range_counts, axis=-1)
Example 5: _compute_auxiliary_structure
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def _compute_auxiliary_structure(self, contents_and_mask):
  """Compute segment and position metadata."""
  contents = contents_and_mask[:, :self._num_sequences]
  start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
                       dtype=INDEX_DTYPE)

  segment = tf.cumsum(start_mask, axis=0)
  uniform_count = tf.ones_like(segment[:, 0])
  position = []
  for i in range(self._num_sequences):
    segment_slice = segment[:, i]
    counts = tf.math.segment_sum(uniform_count, segment[:, i])
    position.append(tf.range(self._packed_length) - tf.cumsum(
        tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
  position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)

  # Correct for padding tokens.
  pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
  segment *= pad_mask
  position *= pad_mask

  return segment, position
Example 6: get_in_out_from_samples
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def get_in_out_from_samples(mesh, npoints, sample_factor=10, std=0.01):
  """Get in/out point samples from a given mesh.

  Args:
    mesh: trimesh mesh. Original mesh to sample points from.
    npoints: int, number of points to sample on the mesh surface.
    sample_factor: int, number of samples to pick per surface point.
    std: float, std of samples to generate.

  Returns:
    surface_samples: [npoints, 6], where first 3 dims are xyz, last 3 dims are
      normals (nx, ny, nz).
    near_surface_samples: [npoints*sample_factor, 4], where the last dimension
      is the signed offset along the normal from the surface point.
  """
  surface_point_samples, fid = mesh.sample(int(npoints), return_index=True)
  surface_point_normals = mesh.face_normals[fid]
  offsets = np.random.randn(int(npoints), sample_factor, 1) * std
  near_surface_samples = (surface_point_samples[:, np.newaxis, :] +
                          surface_point_normals[:, np.newaxis, :] * offsets)
  near_surface_samples = np.concatenate([near_surface_samples, offsets],
                                        axis=-1)
  near_surface_samples = near_surface_samples.reshape([-1, 4])
  surface_samples = np.concatenate([surface_point_samples,
                                    surface_point_normals], axis=-1)
  return surface_samples, near_surface_samples
Example 7: get_in_out_from_ray
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def get_in_out_from_ray(points_from_ray, sample_factor=10, std=0.01):
  """Get sample points from points from ray.

  Args:
    points_from_ray: [npts, 6], where first 3 dims are xyz, last 3 are ray dir.
    sample_factor: int, number of samples to pick per surface point.
    std: float, std of samples to generate.

  Returns:
    near_surface_samples: [npts*sample_factor, 4], where last dimension is
      distance to surface point.
  """
  surface_point_samples = points_from_ray[:, :3]
  surface_point_normals = points_from_ray[:, 3:]

  # make sure normals are normalized to unit length
  n = surface_point_normals
  surface_point_normals = n / (np.linalg.norm(n, axis=1, keepdims=True)+1e-8)

  npoints = points_from_ray.shape[0]
  offsets = np.random.randn(npoints, sample_factor, 1) * std
  near_surface_samples = (surface_point_samples[:, np.newaxis, :] +
                          surface_point_normals[:, np.newaxis, :] * offsets)
  near_surface_samples = np.concatenate([near_surface_samples, offsets],
                                        axis=-1)
  near_surface_samples = near_surface_samples.reshape([-1, 4])
  return near_surface_samples
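The np.newaxis pattern shared by the two samplers above offsets every surface point along its normal by several random amounts at once. It works with NumPy alone; a sketch with random stand-in data rather than a real mesh or ray dump:

import numpy as np

npoints, sample_factor, std = 5, 10, 0.01
points = np.random.rand(npoints, 3)                     # xyz surface points
normals = np.random.randn(npoints, 3)
normals /= np.linalg.norm(normals, axis=1, keepdims=True) + 1e-8
offsets = np.random.randn(npoints, sample_factor, 1) * std
# Insert a samples axis so each point/normal broadcasts against its offsets.
near = points[:, np.newaxis, :] + normals[:, np.newaxis, :] * offsets
near = np.concatenate([near, offsets], axis=-1).reshape([-1, 4])
print(near.shape)  # (50, 4) == (npoints * sample_factor, 4)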
Example 8: regular_grid_interpolation
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def regular_grid_interpolation(grid,
                               pts,
                               min_grid_value=(0, 0, 0),
                               max_grid_value=(1, 1, 1)):
  """Regular grid interpolator; returns interpolation values.

  Args:
    grid: `[batch_size, *size, features]` tensor, input feature grid.
    pts: `[batch_size, num_points, dim]` tensor, coordinates of points whose
      value in each dim lies within the range
      (min_grid_value[dim], max_grid_value[dim]).
    min_grid_value: tuple, minimum value in each dimension corresponding to the
      grid.
    max_grid_value: tuple, maximum value in each dimension corresponding to the
      grid.

  Returns:
    vals: `[batch_size, num_points, features]` tensor, interpolated values.
  """
  lats, weights, _ = get_interp_coefficients(grid, pts, min_grid_value,
                                             max_grid_value)
  vals = tf.reduce_sum(lats * weights[..., tf.newaxis], axis=-2)
  return vals
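get_interp_coefficients is not shown on this page, but the final reduce_sum can be illustrated on its own. Assuming trilinear interpolation, each query point gets features from 8 lattice corners plus 8 scalar weights, and weights[..., tf.newaxis] aligns the weights with the feature axis before summing over the corners; the shapes below are stand-ins:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch_size, num_points, features = 2, 16, 32
lats = tf.random_normal([batch_size, num_points, 8, features])  # corner features
weights = tf.random_uniform([batch_size, num_points, 8])        # corner weights
vals = tf.reduce_sum(lats * weights[..., tf.newaxis], axis=-2)  # [B, num_points, features]

with tf.Session() as sess:
  print(sess.run(tf.shape(vals)))  # [2 16 32]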
Example 9: _init_graph
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def _init_graph(self):
  """Initialize computation graph for tensorflow."""
  with self.graph.as_default():
    self.encoder = g2v.GridEncoder(
        in_grid_res=self.in_grid_res,
        num_filters=self.num_filters,
        codelen=self.codelen,
        name='g2v')
    self.global_step = tf.get_variable(
        'global_step', shape=[], dtype=tf.int64)
    self.grid_ph = tf.placeholder(
        tf.float32, shape=[self.gres, self.gres, self.gres])
    self.start_ph = tf.placeholder(tf.int32, shape=[self.grid_batch, 3])
    self.ingrid = self._batch_slice(self.grid_ph, self.start_ph,
                                    self.in_grid_res, self.grid_batch)
    self.ingrid = self.ingrid[..., tf.newaxis]
    self.lats = self.encoder(self.ingrid, training=False)  # [gb, codelen]
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    self.saver.restore(self.sess, self.ckpt)
Example 10: waves_to_stfts
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def waves_to_stfts(self, waves):
  """Convert from waves to complex stfts.

  Args:
    waves: Tensor of the waveform, shape [batch, time, 1].

  Returns:
    stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].
  """
  waves_padded = tf.pad(waves, [[0, 0], [self._pad_l, self._pad_r], [0, 0]])
  stfts = tf.signal.stft(
      waves_padded[:, :, 0],
      frame_length=self._nfft,
      frame_step=self._nhop,
      fft_length=self._nfft,
      pad_end=False)[:, :, :, tf.newaxis]
  stfts = stfts[:, :, 1:] if self._discard_dc else stfts[:, :, :-1]
  stft_shape = stfts.get_shape().as_list()[1:3]
  if tuple(stft_shape) != tuple(self._spec_shape):
    raise ValueError(
        'Spectrogram returned the wrong shape {}, is not the same as the '
        'constructor spec_shape {}.'.format(stft_shape, self._spec_shape))
  return stfts
Example 11: stfts_to_waves
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def stfts_to_waves(self, stfts):
  """Convert from complex stfts to waves.

  Args:
    stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].

  Returns:
    waves: Tensor of the waveform, shape [batch, time, 1].
  """
  dc = 1 if self._discard_dc else 0
  nyq = 1 - dc
  stfts = tf.pad(stfts, [[0, 0], [0, 0], [dc, nyq], [0, 0]])
  waves_resyn = tf.signal.inverse_stft(
      stfts=stfts[:, :, :, 0],
      frame_length=self._nfft,
      frame_step=self._nhop,
      fft_length=self._nfft,
      window_fn=tf.signal.inverse_stft_window_fn(
          frame_step=self._nhop))[:, :, tf.newaxis]
  # Python does not allow rslice of -0
  if self._pad_r == 0:
    return waves_resyn[:, self._pad_l:]
  else:
    return waves_resyn[:, self._pad_l:-self._pad_r]
Example 12: stfts_to_specgrams
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def stfts_to_specgrams(self, stfts):
  """Converts stfts to specgrams.

  Args:
    stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].

  Returns:
    specgrams: Tensor of log magnitudes and instantaneous frequencies,
      shape [batch, time, freq, 2].
  """
  stfts = stfts[:, :, :, 0]
  logmag = self._safe_log(tf.abs(stfts))
  phase_angle = tf.angle(stfts)
  if self._ifreq:
    p = spectral_ops.instantaneous_frequency(phase_angle)
  else:
    p = phase_angle / np.pi
  return tf.concat(
      [logmag[:, :, :, tf.newaxis], p[:, :, :, tf.newaxis]], axis=-1)
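The newaxis use in this helper (and in its inverse below) just restores a trailing channel axis after operating on [batch, time, freq] tensors. A standalone sketch of the same stacking, with random complex values instead of real STFTs and without the class's _safe_log/_ifreq options:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

stfts = tf.complex(tf.random_normal([2, 10, 4]), tf.random_normal([2, 10, 4]))
logmag = tf.log(tf.abs(stfts) + 1e-6)   # log magnitude, [batch, time, freq]
phase = tf.angle(stfts) / np.pi         # normalized phase, [batch, time, freq]
specgrams = tf.concat(
    [logmag[:, :, :, tf.newaxis], phase[:, :, :, tf.newaxis]], axis=-1)

with tf.Session() as sess:
  print(sess.run(tf.shape(specgrams)))  # [2 10 4 2]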
Example 13: specgrams_to_stfts
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def specgrams_to_stfts(self, specgrams):
  """Converts specgrams to stfts.

  Args:
    specgrams: Tensor of log magnitudes and instantaneous frequencies,
      shape [batch, time, freq, 2].

  Returns:
    stfts: Complex64 tensor of stft, shape [batch, time, freq, 1].
  """
  logmag = specgrams[:, :, :, 0]
  p = specgrams[:, :, :, 1]
  mag = tf.exp(logmag)
  if self._ifreq:
    phase_angle = tf.cumsum(p * np.pi, axis=-2)
  else:
    phase_angle = p * np.pi
  return spectral_ops.polar2rect(mag, phase_angle)[:, :, :, tf.newaxis]
Example 14: top_k_logits
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def top_k_logits(logits, k):
  if k == 0:
    # no truncation
    return logits

  def _top_k():
    values, _ = tf.nn.top_k(logits, k=k)
    min_values = values[:, -1, tf.newaxis]
    return tf.where(
        logits < min_values,
        tf.ones_like(logits, dtype=logits.dtype) * -1e10,
        logits,
    )
  return tf.cond(
      tf.equal(k, 0),
      lambda: logits,
      lambda: _top_k(),
  )
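Assuming top_k_logits as defined above is in scope, a small check of the truncation behaviour: every logit below the k-th largest value in its row is pushed down to -1e10.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant([[1.0, 5.0, 3.0, 2.0],
                      [4.0, 0.0, 2.0, 6.0]])
truncated = top_k_logits(logits, k=2)

with tf.Session() as sess:
  print(sess.run(truncated))
  # Row 0 keeps 5.0 and 3.0, row 1 keeps 4.0 and 6.0; the rest become -1e10.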
Example 15: compute_valid_mask
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import newaxis [as alias]
def compute_valid_mask(num_valid_elements, num_elements):
  """Computes mask of valid entries within padded context feature.

  Args:
    num_valid_elements: An int32 Tensor of shape [batch_size].
    num_elements: An int32 Tensor.

  Returns:
    A boolean Tensor of the shape [batch_size, num_elements]. True means
      valid and False means invalid.
  """
  batch_size = num_valid_elements.shape[0]
  element_idxs = tf.range(num_elements, dtype=tf.int32)
  batch_element_idxs = tf.tile(element_idxs[tf.newaxis, ...], [batch_size, 1])
  num_valid_elements = num_valid_elements[..., tf.newaxis]
  valid_mask = tf.less(batch_element_idxs, num_valid_elements)
  return valid_mask
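Assuming compute_valid_mask as defined above is in scope, a short usage sketch for a batch of two padded contexts with 2 and 4 valid entries out of 5:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

num_valid = tf.constant([2, 4], dtype=tf.int32)
mask = compute_valid_mask(num_valid, num_elements=5)

with tf.Session() as sess:
  print(sess.run(mask))
  # [[ True  True False False False]
  #  [ True  True  True  True False]]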