This article collects typical usage examples of the Python method tensorflow.scatter_nd. If you are unsure what tensorflow.scatter_nd does, how to call it, or want to see it used in practice, the curated code samples below may help. You can also browse further usage examples from the tensorflow module.
The following 15 code examples of tensorflow.scatter_nd are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
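Before the individual examples, here is a minimal sketch of the op itself (TF 1.x-style API, matching the examples below; the concrete values are purely illustrative):

import tensorflow as tf

# tf.scatter_nd writes `updates` into a zero-initialized tensor of `shape`
# at the positions given by `indices`; duplicate indices are summed.
indices = tf.constant([[4], [1], [7]])
updates = tf.constant([9.0, 10.0, 11.0])
result = tf.scatter_nd(indices, updates, shape=[8])
# result == [0., 10., 0., 0., 9., 0., 0., 11.]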
Example 1: restore
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
        x (tf.Tensor): of shape [dim_compressed, ...]

    Returns:
        A tensor of shape [dim_origin, ...] with dim_origin >= dim_compressed.
        The dim is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
        x = tf.scatter_nd(
            indices=self.nonpad_ids,
            updates=x,
            shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
        )
    return x
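This restore method comes from a pad-removal helper, and the snippet does not define self.nonpad_ids or self.dim_origin. As a hypothetical sketch, they could be derived from a padding indicator like this (the names, values, and threshold are assumptions, not part of the example):

# `pad` marks padded positions of the original tensor with 1.0.
pad = tf.constant([0.0, 0.0, 1.0, 0.0, 1.0])     # shape [dim_origin]
nonpad_ids = tf.to_int32(tf.where(pad < 1e-9))   # indices of non-pad entries
dim_origin = tf.shape(pad)[:1]                   # original first dimension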
Example 2: restore
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
        x: A Tensor of shape [dim_compressed, ...]

    Returns:
        A tensor of shape [dim_origin, ...] with dim_origin >= dim_compressed.
        The dim is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
        x = tf.scatter_nd(
            indices=self.nonpad_ids,
            updates=x,
            shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
        )
    return x
Example 3: decode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def decode(self,
           encoded_tensors,
           decode_params,
           num_summands=None,
           shape=None):
    """See base class."""
    del decode_params, num_summands  # Unused.
    indices = encoded_tensors[self.ENCODED_INDICES_KEY]
    non_zero_x = encoded_tensors[self.ENCODED_VALUES_KEY]
    indices = tf.expand_dims(indices, 1)
    shape = tf.cast(shape, indices.dtype)
    decoded_x = tf.scatter_nd(indices=indices, updates=non_zero_x, shape=shape)
    return decoded_x
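This decoder reverses a sparsifying encoder by scattering the stored non-zero values back to their original positions. A stand-alone sketch of the same step, with illustrative values:

indices = tf.constant([0, 3], dtype=tf.int64)    # positions of the non-zeros
non_zero_x = tf.constant([1.5, -2.0])
shape = tf.cast(tf.constant([5]), indices.dtype)
decoded = tf.scatter_nd(tf.expand_dims(indices, 1), non_zero_x, shape)
# decoded == [1.5, 0., 0., -2.0, 0.]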
Example 4: batch_skew
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def batch_skew(vec, batch_size=None):
    """vec is N x 3, batch_size is int.

    Returns N x 3 x 3: the skew-symmetric matrix for each row of vec.
    """
    with tf.variable_scope("batch_skew", [vec]):
        if batch_size is None:
            batch_size = vec.shape.as_list()[0]
        col_inds = tf.constant([1, 2, 3, 5, 6, 7])
        indices = tf.reshape(
            tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
            [-1, 1])
        updates = tf.reshape(
            tf.stack(
                [
                    -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
                    vec[:, 0]
                ],
                axis=1), [-1])
        out_shape = [batch_size * 9]
        res = tf.scatter_nd(indices, updates, out_shape)
        res = tf.reshape(res, [batch_size, 3, 3])
        return res
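To see why col_inds is [1, 2, 3, 5, 6, 7]: for one vector v, the flattened 3 x 3 skew matrix [0, -v2, v1, v2, 0, -v0, -v1, v0, 0] has its six non-zero entries at exactly those flat positions. A quick check with the function above:

# v = (1, 2, 3) should give [[0, -3, 2], [3, 0, -1], [-2, 1, 0]].
v = tf.constant([[1.0, 2.0, 3.0]])
skew = batch_skew(v)   # shape [1, 3, 3]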
Example 5: LandmarkImageLayer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def LandmarkImageLayer(Landmarks):
    def draw_landmarks(L):
        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img
        Landmark = tf.reverse(tf.reshape(L, [-1, 2]), [-1])
        # Landmark = tf.reshape(L, (-1, 2))
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        # Ret = 1 / (tf.norm(tf.map_fn(DoIn, Landmarks), axis=3) + 1)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret
    return tf.map_fn(draw_landmarks, Landmarks)
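The snippet depends on module-level names (Offsets, IMGSIZE, HalfSize) that are not shown. A hypothetical setup consistent with how they are used might look like this (the concrete sizes are assumptions):

IMGSIZE = 112   # assumed image side length
HalfSize = 8    # assumed half-width of the patch drawn around each landmark
# Offsets: integer (dy, dx) grid of shape [2*HalfSize, 2*HalfSize, 2]
Offsets = tf.stack(
    tf.meshgrid(tf.range(-HalfSize, HalfSize),
                tf.range(-HalfSize, HalfSize),
                indexing='ij'),
    axis=-1)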
Example 6: reorder
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def reorder(updates, sd_indices, argsort_axis=1):
    """updates: [N, M]"""
    def prepare_fd(fd_indices, sd_dims):
        fd_indices = tf.expand_dims(fd_indices, 1)
        fd_indices = tf.tile(fd_indices, [1, sd_dims])
        return fd_indices
    # define the updates
    sd_dims = tf.shape(updates)[1]
    fd_indices_range = tf.range(0, limit=tf.shape(updates)[0])
    # define the indices
    indices1 = tf.stack((prepare_fd(fd_indices_range, sd_dims), sd_indices), axis=2)
    shape = tf.shape(updates)
    scatter1 = tf.scatter_nd(indices1, updates, shape)
    return scatter1
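In effect, updates[i, j] is written to column sd_indices[i, j] of row i, i.e. a per-row scatter by target position. A small illustrative call:

updates = tf.constant([[10., 20., 30.]])
sd_indices = tf.constant([[2, 0, 1]])
reordered = reorder(updates, sd_indices)
# reordered == [[20., 30., 10.]]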
Example 7: max_unpool_with_argmax
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def max_unpool_with_argmax(bottom, mask, output_shape=None):
    with tf.name_scope('max_unpool_with_argmax'):
        ksize = [1, 2, 2, 1]
        input_shape = bottom.get_shape().as_list()
        # calculate the new shape
        if output_shape is None:
            output_shape = (input_shape[0],
                            input_shape[1] * ksize[1],
                            input_shape[2] * ksize[2],
                            input_shape[3])
        # calculate indices for batch, height, width and feature maps
        one_like_mask = tf.ones_like(mask)
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64),
                                 shape=[input_shape[0], 1, 1, 1])
        b = one_like_mask * batch_range
        y = mask // (output_shape[2] * output_shape[3])
        x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
        feature_range = tf.range(output_shape[3], dtype=tf.int64)
        f = one_like_mask * feature_range
        # transpose indices & reshape update values to one dimension
        updates_size = tf.size(bottom)
        indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
        values = tf.reshape(bottom, [updates_size])
        return tf.scatter_nd(indices, values, output_shape)
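A typical pairing, sketched with a placeholder input: mask comes from tf.nn.max_pool_with_argmax, whose flat int64 indices encode the (y, x, channel) position within each image, which is why the code above reconstructs the batch index b separately:

images = tf.placeholder(tf.float32, [4, 64, 64, 3])   # hypothetical input
pooled, argmax = tf.nn.max_pool_with_argmax(
    images, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
unpooled = max_unpool_with_argmax(pooled, argmax)     # back to [4, 64, 64, 3]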
Example 8: fock_state
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def fock_state(n, cutoff, pure=True, batched=False):
    """Creates a single-mode input Fock state."""
    if not isinstance(n, (np.ndarray, int)):
        raise ValueError("'n' is expected to be either an int or a numpy array")
    if batched:
        batch_size = n.shape[0]
        idxs = [(b, f) for (b, f) in zip(range(batch_size), n)]
        values = [1.0] * batch_size
        shape = [batch_size, cutoff]
    else:
        idxs = [(n,)]
        values = [1.0]
        shape = [cutoff]
    fock_sparse = tf.scatter_nd(idxs, values, shape)
    fock = tf.cast(fock_sparse, def_type)
    if not pure:
        fock = mixed(fock, batched)
    return fock
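Both call modes in a sketch (def_type and mixed come from the surrounding module and are not shown; def_type is presumably a complex dtype such as tf.complex64, and numpy is assumed imported as np):

single = fock_state(2, cutoff=5)      # amplitude one-hot of length 5
batch = fock_state(np.array([0, 3]), cutoff=5, batched=True)   # shape [2, 5]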
Example 9: unpool
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape = tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1],
                        input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0],
                                      output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                 shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret
Example 10: next_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def next_inputs(self, time, outputs, state, sample_ids):
    (finished, base_next_inputs, state) = super().next_inputs(
        time=time, outputs=outputs, state=state, sample_ids=sample_ids
    )

    def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
        where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)
        sample_ids_sampling = tf.gather_nd(sample_ids, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs, where_not_sampling)
        sampled_next_inputs = self.embedding_fn(sample_ids_sampling)
        base_shape = tf.shape(base_next_inputs)
        return tf.scatter_nd(
            indices=where_sampling, updates=sampled_next_inputs, shape=base_shape
        ) + tf.scatter_nd(
            indices=where_not_sampling,
            updates=inputs_not_sampling,
            shape=base_shape,
        )

    all_finished = tf.reduce_all(finished)
    next_inputs = tf.cond(all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)
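The two scatters above merge disjoint row sets: rows chosen for sampling receive embedded sample ids, while all other rows keep the base inputs. A stand-alone sketch of that merge with illustrative values:

base = tf.constant([[1., 1.], [2., 2.], [3., 3.]])
sampled = tf.constant([[9., 9.]])        # replacement inputs for row 1
where_s = tf.constant([[1]])             # rows chosen for sampling
where_ns = tf.constant([[0], [2]])       # rows keeping the base inputs
merged = (tf.scatter_nd(where_s, sampled, tf.shape(base)) +
          tf.scatter_nd(where_ns, tf.gather_nd(base, where_ns), tf.shape(base)))
# merged == [[1., 1.], [9., 9.], [3., 3.]]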
Example 11: batch_skew
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def batch_skew(vec, batch_size=None):
    """vec is N x 3, batch_size is int.

    Returns N x 3 x 3: the skew-symmetric matrix for each row of vec.
    """
    with tf.name_scope("batch_skew", values=[vec]):
        if batch_size is None:
            batch_size = vec.shape.as_list()[0]
        col_inds = tf.constant([1, 2, 3, 5, 6, 7])
        indices = tf.reshape(
            tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
            [-1, 1])
        updates = tf.reshape(
            tf.stack(
                [
                    -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
                    vec[:, 0]
                ],
                axis=1), [-1])
        out_shape = [batch_size * 9]
        res = tf.scatter_nd(indices, updates, out_shape)
        res = tf.reshape(res, [batch_size, 3, 3])
        return res
Example 12: scatter_add_tensor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def scatter_add_tensor(tensor, indices, out_shape, name=None):
    """
    Code taken from https://github.com/tensorflow/tensorflow/issues/2358 and adapted.

    Adds up elements in tensor that have the same value in indices.
    Must have shape(tensor)[0] == shape(indices)[0].

    :param tensor: A Tensor. Must be one of the following types: float32, float64,
        int64, int32, uint8, uint16, int16, int8, complex64, complex128, qint8,
        quint8, qint32, half.
    :param indices: 1-D tensor of indices.
    :param out_shape: The shape of the output tensor. Must have
        out_shape[1] == shape(tensor)[1].
    :param name: A name for the operation (optional).
    :return: A tensor with the same dtype as tensor and shape out_shape.
    """
    with tf.name_scope(name, 'scatter_add_tensor') as scope:
        indices = tf.expand_dims(indices, -1)
        # scatter_nd adds up values for duplicate indices, which is exactly
        # what we want
        return tf.scatter_nd(indices, tensor, out_shape, name=scope)
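Because scatter_nd sums updates that hit the same index, rows of tensor sharing an index accumulate, which is exactly the behavior the docstring promises. An illustrative call:

tensor = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
indices = tf.constant([0, 1, 0])
out = scatter_add_tensor(tensor, indices, out_shape=[2, 2])
# out == [[6., 8.], [3., 4.]]   (rows 0 and 2 are summed into output row 0)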
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def __init__(self, features, graph_adj, targets, nodes_to_consider, labelled_nodes,
             prop_type, return_prob):
    if prop_type not in ['vanilla', 'smoothed']:
        raise ValueError('Unsupported propagation type.')
    self.prop_type = prop_type
    # if running on Planetoid data these typecasts are necessary
    if isinstance(labelled_nodes, range):
        labelled_nodes = np.array(list(labelled_nodes), dtype=np.int64)
    if targets.dtype != np.float32:
        targets = targets.astype(np.float32)
    super().__init__(features, graph_adj, tf.gather(targets, nodes_to_consider))
    self.labelled_nodes = tf.constant(labelled_nodes, dtype=tf.int64)
    self.initial_predicted_labels = tf.scatter_nd(tf.expand_dims(self.labelled_nodes, -1),
                                                  targets[labelled_nodes], shape=targets.shape)
    self.predicted_labels = tf.Variable(self.initial_predicted_labels, dtype=tf.float32,
                                        name="predicted_labels")
    self.nodes_to_consider = nodes_to_consider
    self.num_nodes = int(self.graph_adj.get_shape()[0])
    self.num_classes = int(self.targets.get_shape()[1])
    self.return_prob = return_prob
    self._build_model_graphs()
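The scatter_nd call seeds predicted_labels with the known one-hot targets at the labelled nodes and zeros everywhere else. A stand-alone sketch with illustrative values:

labelled_nodes = np.array([0, 2], dtype=np.int64)
targets = np.array([[1., 0.], [0., 0.], [0., 1.]], dtype=np.float32)
seeded = tf.scatter_nd(tf.expand_dims(tf.constant(labelled_nodes), -1),
                       targets[labelled_nodes], shape=targets.shape)
# seeded == [[1., 0.], [0., 0.], [0., 1.]]  (unlabelled row 1 stays zero)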
Example 14: testScatterOutOfRangeCpu
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def testScatterOutOfRangeCpu(self):
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
        params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
        updates = np.array([-3, -4, -5]).astype(np.float32)
        with self.test_session(use_gpu=False):
            ref = tf.Variable(params)
            ref.initializer.run()

            # Indices all in range, no problem.
            indices = np.array([[2], [0], [5]])
            op(ref, indices, updates).eval()

            # Test some out of range errors.
            indices = np.array([[-1], [0], [5]])
            with self.assertRaisesOpError(
                    r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
                op(ref, indices, updates).eval()

            indices = np.array([[2], [0], [6]])
            with self.assertRaisesOpError(
                    r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
                op(ref, indices, updates).eval()
Example 15: _disabledTestScatterOutOfRangeGpu
# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_nd [as alias]
def _disabledTestScatterOutOfRangeGpu(self):
    if not tf.test.IsBuiltWithCuda():
        return
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
        params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
        updates = np.array([-3, -4, -5]).astype(np.float32)
        # With GPU, the code ignores indices that are out of range.
        # We don't test the implementation; just test that there are no failures.
        with self.test_session(force_gpu=True):
            ref = tf.Variable(params)
            ref.initializer.run()

            # Indices all in range, no problem.
            indices = np.array([2, 0, 5])
            op(ref, indices, updates).eval()

            # Indices out of range should not fail.
            indices = np.array([-1, 0, 5])
            op(ref, indices, updates).eval()
            indices = np.array([2, 0, 6])
            op(ref, indices, updates).eval()