This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.gather: what array_ops.gather does, how to call it, and what working code looks like. The hand-picked examples below should help; you can also explore the containing module, tensorflow.python.ops.array_ops, for related usage.
Fifteen code examples of array_ops.gather are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: _apply_sparse
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _apply_sparse(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
    beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
    eps = 1e-7  # cap for the moving average
    m = self.get_slot(var, "m")
    m_slice = tf.gather(m, grad.indices)
    m_t = state_ops.scatter_update(m, grad.indices,
                                   tf.maximum(beta_t * m_slice + eps,
                                              tf.abs(grad.values)))
    m_t_slice = tf.gather(m_t, grad.indices)
    # Update 'ref' by subtracting 'value'.
    var_update = state_ops.scatter_sub(
        var, grad.indices,
        lr_t * grad.values * tf.exp(
            tf.log(alpha_t) * tf.sign(grad.values) * tf.sign(m_t_slice)))
    # Create an op that groups multiple operations.
    # When this op finishes, all ops in its inputs have finished.
    return control_flow_ops.group(*[var_update, m_t])
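A quick side note on the update rule above: since exp(log(alpha) * s) = alpha^s, the scatter_sub multiplier is alpha when the gradient and the tracked moving maximum agree in sign, 1/alpha when they disagree, and 1 when the gradient is zero. A minimal NumPy sketch (not from the original source) of that factor:

import numpy as np

alpha = 1.2
for g, m in [(0.5, 0.3), (-0.5, 0.3), (0.0, 0.3)]:
    # exp(log(alpha) * sign(g) * sign(m)) == alpha ** (sign(g) * sign(m))
    factor = np.exp(np.log(alpha) * np.sign(g) * np.sign(m))
    print(g, m, round(factor, 3))  # 1.2, then 1/1.2 ~= 0.833, then 1.0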
Example 2: _MultiDeviceAddN
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _MultiDeviceAddN(tensor_list):
    """Adds tensors from potentially multiple devices."""
    # Basic function structure comes from control_flow_ops.group().
    # Sort tensors according to their devices.
    tensors_on_device = collections.defaultdict(lambda: [])
    for tensor in tensor_list:
        tensors_on_device[tensor.device].append(tensor)
    # For each device, add the tensors on that device first.
    # Then gather the partial sums from multiple devices.
    # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
    # E.g., aggregate per GPU, then per task, and so on.
    summands = []

    def DeviceKey(dev):
        return "" if dev is None else dev

    for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
        tensors = tensors_on_device[dev]
        with ops.colocate_with(tensors[0].op, ignore_existing=True):
            summands.append(math_ops.add_n(tensors))
    return math_ops.add_n(summands)
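The grouping step is plain Python; here is a minimal standalone sketch (hypothetical device strings, not from the original source) showing how tensors are bucketed per device and why DeviceKey maps None to "" so the sort is well defined:

import collections

tensors_on_device = collections.defaultdict(list)
for name, dev in [("a", "/gpu:0"), ("b", None), ("c", "/gpu:0")]:
    tensors_on_device[dev].append(name)
# Per-device partial sums would be formed first, then summed across devices.
for dev in sorted(tensors_on_device, key=lambda d: "" if d is None else d):
    print(repr(dev), tensors_on_device[dev])  # None sorts first, as ""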
Example 3: _DynamicStitchGrads
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _DynamicStitchGrads(op, grad):
    """Gradients for DynamicStitch."""
    num_values = len(op.inputs) // 2
    indices_grad = [None] * num_values

    def AsInt32(x):
        return (x if op.inputs[0].dtype == dtypes.int32 else
                math_ops.cast(x, dtypes.int32))

    inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
    if isinstance(grad, ops.IndexedSlices):
        output_shape = array_ops.shape(op.outputs[0])
        output_rows = output_shape[0]
        grad = math_ops.unsorted_segment_sum(grad.values, grad.indices,
                                             output_rows)
    values_grad = [array_ops.gather(grad, inp) for inp in inputs]
    return indices_grad + values_grad
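Conceptually, the gradient of dynamic_stitch routes each output-row gradient back to whichever value produced that row, which is exactly a gather at the stitch indices. A NumPy sketch with hypothetical indices (not from the original source):

import numpy as np

indices = [np.array([0, 2]), np.array([1, 3])]  # hypothetical stitch indices
grad = np.array([10., 20., 30., 40.])           # upstream gradient on the output
values_grad = [grad[idx] for idx in indices]    # the array_ops.gather step above
print(values_grad)  # [array([10., 30.]), array([20., 40.])]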
Example 4: _SegmentMinOrMaxGrad
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _SegmentMinOrMaxGrad(op, grad, is_sorted):
    """Gradient for SegmentMin and (unsorted) SegmentMax. They share similar code."""
    zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
                            dtype=op.inputs[0].dtype)
    # Get the number of selected (minimum or maximum) elements in each segment.
    gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
    is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
    if is_sorted:
        num_selected = math_ops.segment_sum(
            math_ops.cast(is_selected, grad.dtype), op.inputs[1])
    else:
        num_selected = math_ops.unsorted_segment_sum(
            math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
    # Compute the gradient for each segment. The gradient for the ith segment
    # is divided evenly among the selected elements in that segment.
    weighted_grads = math_ops.div(grad, num_selected)
    gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
    if is_sorted:
        return array_ops.where(is_selected, gathered_grads, zeros), None
    else:
        return array_ops.where(is_selected, gathered_grads, zeros), None, None
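The key idea: only the elements that achieved the segment min/max receive gradient, and ties split it evenly. A NumPy sketch with hypothetical data (not from the original source), using a sorted segment max:

import numpy as np

data = np.array([1., 3., 3., 2.])
segment_ids = np.array([0, 0, 0, 1])
seg_max = np.array([3., 2.])   # segment_max(data, segment_ids)
grad = np.array([6., 5.])      # upstream gradient, one per segment

gathered = seg_max[segment_ids]          # each element's segment max
is_selected = data == gathered           # both tied 3s are selected
num_selected = np.array([is_selected[segment_ids == s].sum() for s in (0, 1)])
weighted = grad / num_selected           # split evenly among ties
print(np.where(is_selected, weighted[segment_ids], 0.))  # [0. 3. 3. 5.]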
Example 5: _gini
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _gini(self, class_counts):
    """Calculate the Gini impurity.

    If c(i) denotes the i-th class count and c = sum_i c(i) then
      score = 1 - sum_i ( c(i) / c )^2

    Args:
      class_counts: A 2-D tensor of per-class counts, usually a slice or
        gather from variables.node_sums.

    Returns:
      A 1-D tensor of the Gini impurities for each row in the input.
    """
    smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
    sums = math_ops.reduce_sum(smoothed, 1)
    sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
    return 1.0 - sum_squares / (sums * sums)
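A NumPy check of the formula on made-up counts (not from the original source); note the slice above starts at column 1, which suggests column 0 of node_sums holds something other than a class count (an assumption here):

import numpy as np

class_counts = np.array([[9., 3., 3., 3.],    # hypothetical node_sums rows
                         [9., 9., 0., 0.]])
smoothed = 1.0 + class_counts[:, 1:]          # the [0, 1] slice above
sums = smoothed.sum(axis=1)
sum_squares = (smoothed ** 2).sum(axis=1)
print(1.0 - sum_squares / (sums * sums))      # ~[0.667, 0.292]: the uniform row is most impure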
Example 6: _weighted_gini
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _weighted_gini(self, class_counts):
    """Our split score is the Gini impurity times the number of examples.

    If c(i) denotes the i-th class count and c = sum_i c(i) then
      score = c * (1 - sum_i ( c(i) / c )^2 )
            = c - sum_i c(i)^2 / c

    Args:
      class_counts: A 2-D tensor of per-class counts, usually a slice or
        gather from variables.node_sums.

    Returns:
      A 1-D tensor of the Gini impurities for each row in the input.
    """
    smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
    sums = math_ops.reduce_sum(smoothed, 1)
    sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
    return sums - sum_squares / sums
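The weighted score relates to the plain Gini impurity of Example 5 by a factor of the (smoothed) total count: c - sum_i c(i)^2 / c = c * (1 - sum_i (c(i)/c)^2). A small NumPy confirmation (made-up numbers, not from the original source):

import numpy as np

smoothed = np.array([[4., 4., 4.], [10., 1., 1.]])  # 1.0 + sliced counts
sums = smoothed.sum(axis=1)
sum_squares = (smoothed ** 2).sum(axis=1)
weighted_score = sums - sum_squares / sums
gini = 1.0 - sum_squares / (sums * sums)
print(np.allclose(weighted_score, sums * gini))     # True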
Example 7: average_impurity
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification
    mode, this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)

    # Guard against step 1, when there often are no leaves yet.
    def impurity():
        return gini

    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
        return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
Example 8: _linear_predictions
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
        sparse_variables = self._convert_n_to_tensor(self._variables[
            'sparse_features_weights'])
        result = 0.0
        for sfc, sv in zip(examples['sparse_features'], sparse_variables):
            # TODO(sibyl-Aix6ihai): following does not take care of missing features.
            result += math_ops.segment_sum(
                math_ops.multiply(
                    array_ops.gather(sv, sfc.feature_indices),
                    sfc.feature_values),
                sfc.example_indices)
        dense_features = self._convert_n_to_tensor(examples['dense_features'])
        dense_variables = self._convert_n_to_tensor(self._variables[
            'dense_features_weights'])
        for i in range(len(dense_variables)):
            result += math_ops.matmul(
                dense_features[i],
                array_ops.expand_dims(dense_variables[i], -1))
    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
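For the sparse term, gather pulls the weight for each nonzero feature and segment_sum adds the products per example. A NumPy sketch of one sparse feature column (hypothetical values, not from the original source):

import numpy as np

weights = np.array([0.5, -1.0, 2.0, 0.0])    # sv: one weight per feature id
feature_indices = np.array([0, 2, 1])        # sfc.feature_indices
feature_values = np.array([1.0, 3.0, 2.0])   # sfc.feature_values
example_indices = np.array([0, 0, 1])        # sfc.example_indices (sorted)

products = weights[feature_indices] * feature_values
result = np.zeros(example_indices.max() + 1)
np.add.at(result, example_indices, products)  # segment_sum equivalent
print(result)  # [0.5*1 + 2.0*3, -1.0*2] -> [6.5, -2.0]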
Example 9: _check_shape
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _check_shape(self, shape):
    """Check that the init arg `shape` defines a valid operator."""
    shape = ops.convert_to_tensor(shape, name="shape")
    if not self._verify_pd:
        return shape

    # Further checks are equivalent to verification that this is positive
    # definite. Why? Because the further checks simply check that this is a
    # square matrix, and combining the fact that this is square (and thus
    # maps a vector space R^k onto itself), with the behavior of .matmul(),
    # this must be the identity operator.
    rank = array_ops.size(shape)
    assert_matrix = check_ops.assert_less_equal(2, rank)
    with ops.control_dependencies([assert_matrix]):
        last_dim = array_ops.gather(shape, rank - 1)
        second_to_last_dim = array_ops.gather(shape, rank - 2)
        assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
        return control_flow_ops.with_dependencies(
            [assert_matrix, assert_square], shape)
Example 10: vector_space_dimension
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def vector_space_dimension(self, name="vector_space_dimension"):
    """Dimension of vector space on which this acts. The `k` in `R^k`.

    If this operator represents the batch matrix `A` with
    `A.shape = [N1,...,Nn, k, k]`, the `vector_space_dimension` is `k`.

    Args:
      name: A name scope to use for ops added by this method.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with ops.name_scope(self.name):
        with ops.name_scope(name, values=self.inputs):
            return array_ops.gather(self.shape(), self.rank() - 1)
Example 11: _check_chol
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
        return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))
    deps = [is_matrix, is_square]

    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))
    return control_flow_ops.with_dependencies(deps, chol)
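A NumPy analogue of these checks (not from the original source): at least rank 2, square trailing dimensions, and a strictly positive diagonal, which is what a Cholesky factor of a positive-definite matrix looks like:

import numpy as np

chol = np.array([[2., 0.],
                 [1., 3.]])  # hypothetical lower-triangular factor
assert chol.ndim >= 2                                     # assert_rank_at_least
assert chol.shape[-2] == chol.shape[-1]                   # assert_equal on trailing dims
assert np.all(np.diagonal(chol, axis1=-2, axis2=-1) > 0)  # assert_positive on the diagonal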
Example 12: _get_identity_operator
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _get_identity_operator(self, v):
    """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
    with ops.name_scope("get_identity_operator", values=[v]):
        if v.get_shape().is_fully_defined():
            v_shape = v.get_shape().as_list()
            v_batch_shape = v_shape[:-2]
            r = v_shape[-1]
            id_shape = v_batch_shape + [r, r]
        else:
            v_shape = array_ops.shape(v)
            v_rank = array_ops.rank(v)
            v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
            r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
            id_shape = array_ops.concat((v_batch_shape, [r, r]), 0)
        return operator_pd_identity.OperatorPDIdentity(
            id_shape, v.dtype, verify_pd=self._verify_pd)
Example 13: _clip_sparse
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
        logging.warning("Clipping norm across dims %s for %s is inefficient "
                        "when including sparse dimension 0.", clip_dims,
                        var.op.name)
        return self._clip_dense(var)

    with ops.colocate_with(var):
        var_subset = array_ops.gather(var, grad.indices)
    with self._maybe_colocate_with(var):
        normalized_var_subset = clip_ops.clip_by_norm(
            var_subset, self._max_norm, clip_dims)
        delta = ops.IndexedSlices(
            var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
        return var.scatter_sub(delta, use_locking=self._use_locking)
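The pattern here: gather only the rows the sparse gradient touches, clip them, and scatter_sub the difference back. A simplified NumPy sketch (not from the original source) that clips each gathered row's norm, a special case of clip_by_norm over clip_dims:

import numpy as np

var = np.array([[3., 4.], [0., 0.], [6., 8.]])  # hypothetical variable
indices = np.array([0, 2])                      # grad.indices
max_norm = 5.0

subset = var[indices]
norms = np.linalg.norm(subset, axis=1, keepdims=True)
normalized = subset * np.minimum(1.0, max_norm / norms)
var[indices] -= subset - normalized             # scatter_sub of the delta
print(var)  # rows 0 and 2 now have norm <= 5; row 1 untouched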
Example 14: _SegmentMinOrMaxGrad
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def _SegmentMinOrMaxGrad(op, grad):
    """Gradient for SegmentMin and SegmentMax. Both share the same code."""
    zeros = array_ops.zeros(
        array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype)
    # Get the number of selected (minimum or maximum) elements in each segment.
    gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
    is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
    num_selected = math_ops.segment_sum(
        math_ops.cast(is_selected, grad.dtype), op.inputs[1])
    # Compute the gradient for each segment. The gradient for the ith segment
    # is divided evenly among the selected elements in that segment.
    weighted_grads = math_ops.div(grad, num_selected)
    gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
    return array_ops.where(is_selected, gathered_grads, zeros), None
Example 15: flatten_nested_indexed_slices
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather [as alias]
def flatten_nested_indexed_slices(grad):
    assert isinstance(grad, ops.IndexedSlices)
    if isinstance(grad.values, ops.Tensor):
        return grad
    else:
        assert isinstance(grad.values, ops.IndexedSlices)
        g = flatten_nested_indexed_slices(grad.values)
        return ops.IndexedSlices(g.values,
                                 array_ops.gather(grad.indices, g.indices),
                                 g.dense_shape)
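Flattening works because two levels of indirection compose: if the inner slices index into the outer slices' rows, then indexing the outer indices by the inner indices yields indices straight into the dense variable. A NumPy sketch with hypothetical indices (not from the original source):

import numpy as np

outer_indices = np.array([4, 0, 7])  # grad.indices: rows of the dense variable
inner_indices = np.array([2, 2, 1])  # g.indices: rows of grad.values
print(outer_indices[inner_indices])  # [7 7 0] -> the gather above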