This page collects typical usage examples of the Python method tensorflow.sparse_reduce_sum. If you have been wondering how to use tensorflow.sparse_reduce_sum in Python, what exactly it does, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow module.
The following lists 14 code examples of tensorflow.sparse_reduce_sum, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: count_nonzero_wrapper
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def count_nonzero_wrapper(X, optype):
    """Wrapper for handling sparse and dense versions of `tf.count_nonzero`.

    Parameters
    ----------
    X : tf.Tensor (N, K)
    optype : str, {'dense', 'sparse'}

    Returns
    -------
    tf.Tensor (1, K)
    """
    with tf.name_scope('count_nonzero_wrapper') as scope:
        if optype == 'dense':
            return tf.count_nonzero(X, axis=0, keep_dims=True)
        elif optype == 'sparse':
            indicator_X = tf.SparseTensor(X.indices, tf.ones_like(X.values), X.dense_shape)
            return tf.sparse_reduce_sum(indicator_X, axis=0, keep_dims=True)
        else:
            raise NameError('Unknown input type in count_nonzero_wrapper')
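A minimal usage sketch (TF 1.x graph mode; the inputs below are made up for illustration):

import tensorflow as tf

# Dense input: count the nonzero entries in each column.
X_dense = tf.constant([[1.0, 0.0], [2.0, 3.0]])
dense_counts = count_nonzero_wrapper(X_dense, 'dense')     # [[2, 1]]

# Sparse input: the same counts via an all-ones indicator SparseTensor.
X_sparse = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
                           values=[1.0, 2.0, 3.0],
                           dense_shape=[2, 2])
sparse_counts = count_nonzero_wrapper(X_sparse, 'sparse')  # [[2., 1.]]

with tf.Session() as sess:
    print(sess.run([dense_counts, sparse_counts]))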
Example 2: _loss_op
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def _loss_op(self):
    '''Operator to compute the loss for the model.

    This method should not be called directly from outside the class.
    Note that we do not need to initialise the loss to zero for each batch,
    as we process the entire data in a single batch.'''
    complete_loss = tf.nn.weighted_cross_entropy_with_logits(
        targets=self.labels,
        logits=self.outputs,
        pos_weight=self.positive_sample_weight
    )

    def _compute_masked_loss(complete_loss):
        '''Compute the masked loss.'''
        normalized_mask = self.mask / tf.sparse_reduce_sum(self.mask)
        complete_loss = tf.multiply(complete_loss, tf.sparse_tensor_to_dense(normalized_mask))
        return tf.reduce_sum(complete_loss)

    # The sparse_tensor_to_dense call is the bottleneck step and should be
    # replaced by something more efficient.
    complete_loss = tf.cond(tf.equal(self.mode, TRAIN),
                            true_fn=lambda: tf.reduce_mean(complete_loss),
                            false_fn=lambda: _compute_masked_loss(complete_loss))
    return complete_loss * self.normalisation_constant
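The masked branch computes a mean over only the masked-in entries: dividing the mask by its total and then summing the mask-weighted losses is equivalent to averaging the loss over the nonzero mask positions. A self-contained sketch of that idea (TF 1.x; the mask and per-element losses are made-up values):

import tensorflow as tf

mask = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 1.0], dense_shape=[2, 2])
per_element_loss = tf.constant([[0.5, 9.0], [9.0, 1.5]])

normalized_mask = mask / tf.sparse_reduce_sum(mask)  # each kept entry weighs 1/2
masked_mean = tf.reduce_sum(per_element_loss * tf.sparse_tensor_to_dense(normalized_mask))

with tf.Session() as sess:
    print(sess.run(masked_mean))  # 1.0 == (0.5 + 1.5) / 2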
Example 3: _accuracy_op
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def _accuracy_op(self):
    '''Operator to compute the accuracy for the model.

    This method should not be called directly from outside the class.'''
    correct_predictions = tf.cast(tf.equal(self.predictions, self.labels),
                                  dtype=tf.float32)

    def _compute_masked_accuracy(correct_predictions):
        '''Compute the masked accuracy.'''
        normalized_mask = self.mask / tf.sparse_reduce_sum(self.mask)
        correct_predictions = tf.multiply(correct_predictions,
                                          tf.sparse_tensor_to_dense(normalized_mask))
        return tf.reduce_sum(correct_predictions, name="accuracy_op")

    accuracy = tf.cond(tf.equal(self.mode, TRAIN),
                       true_fn=lambda: tf.reduce_mean(correct_predictions, name="accuracy_op"),
                       false_fn=lambda: _compute_masked_accuracy(correct_predictions))
    return accuracy
Example 4: weighted_margin_rank_batch
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def weighted_margin_rank_batch(self, tf_prediction_serial, tf_interactions, tf_sample_predictions,
                               tf_n_items, tf_n_sampled_items):
    positive_interaction_mask = tf.greater(tf_interactions.values, 0.0)
    positive_interaction_indices = tf.boolean_mask(tf_interactions.indices,
                                                   positive_interaction_mask)
    positive_interaction_values = tf.boolean_mask(tf_interactions.values,
                                                  positive_interaction_mask)
    positive_interactions = tf.SparseTensor(indices=positive_interaction_indices,
                                            values=positive_interaction_values,
                                            dense_shape=tf_interactions.dense_shape)
    listening_sum_per_item = tf.sparse_reduce_sum(positive_interactions, axis=0)
    gathered_sums = tf.gather(params=listening_sum_per_item,
                              indices=tf.transpose(positive_interaction_indices)[1])

    # [ n_positive_interactions ]
    positive_predictions = tf.boolean_mask(tf_prediction_serial,
                                           positive_interaction_mask)

    n_items = tf.cast(tf_n_items, dtype=tf.float32)
    n_sampled_items = tf.cast(tf_n_sampled_items, dtype=tf.float32)

    # [ n_positive_interactions, n_sampled_items ]
    mapped_predictions_sample_per_interaction = tf.gather(
        params=tf_sample_predictions,
        indices=tf.transpose(positive_interaction_indices)[0])

    # [ n_positive_interactions, n_sampled_items ]
    summation_term = tf.maximum(1.0
                                - tf.expand_dims(positive_predictions, axis=1)
                                + mapped_predictions_sample_per_interaction,
                                0.0)

    # [ n_positive_interactions ]
    sampled_margin_rank = ((n_items / n_sampled_items)
                           * tf.reduce_sum(summation_term, axis=1)
                           * positive_interaction_values / gathered_sums)

    loss = tf.log(sampled_margin_rank + 1.0)
    return loss
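This is a weighted margin-rank batch (WMRB-style) loss: the rank of each positive item is estimated from a sample of items via a hinge term, weighted, and passed through a logarithm. As a sketch of the quantity the code computes (notation mine, not from the source), with I the full item set, S the sampled items, f the model's prediction, and v_ui the positive interaction value normalized by that item's total over users:

\mathcal{L}(u, i) = \log\!\Bigl(1 + \frac{|I|}{|S|}\,\frac{v_{ui}}{\sum_{u'} v_{u'i}} \sum_{j \in S} \max\bigl(0,\; 1 - f(u, i) + f(u, j)\bigr)\Bigr)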
Example 5: text_to_labels
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def text_to_labels(self,
                   text,
                   return_dense=True,
                   pad_value=-1,
                   return_lengths=False):
    """Convert text strings to label sequences.

    Args:
      text: ASCII-encoded string tensor with shape [batch_size]
      return_dense: whether to return dense labels
      pad_value: value used to pad labels to the same length
      return_lengths: if True, also return the text lengths
    Returns:
      labels: sparse or dense tensor of labels
    """
    batch_size = tf.shape(text)[0]
    chars = tf.string_split(text, delimiter='')
    labels_sp = tf.SparseTensor(
        chars.indices,
        self._char_to_label_table.lookup(chars.values),
        chars.dense_shape
    )
    if return_dense:
        labels = tf.sparse_tensor_to_dense(labels_sp, default_value=pad_value)
    else:
        labels = labels_sp

    if return_lengths:
        text_lengths = tf.sparse_reduce_sum(
            tf.SparseTensor(
                chars.indices,
                tf.fill([tf.shape(chars.indices)[0]], 1),
                chars.dense_shape
            ),
            axis=1
        )
        text_lengths.set_shape([None])
        return labels, text_lengths
    else:
        return labels
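The length computation stands on its own: replacing every character with a 1 and summing each row of the resulting SparseTensor counts the characters per string. A self-contained sketch (TF 1.x):

import tensorflow as tf

text = tf.constant(['abc', 'de'])
chars = tf.string_split(text, delimiter='')
# One indicator per character; summing along axis=1 gives per-string lengths.
lengths = tf.sparse_reduce_sum(
    tf.SparseTensor(chars.indices,
                    tf.fill([tf.shape(chars.indices)[0]], 1),
                    chars.dense_shape),
    axis=1)

with tf.Session() as sess:
    print(sess.run(lengths))  # [3 2]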
Example 6: gather_forced_att_logits
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def gather_forced_att_logits(encoder_input_symbols, encoder_decoder_vocab_map,
                             att_logit, batch_size, attn_length,
                             target_vocab_size):
    """Gathers attention weights as logits for forced attention."""
    flat_input_symbols = tf.reshape(encoder_input_symbols, [-1])
    flat_label_symbols = tf.gather(encoder_decoder_vocab_map,
                                   flat_input_symbols)
    flat_att_logits = tf.reshape(att_logit, [-1])

    flat_range = tf.to_int64(tf.range(tf.shape(flat_label_symbols)[0]))
    batch_inds = tf.floordiv(flat_range, attn_length)
    position_inds = tf.mod(flat_range, attn_length)
    attn_vocab_inds = tf.transpose(tf.stack(
        [batch_inds, position_inds, tf.to_int64(flat_label_symbols)]))

    # Exclude indexes of entries with flat_label_symbols[i] = -1.
    included_flat_indexes = tf.reshape(tf.where(tf.not_equal(
        flat_label_symbols, -1)), [-1])
    included_attn_vocab_inds = tf.gather(attn_vocab_inds,
                                         included_flat_indexes)
    included_flat_att_logits = tf.gather(flat_att_logits,
                                         included_flat_indexes)

    sparse_shape = tf.to_int64(tf.stack(
        [batch_size, attn_length, target_vocab_size]))
    sparse_label_logits = tf.SparseTensor(included_attn_vocab_inds,
                                          included_flat_att_logits, sparse_shape)
    forced_att_logit_sum = tf.sparse_reduce_sum(sparse_label_logits, [1])

    forced_att_logit = tf.reshape(forced_att_logit_sum,
                                  [-1, target_vocab_size])
    return forced_att_logit
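At its core, the function scatters per-position attention logits into a [batch, position, vocab] SparseTensor and sums out the position axis, so repeated symbols accumulate their attention mass. A toy sketch (TF 1.x; values made up):

import tensorflow as tf

# Positions 0 and 1 both map to vocab symbol 2; position 2 maps to symbol 4.
sp = tf.SparseTensor(indices=[[0, 0, 2], [0, 1, 2], [0, 2, 4]],
                     values=[0.3, 0.5, 0.2],
                     dense_shape=[1, 3, 5])
summed = tf.sparse_reduce_sum(sp, [1])  # [batch, vocab]; symbol 2 accumulates 0.8

with tf.Session() as sess:
    print(sess.run(summed))  # [[0.  0.  0.8 0.  0.2]]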
Example 7: sparse_placeholder
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.sparse_placeholder(tf.float32)
  y = tf.sparse_reduce_sum(x)

  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.

    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape_name = (name + "/shape") if name is not None else None
  shape = _normalize_sparse_shape(shape, shape_name)
  if shape is None:
    shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
  return sparse_tensor.SparseTensor(
      values=placeholder(
          dtype, shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64, shape=[None, None],
          name=(name + "/indices") if name is not None else None),
      dense_shape=shape)
# pylint: enable=redefined-outer-name
Example 8: sparse_placeholder
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.sparse_placeholder(tf.float32)
  y = tf.sparse_reduce_sum(x)

  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.

    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape_name = (name + "/shape") if name is not None else None
  shape = _normalize_sparse_shape(shape, shape_name)
  if shape is None:
    shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
  return sparse_tensor.SparseTensor(
      values=placeholder(
          dtype, shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64, shape=[None, None],
          name=(name + "/indices") if name is not None else None),
      dense_shape=shape)
# pylint: enable=redefined-outer-name
Example 9: sparse_placeholder
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.sparse_placeholder(tf.float32)
  y = tf.sparse_reduce_sum(x)

  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.

    sp = tf.SparseTensor(indices=indices, values=values, shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape_name = (name + "/shape") if name is not None else None
  shape = _normalize_sparse_shape(shape, shape_name)
  if shape is None:
    shape = placeholder(dtypes.int64, shape=[None], name=shape_name)
  return sparse_tensor.SparseTensor(
      values=placeholder(
          dtype, shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64, shape=[None, None],
          name=(name + "/indices") if name is not None else None),
      shape=shape)
# pylint: enable=redefined-outer-name
Example 10: dice_coef_2
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def dice_coef_2(ground_truth, prediction, weight_map=None):
    """
    Function to calculate the dice loss with the definition given in

        Milletari, F., Navab, N., & Ahmadi, S. A. (2016)
        V-net: Fully convolutional neural
        networks for volumetric medical image segmentation. 3DV 2016

    using a square in the denominator

    :param prediction: the logits
    :param ground_truth: the segmentation ground_truth
    :param weight_map:
    :return: the loss
    """
    ground_truth = tf.to_int64(ground_truth)
    prediction = tf.cast(prediction, tf.float32)
    ids = tf.range(tf.to_int64(tf.shape(ground_truth)[0]), dtype=tf.int64)
    ids = tf.stack([ids, ground_truth], axis=1)
    one_hot = tf.SparseTensor(
        indices=ids,
        values=tf.ones_like(ground_truth, dtype=tf.float32),
        dense_shape=tf.to_int64(tf.shape(prediction)))
    if weight_map is not None:
        n_classes = prediction.shape[1].value
        weight_map_nclasses = tf.reshape(
            tf.tile(weight_map, [n_classes]), prediction.get_shape())
        dice_numerator = 2.0 * tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
        dice_denominator = \
            tf.reduce_sum(weight_map_nclasses * tf.square(prediction),
                          reduction_indices=[0]) + \
            tf.sparse_reduce_sum(one_hot * weight_map_nclasses,
                                 reduction_axes=[0])
    else:
        dice_numerator = 2.0 * tf.sparse_reduce_sum(
            one_hot * prediction, reduction_axes=[0])
        dice_denominator = \
            tf.reduce_sum(tf.square(prediction), reduction_indices=[0]) + \
            tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
    epsilon_denominator = 0.00001

    dice_score = dice_numerator / (dice_denominator + epsilon_denominator)
    # dice_score.set_shape([n_classes])
    # minimising (1 - dice_coefficients)
    # return 1.0 - tf.reduce_mean(dice_score)
    return tf.reduce_mean(dice_score)
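A quick sanity check of the unweighted branch (TF 1.x; toy values mine): with a perfect one-hot prediction the score should come out near 1.

import tensorflow as tf

ground_truth = tf.constant([0, 1, 1])
prediction = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
score = dice_coef_2(ground_truth, prediction)

with tf.Session() as sess:
    print(sess.run(score))  # ~1.0 for a perfect prediction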
Example 11: dice_coef_2
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def dice_coef_2(ground_truth, prediction, weight_map=None):
    """
    Function to calculate the dice loss with the definition given in

        Milletari, F., Navab, N., & Ahmadi, S. A. (2016)
        V-net: Fully convolutional neural
        networks for volumetric medical image segmentation. 3DV 2016

    using a square in the denominator

    :param prediction: the logits
    :param ground_truth: the segmentation ground_truth
    :param weight_map:
    :return: the loss
    """
    ground_truth = tf.to_int64(ground_truth)
    prediction = tf.cast(prediction, tf.float32)
    ids = tf.range(tf.to_int64(tf.shape(ground_truth)[0]), dtype=tf.int64)
    ids = tf.stack([ids, ground_truth], axis=1)
    one_hot = tf.SparseTensor(
        indices=ids,
        values=tf.ones_like(ground_truth, dtype=tf.float32),
        dense_shape=tf.to_int64(tf.shape(prediction)))
    if weight_map is not None:
        n_classes = prediction.shape[1].value
        weight_map_nclasses = tf.reshape(
            tf.tile(weight_map, [n_classes]), prediction.get_shape())
        dice_numerator = 2.0 * tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
        dice_denominator = \
            tf.reduce_sum(weight_map_nclasses * tf.square(prediction),
                          reduction_indices=[0]) + \
            tf.sparse_reduce_sum(one_hot * weight_map_nclasses,
                                 reduction_axes=[0])
    else:
        dice_numerator = 2.0 * tf.sparse_reduce_sum(
            one_hot * prediction, reduction_axes=[0])
        dice_denominator = \
            tf.reduce_sum(tf.square(prediction), reduction_indices=[0]) + \
            tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
    epsilon_denominator = 0.00001

    dice_score = dice_numerator / (dice_denominator + epsilon_denominator)
    # dice_score.set_shape([n_classes])
    # minimising (1 - dice_coefficients)
    # return 1.0 - tf.reduce_mean(dice_score)
    return tf.reduce_mean(dice_score)
Example 12: soft_ncut
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def soft_ncut(image, image_segment, image_weights):
    """
    Args:
        image: [B, H, W, C]
        image_segment: [B, H, W, K]
        image_weights: [B, H*W, H*W]
    Returns:
        Soft_Ncut: scalar
    """
    batch_size = tf.shape(image)[0]
    num_class = tf.shape(image_segment)[-1]
    image_shape = image.get_shape()
    weight_size = image_shape[1].value * image_shape[2].value
    image_segment = tf.transpose(image_segment, [0, 3, 1, 2])  # [B, K, H, W]
    image_segment = tf.reshape(image_segment,
                               tf.stack([batch_size, num_class, weight_size]))  # [B, K, H*W]

    # Dis-association
    # [B0, H*W, H*W] @ [B1, K1, H*W] contracted on [[2], [2]] = [B0, H*W, B1, K1]
    W_Ak = sparse_tensor_dense_tensordot(image_weights, image_segment, axes=[[2], [2]])
    W_Ak = tf.transpose(W_Ak, [0, 2, 3, 1])  # [B0, B1, K1, H*W]
    W_Ak = sycronize_axes(W_Ak, [0, 1], tensor_dims=4)  # [B0=B1, K1, H*W]
    # [B1, K1, H*W] @ [B2, K2, H*W] contracted on [[2], [2]] = [B1, K1, B2, K2]
    dis_assoc = tf.tensordot(W_Ak, image_segment, axes=[[2], [2]])
    dis_assoc = sycronize_axes(dis_assoc, [0, 2], tensor_dims=4)  # [B1=B2, K1, K2]
    dis_assoc = sycronize_axes(dis_assoc, [1, 2], tensor_dims=3)  # [K1=K2, B1=B2]
    dis_assoc = tf.transpose(dis_assoc, [1, 0])  # [B1=B2, K1=K2]
    dis_assoc = tf.identity(dis_assoc, name="dis_assoc")

    # Association
    # image_segment: [B0, K0, H*W]
    sum_W = tf.sparse_reduce_sum(image_weights, axis=2)  # [B1, H*W]
    assoc = tf.tensordot(image_segment, sum_W, axes=[2, 1])  # [B0, K0, B1]
    assoc = sycronize_axes(assoc, [0, 2], tensor_dims=3)  # [B0=B1, K0]
    assoc = tf.identity(assoc, name="assoc")

    utils.add_activation_summary(dis_assoc)
    utils.add_activation_summary(assoc)

    # Soft NCut
    eps = 1e-6
    soft_ncut = tf.cast(num_class, tf.float32) - \
        tf.reduce_sum((dis_assoc + eps) / (assoc + eps), axis=1)

    return soft_ncut
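The tf.sparse_reduce_sum(image_weights, axis=2) call computes each pixel's degree, i.e. the row sums of the sparse affinity matrix, which feed the assoc term of the normalized cut. A toy sketch (TF 1.x; values made up):

import tensorflow as tf

# A 3-pixel affinity matrix for a single batch element.
weights = tf.SparseTensor(indices=[[0, 0, 1], [0, 1, 0], [0, 1, 2]],
                          values=[0.9, 0.9, 0.4],
                          dense_shape=[1, 3, 3])
degree = tf.sparse_reduce_sum(weights, axis=2)  # [B, H*W] row sums

with tf.Session() as sess:
    print(sess.run(degree))  # [[0.9 1.3 0. ]]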
Author: lwchen6309 | Project: unsupervised-image-segmentation-by-WNet-with-NormalizedCut | Lines: 47 | Source file: soft_ncut.py
Example 13: construct_computation_graph
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def construct_computation_graph(self):
    batch_size = tf.shape(self.placeholder['clicked_feature'])[1]
    denseshape = tf.concat([tf.cast(tf.reshape(batch_size, [-1]), tf.int64),
                            tf.reshape(self.placeholder['time'], [-1]),
                            tf.reshape(self.placeholder['item_size'], [-1])], 0)

    # construct the lstm
    cell = tf.contrib.rnn.BasicLSTMCell(self.rnn_hidden, state_is_tuple=True)
    initial_state = cell.zero_state(batch_size, tf.float32)
    rnn_outputs, rnn_states = tf.nn.dynamic_rnn(cell, self.placeholder['clicked_feature'],
                                                initial_state=initial_state, time_major=True)
    # rnn_outputs: (time, user=batch, rnn_hidden)

    # (1) shift the output forward one step, (2) then transpose
    u_bar_feature = tf.concat([tf.zeros([1, batch_size, self.rnn_hidden], dtype=tf.float32),
                               rnn_outputs], 0)
    u_bar_feature = tf.transpose(u_bar_feature, perm=[1, 0, 2])  # (user, time, rnn_hidden)

    # gather the corresponding features
    u_bar_feature_gather = tf.gather_nd(u_bar_feature, self.placeholder['ut_dispid_ut'])
    combine_feature = tf.concat([u_bar_feature_gather, self.placeholder['ut_dispid_feature']],
                                axis=1)

    # indicate the size
    combine_feature = tf.reshape(combine_feature, [-1, self.rnn_hidden + self.f_dim])

    # utility
    u_net = mlp(combine_feature, self.hidden_dims, 1, activation=tf.nn.elu, sd=1e-1, act_last=False)
    u_net = tf.reshape(u_net, [-1])

    click_u_tensor = tf.SparseTensor(self.placeholder['ut_clickid'],
                                     tf.gather(u_net, self.placeholder['click_sublist_index']),
                                     dense_shape=denseshape)
    disp_exp_u_tensor = tf.SparseTensor(self.placeholder['ut_dispid'], tf.exp(u_net),
                                        dense_shape=denseshape)  # (user, time, id)
    disp_sum_exp_u_tensor = tf.sparse_reduce_sum(disp_exp_u_tensor, axis=2)
    sum_click_u_tensor = tf.sparse_reduce_sum(click_u_tensor, axis=2)

    loss_tmp = - sum_click_u_tensor + tf.log(disp_sum_exp_u_tensor + 1)  # (user, time) loss
    loss_sum = tf.reduce_sum(tf.multiply(self.placeholder['ut_dense'], loss_tmp))
    event_cnt = tf.reduce_sum(self.placeholder['ut_dense'])
    loss = loss_sum / event_cnt

    dense_exp_disp_util = tf.sparse_tensor_to_dense(disp_exp_u_tensor,
                                                    default_value=0.0, validate_indices=False)
    click_tensor = tf.sparse_to_dense(self.placeholder['ut_clickid'], denseshape,
                                      self.placeholder['ut_clickid_val'],
                                      default_value=0.0, validate_indices=False)
    argmax_click = tf.argmax(click_tensor, axis=2)
    argmax_disp = tf.argmax(dense_exp_disp_util, axis=2)

    top_2_disp = tf.nn.top_k(dense_exp_disp_util, k=2, sorted=False)[1]

    argmax_compare = tf.cast(tf.equal(argmax_click, argmax_disp), tf.float32)
    precision_1_sum = tf.reduce_sum(tf.multiply(self.placeholder['ut_dense'], argmax_compare))

    tmpshape = tf.concat([tf.cast(tf.reshape(batch_size, [-1]), tf.int64),
                          tf.reshape(self.placeholder['time'], [-1]),
                          tf.constant([1], dtype=tf.int64)], 0)
    top2_compare = tf.reduce_sum(tf.cast(tf.equal(tf.reshape(argmax_click, tmpshape),
                                                  tf.cast(top_2_disp, tf.int64)),
                                         tf.float32), axis=2)
    precision_2_sum = tf.reduce_sum(tf.multiply(self.placeholder['ut_dense'], top2_compare))
    precision_1 = precision_1_sum / event_cnt
    precision_2 = precision_2_sum / event_cnt

    return loss, precision_1, precision_2, loss_sum, precision_1_sum, precision_2_sum, event_cnt
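The two tf.sparse_reduce_sum calls collapse the item axis: one sums exp(utility) over the items displayed at each (user, time) slot, i.e. the softmax denominator, and the other sums the utilities of the clicked items. A toy sketch of the denominator (TF 1.x; values made up):

import tensorflow as tf

# Two items displayed at (user 0, time 0), one at (user 0, time 1).
disp_exp_u = tf.SparseTensor(indices=[[0, 0, 0], [0, 0, 3], [0, 1, 2]],
                             values=tf.exp(tf.constant([1.0, 2.0, 0.5])),
                             dense_shape=[1, 2, 4])
denominator = tf.sparse_reduce_sum(disp_exp_u, axis=2)  # (user, time)

with tf.Session() as sess:
    print(sess.run(denominator))  # [[e^1 + e^2, e^0.5]]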
Example 14: sparse_placeholder
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_reduce_sum [as alias]
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.sparse_placeholder(tf.float32)
  y = tf.sparse_reduce_sum(x)

  with tf.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.

    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape_name = (name + "/shape") if name is not None else None
  shape, rank = _normalize_sparse_shape(shape, shape_name)
  if shape is None:
    shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
  return sparse_tensor.SparseTensor(
      values=placeholder(
          dtype,
          shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64, shape=[None, rank],
          name=(name + "/indices") if name is not None else None),
      dense_shape=shape)
# pylint: enable=redefined-outer-name
Author: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 57 | Source file: array_ops.py