

Python tensorflow.cumprod Method Code Examples

This article collects typical usage examples of the Python tensorflow.cumprod method. If you are wondering what tensorflow.cumprod does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the tensorflow module that contains it.


Fifteen code examples of the tensorflow.cumprod method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
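
Before diving into the examples, the following minimal sketch shows the basic behaviour of the op, including the exclusive and reverse flags that several of the examples below rely on. It assumes TensorFlow 2.x eager execution, where the op lives at tf.math.cumprod; the TensorFlow 1.x code quoted below uses the older alias tf.cumprod with the same semantics.

import tensorflow as tf

x = tf.constant([2., 3., 4.])

# Plain cumulative product: [2, 2*3, 2*3*4] -> [2., 6., 24.]
print(tf.math.cumprod(x).numpy())

# exclusive=True shifts the products and starts from 1: [1., 2., 6.]
print(tf.math.cumprod(x, exclusive=True).numpy())

# reverse=True accumulates from the end: [24., 12., 4.]
print(tf.math.cumprod(x, reverse=True).numpy())

# Combined, element i is the product of everything strictly after i: [12., 4., 1.]
print(tf.math.cumprod(x, exclusive=True, reverse=True).numpy())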

Example 1: _discount_reward_tensor_1d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _discount_reward_tensor_1d(reward, sequence_length,
                               discount=1., dtype=None):
    if sequence_length is None:
        raise ValueError('sequence_length must not be `None` for 1D reward.')

    batch_size = tf.shape(reward)[0]
    max_seq_length = tf.reduce_max(sequence_length)
    dtype = dtype or reward.dtype

    if discount == 1.:
        dmat = tf.ones(
            tf.concat([[batch_size], [max_seq_length]], 0), dtype=dtype)
    else:
        mask = tf.sequence_mask(sequence_length, dtype=dtype)
        mask = tf.concat([mask[:, 1:], tf.zeros_like(mask[:, -1:])], axis=1)
        # Make each row = [discount, ..., discount, 1, ..., 1]
        dmat = mask * discount + (1 - mask)
        dmat = tf.cumprod(dmat, axis=1, reverse=True)

    disc_reward = dmat * tf.expand_dims(reward, -1)
    disc_reward = mask_sequences(
        disc_reward, sequence_length, dtype=dtype, tensor_rank=2)

    return disc_reward 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 26, Source: rewards.py
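
To see what the reverse cumulative product produces here: for one sequence of length 4 with discount 0.9, the masked row is [0.9, 0.9, 0.9, 1.0] (the sequence mask shifted left by one step), and the reverse cumprod turns it into per-step discount factors [d^3, d^2, d, 1]. A minimal NumPy check of just that step, independent of the surrounding helper functions:

import numpy as np

discount = 0.9
row = np.array([discount, discount, discount, 1.0])   # one row of dmat before the cumprod

# Reverse cumulative product: element t is the product of row[t:]
dmat_row = np.cumprod(row[::-1])[::-1]
print(dmat_row)   # [0.729 0.81 0.9 1.0] == [d**3, d**2, d**1, d**0]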

Example 2: make_bert

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def make_bert(input_ids, word_end_mask):
    # We can derive input_mask from either input_ids or word_end_mask
    input_mask = (1 - tf.cumprod(1 - word_end_mask, axis=-1, reverse=True))
    token_type_ids = tf.zeros_like(input_ids)
    bert_model = make_bert_instance(input_ids, input_mask, token_type_ids)

    bert_features = bert_model.get_sequence_output()
    bert_features_packed = tf.gather(
        tf.reshape(bert_features, [-1, int(bert_features.shape[-1])]),
        tf.to_int32(tf.where(tf.reshape(word_end_mask, (-1,))))[:,0])
    projected_annotations = tf.matmul(
        bert_features_packed,
        tf.constant(sd['project_bert.weight'].numpy().transpose()))

    # input_mask is over subwords, whereas valid_mask is over words
    sentence_lengths = tf.reduce_sum(word_end_mask, -1)
    valid_mask = (tf.range(tf.reduce_max(sentence_lengths))[None,:] < sentence_lengths[:, None])
    dim_padded = tf.shape(valid_mask)[:2]
    mask_flat = tf.reshape(valid_mask, (-1,))
    dim_flat = tf.shape(mask_flat)[:1]
    nonpad_ids = tf.to_int32(tf.where(mask_flat)[:,0])

    return projected_annotations, nonpad_ids, dim_flat, dim_padded, valid_mask, sentence_lengths

#%% 
Author: nikitakit, Project: self-attentive-parser, Lines: 27, Source: export_bert.py
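
A note on the input_mask line: word_end_mask is 1 at the last subword of each word and 0 elsewhere (including the padding tail), so the reverse cumulative product of 1 - word_end_mask is 1 exactly at positions with no word end at or after them, i.e. the padding. Subtracting from 1 recovers the subword-level attention mask. A small NumPy sketch of that derivation, under the stated mask convention:

import numpy as np

# 1 marks the last subword of each word; the final two positions are padding.
word_end_mask = np.array([0, 1, 0, 0, 1, 1, 0, 0])

# Reverse cumprod of (1 - mask) is 1 only where no word end follows, i.e. on the padding tail.
padding_tail = np.cumprod((1 - word_end_mask)[::-1])[::-1]
input_mask = 1 - padding_tail
print(input_mask)   # [1 1 1 1 1 1 0 0]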

Example 3: unpool

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape =  tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret 
Author: SaoYan, Project: bgsCNN, Lines: 19, Source: utilities.py
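
Here tf.cumprod(input_shape)[-1] is simply a compact way to multiply all dimensions together, giving the total number of elements to flatten and scatter. A one-line NumPy equivalent, with a hypothetical input shape for illustration:

import numpy as np

input_shape = np.array([8, 16, 16, 32])          # [batch, height, width, channels], hypothetical
flat_input_size = np.cumprod(input_shape)[-1]    # same value as np.prod(input_shape)
print(flat_input_size)                           # 65536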

Example 4: crf_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def crf_loss(y, y_, transitions, nums_tags, batch_size):
    tag_scores = y
    nums_steps = len(tf.unstack(tag_scores, axis=1))
    masks = tf.cast(tf.sign(y_), dtype=tf.float32)
    lengths = tf.reduce_sum(tf.sign(y_), reduction_indices=1)
    tag_ids = y_
    b_id = tf.stack([[nums_tags]] * batch_size)
    padded_tag_ids = tf.concat([b_id, tag_ids], axis=1)
    idx_tag_ids = tf.stack([tf.slice(padded_tag_ids, [0, i], [-1, 2]) for i in range(nums_steps)], axis=1)
    tag_ids = tf.contrib.layers.one_hot_encoding(tag_ids, nums_tags)
    point_score = tf.reduce_sum(tag_scores * tag_ids, reduction_indices=2)
    point_score *= masks
    #Save for future
    trans_sh = tf.stack(transitions.get_shape())
    trans_sh = tf.cumprod(trans_sh, exclusive=True, reverse=True)
    flat_tag_ids = tf.reduce_sum(trans_sh * idx_tag_ids, reduction_indices=2)
    trans_score = tf.gather(tf.reshape(transitions, [-1]), flat_tag_ids)
    ##
    extend_mask = masks
    trans_score *= extend_mask
    target_path_score = tf.reduce_sum(point_score) + tf.reduce_sum(trans_score)
    total_path_score, _, _ = Forward(tag_scores, transitions, nums_tags, lengths, batch_size)()
    return - (target_path_score - total_path_score) 
Author: koala-ai, Project: tensorflow_nlp, Lines: 25, Source: losses.py
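
The cumprod in this loss computes row-major strides from the shape of the transition matrix: for a shape [nums_tags + 1, nums_tags], the exclusive reverse cumulative product is [nums_tags, 1], so flat_tag_ids becomes previous_tag * nums_tags + current_tag, an index into the flattened matrix. A NumPy illustration with a hypothetical tag count:

import numpy as np

nums_tags = 5                                    # hypothetical
shape = np.array([nums_tags + 1, nums_tags])     # transition matrix shape, here [6, 5]

# Exclusive reverse cumprod of the shape gives row-major strides: [5, 1]
strides = np.append(np.cumprod(shape[::-1])[::-1][1:], 1)
print(strides)                                   # [5 1]

prev_tag, cur_tag = 3, 2
flat_index = int(np.sum(strides * np.array([prev_tag, cur_tag])))
print(flat_index)                                # 3 * 5 + 2 = 17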

Example 5: testInvalidAxis

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def testInvalidAxis(self):
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    input_tensor = tf.convert_to_tensor(x)
    with self.test_session(use_gpu=True):
      with self.assertRaisesWithPredicateMatch(
          tf.errors.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        tf.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          tf.errors.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        tf.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          tf.errors.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        tf.cumprod(input_tensor, [0]).eval() 
Author: tobegit3hub, Project: deep_image_model, Lines: 18, Source: scan_ops_test.py

Example 6: _update_alloc_and_usage_vectors

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _update_alloc_and_usage_vectors(self, pre_write_weightings, pre_read_weightings, pre_usage_vector, free_gates):

        retention_vector = tf.reduce_prod(1 - free_gates * pre_read_weightings, axis=1, keepdims=False,
                                          name='retention_prod')
        usage_vector = (
                           pre_usage_vector + pre_write_weightings - pre_usage_vector * pre_write_weightings) * retention_vector

        sorted_usage, free_list = tf.nn.top_k(-1 * usage_vector, self.h_N)
        sorted_usage = -1 * sorted_usage

        cumprod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
        corrected_free_list = free_list + self.const_batch_memory_range

        cumprod_sorted_usage_re = [tf.reshape(cumprod_sorted_usage, [-1, ]), ]
        corrected_free_list_re = [tf.reshape(corrected_free_list, [-1]), ]

        stitched_usage = tf.dynamic_stitch(corrected_free_list_re, cumprod_sorted_usage_re, name=None)

        stitched_usage = tf.reshape(stitched_usage, [self.h_B, self.h_N])

        alloc_weighting = (1 - usage_vector) * stitched_usage

        return alloc_weighting, usage_vector 
Author: JoergFranke, Project: ADNC, Lines: 25, Source: dnc_cell.py

Example 7: _cumprod

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _cumprod(tensor, axis=0):
    """A custom version of cumprod to prevent NaN gradients when there are zeros in `tensor`
    as reported here: https://github.com/tensorflow/tensorflow/issues/3862

    :param tensor: tf.Tensor
    :return: tf.Tensor
    """
    transpose_permutation = None
    n_dim = len(tensor.get_shape())
    if n_dim > 1 and axis != 0:

        if axis < 0:
            axis += n_dim

        transpose_permutation = np.arange(n_dim)
        transpose_permutation[-1], transpose_permutation[0] = 0, axis

    tensor = tf.transpose(tensor, transpose_permutation)

    def prod(acc, x):
        return acc * x

    prob = tf.scan(prod, tensor)
    tensor = tf.transpose(prob, transpose_permutation)
    return tensor 
Author: akosiorek, Project: attend_infer_repeat, Lines: 27, Source: prior.py
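
The idea is to express the cumulative product as a running scan: the scan's gradient flows through the accumulated products directly and avoids the division-by-input term that, per the issue linked in the docstring, makes the gradient of tf.cumprod NaN when the input contains zeros. A minimal 1-D sketch of the scan formulation, assuming TensorFlow 2.x eager execution:

import tensorflow as tf

x = tf.constant([2., 0., 4.])

# Running product via scan; same forward values as tf.math.cumprod(x).
scan_cumprod = tf.scan(lambda acc, v: acc * v, x)
print(scan_cumprod.numpy())          # [2. 0. 0.]
print(tf.math.cumprod(x).numpy())    # [2. 0. 0.]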

Example 8: calculate_allocation_weighting

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def calculate_allocation_weighting(self, usage_vector):
        """

        :param: usage vector: tensor of shape [batch_size, memory_size]
        :return: allocation tensor of shape [batch_size, memory_size]
        """
        usage_vector = Memory.epsilon + (1 - Memory.epsilon) * usage_vector

        # We're sorting the "-self.usage_vector" because top_k returns highest values and we need the lowest
        highest_usage, inverse_indices = tf.nn.top_k(-usage_vector, k=self.memory_size)
        lowest_usage = -highest_usage

        allocation_scrambled = (1 - lowest_usage) * tf.cumprod(lowest_usage, axis=1, exclusive=True)

        # allocation is not yet in the correct order: allocation[i] holds the value for sorted position i,
        # so we invert the already-inverted indices for each batch to restore the original slot order
        indices = tf.stack([tf.invert_permutation(batch_indices) for batch_indices in tf.unstack(inverse_indices)])
        allocation = tf.stack([tf.gather(mem, ind)
                               for mem, ind in
                               zip(tf.unstack(allocation_scrambled), tf.unstack(indices))])

        return allocation 
Author: bgavran, Project: DNC, Lines: 24, Source: memory.py

Example 9: reduce_prod

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def reduce_prod(x, axis, name=None):
  """Efficient reduce product over axis.

  Uses tf.cumprod and tf.gather_nd as a workaround to the poor performance of calculating tf.reduce_prod's gradient on CPU.
  """
  with tf.name_scope(name, 'util_reduce_prod', values=[x]):
    cp = tf.cumprod(x, axis, reverse=True)
    size = tf.shape(cp)[0]
    idx1 = tf.range(tf.cast(size, tf.float32), dtype=tf.float32)
    idx2 = tf.zeros([size], tf.float32)
    indices = tf.stack([idx1, idx2], 1)
    return tf.gather_nd(cp, tf.cast(indices, tf.int32)) 
Author: deepmind, Project: dnc, Lines: 14, Source: util.py
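
For a 2-D input with axis=1, gathering column 0 of the reverse cumulative product yields exactly the per-row products, which is the identity this workaround exploits. A NumPy check of that identity:

import numpy as np

x = np.array([[2., 3., 4.],
              [1., 5., 6.]])

cp = np.cumprod(x[:, ::-1], axis=1)[:, ::-1]   # reverse cumprod along axis 1
print(cp[:, 0])                                # [24. 30.] == np.prod(x, axis=1)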

Example 10: _allocation

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _allocation(self, usage):
    r"""Computes allocation by sorting `usage`.

    This corresponds to the value a = a_t[\phi_t[j]] in the paper.

    Args:
      usage: tensor of shape `[batch_size, memory_size]` indicating current
          memory usage. This is equal to u_t in the paper when we only have one
          write head, but for multiple write heads, one should update the usage
          while iterating through the write heads to take into account the
          allocation returned by this function.

    Returns:
      Tensor of shape `[batch_size, memory_size]` corresponding to allocation.
    """
    with tf.name_scope('allocation'):
      # Ensure values are not too small prior to cumprod.
      usage = _EPSILON + (1 - _EPSILON) * usage

      nonusage = 1 - usage
      sorted_nonusage, indices = tf.nn.top_k(
          nonusage, k=self._memory_size, name='sort')
      sorted_usage = 1 - sorted_nonusage
      prod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
      sorted_allocation = sorted_nonusage * prod_sorted_usage
      inverse_indices = util.batch_invert_permutation(indices)

      # This final line "unsorts" sorted_allocation, so that the indexing
      # corresponds to the original indexing of `usage`.
      return util.batch_gather(sorted_allocation, inverse_indices) 
Author: deepmind, Project: dnc, Lines: 32, Source: addressing.py
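
Examples 6, 8 and 10 all implement the same DNC allocation rule: sort usage ascending, weight each slot by its non-usage times the exclusive cumulative product of the usages of all less-used slots, then unsort back to the original slot order. A minimal single-batch NumPy sketch of that computation:

import numpy as np

usage = np.array([0.9, 0.1, 0.5, 0.3])          # usage of 4 memory slots

order = np.argsort(usage)                        # least-used slots first
sorted_usage = usage[order]                      # [0.1, 0.3, 0.5, 0.9]

# Exclusive cumprod: product of the usages of all strictly less-used slots.
prod_sorted_usage = np.concatenate(([1.0], np.cumprod(sorted_usage)[:-1]))
sorted_allocation = (1 - sorted_usage) * prod_sorted_usage

# Unsort so indexing matches the original usage vector.
allocation = np.empty_like(sorted_allocation)
allocation[order] = sorted_allocation
print(allocation)   # [0.0015 0.9 0.015 0.07]: largest weight at the least-used slot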

Example 11: _discount_reward_py_1d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _discount_reward_py_1d(reward, sequence_length, discount=1., dtype=None):
    if sequence_length is None:
        raise ValueError('sequence_length must not be `None` for 1D reward.')

    reward = np.array(reward)
    sequence_length = np.array(sequence_length)

    batch_size = reward.shape[0]
    max_seq_length = np.max(sequence_length)
    dtype = dtype or reward.dtype

    if discount == 1.:
        dmat = np.ones([batch_size, max_seq_length], dtype=dtype)
    else:
        steps = np.tile(np.arange(max_seq_length), [batch_size, 1])
        mask = np.asarray(steps < (sequence_length-1)[:, None], dtype=dtype)
        # Make each row = [discount, ..., discount, 1, ..., 1]
        dmat = mask * discount + (1 - mask)
        dmat = np.cumprod(dmat[:, ::-1], axis=1)[:, ::-1]

    disc_reward = dmat * reward[:, None]
    disc_reward = mask_sequences(disc_reward, sequence_length, dtype=dtype)
    #mask = np.asarray(steps < sequence_length[:, None], dtype=dtype)
    #disc_reward = mask * disc_reward

    return disc_reward 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 28, Source: rewards.py

Example 12: prefix_accuracy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def prefix_accuracy(predictions,
                    labels,
                    weights_fn=common_layers.weights_nonzero):
  """Average # of correct tokens at start of sequences, ignoring padding 0s.

  See section 4.3 of Learning to Transduce with Unbounded Memory,
  Grefenstette et al., 2015.

  Args:
    predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
        type tf.float32 representing the logits, 0-padded.
    labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
        representing the labels of same length as logits and 0-padded.
    weights_fn: ignored. The weights returned are the total length of the ground
        truth labels, excluding 0-paddings.

  Returns:
    (prefix accuracy, 1.0)

  Raises:
    ValueError: if weights_fn is not common_layers.weights_nonzero.
  """
  if weights_fn is not common_layers.weights_nonzero:
    raise ValueError("Only weights_nonzero can be used for this metric.")

  predictions = tf.to_int32(tf.squeeze(tf.argmax(predictions, axis=-1), axis=2))
  labels = tf.squeeze(labels, axis=(2, 3))
  seq_len = tf.reduce_sum(
      tf.cast(tf.not_equal(labels, tf.constant(0)), dtype=tf.float32), axis=1)
  matching_elements = tf.equal(labels, predictions)
  prefix_len = tf.reduce_sum(
      tf.cumprod(tf.cast(matching_elements, tf.float32), axis=1), axis=1)
  return tf.reduce_mean(prefix_len / seq_len), tf.constant(1.0) 
Author: yyht, Project: BERT, Lines: 35, Source: metrics.py
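
The prefix length falls out of a cumulative product over the element-wise matches: the running product stays 1 until the first mismatch and is 0 afterwards, so its sum equals the length of the correct prefix. A tiny NumPy check:

import numpy as np

matches = np.array([1., 1., 1., 0., 1., 1.])   # token-level equality; first error at position 3
prefix_len = np.sum(np.cumprod(matches))
print(prefix_len)   # 3.0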

Example 13: cumprod

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def cumprod(x, axis=0):
    """Cumulative product of the values in a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to compute the product.

    # Returns
        A tensor of the cumulative product of values of `x` along `axis`.
    """
    return tf.cumprod(x, axis=axis) 
Author: Relph1119, Project: GraphicDesignPatternByPython, Lines: 13, Source: tensorflow_backend.py
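
A short usage sketch for this Keras backend wrapper, assuming the TF1-era standalone Keras package with the TensorFlow backend (with current tf.keras one would simply call tf.math.cumprod):

from keras import backend as K

x = K.variable([[1., 2., 3.],
                [4., 5., 6.]])
print(K.eval(K.cumprod(x, axis=1)))
# [[  1.   2.   6.]
#  [  4.  20. 120.]]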

Example 14: handle_options

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops."""
  length = len(x.shape)
  if axis < 0:
    axis = length + axis

  if reverse:
    x = numpy_reverse(x, axis)

  if exclusive:
    ix_head = [slice(0, 1) if i == axis else slice(None)
               for i in range(length)]
    ix_init = [slice(0, -1) if i == axis else slice(None)
               for i in range(length)]
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])
    else:
      raise ValueError("Unknown scan function.")
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = numpy_reverse(x, axis)
  return x 
Author: tobegit3hub, Project: deep_image_model, Lines: 29, Source: scan_ops_test.py

Example 15: _compare

# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumprod [as alias]
def _compare(self, x, axis, exclusive, reverse):
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.test_session(use_gpu=True):
      tf_out = tf.cumprod(x, axis, exclusive, reverse).eval()

    self.assertAllClose(np_out, tf_out) 
Author: tobegit3hub, Project: deep_image_model, Lines: 8, Source: scan_ops_test.py


Note: The tensorflow.cumprod method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.