

Python tensorflow.sequence_mask Method Code Examples

This article collects and summarizes typical usage examples of the Python tensorflow.sequence_mask method. If you are wondering what tensorflow.sequence_mask does, how to use it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples from the tensorflow module.


Below are 15 code examples of the tensorflow.sequence_mask method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
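
Before the examples, here is a minimal sketch of what tf.sequence_mask itself returns (run inside a session in TF 1.x, or eagerly in TF 2.x):

import tensorflow as tf

# Each row i of the result holds lengths[i] True values followed by False padding.
mask = tf.sequence_mask([1, 3, 2], maxlen=4)
# [[ True, False, False, False],
#  [ True,  True,  True, False],
#  [ True,  True, False, False]]

# An optional dtype converts the mask directly, e.g. to 0/1 float weights.
weights = tf.sequence_mask([1, 3, 2], maxlen=4, dtype=tf.float32)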

Example 1: embed_subword

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def embed_subword(x, size, dim, sequence_length, seed=0, mask_zero=False, maxlen=None):
    # std = np.sqrt(2 / dim)
    std = 0.001
    minval = -std
    maxval = std
    emb = tf.Variable(tf.random_uniform([size, dim], minval, maxval, dtype=tf.float32, seed=seed))
    # None * max_seq_len * max_word_len * embed_dim
    out = tf.nn.embedding_lookup(emb, x)
    if mask_zero:
        # word_len: None * max_seq_len
        # mask: shape=None * max_seq_len * max_word_len
        mask = tf.sequence_mask(sequence_length, maxlen)
        mask = tf.expand_dims(mask, axis=-1)
        mask = tf.cast(mask, tf.float32)
        out = out * mask
    # None * max_seq_len * embed_dim
    # per the Facebook subword (fastText) paper, subword embeddings are summed
    out = tf.reduce_sum(out, axis=2)
    return out 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 21, Source: nn_module.py
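
A hypothetical call of the function above (a sketch assuming TF 1.x; shapes and sizes are made up for illustration): ids of shape [batch, max_seq_len, max_word_len] with per-word subword counts.

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randint(0, 100, size=(2, 5, 4)), dtype=tf.int32)    # subword ids
word_len = tf.constant(np.random.randint(1, 5, size=(2, 5)), dtype=tf.int32)  # subwords per word
out = embed_subword(x, size=100, dim=8, sequence_length=word_len,
                    mask_zero=True, maxlen=4)
# out has shape [2, 5, 8]: subword embeddings summed within each word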

Example 2: exp_mask

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def exp_mask(logits, mask, mask_is_length=True):
  """Exponential mask for logits.

  Logits cannot be masked by multiplying with a 0/1 (boolean) mask, because
  exponentiating 0 yields 1. `exp_mask` instead adds a very large negative
  value to the `False` portion of `mask`, so that portion is effectively
  ignored when the logits are exponentiated, e.g. softmaxed.

  Args:
    logits: Arbitrary-rank logits tensor to be masked.
    mask: `boolean` type mask tensor.
      Could be same shape as logits (`mask_is_length=False`)
      or could be length tensor of the logits (`mask_is_length=True`).
    mask_is_length: `bool` value; whether `mask` is a length tensor (`True`)
      or a boolean mask (`False`).
  Returns:
    Masked logits with the same shape as `logits`.
  """
  if mask_is_length:
    mask = tf.sequence_mask(mask, maxlen=tf.shape(logits)[-1])
  return logits + (1.0 - tf.cast(mask, 'float')) * VERY_LARGE_NEGATIVE_VALUE 
Author: google, Project: mipsqa, Lines: 22, Source: tf_utils.py
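
A minimal usage sketch, assuming TF 1.x and that VERY_LARGE_NEGATIVE_VALUE is the module-level constant the original source defines (a value such as -1e12 is assumed here):

import tensorflow as tf

VERY_LARGE_NEGATIVE_VALUE = -1e12  # assumed; defined at module level in the original

logits = tf.random_normal([2, 4])       # [batch, time]
lengths = tf.constant([2, 3])           # valid steps per row
masked = exp_mask(logits, lengths)      # mask_is_length=True by default
probs = tf.nn.softmax(masked, axis=-1)  # padded positions get ~0 probability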

Example 3: _discount_reward_tensor_1d

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def _discount_reward_tensor_1d(reward, sequence_length,
                               discount=1., dtype=None):
    if sequence_length is None:
        raise ValueError('sequence_length must not be `None` for 1D reward.')

    batch_size = tf.shape(reward)[0]
    max_seq_length = tf.reduce_max(sequence_length)
    dtype = dtype or reward.dtype

    if discount == 1.:
        dmat = tf.ones(
            tf.concat([[batch_size], [max_seq_length]], 0), dtype=dtype)
    else:
        mask = tf.sequence_mask(sequence_length, dtype=dtype)
        mask = tf.concat([mask[:, 1:], tf.zeros_like(mask[:, -1:])], axis=1)
        # Make each row = [discount, ..., discount, 1, ..., 1]
        dmat = mask * discount + (1 - mask)
        dmat = tf.cumprod(dmat, axis=1, reverse=True)

    disc_reward = dmat * tf.expand_dims(reward, -1)
    disc_reward = mask_sequences(
        disc_reward, sequence_length, dtype=dtype, tensor_rank=2)

    return disc_reward 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 26, Source: rewards.py
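
The cumprod trick from the else-branch in isolation (a sketch assuming TF 1.x): build per-step discount factors of the form [gamma^(T-1), ..., gamma, 1] for each sequence.

import tensorflow as tf

sequence_length = tf.constant([3, 2])
gamma = 0.9
mask = tf.sequence_mask(sequence_length, maxlen=3, dtype=tf.float32)
mask = tf.concat([mask[:, 1:], tf.zeros_like(mask[:, -1:])], axis=1)
dmat = mask * gamma + (1 - mask)               # rows like [0.9, 0.9, 1]
dmat = tf.cumprod(dmat, axis=1, reverse=True)  # rows like [0.81, 0.9, 1]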

Example 4: __create_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def __create_loss(self):

        print('Creating loss...')
        start = time.time()

        self.decoder_logits = tf.identity(self.decoder_outputs_train.rnn_output, name="decoder_logits")
        self.decoder_pred = tf.argmax(self.decoder_logits, axis=-1, name="decoder_pred")

        # mask the sequence so the loss is computed only over valid (non-padding) time steps
        mask = tf.sequence_mask(self.decoder_inputs_length_train, maxlen=self.decoder_max_length, dtype=tf.float32,
                                name="masks")

        # Control loss dimensions with `average_across_timesteps` and `average_across_batch`
        self.loss = tf.contrib.seq2seq.sequence_loss(logits=self.decoder_logits,
                                                     targets=self.decoder_targets_train,
                                                     average_across_timesteps=False,
                                                     average_across_batch=False,
                                                     weights=mask,
                                                     name="batch_loss")

        print('Building loss in: ', time.time() - start, ' secs') 
Author: hadyelsahar, Project: Zeroshot-QuestionGeneration, Lines: 23, Source: tripletext2seq.py
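
The mask construction in isolation (a sketch assuming TF 1.x): the float mask serves as per-step weights for tf.contrib.seq2seq.sequence_loss, so padded steps contribute zero loss.

import tensorflow as tf

decoder_len = tf.constant([5, 3])
mask = tf.sequence_mask(decoder_len, maxlen=5, dtype=tf.float32)
# [[1., 1., 1., 1., 1.],
#  [1., 1., 1., 0., 0.]]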

Example 5: cross_entropy_sequence_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Calculates the per-example cross-entropy loss for a sequence of logits and
      masks out all losses past the sequence length.
    Args:
      logits: Logits of shape `[B, T, vocab_size]`
      targets: Target classes of shape `[B, T]`
      sequence_length: An int32 tensor of shape `[B]` corresponding
        to the length of each input
    Returns:
      A tensor of shape [B, T] that contains the loss per example, per time step.
    """
    with tf.compat.v1.variable_scope('sequence_loss'):
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)
        # Mask out the losses we don't care about
        loss_mask = tf.sequence_mask(
            tf.cast(sequence_length, tf.int32),
            tf.cast(tf.shape(targets)[1], tf.int32)
        )
        losses = losses * tf.cast(loss_mask, tf.float32)
        return losses 
Author: uber, Project: ludwig, Lines: 23, Source: loss_modules.py
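
A hypothetical call of the function above (a sketch with made-up shapes: batch of 2, 5 time steps, vocabulary of 7):

import tensorflow as tf

logits = tf.random.normal([2, 5, 7])
targets = tf.random.uniform([2, 5], maxval=7, dtype=tf.int32)
lengths = tf.constant([5, 3])
losses = cross_entropy_sequence_loss(logits, targets, lengths)  # shape [2, 5]
# entries past each sequence length are zeroed; average over valid steps only:
loss = tf.reduce_sum(losses) / tf.cast(tf.reduce_sum(lengths), tf.float32)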

Example 6: token_seq_truncted

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def token_seq_truncted(token_seq, finished_index, max_length): 
	seq_shape = bert_utils.get_shape_list(token_seq, expected_rank=[2,3])
	batch_size = seq_shape[0]
	token_seq = token_seq[:, :max_length]

	token_seq = tf.concat([token_seq, finished_index*tf.cast(tf.ones((batch_size, 1)), tf.int32)], axis=-1)

	token_seq = tf.cast(token_seq, tf.int32)
	seq_shape = bert_utils.get_shape_list(token_seq, expected_rank=[2,3])
	# For each row, keep the position index where `finished_index` occurs;
	# elsewhere use seq_shape[1], so reduce_min yields the first occurrence.
	match_indices = tf.where(
		tf.equal(finished_index, token_seq),
		x=tf.range(seq_shape[1]) * tf.ones_like(token_seq),
		y=(seq_shape[1]) * tf.ones_like(token_seq))

	finished_pos = tf.reduce_min(match_indices, axis=1)
	sequence_mask = tf.sequence_mask(finished_pos + 1, maxlen=seq_shape[1])

	token_seq = tf.cast(sequence_mask, tf.float32) * tf.cast(token_seq, tf.float32)
	return tf.cast(token_seq, tf.int32) 
Author: yyht, Project: BERT, Lines: 22, Source: trf_bert_ebm_gpt_estimator.py
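
The first-occurrence trick from the function above in isolation (a sketch assuming TF 1.x, with a made-up end-of-sequence id of 2):

import tensorflow as tf

token_seq = tf.constant([[7, 2, 5, 2],
                         [3, 3, 3, 2]])
finished_index = 2
T = 4
match_indices = tf.where(tf.equal(finished_index, token_seq),
                         x=tf.range(T) * tf.ones_like(token_seq),
                         y=T * tf.ones_like(token_seq))
first = tf.reduce_min(match_indices, axis=1)  # [1, 3]
mask = tf.sequence_mask(first + 1, maxlen=T)  # keeps tokens up to and including the marker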

Example 7: lengths_to_area_mask

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the maximum sequence length (time dimension) of the batch
    max_area_size: the maximum area size considered
  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """

  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask 
Author: yyht, Project: BERT, Lines: 20, Source: area_attention.py

Example 8: test_softmax_masking

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def test_softmax_masking(self):

        max_len = 3
        axis = 1
        logits = tf.eye(max_len)
        seq_len = [1,2,2]
        mask = tf.sequence_mask(seq_len, max_len)

        r = softmax_with_masking(logits, mask, axis)
        r = np.array(r)

        d = math.exp(1) + math.exp(0)

        expected = np.array([
            [1,0,0],
            [math.exp(0)/d, math.exp(1)/d,0],
            [0.5, 0.5, 0],
        ])

        np.testing.assert_almost_equal(r, expected) 
Author: Octavian-ai, Project: shortest-path, Lines: 22, Source: attention_test.py

Example 9: test_softmax_masking2

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def test_softmax_masking2(self):

        max_len = 3
        axis = 1
        logits = tf.zeros([max_len, max_len])
        seq_len = [1,2,3]
        mask = tf.sequence_mask(seq_len, max_len)

        r = softmax_with_masking(logits, mask, axis)
        r = np.array(r)

        expected = np.array([
            [1.0,0.0,0],
            [0.5,0.5,0],
            [1.0/3.0, 1.0/3.0, 1.0/3.0],
        ])

        np.testing.assert_almost_equal(r, expected) 
Author: Octavian-ai, Project: shortest-path, Lines: 20, Source: attention_test.py

Example 10: mask_3d

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def mask_3d(sequences, sequence_lengths, mask_value, dimension=2):
    """
    Given a batch of matrices, each with shape m x n, mask the values in each
    row after the positions indicated in `sequence_lengths`.
    This function is supposed to mask the last columns in the raw attention
    matrix (e_{i, j}) in cases where the sentence2 is smaller than the
    maximum.
    :param sequences: tensor with shape (batch_size, m, n)
    :param sequence_lengths: tensor with shape (batch_size) containing the sentence sizes that
        should be limited
    :param mask_value: scalar value to assign to items after sentence size
    :param dimension: over which dimension to mask values
    :return: a tensor with the same shape as `sequences`
    """
    if dimension == 1:
        sequences = tf.transpose(sequences, [0, 2, 1])
    time_steps1, time_steps2 = tf.shape(sequences)[1], tf.shape(sequences)[2]
    ones = tf.ones_like(sequences, dtype=tf.int32)
    pad_values = mask_value * tf.cast(ones, tf.float32)
    mask = tf.sequence_mask(sequence_lengths, time_steps2)
    # mask is (batch_size, sentence2_size). we have to tile it for 3d
    mask3d = tf.tile(tf.expand_dims(mask, 1), (1, time_steps1, 1))
    masked = tf.where(mask3d, sequences, pad_values)
    return tf.transpose(masked, [0, 2, 1]) if dimension == 1 else masked 
Author: uclnlp, Project: inferbeddings, Lines: 26, Source: tfutil.py
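
A hypothetical attention-masking call of the function above (a sketch assuming TF 1.x; shapes are made up): raw attention scores of shape [batch, m, n], where sentence2 lengths limit the valid columns.

import tensorflow as tf

scores = tf.random_normal([2, 4, 6])  # e_{i,j} for two sentence pairs
sent2_len = tf.constant([6, 3])       # valid columns per batch item
masked = mask_3d(scores, sent2_len, mask_value=-1e9, dimension=2)
attn = tf.nn.softmax(masked, axis=2)  # padded columns get ~0 attention weight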

Example 11: add_loss_op

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def add_loss_op(self):
        """Defines the loss"""
        if self.config.use_crf:
            log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
                    self.logits, self.labels, self.sequence_lengths)
            self.trans_params = trans_params # need to evaluate it for decoding
            self.loss = tf.reduce_mean(-log_likelihood)
        else:
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.logits, labels=self.labels)
            mask = tf.sequence_mask(self.sequence_lengths)
            losses = tf.boolean_mask(losses, mask)
            self.loss = tf.reduce_mean(losses)

        # for tensorboard
        tf.summary.scalar("loss", self.loss) 
Author: ijmarshall, Project: robotreviewer, Lines: 18, Source: ner_model.py
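
The non-CRF branch in isolation (a sketch assuming TF 1.x, with made-up shapes): gather only the losses at valid time steps before averaging.

import tensorflow as tf

logits = tf.random_normal([2, 4, 5])
labels = tf.random_uniform([2, 4], maxval=5, dtype=tf.int32)
seq_len = tf.constant([4, 2])
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)   # [2, 4]
mask = tf.sequence_mask(seq_len)        # maxlen defaults to max(seq_len)
losses = tf.boolean_mask(losses, mask)  # flat vector of the 6 valid losses
loss = tf.reduce_mean(losses)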

Example 12: _compute_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def _compute_loss(self, logits):
        """Compute optimization loss."""
        target_output = self.iterator.target_output

        if self.time_major:
            target_output = tf.transpose(target_output)

        max_time = self.get_max_time(target_output)

        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_output, logits=logits)

        target_weights = tf.sequence_mask(self.iterator.target_sequence_length, max_time, dtype=logits.dtype)

        if self.time_major:
            target_weights = tf.transpose(target_weights)

        loss = tf.reduce_sum(crossent * target_weights) / tf.to_float(self.batch_size)

        return loss 
Author: neccam, Project: nslt, Lines: 21, Source: model.py
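
The masked-loss weighting in isolation (a sketch assuming TF 1.x and batch-major tensors, i.e. time_major=False; shapes are made up):

import tensorflow as tf

batch_size = 2
logits = tf.random_normal([batch_size, 4, 10])  # [B, T, vocab]
target_output = tf.random_uniform([batch_size, 4], maxval=10, dtype=tf.int32)
target_len = tf.constant([4, 2])

crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=target_output, logits=logits)        # [B, T]
target_weights = tf.sequence_mask(target_len, 4, dtype=logits.dtype)
loss = tf.reduce_sum(crossent * target_weights) / tf.to_float(batch_size)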

Example 13: SeqLayerNorm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def SeqLayerNorm(input, seq_len, centre=True, scale=True): # layer norm for 3D tensor.
	mask = tf.cast(tf.expand_dims(tf.sequence_mask(seq_len), 2), tf.float32) # convert mask to float.
	input_dim = input.get_shape().as_list()[-1] # get number of input dimensions.
	den = tf.multiply(tf.reduce_sum(mask, axis=1, keepdims=True), input_dim) # number of unmasked elements per sequence.
	mean = tf.divide(tf.reduce_sum(tf.multiply(input, mask), axis=[1, 2], keepdims=True), den) # masked mean over time and feature dimensions.
	var = tf.divide(tf.reduce_sum(tf.multiply(tf.square(tf.subtract(input, mean)), mask), axis=[1, 2],
		keepdims=True), den) # masked variance over time and feature dimensions.
	if centre:
		beta = tf.get_variable("beta", input_dim, dtype=tf.float32,  
			initializer=tf.constant_initializer(0.0), trainable=True)
	else: beta = tf.constant(np.zeros(input_dim), name="beta", dtype=tf.float32)
	if scale:
		gamma = tf.get_variable("Gamma", input_dim, dtype=tf.float32,  
			initializer=tf.constant_initializer(1.0), trainable=True)
	else: gamma = tf.constant(np.ones(input_dim), name="Gamma", dtype=tf.float32)
	norm = tf.nn.batch_normalization(input, mean, var, offset=beta, scale=gamma, 
		variance_epsilon = 1e-12) # normalise batch.
	norm = tf.multiply(norm, mask)
	return norm 
Author: anicolson, Project: DeepXi, Lines: 21, Source: normalisation.py
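
A hypothetical call of the function above (a sketch assuming TF 1.x graph mode, since tf.get_variable needs a variable scope; shapes are made up):

import tensorflow as tf

x = tf.random_normal([2, 7, 16])  # [batch, time, features]
seq_len = tf.constant([7, 4])
with tf.variable_scope("seq_ln"):
    y = SeqLayerNorm(x, seq_len)  # padded frames are zeroed after normalisation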

Example 14: example

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def example(self, s, d, s_len, d_len, snr):
		"""
		Compute example for Deep Xi, i.e. observation (noisy-speech STMS)
		and target (mapped a priori SNR).

		Argument/s:
			s - clean speech (dtype=tf.int32).
			d - noise (dtype=tf.int32).
			s_len - clean-speech length without padding (samples).
			d_len - noise length without padding (samples).
			snr - SNR level.

		Returns:
			x_STMS - noisy-speech short-time magnitude spectrum.
			xi_bar - mapped a priori SNR.
			n_frames - number of time-domain frames.
		"""
		s_STMS, d_STMS, x_STMS, n_frames = self.mix(s, d, s_len, d_len, snr)
		mask = tf.expand_dims(tf.cast(tf.sequence_mask(n_frames), tf.float32), 2)
		xi_bar = tf.multiply(self.xi_bar(s_STMS, d_STMS), mask)
		return x_STMS, xi_bar, n_frames 
Author: anicolson, Project: DeepXi, Lines: 23, Source: sig.py

Example 15: update_metrics

# Required module: import tensorflow [as alias]
# Or: from tensorflow import sequence_mask [as alias]
def update_metrics(self, metrics, predictions, labels):
    weights = tf.sequence_mask(
        labels["length"], maxlen=tf.shape(labels["tags"])[1], dtype=tf.float32)

    metrics["accuracy"].update_state(
        labels["tags_id"], predictions["tags_id"], sample_weight=weights)

    if self.tagging_scheme in ("bioes",):
      flag_fn = None
      if self.tagging_scheme == "bioes":
        flag_fn = flag_bioes_tags

      gold_flags, predicted_flags = tf.numpy_function(
          flag_fn,
          [labels["tags"], predictions["tags"], labels["length"]],
          [tf.bool, tf.bool])

      metrics["f1"].update_state(gold_flags, predicted_flags) 
Author: OpenNMT, Project: OpenNMT-tf, Lines: 20, Source: sequence_tagger.py


Note: the tensorflow.sequence_mask method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.