

Python tensorflow.edit_distance Method Code Examples

This article collects typical usage examples of the Python tensorflow.edit_distance method. If you are wondering how exactly tensorflow.edit_distance is used, how to call it, or what real examples of it look like, the curated code examples here may help. You can also explore further usage examples from tensorflow, the module this method belongs to.


The following shows 15 code examples of the tensorflow.edit_distance method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
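
As a quick orientation before the project snippets (this demo is not taken from any of them), here is a minimal sketch of how tf.edit_distance compares two SparseTensor sequences; the token ids and shapes are made up for illustration:

import tensorflow as tf

# Hypothesis "ca" vs. truth "cat" for a single sequence in a batch of one.
hypothesis = tf.SparseTensor(
    indices=[[0, 0], [0, 1]],            # (batch, time) positions
    values=tf.constant([1, 2], tf.int32),
    dense_shape=[1, 2])
truth = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [0, 2]],
    values=tf.constant([1, 2, 3], tf.int32),
    dense_shape=[1, 3])

# normalize=True divides each distance by the truth length (3 here),
# so the result is [1/3]: one insertion turns "ca" into "cat".
distance = tf.edit_distance(hypothesis, truth, normalize=True)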

Example 1: error_rate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def error_rate(y_true, decoded):
    """Edit distance between decoded sequences and ground truth, normalized by the longer length."""

    y_true_shape = tf.shape(y_true)
    decoded_shape = tf.shape(decoded)

    max_length = tf.maximum(y_true_shape[-1], decoded_shape[-1])

    if y_true.dtype == tf.string:
        truth = string_to_sparse(y_true)
    else:
        truth = tf.sparse.from_dense(y_true)

    if decoded.dtype == tf.string:
        hypothesis = string_to_sparse(decoded)
    else:
        hypothesis = tf.sparse.from_dense(decoded)

    err = tf.edit_distance(hypothesis, truth, normalize=False)
    err_norm = err / tf.cast(max_length, dtype=tf.float32)

    return err_norm 
Developer: noahchalifour, Project: rnnt-speech-recognition, Lines: 23, Source: metrics.py
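
The snippet above relies on a string_to_sparse helper that is not included in this excerpt. A plausible minimal sketch, assuming the goal is a character-level SparseTensor built from a batch of strings (the actual helper in the project may differ):

import tensorflow as tf

def string_to_sparse(tensor):
    # Hypothetical helper: split each string into bytes/characters and
    # convert the ragged result into a SparseTensor for tf.edit_distance.
    return tf.strings.bytes_split(tensor).to_sparse()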

Example 2: compute_edit_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def compute_edit_distance(session, labels_true_st, labels_pred_st):
    """Compute edit distance per mini-batch.
    Args:
        session:
        labels_true_st: A `SparseTensor` of ground truth
        labels_pred_st: A `SparseTensor` of prediction
    Returns:
        edit_distances: list of edit distance of each uttearance
    """
    indices, values, dense_shape = labels_true_st
    labels_true_pl = tf.SparseTensor(indices, values, dense_shape)
    indices, values, dense_shape = labels_pred_st
    labels_pred_pl = tf.SparseTensor(indices, values, dense_shape)

    edit_op = tf.edit_distance(labels_pred_pl, labels_true_pl, normalize=True)
    edit_distances = session.run(edit_op)

    return edit_distances 
Developer: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 20, Source: edit_distance.py

Example 3: update_state

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def update_state(self, sparse_predictions, samples, logit_length=None):
        """ Accumulate errors and counts """
        validated_label = tf.cast(
            tf.sparse.from_dense(samples["output"]), dtype=tf.int64
        )
        labels_counter = tf.cast(tf.shape(validated_label.values)[0], tf.float32)

        num_errs = tf.edit_distance(
            sparse_predictions, validated_label, normalize=False
        )
        num_errs = tf.reduce_sum(num_errs)
        if self.rank_size > 1:
            num_errs = hvd.allreduce(num_errs, average=False)
            labels_counter = hvd.allreduce(labels_counter, average=False)
        self.error_count(num_errs)
        self.total_count(labels_counter)
        return num_errs, labels_counter 
Developer: athena-team, Project: athena, Lines: 19, Source: metrics.py

Example 4: loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def loss(self):      
        """ Define loss
        return
        """              
        # ctc loss   
        with tf.name_scope('loss'):
            self.avg_loss = tf.reduce_mean(ctc_ops.ctc_loss(self.text, self.logits, self.seq_length))
            tf.summary.scalar('loss',self.avg_loss)
        # [optimizer]    
        with tf.name_scope('train'):
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.hyparam.learning_rate).minimize(self.avg_loss)
                         
        with tf.name_scope("decode"):
            self.decoded, log_prob = ctc_ops.ctc_beam_search_decoder(self.logits, self.seq_length, merge_repeated=False)

        with tf.name_scope("ctc_beam_search_decode"):
            self.prob = tf.nn.softmax(self.logits, dim=0)
            self.prob = tf.transpose(self.prob, [1, 0, 2]) # match the decoder layout: [batch_size, time_step, n_character]
            self.decoder = LM_decoder(self.hyparam.alpha, self.hyparam.beta, self.hyparam.lang_model_path, self.words)

        with tf.name_scope("accuracy"):
            self.distance = tf.edit_distance(tf.cast(self.decoded[0], tf.int32), self.text)
            # compute label error rate (accuracy)
            self.label_err = tf.reduce_mean(self.distance, name='label_error_rate')
            tf.summary.scalar('accuracy', self.label_err) 
Developer: Pelhans, Project: ZASR_tensorflow, Lines: 27, Source: init_model.py

Example 5: setup_summary_statistics

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def setup_summary_statistics(self):
        # Create a placeholder for the summary statistics
        with tf.name_scope("accuracy"):
            # Compute the edit (Levenshtein) distance of the top path
            distance = tf.edit_distance(
                tf.cast(self.decoded[0], tf.int32), self.targets)

            # Compute the label error rate (accuracy)
            self.ler = tf.reduce_mean(distance, name='label_error_rate')
            self.ler_placeholder = tf.placeholder(dtype=tf.float32, shape=[])
            self.train_ler_op = tf.summary.scalar(
                "train_label_error_rate", self.ler_placeholder)
            self.dev_ler_op = tf.summary.scalar(
                "validation_label_error_rate", self.ler_placeholder)
            self.test_ler_op = tf.summary.scalar(
                "test_label_error_rate", self.ler_placeholder) 
Developer: mrubash1, Project: RNN-Tutorial, Lines: 18, Source: tf_train_ctc.py

Example 6: edit_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def edit_distance(targets, target_seq_length, predictions_sequence,
                  predictions_seq_length, output_feature_name):
    predicts = to_sparse(predictions_sequence,
                         predictions_seq_length,
                         tf.shape(predictions_sequence)[1])
    labels = to_sparse(targets,
                       target_seq_length,
                       tf.shape(targets)[1])
    edit_distance = tf.edit_distance(predicts, labels,
                                     name='edit_distance_{}'.format(
                                         output_feature_name))
    mean_edit_distance = tf.reduce_mean(edit_distance,
                                        name='mean_edit_distance_{}'.format(
                                            output_feature_name))
    return edit_distance, mean_edit_distance 
Developer: uber, Project: ludwig, Lines: 17, Source: measure_modules.py
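
This snippet depends on a to_sparse helper that is not included in the excerpt. A minimal sketch of what such a conversion could look like, assuming 0 is the padding id and not a valid token (the real helper in ludwig may differ):

import tensorflow as tf

def to_sparse(dense, sequence_lengths, max_length):
    # Hypothetical helper: zero out positions beyond each sequence length,
    # then drop the (assumed) zero padding when building the SparseTensor.
    mask = tf.sequence_mask(sequence_lengths, max_length)
    masked = tf.where(mask, dense, tf.zeros_like(dense))
    return tf.sparse.from_dense(masked)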

Example 7: create_solver

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def create_solver(self):
        def sparse_targets(targets, targets_length):
            return tf.cast(K.ctc_label_dense_to_sparse(targets, math_ops.cast(
                K.flatten(targets_length), dtype='int32')), 'int32')

        def create_cer(sparse_decoded, sparse_targets):
            return tf.edit_distance(tf.cast(sparse_decoded, tf.int32), sparse_targets, normalize=True)

        # Note for codec change: the codec size is derived upon creation, therefore the ctc ops must be created
        # using the true codec size (the W/B matrix may, however, change its shape during loading/codec change
        # to match the true codec size).
        loss = KL.Lambda(lambda args: K.ctc_batch_cost(*args), output_shape=(1,), name='ctc')((self.targets, self.softmax, self.output_seq_len, self.targets_length))
        self.sparse_targets = KL.Lambda(lambda args: sparse_targets(*args), name='sparse_targets')((self.targets, self.targets_length))
        self.cer = KL.Lambda(lambda args: create_cer(*args), output_shape=(1,), name='cer')((self.sparse_decoded, self.sparse_targets))

        if self.network_proto.solver == NetworkParams.MOMENTUM_SOLVER:
            optimizer = keras.optimizers.SGD(self.network_proto.learning_rate, self.network_proto.momentum, clipnorm=self.network_proto.clipping_norm)
        elif self.network_proto.solver == NetworkParams.ADAM_SOLVER:
            optimizer = keras.optimizers.Adam(self.network_proto.learning_rate, clipnorm=self.network_proto.clipping_norm)
        else:
            raise Exception("Unknown solver of type '%s'" % self.network_proto.solver)

        def ctc_loss(t, p):
            return p

        model = Model(inputs=[self.targets, self.input_data, self.input_length, self.targets_length], outputs=[loss])
        model.compile(optimizer=optimizer, loss={'ctc': ctc_loss},
                      )

        return model 
Developer: Calamari-OCR, Project: calamari, Lines: 32, Source: tensorflow_model.py

Example 8: _testEditDistanceST

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def _testEditDistanceST(
      self, hypothesis_st, truth_st, normalize, expected_output,
      expected_shape, expected_err_re=None):
    edit_distance = tf.edit_distance(
        hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)

    if expected_err_re is None:
      self.assertEqual(edit_distance.get_shape(), expected_shape)
      output = edit_distance.eval()
      self.assertAllClose(output, expected_output)
    else:
      with self.assertRaisesOpError(expected_err_re):
        edit_distance.eval() 
Developer: tobegit3hub, Project: deep_image_model, Lines: 15, Source: edit_distance_op_test.py

Example 9: cal_perf

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def cal_perf(pred, sparse_labels):
  """Helper function to calculate edit distance and accuracy.
  """
  edist = tf.edit_distance(tf.cast(pred[0], tf.int32), sparse_labels,
                           normalize=False)
  acc = tf.reduce_mean(tf.cast(tf.equal(edist, 0), tf.float32))
  return edist, acc 
Developer: huschen, Project: kaggle_speech_recognition, Lines: 9, Source: models.py

Example 10: _get_testing

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def _get_testing(rnn_logits,sequence_length,label,label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 
    with tf.name_scope("test"):
        predictions,_ = tf.nn.ctc_beam_search_decoder(rnn_logits, 
                                                   sequence_length,
                                                   beam_width=128,
                                                   top_paths=1,
                                                   merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32) # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors,axis=0)
        total_label_error = tf.reduce_sum( label_errors )
        total_labels = tf.reduce_sum( label_length )
        label_error = tf.truediv( total_label_error, 
                                  tf.cast(total_labels, tf.float32 ),
                                  name='label_error')
        sequence_error = tf.truediv( tf.cast( sequence_errors, tf.int32 ),
                                     tf.shape(label_length)[0],
                                     name='sequence_error')
        tf.summary.scalar( 'loss', loss )
        tf.summary.scalar( 'label_error', label_error )
        tf.summary.scalar( 'sequence_error', sequence_error )

    return loss, label_error, sequence_error 
Developer: zfxxfeng, Project: cnn_lstm_ctc_ocr_for_ICPR, Lines: 32, Source: test.py

Example 11: get_edit_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def get_edit_distance(hyp_arr,truth_arr,mode='train'):
    ''' calculate edit distance
    '''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.edit_distance(hyp, truth, normalize=True)

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr, mode)
        hypTest = list_to_sparse_tensor(hyp_arr, mode)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)
    return dist 
Developer: zzw922cn, Project: Automatic_Speech_Recognition, Lines: 17, Source: ed.py

Example 12: edit_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def edit_distance(hypothesis, truth, eos_id, mapping=None):

    if mapping:
        mapping = tf.convert_to_tensor(mapping)
        hypothesis = tf.nn.embedding_lookup(mapping, hypothesis)
        truth = tf.nn.embedding_lookup(mapping, truth)

    hypothesis = dense_to_sparse(hypothesis, eos_id, merge_repeated=True)
    truth = dense_to_sparse(truth, eos_id, merge_repeated=True)

    return tf.edit_distance(hypothesis, truth, normalize=True) 
Developer: WindQAQ, Project: listen-attend-and-spell, Lines: 13, Source: metrics_utils.py

Example 13: tf_edit_distance

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def tf_edit_distance(hypothesis, truth, norm=False):
    """ Edit distance using tensorflow 

    inputs are tf.Sparse_tensors """

    return tf.edit_distance(hypothesis, truth, normalize=norm, name='edit_distance') 
Developer: ysoullard, Project: CTCModel, Lines: 8, Source: CTCModel.py

Example 14: cer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def cer(y_true, y_pred, return_all=False):
  labels_pred_sparse = one_hot_labels_to_sparse(y_pred)
  labels_true_sparse = one_hot_labels_to_sparse(y_true)
  ed = tf.edit_distance(tf.cast(labels_pred_sparse, tf.int32), labels_true_sparse)
  cer = tf.reduce_mean(ed)
  if return_all:
    return cer, ed
  else:
    return cer 
Developer: afourast, Project: deep_lip_reading, Lines: 11, Source: losses.py

Example 15: _get_testing

# Required import: import tensorflow [as alias]
# Or: from tensorflow import edit_distance [as alias]
def _get_testing( rnn_logits,sequence_length,label,label_length,
                  continuous_eval, lexicon, lexicon_prior ):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:   batch level edit distance on beam search max
       sequence_error: batch level sequence error rate
    """

    with tf.name_scope( "train" ):
        # Reduce by mean (rather than sum) if doing continuous evaluation
        batch_loss = model.ctc_loss_layer( rnn_logits,label,sequence_length,
                                           reduce_mean=continuous_eval) 
    with tf.name_scope( "test" ):
        predictions,_ = _get_output( rnn_logits, sequence_length,
                                     lexicon, lexicon_prior )

        hypothesis = tf.cast( predictions[0], tf.int32 ) # for edit_distance

        # Per-sequence statistic
        num_label_errors = tf.edit_distance( hypothesis, label, 
                                             normalize=False )

        # Per-batch summary counts
        batch_num_label_errors = tf.reduce_sum( num_label_errors)
        batch_num_sequence_errors = tf.count_nonzero( num_label_errors, axis=0 )
        batch_num_labels = tf.reduce_sum( label_length )
        
        # Wide integer type casts (prefer unsigned, but truediv dislikes those)
        batch_num_label_errors = tf.cast( batch_num_label_errors, tf.int64 )
        batch_num_sequence_errors = tf.cast( batch_num_sequence_errors, 
                                             tf.int64 )
        batch_num_labels = tf.cast( batch_num_labels, tf.int64)
        
    return batch_loss, batch_num_label_errors, batch_num_sequence_errors, \
        batch_num_labels, predictions 
Developer: weinman, Project: cnn_lstm_ctc_ocr, Lines: 37, Source: model_fn.py


Note: The tensorflow.edit_distance examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code; do not republish without permission.