The following code examples, extracted from open-source Python projects, show how to use tensorflow.edit_distance().
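Before the project snippets, here is a minimal, self-contained sketch of the API (TF 1.x graph mode is assumed; the label values and the dense_to_sparse helper are made up for illustration). It converts two 0-padded dense label batches into SparseTensors and computes both the raw and the normalized edit distance.

import tensorflow as tf

# Two hypothesis and two ground-truth label sequences, 0-padded to length 4.
# The values are arbitrary and only serve to illustrate the API.
hyp_dense = tf.constant([[1, 2, 3, 0],
                         [1, 1, 0, 0]], dtype=tf.int32)
truth_dense = tf.constant([[1, 2, 4, 0],
                           [1, 3, 2, 0]], dtype=tf.int32)

def dense_to_sparse(dense, pad_value=0):
    # tf.edit_distance expects SparseTensors; drop the padding when converting.
    indices = tf.where(tf.not_equal(dense, pad_value))
    values = tf.gather_nd(dense, indices)
    return tf.SparseTensor(indices, values, tf.shape(dense, out_type=tf.int64))

hyp = dense_to_sparse(hyp_dense)
truth = dense_to_sparse(truth_dense)

raw_dist = tf.edit_distance(hyp, truth, normalize=False)   # Levenshtein distance per sequence
norm_dist = tf.edit_distance(hyp, truth, normalize=True)   # divided by the truth length

with tf.Session() as sess:
    print(sess.run([raw_dist, norm_dist]))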
def compute_edit_distance(session, labels_true_st, labels_pred_st):
    """Compute edit distance per mini-batch.
    Args:
        session: an active `tf.Session`
        labels_true_st: A `SparseTensor` of ground truth
        labels_pred_st: A `SparseTensor` of prediction
    Returns:
        edit_distances: list of edit distance of each utterance
    """
    indices, values, dense_shape = labels_true_st
    labels_true_pl = tf.SparseTensor(indices, values, dense_shape)
    indices, values, dense_shape = labels_pred_st
    labels_pred_pl = tf.SparseTensor(indices, values, dense_shape)
    # Normalized by the length of the ground-truth labels (second argument).
    edit_op = tf.edit_distance(labels_pred_pl, labels_true_pl, normalize=True)
    edit_distances = session.run(edit_op)
    return edit_distances
def _get_testing(rnn_logits, sequence_length, label, label_length):
    """Create ops for testing (all scalars):
       loss: CTC loss function value,
       label_error: Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits, label, sequence_length)
    with tf.name_scope("test"):
        predictions, _ = tf.nn.ctc_beam_search_decoder(rnn_logits,
                                                       sequence_length,
                                                       beam_width=128,
                                                       top_paths=1,
                                                       merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32)  # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors, axis=0)
        total_label_error = tf.reduce_sum(label_errors)
        total_labels = tf.reduce_sum(label_length)
        label_error = tf.truediv(total_label_error,
                                 tf.cast(total_labels, tf.float32),
                                 name='label_error')
        sequence_error = tf.truediv(tf.cast(sequence_errors, tf.int32),
                                    tf.shape(label_length)[0],
                                    name='sequence_error')
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('label_error', label_error)
    tf.summary.scalar('sequence_error', sequence_error)
    return loss, label_error, sequence_error
def create_label_error_rate(logits, labels, timesteps):
    with tf.variable_scope('LER'):
        decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, timesteps)
        decoded = tf.cast(decoded[0], tf.int32)
        edit_dist = tf.edit_distance(decoded, labels)
        ler = tf.reduce_mean(edit_dist)
        tf.summary.scalar('label_error_rate', ler)
    return ler
def test_edit_distance():
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.edit_distance(hyp, truth, normalize=False)

    with tf.Session(graph=graph) as session:
        truthTest = sparse_tensor_feed([[0, 1, 2], [0, 1, 2, 3, 4]])
        hypTest = sparse_tensor_feed([[3, 4, 5], [0, 1, 2, 2]])
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run([editDist], feed_dict=feedDict)
        print(dist)
def get_edit_distance(hyp_arr, truth_arr):
    ''' calculate edit distance '''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.edit_distance(hyp, truth, normalize=True)

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr)
        hypTest = list_to_sparse_tensor(hyp_arr)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)
    return dist
def get_edit_distance(hyp_arr, truth_arr, mode='train'):
    ''' calculate edit distance '''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.edit_distance(hyp, truth, normalize=True)

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr, mode)
        hypTest = list_to_sparse_tensor(hyp_arr, mode)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)
    return dist
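The helpers used above (`sparse_tensor_feed`, `list_to_sparse_tensor`) are not part of the snippets. A minimal sketch of what such a converter might look like, assuming the input is a plain Python list of integer label sequences (the `mode` argument from the last example is omitted here):

import numpy as np
import tensorflow as tf

def list_to_sparse_tensor(sequences):
    """Hypothetical helper: convert a list of label sequences into the
    (indices, values, dense_shape) triple accepted by tf.sparse_placeholder."""
    indices, values = [], []
    for batch_idx, seq in enumerate(sequences):
        for time_idx, label in enumerate(seq):
            indices.append([batch_idx, time_idx])
            values.append(label)
    dense_shape = [len(sequences), max(len(seq) for seq in sequences)]
    return tf.SparseTensorValue(np.array(indices, dtype=np.int64),
                                np.array(values, dtype=np.int32),
                                np.array(dense_shape, dtype=np.int64))

With a helper of this shape, the feed dicts in the two get_edit_distance variants above resolve to ordinary NumPy triples that the sparse placeholders accept.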
def calculate_mean_edit_distance_and_loss(batch_set, dropout):
    r'''
    This routine beam search decodes a mini-batch and calculates the loss and mean edit distance.
    Next to total and average loss it returns the mean edit distance,
    the decoded result and the batch's original Y.
    '''
    # Obtain the next batch of data
    batch_x, batch_seq_len, batch_y = batch_set.next_batch()

    # Calculate the logits of the batch using BiRNN
    logits = BiRNN(batch_x, tf.to_int64(batch_seq_len), dropout)

    # Compute the CTC loss using either TensorFlow's `ctc_loss` or Baidu's `warp_ctc_loss`.
    if FLAGS.use_warpctc:
        total_loss = tf.contrib.warpctc.warp_ctc_loss(labels=batch_y, inputs=logits,
                                                      sequence_length=batch_seq_len)
    else:
        total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits,
                                    sequence_length=batch_seq_len)

    # Calculate the average loss across the batch
    avg_loss = tf.reduce_mean(total_loss)

    # Beam search decode the batch
    decoded, _ = tf.nn.ctc_beam_search_decoder(logits, batch_seq_len, merge_repeated=False)

    # Compute the edit (Levenshtein) distance
    distance = tf.edit_distance(tf.cast(decoded[0], tf.int32), batch_y)

    # Compute the mean edit distance
    mean_edit_distance = tf.reduce_mean(distance)

    # Finally we return the
    # - calculated total and
    # - average losses,
    # - the Levenshtein distance,
    # - the recognition mean edit distance,
    # - the decoded batch and
    # - the original batch_y (which contains the verified transcriptions).
    return total_loss, avg_loss, distance, mean_edit_distance, decoded, batch_y


# Adam Optimization
# =================

# In contrast to 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# in which 'Nesterov's Accelerated Gradient Descent'
# (www.cs.toronto.edu/~fritz/absps/momentum.pdf) was used,
# we will use the Adam method for optimization (http://arxiv.org/abs/1412.6980),
# because, generally, it requires less fine-tuning.
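The comment block above opts for Adam over Nesterov momentum. As a rough illustration only, a minimal sketch of how the returned avg_loss might be wired to an optimizer with the TF 1.x API; the hyperparameter values are placeholders, not the project's actual flags, and batch_set and dropout are assumed to be defined as in the surrounding project:

import tensorflow as tf

# Build the graph for one batch, then minimize the averaged CTC loss with Adam.
total_loss, avg_loss, distance, mean_edit_distance, decoded, batch_y = \
    calculate_mean_edit_distance_and_loss(batch_set, dropout)

optimizer = tf.train.AdamOptimizer(learning_rate=1e-4,   # placeholder value
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-8)
train_op = optimizer.minimize(avg_loss)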
def sequence_edit_distance(predictions,
                           labels,
                           weights_fn=common_layers.weights_nonzero):
    """Average edit distance, ignoring padding 0s.

    The score returned is the edit distance divided by the total length of
    reference truth and the weight returned is the total length of the truth.

    Args:
        predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`]
            and type tf.float32 representing the logits, 0-padded.
        labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
            representing the labels of same length as logits and 0-padded.
        weights_fn: ignored. The weights returned are the total length of the
            ground truth labels, excluding 0-paddings.

    Returns:
        (edit distance / reference length, reference length)

    Raises:
        ValueError: if weights_fn is not common_layers.weights_nonzero.
    """
    if weights_fn is not common_layers.weights_nonzero:
        raise ValueError("Only weights_nonzero can be used for this metric.")

    with tf.variable_scope("edit_distance", values=[predictions, labels]):
        # Transform logits into sequence classes by taking max at every step.
        predictions = tf.to_int32(
            tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3)))
        nonzero_idx = tf.where(tf.not_equal(predictions, 0))
        sparse_outputs = tf.SparseTensor(nonzero_idx,
                                         tf.gather_nd(predictions, nonzero_idx),
                                         tf.shape(predictions, out_type=tf.int64))
        labels = tf.squeeze(labels, axis=(2, 3))
        nonzero_idx = tf.where(tf.not_equal(labels, 0))
        label_sparse_outputs = tf.SparseTensor(nonzero_idx,
                                               tf.gather_nd(labels, nonzero_idx),
                                               tf.shape(labels, out_type=tf.int64))
        distance = tf.reduce_sum(
            tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False))
        reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0])
        return distance / reference_length, reference_length