The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.sparse_to_dense().
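Before the project examples, here is a minimal, self-contained sketch of the call itself. It assumes TensorFlow 1.x graph mode (in newer releases the function is deprecated in favor of tf.sparse.to_dense); the index and value constants are purely illustrative.

import tensorflow as tf

# Place 1.0 at positions (0, 2) and (1, 0) of a 2x3 matrix;
# every other entry takes the default value 0.0.
indices = tf.constant([[0, 2], [1, 0]])
dense = tf.sparse_to_dense(sparse_indices=indices,
                           output_shape=[2, 3],
                           sparse_values=1.0,
                           default_value=0.0)

with tf.Session() as sess:
    print(sess.run(dense))  # [[0. 0. 1.]
                            #  [1. 0. 0.]]
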
def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for op_scope.
    Returns:
        one hot encoding of the labels.
    """
    with tf.op_scope([labels], scope, 'OneHotEncoding'):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels

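As a quick illustration, a helper like the one above is typically called with a statically shaped integer label tensor; the constant below is hypothetical and assumes the same TF 0.x-era graph API as the example itself.

# Hypothetical usage: four labels, four classes -> a [4, 4] one-hot matrix.
labels = tf.constant([3, 1, 0, 2], dtype=tf.int64)
onehot = one_hot_encoding(labels, num_classes=4)
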
def _decode_lambda(self, args):
    """Decoding within tensorflow graph.

    In case kenlm_directory is specified, a modified version of tensorflow
    (available at https://github.com/timediv/tensorflow-with-kenlm)
    is needed to run that extends ctc_decode to use a kenlm decoder.

    :return: Most probable decoded sequence. Important: blank labels are returned as `-1`.
    """
    import tensorflow as tf

    prediction_batch, prediction_lengths = args

    log_prediction_batch = tf.log(tf.transpose(prediction_batch, perm=[1, 0, 2]) + 1e-8)
    prediction_length_batch = tf.to_int32(tf.squeeze(prediction_lengths, axis=[1]))

    (decoded, log_prob) = self.ctc_get_decoded_and_log_probability_batch(log_prediction_batch,
                                                                         prediction_length_batch)

    return single([tf.sparse_to_dense(st.indices, st.dense_shape, st.values, default_value=-1)
                   for st in decoded])

def kSparse(self, x, topk):
    print 'run regular k-sparse'
    dim = int(x.get_shape()[1])
    if topk > dim:
        warnings.warn('Warning: topk should not be larger than dim: %s, found: %s, using %s' % (dim, topk, dim))
        topk = dim

    k = dim - topk
    values, indices = tf.nn.top_k(-x, k)  # indices will be [[0, 1], [2, 1]], values will be [[6., 2.], [5., 4.]]

    # We need to create full indices like [[0, 0], [0, 1], [1, 2], [1, 1]]
    my_range = tf.expand_dims(tf.range(0, tf.shape(indices)[0]), 1)  # will be [[0], [1]]
    my_range_repeated = tf.tile(my_range, [1, k])  # will be [[0, 0], [1, 1]]

    # change shapes to [N, k, 1] and [N, k, 1], to concatenate into [N, k, 2]
    full_indices = tf.stack([my_range_repeated, indices], axis=2)
    full_indices = tf.reshape(full_indices, [-1, 2])

    to_reset = tf.sparse_to_dense(full_indices, tf.shape(x), tf.reshape(values, [-1]),
                                  default_value=0., validate_indices=False)

    res = tf.add(x, to_reset)

    return res

def labels_to_onehots(labels, num_classes):
    """Convert a vector of integer class labels to a matrix of one-hot target vectors.

    :param labels: a vector of integer labels, 0 to num_classes. Has shape (batch_size,).
    :param num_classes: the total number of classes
    :return: has shape (batch_size, num_classes)
    """
    batch_size = labels.get_shape().as_list()[0]
    with tf.name_scope("one_hot"):
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
        sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs")
        onehots = tf.sparse_to_dense(sparse_ptrs, [batch_size, num_classes],
                                     1.0, 0.0)
    return onehots

def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for name_scope.
    Returns:
        one hot encoding of the labels.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat([indices, labels], 1)
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels

def loss_func_softmax(pred, gold):
    """Softmax loss with integers as the second argument (instead of a zero-one encoding matrix).

    Args:
        pred: log-odds where the last dimension is the number of labels
        gold: integer array the same size as pred but the last dimension which is 1
    Returns:
        the softmax values applied to the predictions
    """
    pred = tf.reshape(pred, [-1, pred.get_shape()[-1].value])
    gold = tf.reshape(gold, [pred.get_shape()[0].value])
    n = pred.get_shape()[0].value
    voc_size = pred.get_shape()[1].value
    rg = tf.range(0, n)
    inds = tf.transpose(tf.pack([rg, tf.cast(gold, 'int32')]))
    vals = tf.ones([n])
    # gold_mat = tf.SparseTensor( , [n, voc_size])
    gold_mat = tf.sparse_to_dense(inds, [n, voc_size], vals)
    return tf.nn.softmax_cross_entropy_with_logits(pred, gold_mat)

def one_hot(labels, num_classes, name='one_hot'):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        name: Optional name for op_scope.
    Returns:
        one hot encoding of the labels.
    """
    # tf.op_scope expects a list of input tensors as its first argument;
    # passing only the name would raise an error.
    with tf.op_scope([labels], name):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels

def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for name_scope.
    Returns:
        one hot encoding of the labels.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels

def backward(self):
    dx_flat = self.probs
    coords = tf.transpose(tf.pack([tf.range(self.N * self.T), self.y_flat]))
    binary_mask = tf.sparse_to_dense(coords, dx_flat.get_shape(), 1)
    # convert 1/0 to True/False
    binary_mask = tf.cast(binary_mask, tf.bool)
    decremented = dx_flat - 1
    # make new x out of old or decremented values, depending on the mask
    dx_flat = tf.select(binary_mask, decremented, dx_flat)

    dx_flat /= self.N
    dx_flat *= self.mask_flat[:, None]

    dx = tf.reshape(dx_flat, [self.N, self.T, self.V])

    return dx

def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        scope: Optional scope for name_scope.
    Returns:
        one hot encoding of the labels.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(axis=1, values=[indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels

def shift_thin_stack(thin_stack, thin_stack_head_next, batch_size, max_num_concepts,
                     decoder_position, prev_transition_state):
    """Applies shift to the thin stack and its head if in shift state."""
    # Head points to item after stack top, so always update the stack entry.
    new_thin_stack = write_thin_stack(thin_stack, thin_stack_head_next,
                                      decoder_position, batch_size, max_num_concepts)
    # Push if previous transition state is shift (or pointer shift).
    stack_head_updates = tf.sparse_to_dense(tf.pack([data_utils.GEN_STATE]),
                                            tf.pack([data_utils.NUM_TR_STATES]), 1)
    new_thin_stack_head_next = tf.add(thin_stack_head_next,
                                      tf.gather(stack_head_updates, prev_transition_state))
    return new_thin_stack, new_thin_stack_head_next

def gather_nd_states(inputs, inds, batch_size, input_size, state_size):
    """Gathers an embedding for each batch entry with index inds from inputs.

    Args:
        inputs: Tensor [batch_size, input_size, state_size].
        inds: Tensor [batch_size]
    Returns:
        output: Tensor [batch_size, embedding_size]
    """
    sparse_inds = tf.transpose(tf.pack([tf.range(batch_size), inds]))
    dense_inds = tf.sparse_to_dense(sparse_inds,
                                    tf.pack([batch_size, input_size]),
                                    tf.ones(tf.pack([batch_size])))

    output_sum = tf.reduce_sum(tf.reshape(dense_inds, [-1, input_size, 1, 1])
                               * tf.reshape(inputs, [-1, input_size, 1, state_size]),
                               [1, 2])
    output = tf.reshape(output_sum, [-1, state_size])
    return output

def prepare_reader(self, filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            "rgb": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio": tf.FixedLenSequenceFeature([], dtype=tf.string),
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
                           validate_indices=False),
        tf.bool))

    rgbs, num_frames = self.get_video_matrix(features["rgb"], 1024, self.max_frames)
    audios, num_frames = self.get_video_matrix(features["audio"], 1024, self.max_frames)

    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_rgbs = tf.expand_dims(rgbs, 0)
    batch_audios = tf.expand_dims(audios, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_rgbs, batch_audios, batch_labels, batch_frames

def get_config(self):
    config = {'topk': self.topk, 'ctype': self.ctype}
    base_config = super(KCompetitive, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

# def k_comp_sigm(self, x, topk):
#     print 'run k_comp_sigm'
#     dim = int(x.get_shape()[1])
#     if topk > dim:
#         warnings.warn('topk should not be larger than dim: %s, found: %s, using %s' % (dim, topk, dim))
#         topk = dim
#
#     values, indices = tf.nn.top_k(x, topk)  # indices will be [[0, 1], [2, 1]], values will be [[6., 2.], [5., 4.]]
#
#     # We need to create full indices like [[0, 0], [0, 1], [1, 2], [1, 1]]
#     my_range = tf.expand_dims(tf.range(0, K.shape(indices)[0]), 1)  # will be [[0], [1]]
#     my_range_repeated = tf.tile(my_range, [1, topk])  # will be [[0, 0], [1, 1]]
#
#     # change shapes to [N, k, 1] and [N, k, 1], to concatenate into [N, k, 2]
#     full_indices = tf.stack([my_range_repeated, indices], axis=2)
#     full_indices = tf.reshape(full_indices, [-1, 2])
#
#     to_reset = tf.sparse_to_dense(full_indices, tf.shape(x), tf.reshape(values, [-1]), default_value=0., validate_indices=False)
#
#     batch_size = tf.to_float(tf.shape(x)[0])
#     tmp = 1 * batch_size * tf.reduce_sum(x - to_reset, 1, keep_dims=True) / topk
#
#     res = tf.sparse_to_dense(full_indices, tf.shape(x), tf.reshape(tf.add(values, tmp), [-1]), default_value=0., validate_indices=False)
#
#     return res

def loss(logits, labels):
    # Reshape the labels into a dense Tensor of shape [batch_size, NUM_CLASSES].
    sparse_labels = tf.reshape(labels, [input.FLAGS.batch_size, 1])
    indices = tf.reshape(tf.range(0, input.FLAGS.batch_size), [input.FLAGS.batch_size, 1])
    concated = tf.concat(1, [indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated,
                                      [input.FLAGS.batch_size, input.NUM_CLASSES],
                                      1.0, 0.0)

    # Calculate the average cross entropy loss across the batch.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, dense_labels,
                                                            name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the
    # weight decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')

def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss

def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss

def loss_function(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 10]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss

def char_index_batch_to_2d_tensor(batch, batch_size, num_labels):
    sparse_labels = tf.reshape(batch, [batch_size, 1])
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    concatenated = tf.concat(1, [indices, sparse_labels])
    concat = tf.concat(0, [[batch_size], [num_labels]])
    output_shape = tf.reshape(concat, [2])
    sparse_to_dense = tf.sparse_to_dense(concatenated, output_shape, 1, 0)
    return tf.reshape(sparse_to_dense, [batch_size, num_labels])

def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    # Reshape the labels into a dense Tensor of
    # shape [batch_size, NUM_CLASSES].
    sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
    indices = tf.reshape(tf.range(0, FLAGS.batch_size), [FLAGS.batch_size, 1])
    concated = tf.concat(axis=1, values=[indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated,
                                      [FLAGS.batch_size, NUM_CLASSES],
                                      1.0, 0.0)

    # Calculate the average cross entropy loss across the batch.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=dense_labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')

def translate(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_ch = U.get_shape()[1:]
    height = height.value
    width = width.value
    num_ch = num_ch.value
    hwc = height * width * num_ch

    nind = tf.range(num_batch)
    x = repeat(tf.range(height), width)
    y = tf.tile(tf.range(width), tf.pack([height]))
    cind = tf.range(num_ch)

    nind = tf.expand_dims(repeat(nind, hwc), 1)
    x = tf.tile(tf.expand_dims(repeat(x, num_ch), 1), tf.pack([num_batch, 1]))
    y = tf.tile(tf.expand_dims(repeat(y, num_ch), 1), tf.pack([num_batch, 1]))
    cind = tf.tile(tf.expand_dims(cind, 1), tf.pack([num_batch * height * width, 1]))

    dx, dy = tf.split(1, 2, theta)
    dx = tf.cast(tf.clip_by_value(dx, 0, out_height - height), 'int32')
    dx = tf.reshape(tf.tile(dx, tf.pack([1, hwc])), [-1, 1])
    dy = tf.cast(tf.clip_by_value(dy, 0, out_width - width), 'int32')
    dy = tf.reshape(tf.tile(dy, tf.pack([1, hwc])), [-1, 1])
    x = x + dx
    y = y + dy

    tind = tf.concat(1, [nind, x, y, cind])
    val = tf.reshape(U, [-1])
    T = tf.sparse_to_dense(tind,
                           tf.pack([num_batch, out_height, out_width, num_ch]),
                           val)
    T.set_shape([None, out_height, out_width, num_ch])
    return T

def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=onehot_labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss

def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    # if the layer configuration is changed, you probably should change the stacked array size below.
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=onehot_labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss

def one_hot_patch(x, depth):
    # workaround by name-name
    sparse_labels = tf.reshape(x, [-1, 1])
    derived_size = tf.shape(sparse_labels)[0]
    indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
    concated = tf.concat(axis=1, values=[indices, sparse_labels])
    outshape = tf.concat(axis=0, values=[tf.reshape(derived_size, [1]),
                                         tf.reshape(depth, [1])])
    return tf.sparse_to_dense(concated, outshape, 1.0, 0.0)

def loss(logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
        logits: input tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size].
    Returns:
        loss: Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(concated, tf.shape(logits), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='loss')
    tf.summary.scalar('summary/loss', loss)
    return loss

def grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes):
    print("Setting gradients to 1 for target class and rest to 0")
    # Conv layer tensor [?,7,7,512]
    conv_layer = vgg.layers[layer_name]
    # [1000]-D tensor with target class index set to 1 and rest as 0
    one_hot = tf.sparse_to_dense(predicted_class, [nb_classes], 1.0)
    signal = tf.mul(vgg.layers['fc3'], one_hot)
    loss = tf.reduce_mean(signal)

    grads = tf.gradients(loss, conv_layer)[0]
    # Normalizing the gradients
    norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))

    output, grads_val = sess.run([conv_layer, norm_grads], feed_dict={vgg.imgs: x})
    output = output[0]          # [7,7,512]
    grads_val = grads_val[0]    # [7,7,512]

    weights = np.mean(grads_val, axis=(0, 1))              # [512]
    cam = np.ones(output.shape[0:2], dtype=np.float32)     # [7,7]

    # Taking a weighted average
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # Passing through ReLU
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    cam = resize(cam, (224, 224))

    # Converting grayscale to 3-D
    cam3 = np.expand_dims(cam, axis=2)
    cam3 = np.tile(cam3, [1, 1, 3])

    return cam3

def one_hot_mask(labels, num_classes, scope=None):
    """Compute 1-hot encodings for masks.

    Given a label image, this computes the one hot encoding at each pixel.

    Args:
        labels: (batch_size, width, height, 1) tensor containing labels.
        num_classes: number of classes
        scope: optional scope name

    Returns:
        Tensor of shape (batch_size, width, height, num_classes) with
        a 1-hot encoding.
    """
    with tf.name_scope(scope, "OneHotMask", [labels]):
        height, width, depth = _shape(labels)
        assert depth == 1
        sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
        sparse_size, _ = _shape(sparse_labels)
        indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
        concated = tf.concat(1, [indices, sparse_labels])
        dense_result = tf.sparse_to_dense(concated, [sparse_size, num_classes],
                                          1.0, 0.0)
        result = tf.reshape(dense_result, [height, width, num_classes])
        return result

def decoding(self):
    """Predict labels from learned sequence model."""
    # TODO: label error rate on validation set
    decoded, _ = tf.nn.ctc_greedy_decoder(self.logits_t, self.seq_lens)
    sparse_decode_op = decoded[0]  # single-element list
    self.decode_op = tf.sparse_to_dense(sparse_decode_op.indices,
                                        sparse_decode_op.dense_shape,
                                        sparse_decode_op.values)
    return self.decode_op

def loss(logits, labels, config):
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, config.batch_size, 1), 1)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([config.batch_size, config.ydim]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, onehot_labels,
                                                            name='entropy')
    loss = tf.reduce_mean(cross_entropy, name='entropy_mean')
    return loss