The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.arg_max().
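Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tf.arg_max does: it returns the index of the largest entry along the given dimension, so it is commonly used to turn logits or one-hot labels into class indices. The sketch assumes TensorFlow 1.x, where tf.arg_max behaves like tf.argmax (the name the API later standardized on).

import tensorflow as tf  # assumes TensorFlow 1.x

# Two rows of class scores (logits).
logits = tf.constant([[0.1, 2.5, 0.3],
                      [4.0, 0.2, 1.1]])

# Index of the largest score along dimension 1, i.e. the predicted class per row.
predictions = tf.arg_max(logits, 1)

with tf.Session() as sess:
    print(sess.run(predictions))  # [1 0]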
def build_model4(self):
    self.weights3, self.biases3 = self.get_en_z_variables()
    self.weights4, self.biases4 = self.get_en_y_variables()

    self.e_z = self.encode_z(self.images, weights=self.weights3, biases=self.biases3)
    self.e_y = self.encode_y(self.images, weights=self.weights4, biases=self.biases4)

    # Changing y: +1 or +2 or +3
    self.e_y = tf.one_hot(tf.arg_max(self.e_y, 1) + self.extend_value, 10)

    self.fake_images = self.generate(self.e_z, self.e_y, weights=self.weights1, biases=self.biases1)

    t_vars = tf.trainable_variables()
    self.g_vars = [var for var in t_vars if 'gen' in var.name]
    self.enz_vars = [var for var in t_vars if 'enz' in var.name]
    self.eny_vars = [var for var in t_vars if 'eny' in var.name]

    self.saver = tf.train.Saver(self.g_vars)
    self.saver_z = tf.train.Saver(self.g_vars + self.enz_vars)
    self.saver_y = tf.train.Saver(self.eny_vars)

# do train
def encode_z(self, x, weights, biases):
    c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='enz_bn1'))
    c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='enz_bn2'))
    c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

    # using tanh instead of tf.nn.relu.
    result_z = batch_normal(fully_connect(c2, weights['e3'], biases['eb3']), scope='enz_bn3')

    # result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))
    # Transforming to one-hot form
    # sparse_label = tf.arg_max(result_c, 1)
    # y_vec = tf.one_hot(sparse_label, 10)

    return result_z
def _generate_labels(self, overlaps):
    labels = tf.Variable(tf.ones(shape=(tf.shape(overlaps)[0],), dtype=tf.float32) * -1,
                         trainable=False, validate_shape=False)
    gt_max_overlaps = tf.arg_max(overlaps, dimension=0)
    anchor_max_overlaps = tf.arg_max(overlaps, dimension=1)
    mask = tf.one_hot(anchor_max_overlaps, tf.shape(overlaps)[1], on_value=True, off_value=False)
    max_overlaps = tf.boolean_mask(overlaps, mask)
    if self._debug:
        max_overlaps = tf.Print(max_overlaps, [max_overlaps])
    labels = tf.scatter_update(labels, gt_max_overlaps, tf.ones((tf.shape(gt_max_overlaps)[0],)))
    # TODO: extract config object
    over_threshold_mask = tf.reshape(tf.where(max_overlaps > 0.5), (-1,))
    if self._debug:
        over_threshold_mask = tf.Print(over_threshold_mask, [over_threshold_mask],
                                       message='over threshold index : ')
    labels = tf.scatter_update(labels, over_threshold_mask, tf.ones((tf.shape(over_threshold_mask)[0],)))
    # TODO: support clobber positive in the origin implement
    below_threshold_mask = tf.reshape(tf.where(max_overlaps < 0.3), (-1,))
    if self._debug:
        below_threshold_mask = tf.Print(below_threshold_mask, [below_threshold_mask],
                                        message='below threshold index : ')
    labels = tf.scatter_update(labels, below_threshold_mask, tf.zeros((tf.shape(below_threshold_mask)[0],)))
    return labels
def max_sentence_similarity(sentence_input, similarity_matrix):
    """
    Parameters
    ----------
    sentence_input: Tensor
        Tensor of shape (batch_size, num_sentence_words, rnn_hidden_dim).

    similarity_matrix: Tensor
        Tensor of shape (batch_size, num_sentence_words, num_sentence_words).
    """
    # Shape: (batch_size, passage_len)
    def single_instance(inputs):
        single_sentence = inputs[0]
        argmax_index = inputs[1]
        # Shape: (num_sentence_words, rnn_hidden_dim)
        return tf.gather(single_sentence, argmax_index)

    question_index = tf.arg_max(similarity_matrix, 2)
    elems = (sentence_input, question_index)
    # Shape: (batch_size, num_sentence_words, rnn_hidden_dim)
    return tf.map_fn(single_instance, elems, dtype="float")
def test(self, sess, token_ids):
    # We decode one sentence at a time.
    token_ids = data_utils.padding(token_ids)
    target_ids = data_utils.padding([data_utils.GO_ID])
    y_ids = data_utils.padding([data_utils.EOS_ID])
    encoder_inputs, decoder_inputs, _, _ = data_utils.nextRandomBatch([(token_ids, target_ids, y_ids)],
                                                                      batch_size=1)
    prediction = sess.run(self.prediction, feed_dict={
        self.encoder_inputs: encoder_inputs,
        self.decoder_inputs: decoder_inputs
    })
    pred_max = tf.arg_max(prediction, 1)
    # prediction = tf.split(0, self.num_steps, prediction)
    # # This is a greedy decoder - outputs are just argmaxes of output_logits.
    # outputs = [int(np.argmax(predict)) for predict in prediction]
    # # If there is an EOS symbol in outputs, cut them at that point.
    # if data_utils.EOS_ID in outputs:
    #     outputs = outputs[:outputs.index(data_utils.EOS_ID)]
    return pred_max.eval()
def add_loss_op(self):
    # max_class = tf.transpose(tf.expand_dims(tf.arg_max(self.predicted_class, 2), axis=2), [1, 0, 2])
    max_class = tf.expand_dims(tf.arg_max(self.logits, 1), axis=1)
    true_labels = tf.cast(self.targets_placeholder, tf.int64)
    rewards = tf.cast(tf.equal(max_class, true_labels), tf.float32)
    tot_cum_rewards = rewards

    baseline_op = tf.stop_gradient(self.baselines)
    stable_rewards = tf.tile(tot_cum_rewards, (1, self.config.seq_len)) - tf.squeeze(baseline_op, axis=2)
    baseline_mse = tf.reduce_mean(tf.square((stable_rewards)))
    self.cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
                                                       labels=tf.squeeze(true_labels, axis=1)))
    ll = tf.contrib.distributions.Normal(tf.stack(self.mean_loc),
                                         self.config.variance).log_pdf(tf.stack(self.sampled_loc))
    ll = tf.transpose(tf.reduce_sum(ll, axis=2))
    reward_loss = tf.reduce_mean(ll * stable_rewards, axis=[0, 1])
    self.loss = -reward_loss + baseline_mse + self.cross_entropy
    self.total_rewards = tf.reduce_mean(tot_cum_rewards)
def decoder(self, decoder_inputs, encoder_state, name, lengths=None, train=True):
    dec_cell = tf.contrib.rnn.GRUCell(self.para.embedding_size)
    W = self.graph.get_tensor_by_name(name + '/weight:0')
    b = self.graph.get_tensor_by_name(name + '/bias:0')
    if train:
        with tf.variable_scope(name) as varscope:
            dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
            outputs_train, state_train, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
                dec_cell, decoder_fn=dynamic_fn_train, inputs=decoder_inputs,
                sequence_length=lengths, scope=varscope)
            logits = tf.reshape(outputs_train, [-1, self.para.embedding_size])
            logits_train = tf.matmul(logits, W) + b
            logits_projected = tf.reshape(logits_train,
                                          [self.para.batch_size, tf.reduce_max(lengths), self.vocabulary_size])
            return logits_projected, outputs_train
    else:
        with tf.variable_scope(name, reuse=True) as varscope:
            output_fn = lambda x: tf.nn.softmax(tf.matmul(x, W) + b)
            dynamic_fn_inference = tf.contrib.seq2seq.simple_decoder_fn_inference(
                output_fn=output_fn, encoder_state=encoder_state, embeddings=self.word_embeddings,
                start_of_sequence_id=2, end_of_sequence_id=3, maximum_length=self.max_sent_len,
                num_decoder_symbols=self.vocabulary_size)
            logits_inference, state_inference, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
                dec_cell, decoder_fn=dynamic_fn_inference, scope=varscope)
            return tf.arg_max(logits_inference, 2)
def accuracy(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor,
    """
    with tf.name_scope('accuracy') as scope:
        correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct) * 100.0
        tf.summary.scalar(scope + '/accuracy', accuracy)
    return accuracy

#%%
def build_output(self, inputs, inferences):
    scores = tf.nn.softmax(inferences, name='scores')
    tf.add_to_collection('outputs', scores)

    with tf.name_scope('labels'):
        label_indices = tf.arg_max(inferences, 1, name='arg_max')
        labels = self.classification.output_labels(label_indices)
        tf.add_to_collection('outputs', labels)

    keys = self.classification.keys(inputs)
    if keys:
        # Key feature, if it exists, is a passthrough to the output.
        # The use of identity is to name the tensor and correspondingly the output field.
        keys = tf.identity(keys, name='key')
        tf.add_to_collection('outputs', keys)

    return {
        'label': labels,
        'score': scores
    }
def __build_model(self):
    encoder_cell, decoder_cell = self.__build_rnn_cell()

    with tf.variable_scope('encoder_layer'):
        encoder_output, encoder_state = tf.nn.dynamic_rnn(
            cell=encoder_cell,
            inputs=self.encoder_input_embedding,
            dtype=tf.float32
        )
        tf.summary.histogram('encoder_output', encoder_output)
    del encoder_output

    with tf.variable_scope('decoder_layer'):
        output, decoder_state = tf.nn.dynamic_rnn(
            cell=decoder_cell,
            inputs=self.decoder_input_embedding,
            initial_state=encoder_state,
            dtype=tf.float32
        )
        tf.summary.histogram('decoder_layer', output)
    del decoder_state

    self.logit, self.cost, self.train_op = self.__build_ops(output)
    self.output = tf.arg_max(self.logit, 2)
    self.merged = tf.summary.merge_all()
def batch_iou_(anchors, bboxes):
    """ Compute IoU of two batches of boxes. Box format '[y_min, x_min, y_max, x_max]'.
    Args:
        anchors: known shape
        bboxes: dynamic shape
    Return:
        ious: 2-D with shape '[num_bboxes, num_anchors]'
        indices: [num_bboxes, 1]
    """
    num_anchors = anchors.get_shape().as_list()[0]
    ious_list = []
    for i in range(num_anchors):
        anchor = anchors[i]
        _ious = batch_iou(bboxes, anchor)
        ious_list.append(_ious)
    ious = tf.stack(ious_list, axis=0)
    ious = tf.transpose(ious)

    indices = tf.arg_max(ious, dimension=1)

    return ious, indices
def test_batch_iou(self):
    with self.test_session() as sess:
        anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
                              fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
        anchors_shape = anchors.get_shape().as_list()
        fea_h = anchors_shape[0]
        fea_w = anchors_shape[1]
        num_anchors = anchors_shape[2] * fea_h * fea_w
        anchors = tf.reshape(anchors, [num_anchors, 4])  # reshape anchors
        anchors = xywh_to_yxyx(anchors)
        bbox = tf.constant([0.75, 0.75, 0.2, 0.2], dtype=tf.float32)
        bbox = xywh_to_yxyx(bbox)
        iou = batch_iou(anchors, bbox)
        anchor_idx = tf.arg_max(iou, dimension=0)
        anchors, output, anchor_idx = sess.run([anchors, iou, anchor_idx])
        print(anchors)
        print(output)
        print(anchor_idx)
def test_image(path_image, num_class, path_classes, weights_path='Default'):
    # x = tf.placeholder(tf.float32, [1, 227, 227, 3])
    x = cv2.imread(path_image)
    x = cv2.resize(x, (227, 227))
    x = x.astype(np.float32)
    x = np.reshape(x, [1, 227, 227, 3])
    y = tf.placeholder(tf.float32, [None, num_class])
    model = AlexNet(x, 0.5, 1000, skip_layer='', weights_path=weights_path)
    score = model.fc8
    max = tf.arg_max(score, 1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        model.load_weights(sess)
        # score = model.fc8
        label_id = sess.run(max)[0]
        with open(path_classes) as f:
            lines = f.readlines()
            label = lines[label_id]
            print('image name is {} class_id is {} class_name is {}'.format(path_image, label_id, label))
        cv2.imshow(label, cv2.imread(path_image))
        cv2.waitKey(0)
        f.close()
def __call__(self, sess, epoch, iteration, model, loss):
    if iteration == 0 and epoch % self.at_every_epoch == 0:
        total = 0
        correct = 0
        for values in self.batcher:
            total += len(values[-1])
            feed_dict = {}
            for i in range(0, len(self.placeholders)):
                feed_dict[self.placeholders[i]] = values[i]
            truth = np.argmax(values[-1], 1)
            predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1), feed_dict=feed_dict)
            correct += sum(truth == predicted)
        acc = float(correct) / total
        self.update_summary(sess, iteration, ACCURACY_TRACE_TAG, acc)
        print("Epoch " + str(epoch) +
              "\tAcc " + str(acc) +
              "\tCorrect " + str(correct) + "\tTotal " + str(total))
def __call__(self, sess, epoch, iteration, model, loss):
    if iteration == 0 and epoch % self.at_every_epoch == 0:
        total = 0
        correct = 0
        truth_all = []
        pred_all = []
        for values in self.batcher:
            total += len(values[-1])
            feed_dict = {}
            for i in range(0, len(self.placeholders)):
                feed_dict[self.placeholders[i]] = values[i]
            # values[2], batch sampled from data[2], is a 3-length one-hot vector containing
            # the labels. This is to transform those back into integers.
            truth = np.argmax(values[-1], 1)
            predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1), feed_dict=feed_dict)
            correct += sum(truth == predicted)
            truth_all.extend(truth)
            pred_all.extend(predicted)
        print(classification_report(truth_all, pred_all, target_names=["NONE", "AGAINST", "FAVOR"], digits=4))
def center_loss(features, label, label_stats, centers, alfa):
    """The center loss.
    features: [batch_size, 512], the embedding of images.
    label: [batch_size, class_num], class label; the label index is 1, others are 0.
    label_stats: [batch_size, 1], the count of each label in the batch.
    centers: [class_num, 512], center points, one per class.
    alfa: float, updating rate of centers.
    """
    label = tf.arg_max(label, 1)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = alfa * (centers_batch - features)
    diff = diff / label_stats
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
def test(self):
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        self.saver_z.restore(sess, self.encode_z_model)
        self.saver_y.restore(sess, self.encode_y_model)

        realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50, self.batch_size)
        output_image, label_y = sess.run([self.fake_images, self.e_y],
                                         feed_dict={self.images: realbatch_array})

        # one-hot
        # label_y = tf.arg_max(label_y, 1)
        print(label_y)

        save_images(output_image, [8, 8], './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
        save_images(realbatch_array, [8, 8], './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))

        gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
        real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)

        cv2.imshow("test_EGan", gen_img)
        cv2.imshow("Real_Image", real_img)

        cv2.waitKey(-1)

        print("Test finish!")
def encode_y(self, x, weights, biases):
    c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))
    c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))
    c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

    result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))

    # y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)

    return result_y
def rpn_rois(self, gt_boxes, labels):
    self.proposals = tf.Print(self.proposals, [tf.shape(self.proposals)], message='proposal shape')
    filled_gt_boxes = tf.concat(1, [tf.zeros([tf.shape(gt_boxes)[0], 1], dtype=tf.float32), gt_boxes])
    all_rois = tf.concat(0, [self.proposals, filled_gt_boxes])
    overlaps = self._calculate_overlaps(all_rois[:, 1:5], gt_boxes)

    # because faster-rcnn process one image per batch, leave the num_images here to keep consistency.
    num_images = 1
    rois_per_image = tf.constant(cfg.TRAIN.BATCH_SIZE / num_images, dtype=tf.float32)
    fg_rois_per_image = tf.cast(tf.round(cfg.TRAIN.FG_FRACTION * rois_per_image), dtype=tf.int32)

    gt_assignment = tf.arg_max(overlaps, dimension=1)
    max_overlaps = tf.reduce_max(overlaps, reduction_indices=1)
    labels = tf.gather(labels, gt_assignment)

    fg_inds = tf.reshape(tf.cast(tf.where(max_overlaps >= cfg.TRAIN.FG_THRESH), dtype=tf.int32), [-1, ])
    fg_rois_this_image = tf.minimum(fg_rois_per_image, tf.shape(fg_inds)[0])
    # TODO: Check if fg_inds.size > 0:
    fg_inds = tf.random_crop(fg_inds, size=[fg_rois_this_image])

    bg_inds = tf.reshape(tf.cast(tf.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
                                          (max_overlaps >= cfg.TRAIN.BG_THRESH_LO)),
                                 dtype=tf.int32), [-1, ])
    bg_rois_this_image = tf.minimum(tf.cast(rois_per_image, dtype=tf.int32) - fg_rois_this_image,
                                    tf.shape(bg_inds)[0])
    # TODO: Check if bg_inds.size > 0:
    bg_inds = tf.random_crop(bg_inds, size=[bg_rois_this_image])

    keep_inds = tf.concat(0, [fg_inds, bg_inds])
    self.train_labels = tf.concat(0, (tf.gather(labels, fg_inds),
                                      tf.zeros((tf.shape(bg_inds)[0],), dtype=tf.int32)))
    self.train_rois = tf.gather(all_rois, keep_inds)

    bbox_target_data = self._compute_targets(
        self.train_rois[:, 1:5],
        tf.gather(gt_boxes, tf.gather(gt_assignment, keep_inds)),
        self.train_labels)
    return self.train_rois, self.train_labels, bbox_target_data

    # TODO: implement this
    # self.bbox_targets, self.bbox_inside_weights = \
    #     self._get_bbox_regression_labels(bbox_target_data, num_classes)
def predict(images, exp_config):
    '''
    Returns the prediction for an image given a network from the model zoo
    :param images: An input image tensor
    :param inference_handle: A model function from the model zoo
    :return: A prediction mask, and the corresponding softmax output
    '''
    logits = exp_config.model_handle(images, training=tf.constant(False, dtype=tf.bool),
                                     nlabels=exp_config.nlabels)
    softmax = tf.nn.softmax(logits)
    mask = tf.arg_max(softmax, dimension=-1)

    return mask, softmax
def evaluation(logits, labels, images, nlabels, loss_type):
    '''
    A function for evaluating the performance of the network on a minibatch. This function returns the loss and the
    current foreground Dice score, and also writes example segmentations and images to tensorboard.
    :param logits: Output of network before softmax
    :param labels: Ground-truth label mask
    :param images: Input image mini batch
    :param nlabels: Number of labels in the dataset
    :param loss_type: Which loss should be evaluated
    :return: The loss without weight decay, the foreground dice of a minibatch
    '''
    mask = tf.arg_max(tf.nn.softmax(logits, dim=-1), dimension=-1)  # was 3
    mask_gt = labels

    tf.summary.image('example_gt', prepare_tensor_for_summary(mask_gt, mode='mask', nlabels=nlabels))
    tf.summary.image('example_pred', prepare_tensor_for_summary(mask, mode='mask', nlabels=nlabels))
    tf.summary.image('example_zimg', prepare_tensor_for_summary(images, mode='image'))

    total_loss, nowd_loss, weights_norm = loss(logits, labels, nlabels=nlabels, loss_type=loss_type)

    cdice_structures = losses.per_structure_dice(logits, tf.one_hot(labels, depth=nlabels))
    cdice_foreground = cdice_structures[:, 1:]
    cdice = tf.reduce_mean(cdice_foreground)

    return nowd_loss, cdice
def loop_function(self, prev, _):
    """
    :param prev: the output at time step t-1
    :param _:
    :return: the embedding of the t-1 output
    """
    prev = tf.add(tf.matmul(prev, self.softmax_w), self.softmax_b)
    prev_sympol = tf.arg_max(prev, 1)

    emb_prev = tf.nn.embedding_lookup(self.target_embedding, prev_sympol)
    return emb_prev
def _argmax(self, tensor):
    """ ArgMax
    Args:
        tensor : 2D - Tensor (Height x Width : 64x64)
    Returns:
        arg : Tuple of max position
    """
    resh = tf.reshape(tensor, [-1])
    argmax = tf.arg_max(resh, 0)
    return (argmax // tensor.get_shape().as_list()[0],
            argmax % tensor.get_shape().as_list()[0])
def _create_joint_tensor(self, tensor, name='joint_tensor', debug=False):
    """ TensorFlow Computation of Joint Position
    Args:
        tensor : Prediction Tensor Shape [nbStack x 64 x 64 x outDim] or [64 x 64 x outDim]
        name : name of the tensor
    Returns:
        out : Tensor of joints position
    Comment:
        Genuinely agreeing this tensor is UGLY. If you don't trust me, look at
        the 'prediction' node in TensorBoard.
        In my defence, I implemented it to compare computation times with numpy.
    """
    with tf.name_scope(name):
        shape = tensor.get_shape().as_list()
        if debug:
            print(shape)
        if len(shape) == 3:
            resh = tf.reshape(tensor[:, :, 0], [-1])
        elif len(shape) == 4:
            resh = tf.reshape(tensor[-1, :, :, 0], [-1])
        if debug:
            print(resh)
        arg = tf.arg_max(resh, 0)
        if debug:
            print(arg, arg.get_shape(), arg.get_shape().as_list())
        joints = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]),
                                          arg % tf.to_int64(shape[1])], axis=-1), axis=0)
        for i in range(1, shape[-1]):
            if len(shape) == 3:
                resh = tf.reshape(tensor[:, :, i], [-1])
            elif len(shape) == 4:
                resh = tf.reshape(tensor[-1, :, :, i], [-1])
            arg = tf.arg_max(resh, 0)
            j = tf.expand_dims(tf.stack([arg // tf.to_int64(shape[1]),
                                         arg % tf.to_int64(shape[1])], axis=-1), axis=0)
            joints = tf.concat([joints, j], axis=0)
        return tf.identity(joints, name='joints')
def __init__(self, is_training=True):
    self.graph = tf.Graph()
    with self.graph.as_default():
        if is_training:
            self.x, self.y, self.num_batch = get_batch()
        else:  # Evaluation
            self.x = tf.placeholder(tf.int32, shape=(None, hp.max_len,))
            self.y = tf.placeholder(tf.int32, shape=(None, hp.max_len,))

        # Character Embedding for x
        self.enc = embed(self.x, len(roma2idx), hp.embed_size, scope="emb_x")

        # Encoder
        self.memory = encode(self.enc, is_training=is_training)

        # Character Embedding for decoder_inputs
        self.decoder_inputs = shift_by_one(self.y)
        self.dec = embed(self.decoder_inputs, len(surf2idx), hp.embed_size, scope="emb_decoder_inputs")

        # Decoder
        self.outputs = decode(self.dec, self.memory, len(surf2idx), is_training=is_training)  # (N, T', hp.n_mels*hp.r)
        self.logprobs = tf.log(tf.nn.softmax(self.outputs) + 1e-10)
        self.preds = tf.arg_max(self.outputs, dimension=-1)

        if is_training:
            self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs)
            self.istarget = tf.to_float(tf.not_equal(self.y, tf.zeros_like(self.y)))  # masking
            self.mean_loss = tf.reduce_sum(self.loss * self.istarget) / (tf.reduce_sum(self.istarget))

            # Training Scheme
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr)
            self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)

            # Summary
            tf.summary.scalar('mean_loss', self.mean_loss)
            self.merged = tf.summary.merge_all()
def likelihood_classification(w, n_classes, n_samples):
    # w has shape ()
    w = tf.reshape(w, [n_classes, n_samples])
    ll = predictive_ll(w)
    return ll
    # return tf.arg_max(ll, 0)
def get_plots_out(self, sample=0, frames=slice(0, None)):
    """Prepare to plot outputs for sample 'sample' with 'frames' frames"""

    plotsink = list()
    plot_dict = dict()
    plot_range_dict = dict()

    # Prepare output for plotting (plot_dict value is [tensor, [min, max]])
    plot_dict['{}_out'.format(self.name)] = tf.arg_max(self.out[sample, frames, :, :, :], 3)
    plot_range_dict['{}_out'.format(self.name)] = [0, self.n_units]
    plotsink.append(['{}_out'.format(self.name)])

    return plot_dict, plotsink, plot_range_dict
def _preprocess(self, logits, targets):
    # Get most probable class of the output
    logits = tf.arg_max(logits, dimension=1)
    # If one-hot provided, transform into class
    if targets.get_shape().ndims > 2:
        targets = tf.arg_max(targets, dimension=1)
    # Erase singleton dimension if it exists
    if targets.get_shape().ndims > 1:
        targets = tf.squeeze(targets, axis=1)
    return logits, targets
def my_plot(self, session, fd, y, Y, p, p1, Z_placeholder, additional_placeholders):
    max_class = tf.arg_max(p1, dimension=1)
    max_class_val = (session.run(max_class, feed_dict=fd)).astype(np.int32)
    y_val = self._Z[max_class_val]
    Y = sort_by_p(Y, p)
    y_val = sort_by_p(y_val, p)
    p = sorted(p)
    plt.plot(p, Y, 'g--', p, y_val, 'r')
    self.my_show()
def buildEvalGraph(self):
    with tf.variable_scope('eval_variables', reuse=False):
        self.logits = tf.nn.softmax(self.layers[-1].activations, name='logits')
        self.correct_predication = tf.equal(tf.arg_max(self.logits, 1), tf.arg_max(self.output, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_predication, tf.float32))
def accuracy(labels_placeholder, inference):
    with tf.name_scope('test'):
        correct_prediction = tf.equal(
            tf.arg_max(inference, 1), tf.argmax(labels_placeholder, 1)
        )
        acc = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        tf.scalar_summary('accuracy', acc)
    return acc
def accuracy(logits, targets_pl, one_hot=False):
    targets = tf.to_int64(targets_pl)
    if one_hot:
        # compare the indices of the outputs. For a correct prediction they should be the same
        correct_prediction = tf.equal(tf.arg_max(logits, 1), tf.arg_max(targets, 1), name='accuracy_equals_oh')
    else:
        # compare the indices of the outputs with the correct label which is a number here.
        correct_prediction = tf.equal(tf.arg_max(logits, 1), targets, name='accuracy_equals')
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float32'), name='accuracy_mean')
    tf.summary.scalar('accuracy_mean', accuracy)
    return accuracy
def init_train_test_op(self):
    # loss function
    self.loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.read_out_logits))
    # training op
    self.training_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_function)
    self.predict_op = tf.arg_max(self.read_out_logits, 1)
    # predict
    predict_matches = tf.equal(tf.arg_max(self.y, dimension=1),
                               tf.arg_max(self.read_out_logits, 1))
    # accuracy metric
    self.accuracy = tf.reduce_mean(tf.cast(predict_matches, tf.float32))
def init_train_test_op(self):
    # some loss functions and all -> total loss
    self.loss_function = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.read_out_logits))
    # training op
    self.training_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_function)
    self.predict_op = tf.arg_max(self.read_out_logits, 1)
    # predict
    predict_matches = tf.equal(tf.arg_max(self.y, dimension=1),
                               tf.arg_max(self.read_out_logits, 1))
    # accuracy metric
    self.accuracy = tf.reduce_mean(tf.cast(predict_matches, tf.float32))
def calc_reward(outputs):
    # consider the action at the last time step
    outputs = outputs[-1]  # look at ONLY THE END of the sequence
    outputs = tf.reshape(outputs, (batch_size, cell_out_size))

    # get the baseline
    b = tf.pack(baselines)
    b = tf.concat(2, [b, b])
    b = tf.reshape(b, (batch_size, (nGlimpses) * 2))
    no_grad_b = tf.stop_gradient(b)

    # get the action (classification)
    p_y = tf.nn.softmax(tf.matmul(outputs, Wa_h_a) + Ba_h_a)
    max_p_y = tf.arg_max(p_y, 1)
    correct_y = tf.cast(labels_placeholder, tf.int64)

    # reward for all examples in the batch
    R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32)
    reward = tf.reduce_mean(R)  # mean reward
    R = tf.reshape(R, (batch_size, 1))
    R = tf.tile(R, [1, (nGlimpses) * 2])

    # get the location
    p_loc = gaussian_pdf(mean_locs, sampled_locs)
    p_loc = tf.tanh(p_loc)
    p_loc_orig = p_loc
    p_loc = tf.reshape(p_loc, (batch_size, (nGlimpses) * 2))

    # define the cost function
    J = tf.concat(1, [tf.log(p_y + SMALL_NUM) * (onehot_labels_placeholder),
                      tf.log(p_loc + SMALL_NUM) * (R - no_grad_b)])
    J = tf.reduce_sum(J, 1)
    J = J - tf.reduce_sum(tf.square(R - b), 1)
    J = tf.reduce_mean(J, 0)
    cost = -J

    # define the optimizer
    optimizer = tf.train.MomentumOptimizer(lr, momentumValue)
    train_op = optimizer.minimize(cost, global_step)

    return cost, reward, max_p_y, correct_y, train_op, b, tf.reduce_mean(b), tf.reduce_mean(R - b), lr
def num_correct_prediction(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Return:
        the number of correct predictions
    """
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct

#%%
def _setup_net(self):
    with self.cnn_net.variable_scope([self.data_batches]) as variable_scope:
        end_points_collection = self.cnn_net.end_points_collection_name(variable_scope)
        net, _ = self.cnn_net.cnn_layers(self.data_batches, variable_scope, end_points_collection)
        net = slim.fully_connected(net, self.embedding_size, activation_fn=None, scope='fc0')
        net = rnn.rnn_layers(net, tf.arg_max(self.numbers_label_batches, dimension=2), self.embedding_size)
        net = tf.reshape(net, [-1, self.embedding_size])
        self.model_output = slim.fully_connected(net, 11, activation_fn=None, scope='fc4')
def batch_iou_fast(anchors, bboxes):
    """ Compute IoU of two batches of boxes. Box format '[y_min, x_min, y_max, x_max]'.
    Args:
        anchors: known shape
        bboxes: dynamic shape
    Return:
        ious: 2-D with shape '[num_bboxes, num_anchors]'
        indices: [num_bboxes, 1]
    """
    num_anchors = anchors.get_shape().as_list()[0]
    tensor_num_bboxes = tf.shape(bboxes)[0]
    indices = tf.reshape(tf.range(tensor_num_bboxes), shape=[-1, 1])
    indices = tf.reshape(tf.stack([indices] * num_anchors, axis=1), shape=[-1, 1])
    bboxes_m = tf.gather_nd(bboxes, indices)

    anchors_m = tf.tile(anchors, [tensor_num_bboxes, 1])

    lr = tf.maximum(
        tf.minimum(bboxes_m[:, 3], anchors_m[:, 3]) -
        tf.maximum(bboxes_m[:, 1], anchors_m[:, 1]),
        0
    )
    tb = tf.maximum(
        tf.minimum(bboxes_m[:, 2], anchors_m[:, 2]) -
        tf.maximum(bboxes_m[:, 0], anchors_m[:, 0]),
        0
    )

    intersection = tf.multiply(tb, lr)

    union = tf.subtract(
        tf.multiply((bboxes_m[:, 3] - bboxes_m[:, 1]), (bboxes_m[:, 2] - bboxes_m[:, 0])) +
        tf.multiply((anchors_m[:, 3] - anchors_m[:, 1]), (anchors_m[:, 2] - anchors_m[:, 0])),
        intersection
    )

    ious = tf.div(intersection, union)

    ious = tf.reshape(ious, shape=[tensor_num_bboxes, num_anchors])

    indices = tf.arg_max(ious, dimension=1)

    return ious, indices
def __init__(self, sent_length, class_num, embedding_size, initial_embedding_dict, l2_lambda, hidden_size):
    self.input_x = tf.placeholder(tf.int32, [None, sent_length], name="input_x")
    self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
    self.dropout_keep_prob_1 = tf.placeholder(tf.float32, name="dropout_keep_prob_1")
    self.dropout_keep_prob_2 = tf.placeholder(tf.float32, name="dropout_keep_prob_2")

    l2_loss = tf.constant(0.0)

    with tf.name_scope("embedding"):
        self.embedding_dict = tf.Variable(initial_embedding_dict, name="Embedding", dtype=tf.float32)
        self.embedded_chars = tf.nn.embedding_lookup(self.embedding_dict, self.input_x)
        # unstack embedded input
        self.unstacked = tf.unstack(self.embedded_chars, sent_length, 1)

    with tf.name_scope("lstm"):
        # create a LSTM network
        lstm_cell = rnn.BasicLSTMCell(hidden_size)
        self.output, self.states = rnn.static_rnn(lstm_cell, self.unstacked, dtype=tf.float32)
        self.pooling = tf.reduce_mean(self.output, 0)

    with tf.name_scope("linear"):
        weights = tf.get_variable(
            "W",
            shape=[hidden_size, class_num],
            initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
        l2_loss += tf.nn.l2_loss(weights)
        l2_loss += tf.nn.l2_loss(bias)
        self.linear_result = tf.nn.xw_plus_b(self.pooling, weights, bias, name="linear")
        self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

    with tf.name_scope("loss"):
        losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
        self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

    with tf.name_scope("accuracy"):
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def __init__(self, sent_length, class_num, embedding_size, l2_lambda):
    self.input_x = tf.placeholder(tf.float32, [None, sent_length, embedding_size], name="input_x")
    self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    l2_loss = tf.constant(0.0)

    with tf.name_scope("flat"):
        self.flatted = tf.reshape(self.input_x, [-1, sent_length * embedding_size])

    with tf.name_scope("linear"):
        weights = tf.get_variable(
            "W",
            shape=[sent_length * embedding_size, class_num],
            initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
        l2_loss += tf.nn.l2_loss(weights)
        l2_loss += tf.nn.l2_loss(bias)
        self.linear_result = tf.nn.xw_plus_b(self.flatted, weights, bias, name="linear")
        self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

    with tf.name_scope("loss"):
        losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
        self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

    with tf.name_scope("accuracy"):
        correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def __init__(self, input, n_in, n_out):
    self.W = tf.Variable(tf.zeros(shape=(n_in, n_out)), name="LR_W")
    self.b = tf.Variable(tf.zeros(shape=(n_out,)), name="LR_b")
    self.linear = tf.add(tf.matmul(input, self.W), self.b)
    self.p_y_given_x = tf.nn.softmax(tf.add(tf.matmul(input, self.W), self.b))
    self.y_pred = tf.arg_max(self.p_y_given_x, 1)
def errors(self, y):
    return tf.reduce_mean(tf.cast(tf.not_equal(self.y_pred, tf.arg_max(y, 1)), dtype=tf.float32))
def f1_score(logits, targets_pl, one_hot=False):
    targets = tf.to_int64(targets_pl)
    y_predicted = tf.arg_max(logits, 1)
    if one_hot:
        y_true = tf.arg_max(targets, 1)
    else:
        y_true = logits
    # get true positives (by multiplying the predicted and actual labels we will only get a 1 if both labels are 1)
    tp = tf.count_nonzero(y_predicted * y_true)
    # get true negatives (basically the same as tp only the inverse)
    tn = tf.count_nonzero((y_predicted - 1) * (y_true - 1))
    fp = tf.count_nonzero(y_predicted * (y_true - 1))
    fn = tf.count_nonzero((y_predicted - 1) * y_true)

    # Calculate accuracy, precision, recall and F1 score.
    accuracy = (tp + tn) / (tp + fp + fn + tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = (2 * precision * recall) / (precision + recall)
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('precision', precision)
    tf.summary.scalar('recall', recall)
    tf.summary.scalar('f1-score', f1_score)
    f1_score = tf.reduce_mean(tf.cast(f1_score, 'float32'), name='f1_score_reduce_mean')
    return f1_score
def get_pre_y(self):
    # TODO ???
    # pre_y = tf.reshape(tf.round(tf.sigmoid(self._output)), [-1])
    pre_y = tf.arg_max(input=self._output, dimension=1)
    return pre_y
def get_class(self, index):
    label = self.db.test.labels[index:index + 1]
    return self.sess.run(tf.arg_max(label, 1))