The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.local_variables_initializer().
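Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share (assuming TF 1.x graph mode; the tf.metrics.mean call and the placeholder are illustrative, not taken from the examples below): streaming metrics keep their running totals in the local-variables collection, which tf.global_variables_initializer() does not cover, so they must be initialized separately.

import tensorflow as tf

# Minimal sketch (TF 1.x graph mode): tf.metrics.mean creates *local*
# variables for its running total and count, so they must be initialized
# with tf.local_variables_initializer() before the first update.
values = tf.placeholder(tf.float32)        # illustrative input
mean, update_op = tf.metrics.mean(values)  # creates local metric variables

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    for v in [1.0, 2.0, 3.0]:
        sess.run(update_op, feed_dict={values: v})
    print(sess.run(mean))  # 2.0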
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}
    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = hyp.split(" ")
            ref = ref.split(" ")
            sess.run(update_op, {
                predictions["predicted_tokens"]: [hyp],
                labels["target_tokens"]: [ref]
            })
            scores.append(sess.run(value))

        for score, expected in zip(scores, expected_scores):
            np.testing.assert_almost_equal(score, expected, decimal=2)
def omniglot():
    sess = tf.InteractiveSession()

    """
    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)),
                    dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)),
                       dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')
    """

    def update_tensor(V, dim2, val):  # Update tensor V, with index (:, dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)

        def body(_, elems):
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(
                tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                [0], [v.get_shape().as_list()[0]])

        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:],
                                            dtype=tf.float32),
                    name="Scan_Update")
        return Z
def test_reading_without_targets(self):
    num_epochs = 50
    data_provider = make_parallel_data_provider(
        data_sources_source=[self.source_file.name],
        data_sources_target=None,
        num_epochs=num_epochs,
        shuffle=True)

    item_keys = list(data_provider.list_items())
    item_values = data_provider.get(item_keys)
    items_dict = dict(zip(item_keys, item_values))

    self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        with tf.contrib.slim.queues.QueueRunners(sess):
            item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]

    for item_dict in item_dicts_:
        self.assertEqual(item_dict["source_len"], 2)
        item_dict["source_tokens"] = np.char.decode(
            item_dict["source_tokens"].astype("S"), "utf-8")
        self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
def predictPL(self):
    B = self.flags.batch_size
    W, H, C = self.flags.width, self.flags.height, self.flags.color
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, H, W, C])

    #with open(self.flags.pred_path, 'w') as f:
    #    pass

    self._build(inputs, resize=False)
    counter = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        for imgs, imgnames in self.DATA.test_generator():
            pred = sess.run(self.logit, feed_dict={inputs: imgs})
            np.save("%s/%d.npy" % (self.flags.pred_path, counter),
                    {"pred": pred, "name": imgnames})
            counter += len(imgs)
            if counter // B % 10 == 0:  # integer division: report every 10 batches
                print_mem_time("%d images predicted" % counter)

# train with placeholders
def predict_from_placeholder(self, activation=None):
    self._build()
    self._get_summary()
    if activation is not None:
        self.logit = self._activate(self.logit, activation)
    with open(self.flags.pred_path, 'w') as f:
        pass
    count = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        if self.flags.log_path and self.flags.visualize is not None:
            summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
        for batch in self._batch_gen_test():
            x, _, epoch = batch
            if self.flags.log_path and self.flags.visualize is not None:
                summary, pred = sess.run([self.summ_op, self.logit],
                                         feed_dict={self.inputs: x, self.is_training: 0})
                summary_writer.add_summary(summary, count)
            else:
                pred = sess.run(self.logit,
                                feed_dict={self.inputs: x, self.is_training: 0})
            count += 1
            if count % self.flags.verbosity == 0:
                print_mem_time("Epoch %d Batch %d " % (epoch, count))
            self.write_pred(pred)
def input_pipeline(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
    # string_input_producer creates a local epoch-counter variable when
    # num_epochs is not None, so local variables must be initialized here or
    # reading will fail with an uninitialized-variable error.
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, img_mask_batch, loss_mask_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, img_mask_batch, loss_mask_batch
def input_pipeline_dis(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
    # As above: initialize local variables when num_epochs is not None, or
    # reading will fail with an uninitialized-variable error.
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format_dis(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, label_batch, text_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, label_batch, text_batch
def examine_batches(features_batch, targets_batch):
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            for it in range(5000):
                features, targets = sess.run([features_batch, targets_batch])
                if it % 100 == 0:
                    LOGGER.debug(it)
                    LOGGER.debug(
                        len(features),
                        features[0].shape,
                        np.max(features[0][0][7][:])
                    )
                    LOGGER.debug(np.argmax(targets, axis=1))
        except tf.errors.OutOfRangeError:
            LOGGER.info('Training stopped - queue is empty.')
        except Exception as e:
            LOGGER.error(e)
        finally:
            coord.request_stop()
            coord.join(threads)
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
def evaluate():
    """Eval ocr for a number of steps."""
    with tf.Graph().as_default() as g:
        images, labels, seq_lengths = ocr.inputs()
        logits, timesteps = ocr.inference(images, FLAGS.eval_batch_size, train=True)
        ler = ocr.create_label_error_rate(logits, labels, timesteps)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        config = tf.ConfigProto(device_count={'GPU': 0})
        sess = tf.Session(config=config)
        sess.run(init_op)

        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, ler, summary_op)
            if FLAGS.run_once:
                break
            # print("Waiting for next evaluation for " + str(FLAGS.eval_interval_secs) + " sec")
            time.sleep(FLAGS.eval_interval_secs)
def test_batch(self):
    with self.test_session() as sess:
        df = pd.DataFrame(
            ['TQ2379_0_0_B TQ2379_0_0.jpg F 1776:520|1824:125'.split(),
             'TQ2379_0_0_B TQ2379_0_0.jpg F 1776:500|1824:125'.split(),
             ],
            columns=['id', 'image', 'class', 'detections'])
        df = extract_crops_sw(df, 250, False, 250)
        batch = create_batch(df, False)

        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        images, labels = sess.run(batch)
        self.assertListEqual(list(labels[0, 2, 3, :5]), [1., 74., 125., 30., 30.])
        self.assertTrue(labels[0, 2, 3, 5 + 5])

        coord.request_stop()
        coord.join(threads)
def inputs_test(filename, batch_size, num_epochs, num_threads,
                imshape, num_examples_per_epoch=128):
    # Note: this only creates the local-variables init op; the caller is
    # expected to actually run the initializer in its session.
    tf.local_variables_initializer()
    if not num_epochs:
        num_epochs = None

    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs, name='string_input_producer')
        image, label = reader.read_and_decode_wholefile(filename_queue, imshape, normalize=True)
    images, sparse_labels = tf.train.batch([image, label], batch_size=batch_size)
    return images, sparse_labels
def main():
    dqn = DQN(ENV_NAME, DOUBLE_DQN, DUELING_DQN, PER, TRAINING, RENDER)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init_op)
        # tries to restore a trained model and play!
        dqn.util.restore_graph(sess, forTrain=TRAINING)
        for ep in tqdm(range(MAX_EPISODES)):  # for episodes
            print("Episode no. {} :".format(ep))
            dqn.playing(sess)
            print('Episode %d: totalEpReward = %.2f , took: %.3f mins' %
                  (ep, dqn.totalReward, dqn.duration / 60.0))

# RUN...
def main():
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    dummy_reader = Dataset_reader_classification(filename=_DATASET_PATH_,
                                                 num_classes=_CLASSES_)
    #dummy_reader.pre_process_image(writer_pre_proc)

    with tf.Session() as sess:
        init_op.run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = dummy_reader.next_batch(_BATCH_SIZE_)
        meanimage = sess.run([dummy_reader.mean_image])[0]
        print(meanimage)
        print(images[0])
        if _SHOW_IMAGES_:
            for image in images:
                cv2.imshow('Image', image)
                cv2.imshow('Meanimage', meanimage)
                cv2.waitKey(0)
        coord.request_stop()
        coord.join(threads)
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}
    value, update_op = metric_spec.create_metric_ops(
        None, labels, predictions)

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = hyp.split(" ")
            ref = ref.split(" ")
            sess.run(update_op, {
                predictions["predicted_tokens"]: [hyp],
                labels["target_tokens"]: [ref]
            })
            scores.append(sess.run(value))

        for score, expected in zip(scores, expected_scores):
            self.assertNDArrayNear(score, expected, 0.01)
def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    # Create Checkpoint and log directories
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to checkpoint directory
    saver = tf.train.Saver()
    with self.test_session() as sess:
        init_op.run()
        saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = slim.evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
    with self.test_session() as sess:
        sess.run(init_op)
        saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = slim.evaluation.evaluate_once(
        '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        mean, update_op = metrics.streaming_mean(values)

        sess.run(tf.local_variables_initializer())

        self.assertAlmostEqual(0.5, sess.run(update_op), 5)
        self.assertAlmostEqual(1.475, sess.run(update_op), 5)
        self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
        self.assertAlmostEqual(1.65, sess.run(update_op), 5)

        self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        # Create the queue that populates the weighted labels.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [1])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [1])
        weights = weights_queue.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for _ in range(4):
            update_op.eval()
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
        # Values are fed through a placeholder rather than a queue.
        feed_values = (
            (0, 1),
            (-4.2, 9.1),
            (6.5, 0),
            (-3.2, 4.0)
        )
        values = tf.placeholder(dtype=tf.float32)

        # Create the queue that populates the weighted labels.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [1])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [0])
        _enqueue_vector(sess, weights_queue, [1])
        weights = weights_queue.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for i in range(4):
            update_op.eval(feed_dict={values: feed_values[i]})
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        # Create the queue that populates the weighted labels.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, weights_queue, [1, 1])
        _enqueue_vector(sess, weights_queue, [1, 0])
        _enqueue_vector(sess, weights_queue, [0, 1])
        _enqueue_vector(sess, weights_queue, [0, 0])
        weights = weights_queue.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for _ in range(4):
            update_op.eval()
        self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def testMultiDimensional(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
        _enqueue_vector(sess, values_queue,
                        [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                        shape=(2, 2, 2))
        _enqueue_vector(sess, values_queue,
                        [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                        shape=(2, 2, 2))
        values = values_queue.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values)

        sess.run(tf.local_variables_initializer())
        for _ in range(2):
            sess.run(update_op)
        self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                            sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values)

        sess.run(tf.local_variables_initializer())

        self.assertAllClose([[0, 1]], sess.run(update_op), 5)
        self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
        self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)

        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        # Create the queue that populates the weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        _enqueue_vector(sess, weights_queue, [[1]])
        _enqueue_vector(sess, weights_queue, [[0]])
        _enqueue_vector(sess, weights_queue, [[1]])
        _enqueue_vector(sess, weights_queue, [[0]])
        weights = weights_queue.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
    with self.test_session() as sess:
        # Create the queue that populates the values.
        values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, values_queue, [0, 1])
        _enqueue_vector(sess, values_queue, [-4.2, 9.1])
        _enqueue_vector(sess, values_queue, [6.5, 0])
        _enqueue_vector(sess, values_queue, [-3.2, 4.0])
        values = values_queue.dequeue()

        # Create the queue that populates the weights.
        weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        _enqueue_vector(sess, weights_queue, [1, 1])
        _enqueue_vector(sess, weights_queue, [1, 0])
        _enqueue_vector(sess, weights_queue, [0, 1])
        _enqueue_vector(sess, weights_queue, [0, 0])
        weights = weights_queue.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    accuracy, update_op = metrics.streaming_accuracy(predictions, labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_accuracy = accuracy.eval()
        for _ in range(10):
            self.assertEqual(initial_accuracy, accuracy.eval())
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    predictions = tf.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1)  # shape 3, 1

    weights = [[100], [1], [1]]  # shape 3, 1
    weights_placeholder = tf.placeholder(dtype=tf.int32, name='weights')
    feed_dict = {weights_placeholder: weights}

    with self.test_session() as sess:
        accuracy, update_op = metrics.streaming_accuracy(
            predictions, labels, weights_placeholder)

        sess.run(tf.local_variables_initializer())
        # If streaming_accuracy did not flatten the weight, accuracy would be
        # 0.33333334 due to an intended broadcast of weight. Due to flattening,
        # it will be higher than .95.
        self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
        self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    precision, update_op = metrics.streaming_precision(predictions, labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_precision = precision.eval()
        for _ in range(10):
            self.assertEqual(initial_precision, precision.eval())
def testWeighted1d_placeholders(self):
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[2], [5]]))

    with self.test_session():
        tf.local_variables_initializer().run()
        weighted_tp = 2.0 + 5.0
        weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
        expected_precision = weighted_tp / weighted_positives
        self.assertAlmostEqual(
            expected_precision, update_op.eval(feed_dict=feed_dict))
        self.assertAlmostEqual(
            expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d_placeholders(self):
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels,
        weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
        tf.local_variables_initializer().run()
        weighted_tp = 3.0 + 4.0
        weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
        expected_precision = weighted_tp / weighted_positives
        self.assertAlmostEqual(
            expected_precision, update_op.eval(feed_dict=feed_dict))
        self.assertAlmostEqual(
            expected_precision, precision.eval(feed_dict=feed_dict))
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_recall = recall.eval()
        for _ in range(10):
            self.assertEqual(initial_recall, recall.eval())
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    auc, update_op = metrics.streaming_auc(predictions, labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_auc = auc.eval()
        for _ in range(10):
            self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_specificity = specificity.eval()
        for _ in range(10):
            self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testWeighted1d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [3]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        self.assertAlmostEqual(0.6, sess.run(update_op))
        self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
        self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_sensitivity = sensitivity.eval()
        for _ in range(10):
            self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testWeighted(self):
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        self.assertAlmostEqual(0.675, sess.run(update_op))
        self.assertAlmostEqual(0.675, specificity.eval())

# TODO(nsilberman): Break this up into two sets of tests.
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates, then verify idempotency.
        sess.run([prec_op, rec_op])
        initial_prec = prec.eval()
        initial_rec = rec.eval()
        for _ in range(10):
            sess.run([prec_op, rec_op])
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
        predictions = tf.constant(inputs, dtype=tf.float32)
        labels = tf.constant(inputs)
        thresholds = [0.5]
        prec, prec_op = metrics.streaming_precision_at_thresholds(
            predictions, labels, thresholds)
        rec, rec_op = metrics.streaming_recall_at_thresholds(
            predictions, labels, thresholds)

        sess.run(tf.local_variables_initializer())
        sess.run([prec_op, rec_op])

        self.assertEqual(1, prec.eval())
        self.assertEqual(1, rec.eval())
def testAllIncorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
        predictions = tf.constant(inputs, dtype=tf.float32)
        labels = tf.constant(1 - inputs, dtype=tf.float32)
        thresholds = [0.5]
        prec, prec_op = metrics.streaming_precision_at_thresholds(
            predictions, labels, thresholds)
        rec, rec_op = metrics.streaming_recall_at_thresholds(
            predictions, labels, thresholds)

        sess.run(tf.local_variables_initializer())
        sess.run([prec_op, rec_op])

        self.assertAlmostEqual(0, prec.eval())
        self.assertAlmostEqual(0, rec.eval())
def testExtremeThresholds(self):
    with self.test_session() as sess:
        predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
        labels = tf.constant([0, 1, 1, 1], shape=(1, 4))
        thresholds = [-1.0, 2.0]  # lower/higher than any values

        prec, prec_op = metrics.streaming_precision_at_thresholds(
            predictions, labels, thresholds)
        rec, rec_op = metrics.streaming_recall_at_thresholds(
            predictions, labels, thresholds)

        [prec_low, prec_high] = tf.split(prec, 2, axis=0)
        [rec_low, rec_high] = tf.split(rec, 2, axis=0)

        sess.run(tf.local_variables_initializer())
        sess.run([prec_op, rec_op])

        self.assertAlmostEqual(0.75, prec_low.eval())
        self.assertAlmostEqual(0.0, prec_high.eval())
        self.assertAlmostEqual(1.0, rec_low.eval())
        self.assertAlmostEqual(0.0, rec_high.eval())
def testSingleUpdateKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())
        self.assertEqual(0.5, sess.run(update_op))
        self.assertEqual(0.5, recall.eval())
        self.assertEqual(0.5, sess.run(sp_update_op))
        self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=3)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=3)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())
        self.assertEqual(1.0, sess.run(update_op))
        self.assertEqual(1.0, recall.eval())
        self.assertEqual(1.0, sess.run(sp_update_op))
        self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    weights = tf.constant([0, 1, 0, 1], shape=(self._batch_size,),
                          dtype=tf.float32)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, weights=weights)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2,
        weights=weights)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())
        self.assertEqual(1.0, sess.run(update_op))
        self.assertEqual(1.0, recall.eval())
        self.assertEqual(1.0, sess.run(sp_update_op))
        self.assertEqual(1.0, sp_recall.eval())
def testValueTensorIsIdempotent(self):
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_absolute_error(predictions, labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())

        # Run several updates.
        for _ in range(10):
            sess.run(update_op)

        # Then verify idempotency.
        initial_error = error.eval()
        for _ in range(10):
            self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
    expected_error = np.mean(
        np.divide(np.absolute(np_predictions - np_labels), np_labels))

    predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(1, 4))

    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=labels)

    with self.test_session() as sess:
        sess.run(tf.local_variables_initializer())
        self.assertEqual(expected_error, sess.run(update_op))
        self.assertEqual(expected_error, error.eval())