The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.FIFOQueue().
def testSimple(self):
    """Write three CIFAR-10 records to a temp file, read them back through a
    FIFOQueue with cifar10_input.read_cifar10, and verify key/label/image.

    Also checks that reading past the end raises OutOfRangeError.
    """
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    # Fix: use a context manager so the handle is closed (and the bytes are
    # flushed to disk) before the reader opens the file; the original
    # `open(...).write(...)` leaked the handle and relied on GC to flush.
    with open(filename, "wb") as f:
        f.write(contents)
    with self.test_session() as sess:
        q = tf.FIFOQueue(99, [tf.string], shapes=())
        q.enqueue([filename]).run()
        q.close().run()
        result = cifar10_input.read_cifar10(q)

        for i in range(3):
            key, label, uint8image = sess.run([
                result.key, result.label, result.uint8image])
            self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
            self.assertEqual(labels[i], label)
            self.assertAllEqual(expected[i], uint8image)

        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run([result.key, result.uint8image])
def setup_reader(self, image_paths, image_shape, num_concurrent, batch_size):
    """Build a two-stage input pipeline.

    Stage 1 is a queue of (index, path) pairs covering every image; stage 2
    is a queue of (index, preprocessed image) pairs, filled by
    `num_concurrent` threads and consumed in batches of `batch_size`.
    """
    total = len(image_paths)
    indices = tf.range(0, total, 1)

    # Stage 1: path queue, filled once and then closed by the caller.
    self.path_queue = tf.FIFOQueue(capacity=total,
                                   dtypes=[tf.int32, tf.string],
                                   name='path_queue')
    self.enqueue_path = self.path_queue.enqueue_many([indices, image_paths])
    self.close_path = self.path_queue.close()

    # Stage 2: processed-image queue with fully defined shapes.
    processed_queue = tf.FIFOQueue(capacity=total,
                                   dtypes=[tf.int32, tf.float32],
                                   shapes=[(), image_shape],
                                   name='processed_queue')
    idx, processed_image = self.process()
    enqueue_processed = processed_queue.enqueue([idx, processed_image])
    self.dequeue_batch = processed_queue.dequeue_many(batch_size)

    # One enqueue op per preprocessing thread.
    self.queue_runner = tf.train.QueueRunner(
        processed_queue, [enqueue_processed] * num_concurrent)
def create_queues(hypes, phase):
    """Create the input FIFOQueue for fixed 224x224x3 images and int labels.

    Args:
        hypes: hyper-parameter dict (accepted for interface compatibility;
            not read by this fixed-shape variant).
        phase: phase name (e.g. 'train'/'val'), used in the summary tag.

    Returns:
        A tf.FIFOQueue holding (image, label) pairs.
    """
    dtypes = [tf.float32, tf.int32]
    height = 224
    width = 224
    channel = 3
    shapes = [[height, width, channel], []]
    capacity = 50
    # Fix: pass the named `capacity` constant instead of repeating the
    # literal 50, so the queue capacity and the summary's denominator
    # cannot silently drift apart. (Unused `arch` local removed.)
    q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
    tf.summary.scalar("queue/%s/fraction_of_%d_full" %
                      (q.name + "_" + phase, capacity),
                      math_ops.cast(q.size(), tf.float32) * (1. / capacity))
    return q
def create_queues(hypes, phase):
    """Create the input FIFOQueue, with static shapes when they are known.

    Shapes are taken from hypes['jitter'] / hypes['arch'] when the jitter
    config fixes or resizes the image; otherwise the queue is unshaped.

    Args:
        hypes: hyper-parameter dict with 'jitter' and 'arch' sections.
        phase: phase name (e.g. 'train'/'val'), used in the summary tag.

    Returns:
        A tf.FIFOQueue holding (image, label) pairs.
    """
    dtypes = [tf.float32, tf.int32]
    shape_known = hypes['jitter']['fix_shape'] or \
        hypes['jitter']['resize_image']
    if shape_known:
        height = hypes['jitter']['image_height']
        width = hypes['jitter']['image_width']
        channel = hypes['arch']['num_channels']
        shapes = [[height, width, channel], []]
    else:
        shapes = None
    capacity = 50
    # Fix: pass the named `capacity` constant instead of repeating the
    # literal 50, so the queue capacity and the summary's denominator
    # cannot silently drift apart. (Unused `arch` local removed.)
    q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)
    tf.summary.scalar("queue/%s/fraction_of_%d_full" %
                      (q.name + "_" + phase, capacity),
                      math_ops.cast(q.size(), tf.float32) * (1. / capacity))
    return q
def get_mydevlist(self, ngpus):
    """Return the worker device specs belonging to this worker's task."""
    all_worker_devs = self.get_allworkers_devlist(ngpus)
    my_task = self.mytask_id
    #: :type wdev: tf.DeviceSpec
    return [dev for dev in all_worker_devs if dev.task == my_task]


# =============================================================================
# SIGNAL QUEUES: https://github.com/hustcat/tensorflow_examples/blob/master/mnist_distributed/dist_fifo.py @IgnorePep8
# =============================================================================
# def create_done_queue(i, num_workers=1):
#     """Queue used to signal death for i'th ps shard. Intended to have
#     all workers enqueue an item onto it to signal doneness."""
#
#     with tf.device("/job:ps/task:%d" % (i)):
#         return tf.FIFOQueue(num_workers, tf.int32,
#                             shared_name="done_queue{}".format(i))
#
#
# def create_done_queues(num_ps):
#     return [create_done_queue(i) for i in range(num_ps)]

# Perhaps implement a READY queue just like DONE queues.
def testUpdateOpsReturnsCurrentValue(self):
    """Each run of update_op returns the running mean accumulated so far."""
    with self.test_session() as sess:
        q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, q, row)
        values = q.dequeue()

        mean, update_op = metrics.streaming_mean(values)

        sess.run(tf.initialize_local_variables())

        # Running mean after 1, 2, 3 and 4 rows respectively.
        self.assertAlmostEqual(0.5, sess.run(update_op), 5)
        self.assertAlmostEqual(1.475, sess.run(update_op), 5)
        self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
        self.assertAlmostEqual(1.65, sess.run(update_op), 5)

        self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
    """Scalar per-row weights: only rows with weight 1 enter the mean."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of matching scalar weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        for w in ([1], [0], [0], [1]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.initialize_local_variables().run()
        for _ in range(4):
            update_op.eval()
        # Rows 0 and 3 are weighted in; rows 1 and 2 are dropped.
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
    """Like test1dWeightedValues, but values arrive through a placeholder."""
    with self.test_session() as sess:
        feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
        values = tf.placeholder(dtype=tf.float32)

        # Queue of matching scalar weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        for w in ([1], [0], [0], [1]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.initialize_local_variables().run()
        for i in range(4):
            update_op.eval(feed_dict={values: feed_values[i]})
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
    """Element-wise 2-d weights select individual entries, not whole rows."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of per-element weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([1, 1], [1, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.initialize_local_variables().run()
        for _ in range(4):
            update_op.eval()
        self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def testMultiDimensional(self):
    """streaming_mean_tensor averages element-wise over (2,2,2) batches."""
    with self.test_session() as sess:
        q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
        _enqueue_vector(sess, q,
                        [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                        shape=(2, 2, 2))
        _enqueue_vector(sess, q,
                        [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                        shape=(2, 2, 2))
        values = q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values)

        sess.run(tf.initialize_local_variables())
        for _ in range(2):
            sess.run(update_op)
        self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                            sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
    """Each run of update_op returns the element-wise running mean so far."""
    with self.test_session() as sess:
        q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, q, row)
        values = q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values)

        sess.run(tf.initialize_local_variables())

        # Running element-wise mean after 1, 2, 3 and 4 rows respectively.
        self.assertAllClose([[0, 1]], sess.run(update_op), 5)
        self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
        self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)

        self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
    """Scalar per-batch weights select which rows enter the tensor mean."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of matching scalar weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        for w in ([[1]], [[0]], [[1]], [[0]]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.initialize_local_variables())
        for _ in range(4):
            sess.run(update_op)
        # Mean of rows 0 and 2 only.
        self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
    """Element-wise weights: each column averages its own selected entries."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of per-element weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([1, 1], [1, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.initialize_local_variables())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testMultipleBatchesOfSizeOne(self):
    """MSE accumulates correctly across two single-row batches."""
    with self.test_session() as sess:
        # Queue of prediction rows.
        preds_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, preds_q, [10, 8, 6])
        _enqueue_vector(sess, preds_q, [-4, 3, -1])
        predictions = preds_q.dequeue()

        # Queue of label rows.
        labels_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, labels_q, [1, 3, 2])
        _enqueue_vector(sess, labels_q, [2, 4, 6])
        labels = labels_q.dequeue()

        error, update_op = metrics.streaming_mean_squared_error(
            predictions, labels)

        sess.run(tf.initialize_local_variables())
        sess.run(update_op)
        self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
        self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testUpdateOpsReturnsCurrentValue(self):
    """Each run of update_op returns the running mean accumulated so far."""
    with self.test_session() as sess:
        q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, q, row)
        values = q.dequeue()

        mean, update_op = metrics.streaming_mean(values)

        sess.run(tf.local_variables_initializer())

        # Running mean after 1, 2, 3 and 4 rows respectively.
        self.assertAlmostEqual(0.5, sess.run(update_op), 5)
        self.assertAlmostEqual(1.475, sess.run(update_op), 5)
        self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
        self.assertAlmostEqual(1.65, sess.run(update_op), 5)

        self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues_placeholders(self):
    """Scalar weights with placeholder-fed values."""
    with self.test_session() as sess:
        feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
        values = tf.placeholder(dtype=tf.float32)

        # Queue of matching scalar weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        for w in ([1], [0], [0], [1]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for i in range(4):
            update_op.eval(feed_dict={values: feed_values[i]})
        self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
    """Element-wise 2-d weights select individual entries, not whole rows."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of per-element weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([1, 1], [1, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for _ in range(4):
            update_op.eval()
        self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
    """Element-wise weights with placeholder-fed values."""
    with self.test_session() as sess:
        feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
        values = tf.placeholder(dtype=tf.float32)

        # Queue of per-element weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([1, 1], [1, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean(values, weights)

        tf.local_variables_initializer().run()
        for i in range(4):
            update_op.eval(feed_dict={values: feed_values[i]})
        self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def testMultiDimensional(self):
    """streaming_mean_tensor averages element-wise over (2,2,2) batches."""
    with self.test_session() as sess:
        q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
        _enqueue_vector(sess, q,
                        [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                        shape=(2, 2, 2))
        _enqueue_vector(sess, q,
                        [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                        shape=(2, 2, 2))
        values = q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values)

        sess.run(tf.local_variables_initializer())
        for _ in range(2):
            sess.run(update_op)
        self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                            sess.run(mean))
def testWeighted1d(self):
    """Scalar per-batch weights select which rows enter the tensor mean."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of matching scalar weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
        for w in ([[1]], [[0]], [[1]], [[0]]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        # Mean of rows 0 and 2 only.
        self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
    """Element-wise weights: each column averages its own selected entries."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Queue of per-element weights.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([1, 1], [1, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
    """A column whose weights are all zero contributes a mean of zero."""
    with self.test_session() as sess:
        # Queue of value rows.
        vals_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for row in ([0, 1], [-4.2, 9.1], [6.5, 0], [-3.2, 4.0]):
            _enqueue_vector(sess, vals_q, row)
        values = vals_q.dequeue()

        # Per-element weights: first column is never selected.
        wts_q = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
        for w in ([0, 1], [0, 0], [0, 1], [0, 0]):
            _enqueue_vector(sess, wts_q, w)
        weights = wts_q.dequeue()

        mean, update_op = metrics.streaming_mean_tensor(values, weights)

        sess.run(tf.local_variables_initializer())
        for _ in range(4):
            sess.run(update_op)
        self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
def testMultipleBatchesOfSizeOne(self):
    """MSE accumulates correctly across two single-row batches."""
    with self.test_session() as sess:
        # Queue of prediction rows.
        preds_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, preds_q, [10, 8, 6])
        _enqueue_vector(sess, preds_q, [-4, 3, -1])
        predictions = preds_q.dequeue()

        # Queue of label rows.
        labels_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, labels_q, [1, 3, 2])
        _enqueue_vector(sess, labels_q, [2, 4, 6])
        labels = labels_q.dequeue()

        error, update_op = metrics.streaming_mean_squared_error(
            predictions, labels)

        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
        self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
    """MAE and MSE can share the same input tensors and update together."""
    with self.test_session() as sess:
        # Queue of prediction rows.
        preds_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, preds_q, [10, 8, 6])
        _enqueue_vector(sess, preds_q, [-4, 3, -1])
        predictions = preds_q.dequeue()

        # Queue of label rows.
        labels_q = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
        _enqueue_vector(sess, labels_q, [1, 3, 2])
        _enqueue_vector(sess, labels_q, [2, 4, 6])
        labels = labels_q.dequeue()

        mae, ma_update_op = metrics.streaming_mean_absolute_error(
            predictions, labels)
        mse, ms_update_op = metrics.streaming_mean_squared_error(
            predictions, labels)

        sess.run(tf.local_variables_initializer())
        sess.run([ma_update_op, ms_update_op])
        sess.run([ma_update_op, ms_update_op])

        self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
        self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
def __init__(self, path, batch_size=16, input_size=227, scale_factor=1.0,
             num_threads=10):
    """Collect .avi files under `path` and build the batch feed queue.

    Labels are single-channel maps scaled by `scale_factor` relative to
    the input size.
    """
    self._path = path
    self._list_files = glob.glob(os.path.join(path, "**/*.avi"))
    self._batch_size = batch_size
    self._scale_factor = scale_factor
    self._image_size = input_size
    self._label_size = int(input_size * self._scale_factor)
    self._num_threads = num_threads
    self._coord = tf.train.Coordinator()

    self._image_shape = [batch_size, self._image_size, self._image_size, 3]
    self._label_shape = [batch_size, self._label_size, self._label_size, 1]

    # Feed placeholders: one batch of images, one batch of label maps.
    image_ph = tf.placeholder(tf.float32, self._image_shape, name='x')
    label_ph = tf.placeholder(tf.float32, self._label_shape, name='y')
    feed = [image_ph, label_ph]

    self._queue = tf.FIFOQueue(400,
                               [t.dtype for t in feed],
                               [t.get_shape() for t in feed])
    self._inputs = feed
    self._enqueue_op = self._queue.enqueue(feed)
    self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
    self._threads = []
def __init__(self, files, batch_size=16, input_size=227, scale_factor=1.0,
             num_threads=10):
    """Build the batch feed queue over an explicit list of files.

    Labels are two-channel maps scaled by `scale_factor` relative to the
    input size.
    """
    self._list_files = files
    self._batch_size = batch_size
    self._scale_factor = scale_factor
    self._image_size = input_size
    self._label_size = int(input_size * self._scale_factor)
    self._num_threads = num_threads
    self._coord = tf.train.Coordinator()

    self._image_shape = [batch_size, self._image_size, self._image_size, 3]
    self._label_shape = [batch_size, self._label_size, self._label_size, 2]

    # Feed placeholders: one batch of images, one batch of label maps.
    image_ph = tf.placeholder(tf.float32, self._image_shape, name='x')
    label_ph = tf.placeholder(tf.float32, self._label_shape, name='y')
    feed = [image_ph, label_ph]

    self._queue = tf.FIFOQueue(400,
                               [t.dtype for t in feed],
                               [t.get_shape() for t in feed])
    self._inputs = feed
    self._enqueue_op = self._queue.enqueue(feed)
    self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
    self._threads = []
def __init__(self, path, root_path='', batch_size=16, input_size=227,
             num_threads=10):
    """Read the file list from `path` (one entry per line) and build the feed queue."""
    self._path = path
    self._root_path = root_path
    with open(path) as f:
        self._list_files = [line.rstrip('\n') for line in f.readlines()]
    print('list_files', len(self._list_files))

    self._batch_size = batch_size
    self._input_size = input_size
    self._num_threads = num_threads
    self._coord = tf.train.Coordinator()

    self._base_shape = [batch_size, input_size, input_size]
    self._image_shape = self._base_shape + [3]
    self._label_shape = self._base_shape + [1]

    # Feed placeholders: one batch of images, one batch of label maps.
    image_ph = tf.placeholder(tf.float32, self._image_shape, name='x')
    label_ph = tf.placeholder(tf.float32, self._label_shape, name='y')
    feed = [image_ph, label_ph]

    self._queue = tf.FIFOQueue(400,
                               [t.dtype for t in feed],
                               [t.get_shape() for t in feed])
    self._inputs = feed
    self._enqueue_op = self._queue.enqueue(feed)
    self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
    self._threads = []
def validation_inputs():
    """Build the validation pipeline: filename queue -> 10-crop example queue.

    Returns:
        (image_10crop, label) dequeued from the example queue; the label is
        left as an int32 index (deliberately NOT one-hot encoded here).
    """
    fps, labels = _load_validation_labelmap()
    filepaths = tf.constant(fps)
    labels = tf.constant(labels, dtype=tf.int32)

    # Stage 1: (filepath, label) pairs.
    filename_queue = tf.FIFOQueue(len(fps), [tf.string, tf.int32],
                                  name='validation_filename_queue')
    enqueue_filenames = filename_queue.enqueue_many([filepaths, labels])
    tf.train.add_queue_runner(
        tf.train.QueueRunner(filename_queue, [enqueue_filenames]))

    # Stage 2: preprocessed 10-crop examples, filled by several threads.
    example_queue = tf.FIFOQueue(len(filepaths), [tf.float32, tf.int32],
                                 name='validation_example_queue')
    enqueue_examples = example_queue.enqueue(
        _read_and_preprocess_image_for_validation(filename_queue))
    tf.train.add_queue_runner(
        tf.train.QueueRunner(example_queue,
                             [enqueue_examples] * FLAGS.num_consuming_threads))

    image_10crop, label = example_queue.dequeue()
    return image_10crop, label
def queue_setup(filename, mode, batch_size, num_readers, min_examples):
    """Set up queue runners feeding serialized TFRecord examples.

    Training uses a shuffling queue; any other mode keeps FIFO order.

    Returns:
        A scalar string tensor holding one serialized example.
    """
    filename_queue = tf.train.string_input_producer([filename], shuffle=True,
                                                    capacity=16)
    capacity = min_examples + 3 * batch_size
    if mode == "train":
        examples_queue = tf.RandomShuffleQueue(capacity=capacity,
                                               min_after_dequeue=min_examples,
                                               dtypes=[tf.string])
    else:
        examples_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string])

    # One TFRecordReader per reader thread, all enqueueing into one queue.
    enqueue_ops = []
    for _ in range(num_readers):
        reader = tf.TFRecordReader()
        _, value = reader.read(filename_queue)
        enqueue_ops.append(examples_queue.enqueue([value]))
    tf.train.queue_runner.add_queue_runner(
        tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))

    return examples_queue.dequeue()
def __init__(self, input_size, batch_size, data_generator_creator,
             max_steps=None):
    """Set up placeholders and the CPU-side feed queue for (inputs, lengths, labels)."""
    super().__init__(input_size)
    self.batch_size = batch_size
    self.data_generator_creator = data_generator_creator
    self.steps_left = max_steps

    with tf.device("/cpu:0"):
        # inputs has shape [batch_size, max_time, input_size].
        self.inputs = tf.placeholder(tf.float32,
                                     [batch_size, None, input_size],
                                     name='inputs')
        self.sequence_lengths = tf.placeholder(tf.int32, [batch_size],
                                               name='sequence_lengths')
        self.labels = tf.sparse_placeholder(tf.int32, name='labels')

        # Queues cannot hold sparse tensors, so labels travel serialized.
        self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                  capacity=100)
        serialized_labels = tf.serialize_many_sparse(self.labels)
        self.enqueue_op = self.queue.enqueue(
            [self.inputs, self.sequence_lengths, serialized_labels])
def __init__(self, pkl_path, shuffle=False, distort=False, capacity=2000,
             image_per_thread=16):
    """Load a CIFAR-style pickle and set up the (image, label) feed queue."""
    self._shuffle = shuffle
    self._distort = distort
    with open(pkl_path, 'rb') as fd:
        data = pickle.load(fd)
    # Convert CHW rows to contiguous NHWC images.
    self._images = (data['data'].reshape([-1, 3, 32, 32])
                    .transpose((0, 2, 3, 1)).copy(order='C'))
    self._labels = data['labels']  # numpy 1-D array
    self.size = len(self._labels)

    self.queue = tf.FIFOQueue(shapes=[[32,32,3], []],
                              dtypes=[tf.float32, tf.int32],
                              capacity=capacity)
    # Shuffling alternative, kept for reference:
    # self.queue = tf.RandomShuffleQueue(shapes=[[32,32,3], []],
    #                                    dtypes=[tf.float32, tf.int32],
    #                                    capacity=capacity,
    #                                    min_after_dequeue=min_after_dequeue)
    self.dataX = tf.placeholder(dtype=tf.float32, shape=[None,32,32,3])
    self.dataY = tf.placeholder(dtype=tf.int32, shape=[None,])
    self.enqueue_op = self.queue.enqueue_many([self.dataX, self.dataY])
    self.image_per_thread = image_per_thread
    self._image_summary_added = False
def __init__(self, config, input_queue=None, predict_tower=None):
    """
    :param config: a `TrainConfig` instance
    :param input_queue: a `tf.QueueBase` instance used to buffer datapoints.
        Defaults to a FIFO queue of size 50.
    :param predict_tower: list of gpu relative idx to run prediction.
        Defaults to [0]. Use -1 for cpu.
    """
    super(QueueInputTrainer, self).__init__(config)
    self.input_vars = self.model.get_input_vars()
    if input_queue is not None:
        self.input_queue = input_queue
    else:
        # Smaller queue size for now, to avoid
        # https://github.com/tensorflow/tensorflow/issues/2942
        self.input_queue = tf.FIFOQueue(
            50, [x.dtype for x in self.input_vars], name='input_queue')

    # By default, predict on the first training gpu.
    self.predict_tower = predict_tower or [0]
    self.dequed_inputs = None
def __init__(self, sources=None, initOnStart=True):
    """Build the dataset feed queue and its placeholders.

    Args:
        sources: optional list of data sources; defensively copied.
            Fix: the default was the mutable literal `[]`, a shared-state
            pitfall — replaced with the `None` sentinel (backward
            compatible; omitting the argument still yields an empty list).
        initOnStart: whether sources are initialized when the loader starts.
    """
    self.totalCount = 0
    self.counts = []
    self.sources = []
    self.initDone = False
    self.initOnStart = initOnStart

    with tf.name_scope('dataset') as scope:
        self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.float32, tf.uint8],
                                  capacity=self.QUEUE_CAPACITY)
        # Per-sample placeholders: image (H,W,3), boxes (N,4), classes (N,).
        self.image = tf.placeholder(dtype=tf.float32, shape=[None, None, 3],
                                    name="image")
        self.boxes = tf.placeholder(dtype=tf.float32, shape=[None, 4],
                                    name="boxes")
        self.classes = tf.placeholder(dtype=tf.uint8, shape=[None],
                                      name="classes")
        self.enqueueOp = self.queue.enqueue(
            [self.image, self.boxes, self.classes])

    # Keep a private copy so callers mutating their list don't affect us.
    self.sources = [] if sources is None else sources[:]
def monitored_queue(*tensors, capacity, metric_name="items_in_queue",
                    return_queue=False):
    """Wrap `tensors` in a FIFOQueue whose fill level is exported as a metric.

    Returns the queue itself when `return_queue` is true; otherwise returns
    the dequeued tensors with their static shapes restored.
    """
    queue = tf.FIFOQueue(capacity, dtypes(*tensors))
    collections.add_metric(queue.size(), metric_name)
    add_queue_runner(queue, [queue.enqueue(tensors)])

    if return_queue:
        return queue

    results = queue.dequeue()
    # dequeue() yields a bare tensor for a single-component queue and a
    # list otherwise; normalize only for the shape-restoring loop.
    dequeued = results if isinstance(results, list) else [results]
    for source, out in zip(tensors, dequeued):
        out.set_shape(source.get_shape())
    return results
def testSimple(self):
    """Write three CIFAR-10 records to a temp file, read them back through a
    FIFOQueue with cifar10_input.read_cifar10, and verify key/label/image.

    Also checks that reading past the end raises OutOfRangeError.
    """
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    # Fix: use a context manager so the handle is closed (and the bytes are
    # flushed to disk) before the reader opens the file; the original
    # `open(...).write(...)` leaked the handle and relied on GC to flush.
    with open(filename, "wb") as f:
        f.write(contents)
    with self.test_session() as sess:
        q = tf.FIFOQueue(99, [tf.string], shapes=())
        q.enqueue([filename]).run()
        q.close().run()
        result = cifar10_input.read_cifar10(q)

        for i in range(3):
            key, label, uint8image = sess.run([
                result.key, result.label, result.uint8image])
            self.assertEqual("%s:%d" % (filename, i), cp.as_text(key))
            self.assertEqual(labels[i], label)
            self.assertAllEqual(expected[i], uint8image)

        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run([result.key, result.uint8image])
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
        name_prefix: prefixed for the shared_name of ops.
        enqueue_after_list: control dependency from ops.

    Returns:
        an op that should be used as control dependency before starting
        next step.
    """
    self.sync_queue_counter += 1
    device = self.sync_queue_devices[
        self.sync_queue_counter % len(self.sync_queue_devices)]
    with tf.device(device):
        # One shared queue per worker; workers rendezvous through them.
        sync_queues = [
            tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                         shared_name='%s%s' % (name_prefix, i))
            for i in range(self.num_workers)]

        queue_ops = []
        # Put a token into every *other* worker's queue, signaling that it
        # may finish this step.
        token = tf.constant(False)
        with tf.control_dependencies(enqueue_after_list):
            for i, q in enumerate(sync_queues):
                if i == self.task_index:
                    queue_ops.append(tf.no_op())
                else:
                    queue_ops.append(q.enqueue(token))

        # Drain tokens off this worker's own queue, one per other worker.
        queue_ops.append(
            sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

        return tf.group(*queue_ops)
def __init__(self, coordinator, placeholders, meta, batch_size=32,
             split_nums=None, is_validation=False):
    """Wrap `placeholders` in a small FIFO buffer queue for feeding.

    :param coordinator: coordinator controlling the feeder threads
    :param placeholders: placeholder tensors buffered through the queue
    :param meta: dict containing at least 'key_lst' (list/tuple of keys)
    :param batch_size: number of samples per batch
    :param split_nums: optional split sizes; enables split mode when given
    :param is_validation: whether this feeder serves validation data
    """
    super(BaseFeeder, self).__init__()
    buf = tf.FIFOQueue(capacity=math.ceil(batch_size/4),
                       dtypes=[item.dtype for item in placeholders])
    self.queue = buf  # for buf inspect
    self.enqueue_op = buf.enqueue(placeholders)
    self.fed_holders = [None] * len(placeholders)  # None placeholder for dequeue
    self.fed_holders = buf.dequeue()
    # Dequeue loses static shape info; restore it from the placeholders.
    for idx in range(len(placeholders)):
        self.fed_holders[idx].set_shape(placeholders[idx].shape)
    self._placeholders = placeholders

    self.coord = coordinator
    self.sess = None
    self.meta = meta

    key_lst = meta.get('key_lst')
    assert isinstance(key_lst, list) or isinstance(key_lst, tuple)
    self.key_lst = key_lst

    self.batch_size = batch_size
    self.split_bool = False if split_nums is None else True
    self.split_nums = split_nums
    assert isinstance(is_validation, bool)
    self.is_validation = is_validation

    self._total_samples = len(key_lst)
    self._iter = 0
    self._record_index = 0
    self._loss = 0.
def input_fn(files, num_epochs=None, shuffle=False, shared_name=None):
    """Build an input pipeline that yields parsed examples from TFRecords.

    Returns:
        (image, ground_truth, example_name) tensors for one example.
    """
    # Resolve the file list: a directory expands to its *tfrecord contents.
    if file_io.is_directory(files[0]):
        file_names = file_io.get_matching_files(files[0] + '/*tfrecord')
    else:
        file_names = files
    if shuffle:
        shuffle_fn(file_names)

    # Filename queue; shared_name lets workers share it during training.
    filename_queue = tf.FIFOQueue(100, tf.string, shared_name=shared_name)
    enqueue = filename_queue.enqueue_many(
        [tf.train.limit_epochs(file_names, num_epochs)])
    close = filename_queue.close(cancel_pending_enqueues=True)
    tf.train.add_queue_runner(tf.train.QueueRunner(
        filename_queue, [enqueue], close,
        queue_closed_exception_types=(tf.errors.OutOfRangeError,
                                      tf.errors.CancelledError)))

    # Read one serialized example and parse it.
    reader = tf.TFRecordReader()
    _, example = reader.read(filename_queue)
    image, ground_truth, example_name = parse_example(example)
    return image, ground_truth, example_name