The following 50 code examples, extracted from open source Python projects, illustrate how to use tensorflow.random_uniform().
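Before the project snippets, a minimal sketch of the call itself; every example on this page uses the TF 1.x graph-mode API, so the sketch does too (the shape, bounds, and seed are arbitrary):

import tensorflow as tf

# tf.random_uniform samples from [minval, maxval); for integer dtypes maxval is required.
noise = tf.random_uniform([2, 3], minval=-1.0, maxval=1.0, seed=42)

with tf.Session() as sess:
    print(sess.run(noise))  # a 2x3 float32 tensor with values in [-1, 1)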
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
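A hedged usage sketch of the function above; the tensors and shapes are invented for the demo:

import tensorflow as tf

model_input = tf.random_uniform([4, 300, 128])  # batch x max_frames x feature_size
num_frames = tf.fill([4, 1], 250)               # every clip has 250 valid frames
sampled = SampleRandomFrames(model_input, num_frames, num_samples=16)

with tf.Session() as sess:
    print(sess.run(sampled).shape)  # (4, 16, 128)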
def rnn_model(self):
    # Build one cell per layer; reusing a single cell object via
    # `[cell] * self.n_layers` would make every layer share variables.
    multi_cell = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(num_units=self.n_units) for _ in range(self.n_layers)])
    # we only need one output, so wrap the cell to project it down to a
    # single value: the next word index
    cell_wrapped = rnn.OutputProjectionWrapper(multi_cell, output_size=1)

    # get input embedding
    embedding = tf.Variable(
        initial_value=tf.random_uniform([self.vocab_size, self.n_units], -1.0, 1.0))
    # inputs: batch_size x time_steps x n_units after the lookup
    inputs = tf.nn.embedding_lookup(embedding, self.inputs)

    outputs, states = tf.nn.dynamic_rnn(cell_wrapped, inputs=inputs, dtype=tf.float32)
    outputs = tf.reshape(outputs, [int(outputs.get_shape()[0]),
                                   int(inputs.get_shape()[1])])

    w = tf.Variable(tf.truncated_normal([int(inputs.get_shape()[1]), self.vocab_size]))
    b = tf.Variable(tf.zeros([self.vocab_size]))

    logits = tf.nn.bias_add(tf.matmul(outputs, w), b)
    return logits
def ae(x):
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map
    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")

    h1 = f(tf.matmul(x, W['1']) + b['1'])
    h2 = f(tf.matmul(h1, W['2']) + b['2'])
    h3 = f(tf.matmul(h2, W['3']) + b['3'])
    h4 = f(tf.matmul(h3, W['4']) + b['4'])
    h5 = f(tf.matmul(h4, W['5']) + b['5'])
    h6 = f(tf.matmul(h5, W['6']) + b['6'])
    h7 = f(tf.matmul(h6, W['7']) + b['7'])
    return tf.matmul(h7, W['8']) + b['8']
def xavier_initializer(shape):
    dim_sum = np.sum(shape)
    if len(shape) == 1:
        dim_sum += 1
    bound = np.sqrt(2.0 / dim_sum)
    return tf.random_uniform(shape, minval=-bound, maxval=bound)

# # Assigning network variables to target network variables
# def assign_network_to_target():
#     update_wfc = tf.assign(w_fc_target, w_fc)
#     update_bfc = tf.assign(b_fc_target, b_fc)
#     sess.run(update_wfc)
#     sess.run(update_bfc)
#     cell_target = cell

# Input
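A minimal usage sketch, assuming the function above is in scope; the 784 x 256 layer size is made up for illustration:

import tensorflow as tf

# Initialize a weight matrix with the uniform Xavier-style bound computed above.
w = tf.Variable(xavier_initializer([784, 256]), name='w')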
def augment(self, model_input_raw, num_frames, labels_batch, **unused_params):
    # `assert (cond, msg)` is a tuple and always true; assert the flag directly.
    assert FLAGS.frame_feature, "AugmentationTransformer only works with frame feature"
    feature_dim = len(model_input_raw.get_shape()) - 1
    frame_dim = len(model_input_raw.get_shape()) - 2
    max_frame = model_input_raw.get_shape().as_list()[frame_dim]

    limit = tf.cast(tf.reduce_min(num_frames) / 4.0, tf.int32)
    # integer random_uniform requires an explicit maxval
    offset = tf.random_uniform(shape=[], maxval=limit, dtype=tf.int32)
    # tf.pad expects one (before, after) pair per dimension
    input_trans1 = tf.pad(model_input_raw[:, offset:, :],
                          paddings=[[0, 0], [0, offset], [0, 0]])
    num_frames_trans1 = num_frames - offset
    num_frames_trans1 = tf.cast(
        tf.random_uniform(shape=num_frames.shape, minval=0.75, maxval=1.0,
                          dtype=tf.float32)
        * tf.cast(num_frames_trans1, tf.float32), tf.int32)
    model_input = tf.concat([model_input_raw, input_trans1], axis=0)
    labels_batch = tf.concat([labels_batch, labels_batch], axis=0)
    num_frames = tf.concat([num_frames, num_frames_trans1], axis=0)
    return model_input, labels_batch, num_frames
def sample_dtype(self):
    return tf.int32


# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#             - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
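The commented-out sample() above is the Gumbel-max trick: adding independent Gumbel noise -log(-log(u)) to the logits and taking the argmax draws exactly from the softmax distribution. A standalone sketch of that trick (the logits here are illustrative):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])
u = tf.random_uniform(tf.shape(logits))      # Uniform(0, 1)
gumbel = -tf.log(-tf.log(u))                 # standard Gumbel noise
sample = tf.argmax(logits + gumbel, axis=1)  # distributed as softmax(logits)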
def tf_truncexpon(batch_size, rate, right):
    '''
    a tensorflow node that returns a random variable
    sampled from an Exp(rate) random variable
    which has been truncated and normalized to [0, right]

    # Leverages that log of uniform is exponential

    batch_size: a tensorflow placeholder to sync batch_size everywhere
    rate: lambda rate parameter for exponential dist
    right: float in (0, inf) where to truncate exp distribution
    '''
    uleft = tf.exp(-1 * rate * right)
    U = tf.random_uniform(shape=(batch_size, 1), minval=uleft, maxval=1)
    tExp = (-1 / rate) * tf.log(U)
    return tExp
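This is inverse-CDF sampling: if U is uniform on (e^(-rate*right), 1), then -log(U)/rate lands in [0, right] with the renormalized exponential density. A quick NumPy check of that claim (a sketch, not part of the original project):

import numpy as np

rate, right = 2.0, 1.5
u = np.random.uniform(np.exp(-rate * right), 1.0, size=100000)
samples = -np.log(u) / rate
# All mass falls inside the truncation interval [0, right].
assert samples.min() >= 0.0 and samples.max() <= right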
def Grad_Penalty(real_data, fake_data, Discriminator, config):
    '''
    Implementation from "Improved Training of Wasserstein GANs":
    interpolation-based estimation of the gradient of the discriminator,
    used to penalize the derivative rather than explicitly constrain the
    Lipschitz constant.
    '''
    batch_size = config.batch_size
    LAMBDA = config.lambda_W
    n_hidden = config.critic_hidden_size
    alpha = tf.random_uniform([batch_size, 1], 0., 1.)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)  # could do more if not fixed batch_size
    disc_interpolates = Discriminator(interpolates, batch_size,
                                      n_hidden=n_hidden, config=config,
                                      reuse=True)[1]  # logits
    gradients = tf.gradients(disc_interpolates, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes - 1) ** 2)
    grad_cost = LAMBDA * gradient_penalty
    return grad_cost, slopes
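In the WGAN-GP objective the returned grad_cost is simply added to the critic loss. A hedged sketch of that wiring; the Discriminator and config names mirror the signature above, but the surrounding loss terms are illustrative, not the original project's training code:

# Hypothetical wiring: add the penalty to the usual WGAN critic loss.
d_real = Discriminator(real_data, config.batch_size,
                       n_hidden=config.critic_hidden_size, config=config)[1]
d_fake = Discriminator(fake_data, config.batch_size,
                       n_hidden=config.critic_hidden_size, config=config,
                       reuse=True)[1]
grad_cost, slopes = Grad_Penalty(real_data, fake_data, Discriminator, config)
critic_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real) + grad_cost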
def init_var(self):
    self.rand_h = tf.random_uniform([1], 1.0 - float(self.rnd_hflip), 1.0)
    self.rand_v = tf.random_uniform([1], 1.0 - float(self.rnd_vflip), 1.0)
    self.rand_t = tf.random_uniform(
        [1], 1.0 - float(self.rnd_transpose), 1.0)
    self.offset = tf.random_uniform(
        [2], dtype='int32', maxval=self.padding * 2 + self.shrink)
    if self._debug:
        self.offset = tf.Print(self.offset,
                               ['Forward RND module', self.offset])
    if self.rnd_size:
        self.space = 2 * self.padding - self.offset
        self.offset20 = tf.random_uniform(
            [], dtype='int32', maxval=self.space[0] * 2) - self.space[0]
        self.offset21 = tf.random_uniform(
            [], dtype='int32', maxval=self.space[1] * 2) - self.space[1]
        # tf.pack was renamed to tf.stack in TF 1.0
        self.offset2 = tf.stack([self.offset20, self.offset21])
    else:
        self.offset2 = tf.zeros([2], dtype='int32')
def _build(self):
    """Build the linear layer.

    Two parameters: weight and bias.

    :return: None.
    """
    bound = math.sqrt(6.0 / (self._input_size + self._output_size))
    w_init = tf.random_uniform(
        minval=-bound,
        maxval=bound,
        shape=(self._input_size, self._output_size),
        dtype=D_TYPE,
        name='w_init'
    )
    self._w = tf.Variable(w_init, dtype=D_TYPE, name='w')
    if self._with_bias:
        b_init = tf.zeros(
            shape=(self._output_size,),
            dtype=D_TYPE,
            name='b_init'
        )
        self._b = tf.Variable(b_init, dtype=D_TYPE, name='b')
    else:
        self._b = None
    self._batch_norm = BatchNorm('bn', self._output_size) if self._with_batch_norm else None
def random_rotation(img: tf.Tensor, max_rotation: float=0.1, crop: bool=True) -> tf.Tensor:  # from SeguinBe
    with tf.name_scope('RandomRotation'):
        rotation = tf.random_uniform([], -max_rotation, max_rotation)
        rotated_image = tf.contrib.image.rotate(img, rotation, interpolation='BILINEAR')
        if crop:
            rotation = tf.abs(rotation)
            original_shape = tf.shape(rotated_image)[:2]
            h, w = original_shape[0], original_shape[1]
            # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae
            old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])
            old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)
            new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2 * rotation)
            new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)
            new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])
            new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
            bb_begin = tf.cast(tf.ceil((h - new_h) / 2), tf.int32), tf.cast(tf.ceil((w - new_w) / 2), tf.int32)
            rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]

            # If crop removes the entire image, keep the original image
            rotated_image = tf.cond(tf.equal(tf.size(rotated_image_crop), 0),
                                    true_fn=lambda: img,
                                    false_fn=lambda: rotated_image_crop)
    return rotated_image
def attack(self, x, y):
    """
    This method creates a symbolic graph that, given an input image,
    first randomly perturbs the image. The perturbation is bounded to an
    epsilon ball. Then multiple steps of gradient descent are performed to
    increase the probability of a target label or decrease the probability
    of the ground-truth label.

    :param x: A tensor with the input image.
    """
    import tensorflow as tf
    from cleverhans.utils_tf import clip_eta

    eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
    eta = clip_eta(eta, self.ord, self.eps)

    for i in range(self.nb_iter):
        x, eta = self.attack_single_step(x, eta, y)

    adv_x = x + eta
    if self.clip_min is not None and self.clip_max is not None:
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

    return adv_x
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = inception.inception_v2(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys()
                   if key.startswith('Mixed') or key.startswith('Conv')]

  _, end_points_with_multiplier = inception.inception_v2(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=0.5)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = inception.inception_v2(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys()
                   if key.startswith('Mixed') or key.startswith('Conv')]

  _, end_points_with_multiplier = inception.inception_v2(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=2.0)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(2.0 * original_depth, new_depth)
def testUnknowBatchSize(self):
  batch_size = 1
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = inception.inception_v2(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEquals(output.shape, (batch_size, num_classes))
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000

  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  inception.inception_v2(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
  predictions = tf.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def testBuildEndPoints(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_resnet_v2(inputs, num_classes)
    self.assertTrue('Logits' in end_points)
    logits = end_points['Logits']
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('AuxLogits' in end_points)
    aux_logits = end_points['AuxLogits']
    self.assertListEqual(aux_logits.get_shape().as_list(),
                         [batch_size, num_classes])
    pre_pool = end_points['PrePool']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 8, 8, 1536])
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000
  with self.test_session() as sess:
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_resnet_v2(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_resnet_v2(eval_inputs,
                                              num_classes,
                                              is_training=False,
                                              reuse=True)
    predictions = tf.argmax(logits, 1)
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 224, 224

  inputs = tf.random_uniform((batch_size, height, width, 3))
  net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
  self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
  self.assertListEqual(net.get_shape().as_list(),
                       [batch_size, 7, 7, 1024])
  expected_endpoints = ['Conv2d_0',
                        'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                        'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                        'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
                        'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
                        'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
                        'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
                        'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
                        'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
                        'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
                        'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
                        'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
                        'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
                        'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildCustomNetworkUsingConvDefs(self):
  batch_size = 5
  height, width = 224, 224
  conv_defs = [
      mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
      mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
      mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
      mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
  ]

  inputs = tf.random_uniform((batch_size, height, width, 3))
  net, end_points = mobilenet_v1.mobilenet_v1_base(
      inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
  self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
  self.assertListEqual(net.get_shape().as_list(),
                       [batch_size, 56, 56, 512])
  expected_endpoints = ['Conv2d_0',
                        'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                        'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                        'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]

  _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=0.5)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys()
                   if key.startswith('Mixed') or key.startswith('Conv')]

  _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=2.0)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(2.0 * original_depth, new_depth)
def testUnknowBatchSize(self):
  batch_size = 1
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEquals(output.shape, (batch_size, num_classes))
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000

  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, reuse=True)
  predictions = tf.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 299, 299

  inputs = tf.random_uniform((batch_size, height, width, 3))
  final_endpoint, end_points = inception.inception_v3_base(inputs)
  self.assertTrue(final_endpoint.op.name.startswith(
      'InceptionV3/Mixed_7c'))
  self.assertListEqual(final_endpoint.get_shape().as_list(),
                       [batch_size, 8, 8, 2048])
  expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                        'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                        'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
                        'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
                        'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 299, 299
  endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
               'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
               'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
               'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
               'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
  for index, endpoint in enumerate(endpoints):
    with tf.Graph().as_default():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception.inception_v3_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(out_tensor.op.name.startswith(
          'InceptionV3/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildEndPoints(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = inception.inception_v3(inputs, num_classes)
  self.assertTrue('Logits' in end_points)
  logits = end_points['Logits']
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertTrue('AuxLogits' in end_points)
  aux_logits = end_points['AuxLogits']
  self.assertListEqual(aux_logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertTrue('Mixed_7c' in end_points)
  pre_pool = end_points['Mixed_7c']
  self.assertListEqual(pre_pool.get_shape().as_list(),
                       [batch_size, 8, 8, 2048])
  self.assertTrue('PreLogits' in end_points)
  pre_logits = end_points['PreLogits']
  self.assertListEqual(pre_logits.get_shape().as_list(),
                       [batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = inception.inception_v3(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys()
                   if key.startswith('Mixed') or key.startswith('Conv')]

  _, end_points_with_multiplier = inception.inception_v3(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=0.5)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  _, end_points = inception.inception_v3(inputs, num_classes)

  endpoint_keys = [key for key in end_points.keys()
                   if key.startswith('Mixed') or key.startswith('Conv')]

  _, end_points_with_multiplier = inception.inception_v3(
      inputs, num_classes, scope='depth_multiplied_net',
      depth_multiplier=2.0)

  for key in endpoint_keys:
    original_depth = end_points[key].get_shape().as_list()[3]
    new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
    self.assertEqual(2.0 * original_depth, new_depth)
def testUnknowBatchSize(self):
  batch_size = 1
  height, width = 299, 299
  num_classes = 1000

  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = inception.inception_v3(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEquals(output.shape, (batch_size, num_classes))
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000

  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  inception.inception_v3(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                     is_training=False, reuse=True)
  predictions = tf.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def testBuildLogits(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000

  inputs = tf.random_uniform((batch_size, height, width, 3))
  logits, end_points = inception.inception_v4(inputs, num_classes)

  auxlogits = end_points['AuxLogits']
  predictions = end_points['Predictions']
  self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
  self.assertListEqual(auxlogits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertTrue(predictions.op.name.startswith(
      'InceptionV4/Logits/Predictions'))
  self.assertListEqual(predictions.get_shape().as_list(),
                       [batch_size, num_classes])
def testBuildBaseNetwork(self):
  batch_size = 5
  height, width = 299, 299

  inputs = tf.random_uniform((batch_size, height, width, 3))
  net, end_points = inception.inception_v4_base(inputs)
  self.assertTrue(net.op.name.startswith('InceptionV4/Mixed_7d'))
  self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
  expected_endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
      'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
      'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
      'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
  self.assertItemsEqual(end_points.keys(), expected_endpoints)
  for name, op in end_points.iteritems():
    self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
  batch_size = 5
  height, width = 299, 299
  all_endpoints = [
      'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
      'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
      'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
      'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
      'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
  for index, endpoint in enumerate(all_endpoints):
    with tf.Graph().as_default():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception.inception_v4_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(out_tensor.op.name.startswith(
          'InceptionV4/' + endpoint))
      self.assertItemsEqual(all_endpoints[:index + 1], end_points)
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000
  with self.test_session() as sess:
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v4(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v4(eval_inputs,
                                       num_classes,
                                       is_training=False,
                                       reuse=True)
    predictions = tf.argmax(logits, 1)
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def testBuildOnlyUptoFinalEndpoint(self):
  batch_size = 5
  height, width = 224, 224
  endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
               'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
               'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
               'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
               'Mixed_5c']
  for index, endpoint in enumerate(endpoints):
    with tf.Graph().as_default():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      out_tensor, end_points = inception.inception_v1_base(
          inputs, final_endpoint=endpoint)
      self.assertTrue(out_tensor.op.name.startswith(
          'InceptionV1/' + endpoint))
      self.assertItemsEqual(endpoints[:index + 1], end_points)
def testUnknowBatchSize(self):
  batch_size = 1
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = inception.inception_v1(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEquals(output.shape, (batch_size, num_classes))
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 224, 224
  num_classes = 1000

  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  inception.inception_v1(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
  predictions = tf.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
def fixed_dropout(xs, keep_prob, noise_shape, seed=None):
    """Apply dropout with the same mask over all inputs.

    Args:
        xs: list of tensors
        keep_prob: scalar probability of keeping each unit
        noise_shape: shape of the shared dropout mask
        seed: optional random seed

    Returns:
        list of dropped inputs
    """
    with tf.name_scope("dropout", values=xs):
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape, seed=seed, dtype=xs[0].dtype)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        outputs = []
        for x in xs:
            ret = tf.div(x, keep_prob) * binary_tensor
            ret.set_shape(x.get_shape())
            outputs.append(ret)
        return outputs
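A hedged usage sketch: one mask is drawn once and applied to two same-shaped tensors, so the same units are dropped in both (the tensors are invented for the demo):

import tensorflow as tf

h1 = tf.random_uniform([32, 128])
h2 = tf.random_uniform([32, 128])
# A single [32, 128] mask is shared by both outputs.
dropped_h1, dropped_h2 = fixed_dropout([h1, h2], keep_prob=0.8,
                                       noise_shape=[32, 128])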
def testEndPoints(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = vgg.vgg_a(inputs, num_classes)
    expected_names = ['vgg_a/conv1/conv1_1',
                      'vgg_a/pool1',
                      'vgg_a/conv2/conv2_1',
                      'vgg_a/pool2',
                      'vgg_a/conv3/conv3_1',
                      'vgg_a/conv3/conv3_2',
                      'vgg_a/pool3',
                      'vgg_a/conv4/conv4_1',
                      'vgg_a/conv4/conv4_2',
                      'vgg_a/pool4',
                      'vgg_a/conv5/conv5_1',
                      'vgg_a/conv5/conv5_2',
                      'vgg_a/pool5',
                      'vgg_a/fc6',
                      'vgg_a/fc7',
                      'vgg_a/fc8']
    self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = tf.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_a(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                          spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = tf.reduce_mean(logits, [1, 2])
    predictions = tf.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = tf.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_16(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
                           spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = tf.reduce_mean(logits, [1, 2])
    predictions = tf.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = tf.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_19(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                           spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = tf.reduce_mean(logits, [1, 2])
    predictions = tf.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testUnknowBatchSize(self):
  batch_size = 1
  height, width = 224, 224
  num_classes = 1000

  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = inception.inception_v2(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))

  with self.test_session() as sess:
    sess.run(tf.initialize_all_variables())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEquals(output.shape, (batch_size, num_classes))
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000

  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  inception.inception_v2(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
  predictions = tf.argmax(logits, 1)

  with self.test_session() as sess:
    sess.run(tf.initialize_all_variables())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))