We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.variables_initializer().
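Before the examples, a minimal sketch of the API itself (TF 1.x): tf.variables_initializer(var_list, name='init') returns an op that, when run, initializes exactly the variables in var_list, unlike tf.global_variables_initializer(), which initializes all of them. The variable names below are illustrative only.

import tensorflow as tf

v1 = tf.Variable(tf.zeros([2]), name="v1")
v2 = tf.Variable(tf.ones([2]), name="v2")

# Op that initializes only v1; v2 stays uninitialized.
init_v1 = tf.variables_initializer([v1])

with tf.Session() as sess:
    sess.run(init_v1)
    print(sess.run(v1))  # [0. 0.]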
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # If using an older version of TensorFlow, uncomment the line
                # below and comment out the line after it.
                #session.run(tf.initialize_variables([v]), feed_dict)
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more
            # likely if there's another variable outside of the list that still
            # needs to be initialized. This could be detected here, but life's
            # finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
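A hypothetical usage sketch for the helper above: v2's initializer reads v1, so running v2's initializer while v1 is uninitialized raises tf.errors.FailedPreconditionError, which the helper catches and retries on the next pass. The variable names are assumptions for illustration.

import tensorflow as tf

v1 = tf.Variable(tf.ones([3]), name="v1")
v2 = tf.Variable(v1 * 2.0, name="v2")  # initializer depends on v1's value

with tf.Session() as sess:
    # Works even with v2 listed first: the first pass fails on v2 with
    # FailedPreconditionError, initializes v1, and the second pass succeeds.
    initialize_interdependent_variables(sess, [v2, v1], feed_dict=None)
    print(sess.run(v2))  # [2. 2. 2.]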
def initialize_uninitialized_global_variables(sess):
    """
    Only initializes the variables of a TensorFlow session that were not
    already initialized.
    :param sess: the TensorFlow session
    :return:
    """
    # List all global variables
    global_vars = tf.global_variables()

    # Find initialized status for all variables
    is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
    is_initialized = sess.run(is_var_init)

    # List all variables that were not initialized previously
    not_initialized_vars = [var for (var, init) in
                            zip(global_vars, is_initialized) if not init]

    # Initialize all uninitialized variables found, if any
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True

    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
def testComputationSame(self, use_bias):
    """Run through for something with a known answer using SAME padding."""
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        name="conv1",
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
    expected_out = np.array([[5, 7, 7, 7, 5],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [5, 7, 7, 7, 5]])
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
def testComputationValid(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.VALID,
        name="conv1",
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
    expected_output = np.array([[10, 10, 10],
                                [10, 10, 10],
                                [10, 10, 10]])
    if not use_bias:
        expected_output -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_output)
def testMask2D(self):
    """2D Masks are applied properly."""
    # This mask, applied on an image filled with 1, should result in an image
    # filled with 8 (since we sum 4 elements per channel and there are 2 input
    # channels).
    mask = np.array([[1, 1, 1],
                     [1, 0, 0],
                     [0, 0, 0]], dtype=np.float32)
    inputs = tf.constant(1.0, shape=(1, 5, 5, 2))
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        mask=mask,
        padding=snt.VALID,
        use_bias=False,
        initializers=create_constant_initializers(1.0, 0.0, use_bias=False))
    out = conv1(inputs)
    expected_out = np.array([[8] * 3] * 3)
    with self.test_session():
        tf.variables_initializer([conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
def testMask4D(self):
    """4D Masks are applied properly."""
    # This mask, applied on an image filled with 1, should result in an image
    # filled with 17, as there are 18 weights but we zero out one of them.
    mask = np.ones([3, 3, 2, 1], dtype=np.float32)
    mask[0, 0, 0, :] = 0
    inputs = tf.constant(1.0, shape=(1, 5, 5, 2))
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        mask=mask,
        padding=snt.VALID,
        use_bias=False,
        initializers=create_constant_initializers(1.0, 0.0, use_bias=False))
    out = conv1(inputs)
    expected_out = np.array([[17] * 3] * 3)
    with self.test_session():
        tf.variables_initializer([conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)
def testComputationValid(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.Conv1D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.VALID,
        use_bias=use_bias,
        name="conv1",
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
    expected_out = np.asarray([4, 4, 4])
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3]), expected_out)
def testComputation(self, use_bias):
    """Run through for something with a known answer."""
    conv1 = snt.CausalConv1D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        use_bias=use_bias,
        name="conv1",
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
    expected_out = np.reshape(np.array([1, 2, 3, 3, 3]), [1, 5, 1])
    if use_bias:
        expected_out += 1

    init_op = tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w])
    with self.test_session() as sess:
        sess.run(init_op)
        actual_out = sess.run(out)

    self.assertAllClose(actual_out, expected_out)
def testComputationStrided(self, use_bias):
    """Run through for something with a known answer."""
    conv1 = snt.CausalConv1D(
        output_channels=1,
        kernel_shape=3,
        stride=2,
        use_bias=use_bias,
        name="conv1",
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32)))
    expected_out = np.reshape(np.array([1, 3, 3]), [1, 3, 1])
    if use_bias:
        expected_out += 1

    init_op = tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w])
    with self.test_session() as sess:
        sess.run(init_op)
        actual_out = sess.run(out)

    self.assertAllClose(actual_out, expected_out)
def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias)

    x = np.random.randn(1, 5, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(out1.eval(), out2.eval())

        w = np.random.randn(3, 3, 1, 1)
        # Now change the weights.
        conv1.w.assign(w).eval()
        self.assertAllClose(out1.eval(), out2.eval())
def testShapesNotKnown(self, use_bias):
    """Test that the generated shapes are correct when input shape not known."""
    inputs = tf.placeholder(
        tf.float32,
        shape=[None, None, None, self.in_channels],
        name="inputs")

    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=self.channel_multiplier,
        kernel_shape=self.kernel_shape,
        padding=snt.SAME,
        stride=1,
        use_bias=use_bias)
    output = conv1(inputs)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        output_eval = output.eval({inputs: np.zeros(self.input_shape)})
        self.assertEqual(output_eval.shape, tuple(self.output_shape))
def testComputationSame(self, use_bias):
    """Run through for something with a known answer using SAME padding."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=1,
        kernel_shape=[3, 3],
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
    expected_out = np.array([[5, 7, 7, 7, 5],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [5, 7, 7, 7, 5]])
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
def testComputationValidMultiChannel(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=1,
        kernel_shape=[3, 3],
        stride=1,
        padding=snt.VALID,
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32)))
    expected_out = np.array([[[10] * 3] * 3] * 3)
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=3,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias)

    x = np.random.randn(1, 5, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(out1.eval(), out2.eval())

        # Kernel shape was set to 3, which is expanded to [3, 3, 3].
        # Input channels are 1, output channels := in_channels * multiplier.
        # multiplier is kernel_shape[2] == 3. So weight layout must be:
        # (3, 3, 1, 3).
        w = np.random.randn(3, 3, 1, 3)
        # Now change the weights.
        conv1.w.assign(w).eval()
        self.assertAllClose(out1.eval(), out2.eval())
def testShapesNotKnown(self, use_bias):
    """Test that the generated shapes are correct when input shape not known."""
    inputs = tf.placeholder(
        tf.float32,
        shape=[None, None, None, self.in_channels],
        name="inputs")

    conv1 = snt.SeparableConv2D(
        output_channels=self.out_channels_dw,
        channel_multiplier=1,
        kernel_shape=self.kernel_shape,
        padding=snt.SAME,
        use_bias=use_bias)
    output = conv1(inputs)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else
            [conv1.w_dw, conv1.w_pw]).run()
        output_eval = output.eval({inputs: np.zeros(self.input_shape)})
        self.assertEqual(output_eval.shape, tuple(self.output_shape))
def testComputationValidMultiChannel(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.SeparableConv2D(
        output_channels=3,
        channel_multiplier=1,
        kernel_shape=[3, 3],
        padding=snt.VALID,
        use_bias=use_bias,
        initializers=create_separable_constant_initializers(
            1.0, 1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32)))
    expected_out = np.array([[[28] * 3] * 3] * 3)
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else
            [conv1.w_dw, conv1.w_pw]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
def testComputationValid(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.Conv3D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.VALID,
        name="conv1",
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32)))
    expected_out = np.asarray([28] * 27).reshape((3, 3, 3))
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)
def initialize(self, session=None):
    """
    Helper for initializing all the variables. Builds and runs model variables
    and global step initializers. Note that dual variables are initialized only
    when calling `backward`.

    :param session: optional tensorflow session (if None default session is used)

    :return: None
    """
    ss = session or tf.get_default_session()
    assert ss, 'No default tensorflow session!'
    if isinstance(self.w, MergedVariable):
        self.w.initialize(session=session)
    else:
        ss.run(tf.variables_initializer([self.w]))
    ss.run(tf.variables_initializer(self.hyper_gradient_vars + [self.global_step.var]))
def initialize(self, session=None):
    """
    Helper for initializing all the variables. Builds and runs model variables,
    Zs and global step initializers.

    :param session: optional tensorflow session (if None default session is used)

    :return: None
    """
    ss = session or tf.get_default_session()
    assert ss, 'No default tensorflow session!'
    if isinstance(self.w, MergedVariable):
        self.w.initialize(session=session)
    else:
        ss.run(tf.variables_initializer([self.w]))  # never tested
    ss.run(tf.variables_initializer(self.hyper_gradient_vars + [self.global_step.var]))
    [z.initializer().run() for z in self.zs]
    return True
def model_initilization(self, cfg):
    ############################################################################

    def initialization():
        var_list = tf.global_variables()
        for var in var_list:
            self.sess.run(tf.variables_initializer([var]),
                          feed_dict={self.z: self.sample_z[:cfg.iBatchSize],
                                     self.images_lab: self.sample_images[:cfg.iBatchSize],
                                     self.fInputNoise: cfg.fInputNoise})
            print(var.op.name)
        #self.sess.run(tf.initialize_all_tables(), feed_dict={self.z: self.sample_z[:cfg.iBatchSize], self.images_lab: self.sample_images[:cfg.iBatchSize], self.fInputNoise: cfg.fInputNoiseBiG})
        print('optimizor initialization')

    if cfg.bLoadCheckpoint:
        if self.load(cfg):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            initialization()
    else:
        initialization()
def restore_inception_resnet_variables_from_weight(sess, weights_path):
    adam_vars = [var for var in tf.global_variables()
                 if 'Adam' in var.name or
                 'beta1_power' in var.name or
                 'beta2_power' in var.name]
    uninit_vars = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES,
        scope='InceptionResnetV2/Conv2d_1a_3x3') + adam_vars
    init_op = tf.variables_initializer(uninit_vars)
    variables_to_restore = slim.get_variables_to_restore(
        exclude=['InceptionResnetV2/Conv2d_1a_3x3'])
    for var in uninit_vars:
        if var in variables_to_restore:
            variables_to_restore.remove(var)
    saver = tf.train.Saver(variables_to_restore)
    print('Initializing new variables to train from downloaded inception resnet weights')
    sess.run(init_op)
    saver.restore(sess, weights_path)
    return 0
def guarantee_initialized_variables(session, variables=None):
    """Guarantee that all the specified variables are initialized.

    If a variable is already initialized, leave it alone. Otherwise, initialize it.
    If no variables are specified, checks all variables in the default graph.

    Args:
        variables (list[tf.Variable])
    """
    name_to_var = {v.op.name: v for v in
                   tf.global_variables() + tf.local_variables()}
    uninitialized_variables = list(
        name_to_var[name] for name in
        session.run(tf.report_uninitialized_variables(variables)))
    init_op = tf.variables_initializer(uninitialized_variables)
    session.run(init_op)
    return uninitialized_variables
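One caveat worth noting: under Python 3, tf.report_uninitialized_variables() returns variable names as bytes, so the name_to_var lookup above (whose keys are str) fails with a KeyError. A minimal adapted sketch, assuming utf-8 variable names (the function name here is hypothetical):

import tensorflow as tf

def guarantee_initialized_variables_py3(session, variables=None):
    """Python 3 variant: decode the bytes names before the dict lookup."""
    name_to_var = {v.op.name: v for v in
                   tf.global_variables() + tf.local_variables()}
    names = session.run(tf.report_uninitialized_variables(variables))
    uninitialized = [name_to_var[n.decode('utf-8')] for n in names]
    session.run(tf.variables_initializer(uninitialized))
    return uninitialized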
def initialize_uninitialized_variables(sess):
    """
    Only initialize the weights that have not yet been initialized by other
    means, such as importing a metagraph and a checkpoint. It's useful when
    extending an existing model.
    """
    uninit_vars = []
    uninit_tensors = []
    for var in tf.global_variables():
        uninit_vars.append(var)
        uninit_tensors.append(tf.is_variable_initialized(var))
    uninit_bools = sess.run(uninit_tensors)
    uninit = zip(uninit_bools, uninit_vars)
    uninit = [var for init, var in uninit if not init]
    sess.run(tf.variables_initializer(uninit))

#-------------------------------------------------------------------------------
def _initialize_metrics(self):
    """ Initialize the model metrics """
    self.metrics = {}
    self.metric_values = {}
    self.update_metrics = {}
    self.reset_metrics = {}

    for data_scope in (Data.TRAIN, Data.VALIDATE, Data.TEST):
        metrics = self.collect_metrics(data_scope)
        self.metrics[data_scope] = metrics
        self.metric_values[data_scope] = {
            name: metric['scalar']
            for name, metric in iteritems(metrics)}
        self.update_metrics[data_scope] = [
            metric['update_op'] for metric in itervalues(metrics)]

        metric_variables = []
        with stats_utils.metric_scope(data_scope, graph=self.graph) as scope:
            for local in tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope):
                metric_variables.append(local)
        self.reset_metrics[data_scope] = tf.variables_initializer(metric_variables)
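The reset ops built above exploit the fact that TF 1.x streaming metrics store their running statistics in local variables, so re-running their initializer zeroes the accumulators. A self-contained sketch of just that pattern, assuming nothing beyond tf.metrics:

import tensorflow as tf

labels = tf.constant([1.0, 2.0, 3.0])
mean, update_op = tf.metrics.mean(labels)

# tf.metrics.* create local variables (total, count); re-initializing
# them resets the running mean between evaluation runs.
metric_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
reset_op = tf.variables_initializer(metric_vars)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(mean))  # 2.0
    sess.run(reset_op)     # running statistics back to zero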
def _init_variables(self):
    """ Create the initialization operation for the variables """
    # Adam optimizer uses two variables that can only be accessed through the
    # use of a protected function since the variables aren't scoped in any way.
    # Trying to add a tf.variable_scope around apply_gradients where the
    # variables are created did not help.
    var_list = set(self.optimizer._get_beta_accumulators())  # pylint: disable=protected-access

    slot_names = self.optimizer.get_slot_names()
    for tower in self.towers:
        variables = tower.global_variables
        var_list.update(variables)
        for slot_name in slot_names:
            for variable in variables:
                slot = self.optimizer.get_slot(variable, slot_name)
                if slot is not None:
                    var_list.add(slot)

    # Initialize all the variables
    self.initialization_operation = tf.group(
        tf.variables_initializer(var_list),
        # Apparently local variables are not part of 'all' variables... go figure
        # This is needed for metrics for example
        tf.local_variables_initializer())
def load(cls, dirname, session, training=False):
    """
    Load a previously saved file.

    :param dirname: directory with model files
    :param session: tensorflow session
    :param training: whether to create training tensors
    :return: an instance of MultiFeedForward
    :rtype: MultiFeedForwardClassifier
    """
    params = utils.load_parameters(dirname)
    model = cls._init_from_load(params, training)

    tensorflow_file = os.path.join(dirname, 'model')
    saver = tf.train.Saver(tf.trainable_variables())
    saver.restore(session, tensorflow_file)

    # if training, optimizer values still have to be initialized
    if training:
        train_vars = [v for v in tf.global_variables()
                      if v.name.startswith('training')]
        init_op = tf.variables_initializer(train_vars)
        session.run(init_op)

    return model
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
def initialize():
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))
    ALREADY_INITIALIZED.update(new_variables)
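This is a memoized-initialization pattern: a module-level set records which variables have already been initialized, so repeated calls only touch newly created variables. A self-contained sketch under the assumption that get_session() simply returns the default session (in the original project it returns a cached one):

import tensorflow as tf

ALREADY_INITIALIZED = set()  # module-level memo of initialized variables

def get_session():
    # Assumption: stand-in for the project's cached-session accessor.
    return tf.get_default_session()

def initialize():
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))
    ALREADY_INITIALIZED.update(new_variables)

with tf.Session() as sess:
    tf.Variable(0, name="a")
    initialize()  # initializes `a`
    tf.Variable(1, name="b")
    initialize()  # initializes only `b`; `a` is skipped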
def initialize(self, sess):
    # Initial file lists are empty
    np_paths = []
    ss_paths = []
    # Fresh train directly from ImageNet weights
    print('Loading initial model weights from {:s}'.format(self.pretrained_model))
    variables = tf.global_variables()
    # Initialize all variables first
    sess.run(tf.variables_initializer(variables, name='init'))
    var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
    # Get the variables to restore, ignoring the variables to fix
    variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)

    restorer = tf.train.Saver(variables_to_restore)
    restorer.restore(sess, self.pretrained_model)
    print('Loaded.')
    # Need to fix the variables before loading, so that the RGB weights are
    # changed to BGR. For VGG16 it also changes the convolutional weights fc6
    # and fc7 to fully connected weights.
    self.net.fix_variables(sess, self.pretrained_model)
    print('Fixed.')
    last_snapshot_iter = 0
    rate = cfg.TRAIN.LEARNING_RATE
    stepsizes = list(cfg.TRAIN.STEPSIZE)

    return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths
def init(self):
    if self.load_path:
        print('Attempting to load directly from path:', self.load_path)
        self.saver.restore(self.sess, self.load_path)
    else:
        print('New ENCODE Model..init new Z parameters')
        init = tf.variables_initializer(var_list=self.var)
        print('Initializing following variables:')
        for v in self.var:
            print(v.name, v.get_shape().as_list())
        self.model.sess.run(init)
def initialize_uninitialized(sess):
    global_vars = tf.global_variables()
    is_not_initialized = sess.run(
        [tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [
        v for (v, f) in zip(global_vars, is_not_initialized) if not f]

    print([str(i.name) for i in not_initialized_vars])  # only for testing
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
def _init_uninitialized(sess):
    """Initializes all uninitialized variables and returns them as a list."""
    variables = tf.global_variables()
    if not variables:
        return []  # sess.run() barfs on empty list
    is_initialized = sess.run(
        [tf.is_variable_initialized(v) for v in variables])
    needs_init = [v for v, i in zip(variables, is_initialized) if not i]
    if not needs_init:
        return []
    sess.run(tf.variables_initializer(needs_init))
    return needs_init
def initialize_variables(self, session):
    with tf.device(self.device):
        if len(self.variables()) == 0:
            return
        init = tf.variables_initializer(self.variables())
        session.run(init)
        self.initialized = True
def resetGlobal(self):
    self.global_acc = 0.0
    self.global_loss = 0.0

# def initialize_uninit_variables(session, list_of_variables=None):
#     if list_of_variables is None:
#         list_of_variables = tf.global_variables()
#     uninitialized_variables = list(tf.get_variable(name) for name in
#                                    session.run(tf.report_uninitialized_variables(list_of_variables)))
#     session.run(tf.variables_initializer(uninitialized_variables))
#     return uninitialized_variables
def testShapesNotKnown(self, use_bias):
    """The generated shapes are correct when input shape not known."""
    batch_size = 5
    in_height = in_width = 32
    in_channels = out_channels = 5
    kernel_shape_h = kernel_shape_w = 3

    inputs = tf.placeholder(
        tf.float32,
        shape=[None, None, None, in_channels],
        name="inputs")

    conv1 = snt.Conv2D(
        name="conv1",
        output_channels=out_channels,
        kernel_shape=[kernel_shape_h, kernel_shape_w],
        padding=snt.SAME,
        stride=1,
        use_bias=use_bias)
    output = conv1(inputs)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        output_eval = output.eval({
            inputs: np.zeros([batch_size, in_height, in_width, in_channels])})
        self.assertEqual(
            output_eval.shape,
            (batch_size, in_height, in_width, out_channels))
def testInitializers(self, use_bias):
    """Test initializers work as expected."""
    w = random.random()
    b = random.random()
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        name="conv1",
        use_bias=use_bias,
        initializers=create_constant_initializers(w, b, use_bias))
    conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(
            conv1.w.eval(),
            np.full([3, 3, 2, 1], w, dtype=np.float32))
        if use_bias:
            self.assertAllClose(conv1.b.eval(), [b])

    err = "Initializer for 'w' is not a callable function or dictionary"
    with self.assertRaisesRegexp(TypeError, err):
        snt.Conv2D(output_channels=10,
                   kernel_shape=3,
                   stride=1,
                   name="conv1",
                   initializers={"w": tf.ones([])})
def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias,
        name="conv1")

    x = np.random.randn(1, 5, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(out1.eval(), out2.eval())

        # Now change the weights
        w = np.random.randn(3, 3, 1, 1)
        conv1.w.assign(w).eval()
        self.assertAllClose(out1.eval(), out2.eval())
def testAtrousConvSame(self, use_bias):
    """The atrous conv 2D is constructed and applied correctly with SAME."""
    conv1 = snt.Conv2D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        rate=2,
        padding=snt.SAME,
        name="conv1",
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
    expected_out = np.array([[5, 5, 7, 5, 5],
                             [5, 5, 7, 5, 5],
                             [7, 7, 10, 7, 7],
                             [5, 5, 7, 5, 5],
                             [5, 5, 7, 5, 5]])
    if not use_bias:
        expected_out -= 1

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)
def testShapesNotKnown(self, use_bias):
    """The generated shapes are correct when input shape not known."""
    batch_size = 5
    in_length = 32
    in_channels = out_channels = 5
    kernel_shape = 3

    inputs = tf.placeholder(
        tf.float32,
        shape=[None, None, in_channels],
        name="inputs")

    conv1 = snt.Conv1D(
        name="conv1",
        output_channels=out_channels,
        kernel_shape=kernel_shape,
        padding=snt.SAME,
        stride=1,
        use_bias=use_bias)
    output = conv1(inputs)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        output_eval = output.eval({
            inputs: np.zeros([batch_size, in_length, in_channels])})
        self.assertEqual(
            output_eval.shape,
            (batch_size, in_length, out_channels))
def testInitializers(self, use_bias):
    """Test initializers work as expected."""
    w = random.random()
    b = random.random()
    conv1 = snt.Conv1D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias,
        name="conv1",
        initializers=create_constant_initializers(w, b, use_bias))
    conv1(tf.placeholder(tf.float32, [1, 10, 2]))

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(
            conv1.w.eval(),
            np.full([3, 2, 1], w, dtype=np.float32))
        if use_bias:
            self.assertAllClose(conv1.b.eval(), [b])

    err = "Initializer for 'w' is not a callable function or dictionary"
    with self.assertRaisesRegexp(TypeError, err):
        snt.Conv1D(output_channels=10,
                   kernel_shape=3,
                   stride=1,
                   padding=snt.SAME,
                   use_bias=use_bias,
                   name="conv1",
                   initializers={"w": tf.ones([])})
def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.Conv1D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias,
        name="conv1")

    x = np.random.randn(1, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(out1.eval(), out2.eval())

        # Now change the weights
        w = np.random.randn(3, 1, 1)
        conv1.w.assign(w).eval()
        self.assertAllClose(out1.eval(), out2.eval())
def testSharing(self, batch_size, in_length, in_channels, out_channels,
                kernel_shape, padding, use_bias, out_shape, stride_shape):
    """Sharing is working."""
    conv1 = snt.Conv1DTranspose(
        output_channels=out_channels,
        output_shape=out_shape,
        kernel_shape=kernel_shape,
        padding=padding,
        stride=stride_shape,
        name="conv1",
        use_bias=use_bias)

    x = np.random.randn(batch_size, in_length, in_channels)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(out1.eval(), out2.eval())

        # Now change the weights
        w = np.random.randn(1, kernel_shape, out_channels, in_channels)
        conv1.w.assign(w).eval()
        self.assertAllClose(out1.eval(), out2.eval())
def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.CausalConv1D(
        output_channels=1,
        kernel_shape=3,
        stride=1,
        use_bias=use_bias,
        name="conv1")

    x = np.random.randn(1, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)
    out1 = conv1(x1)
    out2 = conv1(x2)

    w = np.random.randn(3, 1, 1)
    weight_change_op = conv1.w.assign(w)
    init_op = tf.variables_initializer(
        [conv1.w, conv1.b] if use_bias else [conv1.w])

    with self.test_session() as sess:
        sess.run(init_op)
        first_replica_out = sess.run(out1)
        second_replica_out = sess.run(out2)

        # Now change the weights
        sess.run(weight_change_op)
        first_replica_out_changed = sess.run(out1)
        second_replica_out_changed = sess.run(out2)

    self.assertAllClose(first_replica_out, second_replica_out)
    self.assertAllClose(first_replica_out_changed, second_replica_out_changed)
def testInitializers(self, use_bias):
    """Test that initializers work as expected."""
    w = random.random()
    b = np.random.randn(6)  # Kernel shape is 3, input channels are 2, 2*3 = 6
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=3,
        kernel_shape=3,
        stride=1,
        use_bias=use_bias,
        initializers=create_constant_initializers(w, b, use_bias))
    conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))

    with self.test_session():
        tf.variables_initializer(
            [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
        self.assertAllClose(
            conv1.w.eval(),
            np.full([3, 3, 2, 3], w, dtype=np.float32))
        if use_bias:
            self.assertAllClose(conv1.b.eval(), b)

    error_msg = "Initializer for 'w' is not a callable function"
    with self.assertRaisesRegexp(TypeError, error_msg):
        snt.DepthwiseConv2D(
            channel_multiplier=3,
            kernel_shape=3,
            stride=1,
            use_bias=use_bias,
            initializers={"w": tf.ones([])})