We extracted the following 49 code examples from open-source Python projects to illustrate how to use tensorflow.truncated_normal().
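Before the project examples, a minimal usage sketch may help (assuming TensorFlow 1.x, where tf.truncated_normal is available; in TensorFlow 2.x the equivalent is tf.random.truncated_normal). The shape, stddev, and variable name below are illustrative and not taken from any of the projects:

import tensorflow as tf

# Sample initial weights from a normal distribution in which values falling more
# than 2 standard deviations from the mean are re-drawn (a truncated normal).
initial = tf.truncated_normal([784, 256], mean=0.0, stddev=0.1, dtype=tf.float32)
weights = tf.Variable(initial, name='weights')  # illustrative variable name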
def get_weight_variable(shape, name=None, type='xavier_uniform', regularize=True, **kwargs):

    initialise_from_constant = False
    if type == 'xavier_uniform':
        initial = xavier_initializer(uniform=True, dtype=tf.float32)
    elif type == 'xavier_normal':
        initial = xavier_initializer(uniform=False, dtype=tf.float32)
    elif type == 'he_normal':
        initial = variance_scaling_initializer(uniform=False, factor=2.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'he_uniform':
        initial = variance_scaling_initializer(uniform=True, factor=2.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'caffe_uniform':
        initial = variance_scaling_initializer(uniform=True, factor=1.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'simple':
        stddev = kwargs.get('stddev', 0.02)
        initial = tf.truncated_normal(shape, stddev=stddev, dtype=tf.float32)
        initialise_from_constant = True
    elif type == 'bilinear':
        weights = _bilinear_upsample_weights(shape)
        initial = tf.constant(weights, shape=shape, dtype=tf.float32)
        initialise_from_constant = True
    else:
        raise ValueError('Unknown initialisation requested: %s' % type)

    if name is None:  # This keeps the option open to use unnamed Variables
        weight = tf.Variable(initial)
    else:
        if initialise_from_constant:
            weight = tf.get_variable(name, initializer=initial)
        else:
            weight = tf.get_variable(name, shape=shape, initializer=initial)

    if regularize:
        tf.add_to_collection('weight_variables', weight)

    return weight
def _conv_layer(self, bottom, filter_size, filter_num, scope_name, bottom_channel=None, padding='SAME'):
    if not bottom_channel:
        _, _, _, bottom_channel = bottom.get_shape().as_list()
    with tf.variable_scope(scope_name):
        kernel = tf.Variable(
            tf.truncated_normal([*filter_size, bottom_channel, filter_num], dtype=tf.float32, stddev=1e-1),
            trainable=False,
            name='weights'
        )
        conv = tf.nn.conv2d(bottom, kernel, [1, 1, 1, 1], padding=padding)

        biases = tf.Variable(
            tf.constant(0.0, shape=[filter_num], dtype=tf.float32),
            trainable=True,
            name='bias'
        )
        out = tf.nn.bias_add(conv, biases)

        return out
def _get_weight_variable(self, layer_name, name, shape, L2=1):
    wname = '%s/%s:0' % (layer_name, name)
    fanin, fanout = shape[-2:]
    for dim in shape[:-2]:
        fanin *= float(dim)
        fanout *= float(dim)

    sigma = self._xavi_norm(fanin, fanout)
    if self.weights is None or wname not in self.weights:
        w1 = tf.get_variable(name,
                             initializer=tf.truncated_normal(shape=shape, mean=0, stddev=sigma))
        print('{:>23} {:>23}'.format(wname, 'randomly initialize'))
    else:
        w1 = tf.get_variable(name, shape=shape,
                             initializer=tf.constant_initializer(value=self.weights[wname], dtype=tf.float32))
        self.loaded_weights[wname] = 1
    if wname != w1.name:
        print(wname, w1.name)
        assert False
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(w1) * L2)
    return w1
def sample_encoded_context(self, embeddings):
    '''Helper function for init_opt'''
    c_mean_logsigma = self.model.generate_condition(embeddings)
    mean = c_mean_logsigma[0]
    if cfg.TRAIN.COND_AUGMENTATION:
        # epsilon = tf.random_normal(tf.shape(mean))
        epsilon = tf.truncated_normal(tf.shape(mean))
        stddev = tf.exp(c_mean_logsigma[1])
        c = mean + stddev * epsilon
        kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
    else:
        c = mean
        kl_loss = 0
    return c, cfg.TRAIN.COEFF.KL * kl_loss
def sample_encoded_context(self, embeddings):
    '''Helper function for init_opt'''
    # Build conditioning augmentation structure for text embedding
    # under different variable_scope: 'g_net' and 'hr_g_net'
    c_mean_logsigma = self.model.generate_condition(embeddings)
    mean = c_mean_logsigma[0]
    if cfg.TRAIN.COND_AUGMENTATION:
        # epsilon = tf.random_normal(tf.shape(mean))
        epsilon = tf.truncated_normal(tf.shape(mean))
        stddev = tf.exp(c_mean_logsigma[1])
        c = mean + stddev * epsilon
        kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
    else:
        c = mean
        kl_loss = 0
    # TODO: play with the coefficient for KL
    return c, cfg.TRAIN.COEFF.KL * kl_loss
def __init__(self, embedding_length):
    self._calculator_loom = CalculatorLoom(embedding_length)

    self._labels_placeholder = tf.placeholder(tf.float32)
    self._classifier_weights = tf.Variable(
        tf.truncated_normal([embedding_length, 3],
                            dtype=tf.float32,
                            stddev=1),
        name='classifier_weights')
    self._output_weights = tf.matmul(
        self._calculator_loom.output(), self._classifier_weights)
    self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=self._output_weights, labels=self._labels_placeholder))

    self._true_labels = tf.argmax(self._labels_placeholder, dimension=1)
    self._prediction = tf.argmax(self._output_weights, dimension=1)
    self._accuracy = tf.reduce_mean(tf.cast(
        tf.equal(self._true_labels, self._prediction), dtype=tf.float32))
def weight_variable(shape, name, var_type='normal', const=1):
    """Initializes a tensorflow weight variable.

    Args:
        shape: An array representing shape of the weight variable
        name: A string name given to the variable.
        var_type: can be either 'normal', for weights following a Gaussian
            distribution around 0, or 'xavier', for the Xavier method
        const: Numeric value that controls the range of the weights within
            the Xavier method.

    Returns:
        Tensor variable for the weights
    """
    if var_type == 'xavier':
        """ Xavier initialization of network weights.
        Taken from:
        https://gist.github.com/blackecho/3a6e4d512d3aa8aa6cf9
        https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
        """
        assert len(shape) == 2
        low = -const * np.sqrt(6.0 / (shape[0] + shape[1]))
        high = const * np.sqrt(6.0 / (shape[0] + shape[1]))
        initial = tf.random_uniform((shape[0], shape[1]), minval=low, maxval=high)
    else:
        initial = tf.truncated_normal(shape, stddev=1.0 / math.sqrt(float(shape[0])), dtype=tf.float32)

    return tf.Variable(initial, name=name)
def get_conv_filter(self, params):
    if params["name"] + "/weights" in self.modelDict:
        init = tf.constant_initializer(value=self.modelDict[params["name"] + "/weights"], dtype=tf.float32)
        var = tf.get_variable(name="weights", initializer=init, shape=params["shape"])
        print "loaded " + params["name"] + "/weights"
    else:
        if params["std"]:
            stddev = params["std"]
        else:
            fanIn = params["shape"][0] * params["shape"][1] * params["shape"][2]
            stddev = (2 / float(fanIn)) ** 0.5
        init = tf.truncated_normal(shape=params["shape"], stddev=stddev, seed=0)
        var = tf.get_variable(name="weights", initializer=init)
        print "generated " + params["name"] + "/weights"

    if not tf.get_variable_scope().reuse:
        weightDecay = tf.mul(tf.nn.l2_loss(var), self._wd, name='weight_loss')
        tf.add_to_collection('losses', weightDecay)

    return var
def minibatch(self, dataset, subset, use_datasets, cache_data, shift_ratio=-1):
    """Get synthetic image batches."""
    del subset, use_datasets, cache_data, shift_ratio
    input_shape = [self.batch_size, self.height, self.width, self.depth]
    images = tf.truncated_normal(
        input_shape,
        dtype=self.dtype,
        stddev=1e-1,
        name='synthetic_images')
    labels = tf.random_uniform(
        [self.batch_size],
        minval=0,
        maxval=dataset.num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
    # Note: This results in a H2D copy, but no computation
    # Note: This avoids recomputation of the random values, but still
    #       results in a H2D copy.
    images = tf.contrib.framework.local_variable(images, name='images')
    labels = tf.contrib.framework.local_variable(labels, name='labels')
    if self.num_splits == 1:
        images_splits = [images]
        labels_splits = [labels]
    else:
        images_splits = tf.split(images, self.num_splits, 0)
        labels_splits = tf.split(labels, self.num_splits, 0)
    return images_splits, labels_splits
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.001)
    return tf.Variable(initial, name=name)
def weight_variable(shape, stddev=0.1):
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial)
def weight_variable_devonc(shape, stddev=0.1):
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
def Weight(shape, name):
    return tf.Variable(name=name + "_Weights",
                       initial_value=tf.truncated_normal(shape=shape, mean=0, stddev=0.1))
def initWeight(shape):
    weights = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(weights)

# start with 0.1 so reLu isnt always 0
def weight_variable(shape):
    """
    A handy little function to create TensorFlow weight variables.

    :param shape: the dimensions of the variable to be created
    :return: a TensorFlow weight variable ready for training
    """
    variable = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(variable)
def add_final_training_ops(graph, class_count, final_tensor_name,
                           ground_truth_tensor_name):
    """Adds a new softmax and fully-connected layer for training.

    We need to retrain the top layer to identify our new classes, so this function
    adds the right operations to the graph, along with some variables to hold the
    weights, and then sets up all the gradients for the backward pass.

    The set up for the softmax and fully-connected layers is based on:
    https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html

    Args:
        graph: Container for the existing model's Graph.
        class_count: Integer of how many categories of things we're trying to
            recognize.
        final_tensor_name: Name string for the new final node that produces results.
        ground_truth_tensor_name: Name string of the node we feed ground truth data
            into.

    Returns:
        The training step and cross-entropy mean ops.
    """
    bottleneck_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        BOTTLENECK_TENSOR_NAME))
    layer_weights = tf.Variable(
        tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001),
        name='final_weights')
    layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
    logits = tf.matmul(bottleneck_tensor, layer_weights,
                       name='final_matmul') + layer_biases
    tf.nn.softmax(logits, name=final_tensor_name)
    ground_truth_placeholder = tf.placeholder(tf.float32,
                                              [None, class_count],
                                              name=ground_truth_tensor_name)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits, ground_truth_placeholder)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
        cross_entropy_mean)
    return train_step, cross_entropy_mean

# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial)
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=1.0 / math.sqrt(float(shape[0])))
    return tf.Variable(initial, name=name)
def initializeWeights(self):
    shared_sizes = []
    self.weights_shared = []
    self.biases_shared = []
    for i in range(len(self.hidden_sizes_shared)):
        if i == 0:
            input_len = self.input_size
        else:
            input_len = self.hidden_sizes_shared[i-1]

        output_len = self.hidden_sizes_shared[i]

        layer_weights = tfnet.weight_variable([input_len, output_len], name='weights' + str(i))
        layer_biases = tfnet.bias_variable([output_len], name='biases' + str(i))

        self.weights_shared.append(layer_weights)
        self.biases_shared.append(layer_biases)
        shared_sizes.append((str(input_len) + "x" + str(output_len), str(output_len)))

    task_initial_w1 = tf.truncated_normal([self.n_tasks, self.hidden_sizes_shared[-1], self.hidden_size_task],
                                          stddev=1.0 / math.sqrt(float(self.hidden_sizes_shared[-1])))
    self.task_w1 = tf.Variable(task_initial_w1, name="task_weight1")
    task_initial_b1 = tf.constant(0.1, shape=[self.n_tasks, self.hidden_size_task])
    self.task_b1 = tf.Variable(task_initial_b1, name="task_bias1")

    task_initial_w2 = tf.truncated_normal([self.n_tasks, self.hidden_size_task, self.output_size],
                                          stddev=1.0 / math.sqrt(float(self.hidden_size_task)))
    self.task_w2 = tf.Variable(task_initial_w2, name="task_weight2")
    task_initial_b2 = tf.constant(0.1, shape=[self.n_tasks, self.output_size])
    self.task_b2 = tf.Variable(task_initial_b2, name="task_bias2")

    if self.verbose:
        print "Okay, making a neural net with the following structure:"
        print "\tShared:", shared_sizes
        print "\tTask:", tf.shape(self.task_w1), "x", tf.shape(self.task_w2)
def tf_weight_variable(shape, name):
    """Initializes a tensorflow weight variable with random values
    centered around 0.
    """
    initial = tf.truncated_normal(shape, stddev=1.0 / math.sqrt(float(shape[0])), dtype=tf.float64)
    return tf.Variable(initial, name=name)
def sample_encoded_context(embeddings, model, bAugmentation=True):
    '''Helper function for init_opt'''
    # Build conditioning augmentation structure for text embedding
    # under different variable_scope: 'g_net' and 'hr_g_net'
    c_mean_logsigma = model.generate_condition(embeddings)
    mean = c_mean_logsigma[0]
    if bAugmentation:
        # epsilon = tf.random_normal(tf.shape(mean))
        epsilon = tf.truncated_normal(tf.shape(mean))
        stddev = tf.exp(c_mean_logsigma[1])
        c = mean + stddev * epsilon
    else:
        c = mean
    return c
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def create_variable(shape, name, c=None, sigma=None, trainable=True):
    if sigma:
        initial = tf.truncated_normal(shape, stddev=sigma, name=name)
    else:
        initial = tf.constant(c if c else 0.0, shape=shape, name=name)
    return tf.Variable(initial, trainable=trainable)
def xavier_normal_dist(shape):
    return tf.truncated_normal(shape, mean=0, stddev=tf.sqrt(3. / shape[-1] + shape[-2]))
def xavier_normal_dist_conv3d(shape):
    return tf.truncated_normal(shape, mean=0,
                               stddev=tf.sqrt(3. / (tf.reduce_prod(shape[:3]) * tf.reduce_sum(shape[3:]))))
def convolution_layer_3d(layer_input, filter, strides, padding='SAME'):
    assert len(filter) == 5  # [filter_depth, filter_height, filter_width, in_channels, out_channels]
    assert len(strides) == 5  # must match input dimensions [batch, in_depth, in_height, in_width, in_channels]
    assert padding in ['VALID', 'SAME']

    # w = tf.Variable(initial_value=tf.truncated_normal(shape=filter), name='weights')
    w = tf.Variable(initial_value=xavier_uniform_dist_conv3d(shape=filter), name='weights')
    b = tf.Variable(tf.constant(1.0, shape=[filter[-1]]), name='biases')
    convolution = tf.nn.conv3d(layer_input, w, strides, padding)
    return convolution + b
def deconvolution_layer_3d(layer_input, filter, output_shape, strides, padding='SAME'):
    assert len(filter) == 5  # [depth, height, width, output_channels, in_channels]
    assert len(strides) == 5  # must match input dimensions [batch, depth, height, width, in_channels]
    assert padding in ['VALID', 'SAME']

    # w = tf.Variable(initial_value=tf.truncated_normal(shape=filter), name='weights')
    w = tf.Variable(initial_value=xavier_uniform_dist_conv3d(shape=filter), name='weights')
    b = tf.Variable(tf.constant(1.0, shape=[filter[-2]]), name='biases')
    deconvolution = tf.nn.conv3d_transpose(layer_input, w, output_shape, strides, padding)
    return deconvolution + b
def weightVar(shape, mean=0.0, stddev=0.02, name='weights'):
    init_w = tf.truncated_normal(shape=shape, mean=mean, stddev=stddev)
    return tf.Variable(init_w, name=name)
def weight_variable(self, shape, name):
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.get_variable(name=name, initializer=initial, trainable=True)
def weight_variable(self, shape):
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial)
def weight_variable(self, shape, name="v"):
    if self.initializer == "graves" and False:
        initial = tf.truncated_normal_initializer(mean=0., stddev=.075, seed=None, dtype=tf.float32)
    else:
        initial = tf.truncated_normal(shape, stddev=.075)
    return tf.Variable(initial, name=name + "_weight")
def _linear_layer(self, input, input_size, output_size, scope_name):
    with tf.variable_scope(scope_name) as scope:
        weights = tf.Variable(name='weights',
                              initial_value=tf.truncated_normal(shape=[input_size, output_size], stddev=0.1))
        biases = tf.Variable(name='biases',
                             initial_value=tf.constant(value=0.1, shape=[output_size]))
        output = tf.matmul(input, weights) + biases
        return output
def _weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def __init__(self, embedding_length):
    self._embedding_length = embedding_length
    self._named_tensors = {}

    for n in xrange(10):
        # Note: the examples only have the numbers 0 through 9 as terminal nodes.
        name = 'terminal_' + str(n)
        self._named_tensors[name] = tf.Variable(
            tf.truncated_normal([embedding_length],
                                dtype=tf.float32,
                                stddev=1),
            name=name)

    self._combiner_weights = {}
    self._loom_ops = {}
    for name in calculator_pb2.CalculatorExpression.OpCode.keys():
        weights_var = tf.Variable(
            tf.truncated_normal([2 * embedding_length, embedding_length],
                                dtype=tf.float32,
                                stddev=1),
            name=name)
        self._combiner_weights[name] = weights_var
        self._loom_ops[name] = CombineLoomOp(2, embedding_length, weights_var)

    self._loom = loom.Loom(
        named_tensors=self._named_tensors,
        named_ops=self._loom_ops)

    self._output = self._loom.output_tensor(
        loom.TypeShape('float32', [embedding_length]))
def weight_variable(self, shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)