The following 40 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.make_template().
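For orientation, here is a minimal, self-contained sketch of the pattern the examples below build on (TF 1.x; the function and scope names are illustrative, not from any of the projects): tf.make_template wraps a variable-creating function so that the first call creates the variables and every later call reuses them.

import tensorflow as tf

def scale_by_y(x):
    # Variables must be created via tf.get_variable for the template to
    # manage reuse; tf.Variable would create a fresh variable per call.
    y = tf.get_variable('y', shape=[], initializer=tf.ones_initializer())
    return x * y

scale = tf.make_template('scale', scale_by_y)
z = scale(tf.constant(3.0))   # first call creates variable 'scale/y'
w = scale(tf.constant(5.0))   # second call reuses 'scale/y'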
def templatemethod(name_):
    """This decorator wraps a method with `tf.make_template`. For example,

    @templatemethod
    def my_method():
        # Create variables
    """
    def template_decorator(func):
        """Inner decorator function"""
        def func_wrapper(*args, **kwargs):
            """Inner wrapper function"""
            templated_func = tf.make_template(name_, func)
            return templated_func(*args, **kwargs)
        return func_wrapper
    return template_decorator
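A hypothetical usage sketch (the class and method names are my own, not from the source project): the decorator routes the method body through a template, so variables created inside it land under the given scope name. Note that, as written, a fresh template is built on each invocation, so separate calls get uniquified scopes rather than shared variables.

class Encoder(object):
    @templatemethod("encode")
    def encode(self, inputs):
        # variables created here land under an "encode" scope
        return tf.layers.dense(inputs, 64)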
def setUp(self):
    super(TestVirtualAdversarialMethod, self).setUp()
    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    def dummy_model(x):
        net = slim.fully_connected(x, 60)
        return slim.fully_connected(net, 10, activation_fn=None)

    self.sess = tf.Session()
    self.sess.as_default()
    self.model = tf.make_template('dummy_model', dummy_model)
    self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)

    # initialize model
    with tf.name_scope('dummy_model'):
        self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
    self.sess.run(tf.global_variables_initializer())
def setUp(self):
    super(TestSaliencyMapMethod, self).setUp()
    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    def dummy_model(x):
        net = slim.fully_connected(x, 60)
        return slim.fully_connected(net, 10, activation_fn=None)

    self.sess = tf.Session()
    self.sess.as_default()
    self.model = tf.make_template('dummy_model', dummy_model)
    self.attack = SaliencyMapMethod(self.model, sess=self.sess)

    # initialize model
    with tf.name_scope('dummy_model'):
        self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
    self.sess.run(tf.global_variables_initializer())

    self.attack = SaliencyMapMethod(self.model, sess=self.sess)
def initialize_graph(self, input_statistics):
    """Save templates for components, which can then be used repeatedly.

    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch.

    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
    # Create templates so we don't have to worry about variable reuse.
    self._lstm_cell_run = tf.make_template(
        name_="lstm_cell",
        func_=self._lstm_cell,
        create_scope_now_=True)
    # Transforms LSTM output into mean predictions.
    self._predict_from_lstm_output = tf.make_template(
        name_="predict_from_lstm_output",
        func_=lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
        create_scope_now_=True)
def reuse(scope):
    """
    A decorator for transparent reuse of tensorflow
    `Variables <https://www.tensorflow.org/api_docs/python/tf/Variable>`_
    in a function. The decorated function will automatically create variables
    the first time it is called and reuse them thereafter.

    .. note::

        This decorator is internally implemented by tensorflow's
        :func:`make_template` function. See `its doc
        <https://www.tensorflow.org/api_docs/python/tf/make_template>`_
        for requirements on the target function.

    :param scope: A string. The scope name passed to tensorflow
        `variable_scope()
        <https://www.tensorflow.org/api_docs/python/tf/variable_scope>`_.
    """
    return lambda f: tf.make_template(scope, f)
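Since decoration happens once, all calls to the decorated function go through a single template and genuinely share variables. A hypothetical usage sketch (the `mlp` function and placeholders are my own, assuming the decorated function builds its layers with variable-scope-aware ops):

x_a = tf.placeholder(tf.float32, [None, 784])
x_b = tf.placeholder(tf.float32, [None, 784])

@reuse('mlp')
def mlp(x):
    h = tf.layers.dense(x, 128, activation=tf.nn.relu)
    return tf.layers.dense(h, 10)

logits_a = mlp(x_a)  # first call creates the variables under 'mlp'
logits_b = mlp(x_b)  # second call reuses the same variables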
def __init__(self, summaries=None, summary_labels=None):
    """
    Creates a new optimizer instance.
    """
    self.variables = dict()
    self.summaries = summaries
    if summary_labels is None:
        self.summary_labels = dict()
    else:
        self.summary_labels = summary_labels

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            assert kwargs.get('trainable', False)
            self.variables[name] = variable
        return variable

    # TensorFlow function
    self.step = tf.make_template(
        name_='step',
        func_=self.tf_step,
        custom_getter_=custom_getter
    )
def __init__(self, max_iterations, unroll_loop=False):
    """
    Creates a new iterative solver instance.

    Args:
        max_iterations: Maximum number of iterations before termination.
        unroll_loop: Unrolls the TensorFlow while loop if true.
    """
    assert max_iterations >= 0
    self.max_iterations = max_iterations

    assert isinstance(unroll_loop, bool)
    self.unroll_loop = unroll_loop

    super(Iterative, self).__init__()

    # TensorFlow functions
    self.initialize = tf.make_template(name_='initialize', func_=self.tf_initialize)
    self.step = tf.make_template(name_='step', func_=self.tf_step)
    self.next_step = tf.make_template(name_='next-step', func_=self.tf_next_step)
def __init__(self, scope='exploration', summary_labels=None):
    self.summary_labels = set(summary_labels or ())

    self.variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.variables[name] = variable
        return variable

    self.explore = tf.make_template(
        name_=(scope + '/explore'),
        func_=self.tf_explore,
        custom_getter_=custom_getter
    )
def __init__(self, scope='preprocessor', summary_labels=None):
    self.summary_labels = set(summary_labels or ())

    self.variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.variables[name] = variable
        return variable

    self.process = tf.make_template(
        name_=(scope + '/process'),
        func_=self.tf_process,
        custom_getter_=custom_getter
    )
def initialize(self, custom_getter):
    super(QDemoModel, self).initialize(custom_getter=custom_getter)

    # Demonstration loss
    self.fn_demo_loss = tf.make_template(
        name_='demo-loss',
        func_=self.tf_demo_loss,
        custom_getter_=custom_getter
    )

    # Demonstration optimization
    self.fn_demo_optimization = tf.make_template(
        name_='demo-optimization',
        func_=self.tf_demo_optimization,
        custom_getter_=custom_getter
    )
def initialize(self, custom_getter):
    super(DistributionModel, self).initialize(custom_getter)

    # Network
    self.network = Network.from_spec(
        spec=self.network_spec,
        kwargs=dict(summary_labels=self.summary_labels)
    )

    # Distributions
    self.distributions = self.create_distributions()

    # Network internals
    self.internals_input.extend(self.network.internals_input())
    self.internals_init.extend(self.network.internals_init())

    # KL divergence function
    self.fn_kl_divergence = tf.make_template(
        name_=(self.scope + '/kl-divergence'),
        func_=self.tf_kl_divergence,
        custom_getter_=custom_getter
    )
def __init__(self, name): """Performs the initialisation necessary for all AbstractModule instances. Every subclass of AbstractModule must begin their constructor with a call to this constructor, i.e. `super(MySubModule, self).__init__(name=name)`. Avoid instantiating sub-modules in __init__ where possible, as they will not be defined under the module's scope. Instead, instantiate sub-modules in `build`. Args: name: Name of this module. Used to construct the Templated build function. Raises: ValueError: If name is not specified. """ if not isinstance(name, string_types): raise ValueError("Name must be a string.") self._is_connected = False self._template = tf.make_template(name, self._build, create_scope_now_=True) # Update __call__ and the object docstrings to enable better introspection self.__doc__ = self._build.__doc__ self.__call__.__func__.__doc__ = self._build.__doc__
def __init__(self, name): """ Initialize the module. Each subclass must call this constructor with a name. Args: name: Name of this module. Used for `tf.make_template`. """ self.name = name self._template = tf.make_template(name, self._build, create_scope_now_=True) # Docstrings for the class should be the docstring for the _build method self.__doc__ = self._build.__doc__ # pylint: disable=E1101 self.__call__.__func__.__doc__ = self._build.__doc__
def __init__(self, arch, is_training=False):
    '''
    Variational auto-encoder implemented in 2D convolutional neural nets

    Input:
        `arch`: network architecture (`dict`)
        `is_training`: (unused now) it was kept for historical reasons (for `BatchNorm`)
    '''
    self.arch = arch
    self._sanity_check()
    self.is_training = is_training

    with tf.name_scope('SpeakerRepr'):
        self.y_emb = self._l2_regularized_embedding(
            self.arch['y_dim'],
            self.arch['z_dim'],
            'y_embedding')

    self._generate = tf.make_template('Generator', self._generator)
    self._encode = tf.make_template('Encoder', self._encoder)

    self.generate = self.decode  # for VAE-GAN extension
def __init__(self, arch, is_training=False):
    self.arch = arch
    self.is_training = is_training

    self._decode = tf.make_template('Decoder', self._generator)
    self._encode = tf.make_template('Encoder', self._encoder)
def __init__(self, name): """ Initialize the module. Each subclass must call this constructor with a name. Args: name: Name of this module. Used for `tf.make_template`. """ self.name = name self._template = tf.make_template( name, self._build, create_scope_now_=True) # Docstrings for the class should be the docstring for the _build method self.__doc__ = self._build.__doc__ self.__call__.__func__.__doc__ = self._build.__doc__
def test_variable_reuse_with_template(self):
    tmpl1 = tf.make_template('test', tf.contrib.layers.legacy_fully_connected,
                             num_output_units=8)
    output1 = tmpl1(self.input)
    output2 = tmpl1(self.input)

    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        out_value1, out_value2 = sess.run([output1, output2])
    self.assertAllClose(out_value1, out_value2)
def test_variable_reuse_with_template(self):
    tmpl1 = tf.make_template('test', tf.contrib.layers.legacy_fully_connected,
                             num_output_units=8)
    output1 = tmpl1(self.input)
    output2 = tmpl1(self.input)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        out_value1, out_value2 = sess.run([output1, output2])
    self.assertAllClose(out_value1, out_value2)
def __init__(self): """ Creates a new solver instance. """ # TensorFlow function self.solve = tf.make_template(name_='solver', func_=self.tf_solve)
def __init__(self, scope='baseline', summary_labels=None):
    self.summary_labels = set(summary_labels or ())

    self.variables = dict()
    self.all_variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.all_variables[name] = variable
            if kwargs.get('trainable', True) and not name.startswith('optimization'):
                self.variables[name] = variable
                if 'variables' in self.summary_labels:
                    summary = tf.summary.histogram(name=name, values=variable)
                    self.summaries.append(summary)
        return variable

    self.predict = tf.make_template(
        name_=(scope + '/predict'),
        func_=self.tf_predict,
        custom_getter_=custom_getter
    )
    self.loss = tf.make_template(
        name_=(scope + '/loss'),
        func_=self.tf_loss,
        custom_getter_=custom_getter
    )
    self.regularization_loss = tf.make_template(
        name_=(scope + '/regularization-loss'),
        func_=self.tf_regularization_loss,
        custom_getter_=custom_getter
    )
def __init__(self, num_internals=0, scope='layer', summary_labels=None):
    self.num_internals = num_internals
    self.summary_labels = set(summary_labels or ())

    self.named_tensors = dict()
    self.variables = dict()
    self.all_variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.all_variables[name] = variable
            if kwargs.get('trainable', True) and not name.startswith('optimization'):
                self.variables[name] = variable
                if 'variables' in self.summary_labels:
                    summary = tf.summary.histogram(name=name, values=variable)
                    self.summaries.append(summary)
        return variable

    self.apply = tf.make_template(
        name_=(scope + '/apply'),
        func_=self.tf_apply,
        custom_getter_=custom_getter
    )
    self.regularization_loss = tf.make_template(
        name_=(scope + '/regularization-loss'),
        func_=self.tf_regularization_loss,
        custom_getter_=custom_getter
    )
def __init__(self, scope='network', summary_labels=None):
    self.summary_labels = set(summary_labels or ())

    self.variables = dict()
    self.all_variables = dict()
    self.summaries = list()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if not registered:
            self.all_variables[name] = variable
            if kwargs.get('trainable', True) and not name.startswith('optimization'):
                self.variables[name] = variable
                if 'variables' in self.summary_labels:
                    summary = tf.summary.histogram(name=name, values=variable)
                    self.summaries.append(summary)
        return variable

    self.apply = tf.make_template(
        name_=(scope + '/apply'),
        func_=self.tf_apply,
        custom_getter_=custom_getter
    )
    self.regularization_loss = tf.make_template(
        name_=(scope + '/regularization-loss'),
        func_=self.tf_regularization_loss,
        custom_getter_=custom_getter
    )
def initialize(self, custom_getter):
    super(PGProbRatioModel, self).initialize(custom_getter)

    # Model comparison functions
    self.reference = tf.make_template(
        name_='reference',
        func_=self.tf_reference,
        custom_getter_=custom_getter
    )
    self.compare = tf.make_template(
        name_='compare',
        func_=self.tf_compare,
        custom_getter_=custom_getter
    )
def __init__(self, arch, is_training=False):
    self.arch = arch
    self._sanity_check()
    self.is_training = is_training

    self._generate = tf.make_template('Generator', self._generator)
    self._discriminate = tf.make_template('Discriminator', self._discriminator)
    self._encode = tf.make_template('Encoder', self._encoder)
def __init__(self, arch, is_training=False):
    self.arch = arch
    self._sanity_check()
    self.is_training = is_training

    with tf.name_scope('SpeakerRepr'):
        self.y_emb = self._unit_embedding(
            self.arch['y_dim'],
            self.arch['z_dim'],
            'y_embedding')

    with tf.variable_scope('Tau'):
        self.tau = tf.nn.relu(
            10. * tf.Variable(
                tf.ones([1]),
                name='tau')) + 0.1

    self._generate = tf.make_template('Generator', self._generator)
    self._discriminate = tf.make_template('Discriminator', self._discriminator)
    self._encode = tf.make_template('Encoder', self._encoder)
    self._classify = tf.make_template('Classifier', self._classifier)
def ready_for_reuse(name):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            temp_func = tf.make_template(name, func)
            return temp_func(*args, **kwargs)
        return wrapper
    return decorator
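Note the design difference from the `reuse` decorator earlier: `reuse` builds a single template at decoration time, so all calls share variables, whereas this wrapper (like `templatemethod` above) constructs a fresh template inside each call, so each invocation typically gets its own uniquified variable scope instead of reusing the previous call's variables.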
def __init__(self, f, g, num_layers=1, f_side_input=None, g_side_input=None,
             use_efficient_backprop=True):
    if isinstance(f, list):
        assert len(f) == num_layers
    else:
        f = [f] * num_layers

    if isinstance(g, list):
        assert len(g) == num_layers
    else:
        g = [g] * num_layers

    scope_prefix = "revblock/revlayer_%d/"
    f_scope = scope_prefix + "f"
    g_scope = scope_prefix + "g"

    f = [
        tf.make_template(f_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(f)
    ]
    g = [
        tf.make_template(g_scope % i, fn, create_scope_now_=True)
        for i, fn in enumerate(g)
    ]

    self.f = f
    self.g = g

    self.num_layers = num_layers
    self.f_side_input = f_side_input or []
    self.g_side_input = g_side_input or []

    self._use_efficient_backprop = use_efficient_backprop
def build(self, *args, **kwargs):
    """Builds the module and sets the scope.

    This function will get called automatically when the module gets called.
    """
    if self._is_built:
        logging.info('Current Module name: `{}` is already built.'.format(self.name))
        return

    self._is_built = True
    self._template = tf.make_template(self.name, self._build, create_scope_now_=True)
    self._unique_name = self._template.variable_scope.name.split('/')[-1]
def __call__(self, func):
    this = self
    templated_func = tf.make_template(this.scope, func)

    @wraps(func, assigned=TfTemplate.available_attrs(func))
    def inner(*args, **kwargs):
        return templated_func(*args, **kwargs)

    return inner
def layer(func):
    class Layer(object):
        def __init__(self, *args, **kwargs):
            self.func = func
            self.args = args
            self.kwargs = kwargs
            self.name = self.kwargs.get("name", self.func.__name__)
            self._template = tf.make_template(self.name, self.func, create_scope_now_=True)
            self._unique_name = self._template.variable_scope.name.split("/")[-1]
            self._summary_added = False

        def __call__(self, x):
            out = self.template(x, *self.args, **self.kwargs)
            self._layer_logging(x, out)
            self._add_summary()
            return out

        def __rrshift__(self, other):
            """ >> """
            return self.__call__(other)

        def _layer_logging(self, other, out):
            tf.logging.info("  {} {} {} -> {}".format(
                self.unique_name, "shape", str(other.get_shape()), str(out.get_shape())))

        def _add_summary(self):
            if not self.kwargs.get("summary"):
                return None
            if self.summary_added:
                return None
            for var in self.get_variables_in_scope():
                # TODO: different summary types
                tf.summary.scalar(var.name, tf.reduce_mean(var))
            self._summary_added = True

        def get_variables_in_scope(self):
            assert self.template._variables_created, "Variables not yet created or undefined."
            variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope=self.variable_scope_name)
            return variables

        @property
        def template(self):
            return self._template

        @property
        def unique_name(self):
            return self._unique_name

        @property
        def variable_scope_name(self):
            return self.template._variable_scope._name

        @property
        def summary_added(self):
            return self._summary_added

    return Layer
def __init__(self, corpus, **opts):
    self.corpus = corpus
    self.opts = opts

    self.global_step = get_or_create_global_step()
    self.increment_global_step_op = tf.assign(self.global_step, self.global_step + 1,
                                              name="increment_global_step")

    self.corpus_size = get_corpus_size(self.corpus["train"])
    self.corpus_size_valid = get_corpus_size(self.corpus["valid"])

    self.word2idx, self.idx2word = build_vocab(self.corpus["train"])
    self.vocab_size = len(self.word2idx)

    self.generator_template = tf.make_template(GENERATOR_PREFIX, generator)
    self.discriminator_template = tf.make_template(DISCRIMINATOR_PREFIX, discriminator)

    self.enqueue_data, _, source, target, sequence_length = \
        prepare_data(self.corpus["train"], self.word2idx, num_threads=7, **self.opts)

    # TODO: option to either do pretrain or just generate?
    self.g_tensors_pretrain = self.generator_template(
        source, target, sequence_length, self.vocab_size, **self.opts)

    self.enqueue_data_valid, self.input_ph, source_valid, target_valid, sequence_length_valid = \
        prepare_data(self.corpus["valid"], self.word2idx, num_threads=1, **self.opts)

    self.g_tensors_pretrain_valid = self.generator_template(
        source_valid, target_valid, sequence_length_valid, self.vocab_size, **self.opts)

    self.decoder_fn = prepare_custom_decoder(
        sequence_length, self.g_tensors_pretrain.embedding_matrix,
        self.g_tensors_pretrain.output_projections)

    self.g_tensors_fake = self.generator_template(
        source, target, sequence_length, self.vocab_size,
        decoder_fn=self.decoder_fn, **self.opts)

    self.g_tensors_fake_valid = self.generator_template(
        source_valid, target_valid, sequence_length_valid, self.vocab_size,
        decoder_fn=self.decoder_fn, **self.opts)

    # TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing)
    self.d_tensors_real = self.discriminator_template(
        self.g_tensors_pretrain.rnn_outputs, sequence_length, is_real=True, **self.opts)

    # TODO: check to see if sequence_length is correct
    self.d_tensors_fake = self.discriminator_template(
        self.g_tensors_fake.rnn_outputs, None, is_real=False, **self.opts)

    self.g_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=GENERATOR_PREFIX)
    self.d_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=DISCRIMINATOR_PREFIX)
def build_model(self):
    sc = predictron_arg_scope()

    with tf.variable_scope('state'):
        with slim.arg_scope(sc):
            state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv1/preact')
            state = slim.conv2d(state, 32, [3, 3], scope='conv2')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv2/preact')

    iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')

    rewards_arr = []
    gammas_arr = []
    lambdas_arr = []
    values_arr = []

    for k in range(self.max_depth):
        state, reward, gamma, lambda_, value = iter_template(state)
        rewards_arr.append(reward)
        gammas_arr.append(gamma)
        lambdas_arr.append(lambda_)
        values_arr.append(value)

    _, _, _, _, value = iter_template(state)
    # K + 1 elements
    values_arr.append(value)

    bs = tf.shape(self.inputs)[0]
    # [batch_size, K * maze_size]
    self.rewards = tf.pack(rewards_arr, axis=1)
    # [batch_size, K, maze_size]
    self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.rewards = tf.concat_v2(values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32),
                                        self.rewards], axis=1, name='rewards')

    # [batch_size, K * maze_size]
    self.gammas = tf.pack(gammas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.gammas = tf.concat_v2(values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32),
                                       self.gammas], axis=1, name='gammas')

    # [batch_size, K * maze_size]
    self.lambdas = tf.pack(lambdas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])

    # [batch_size, (K + 1) * maze_size]
    self.values = tf.pack(values_arr, axis=1)
    # [batch_size, K + 1, maze_size]
    self.values = tf.reshape(self.values, [-1, (self.max_depth + 1), self.maze_size])

    self.build_preturns()
    self.build_lambda_preturns()
def build_model(self):
    lstm_state = tf.contrib.rnn.LSTMStateTuple(self.initial_lstm_state[0],
                                               self.initial_lstm_state[1])

    encoder_network_template = tf.make_template('vpn_encoder', self.encoder_template)
    decoder_network_template = tf.make_template('vpn_decoder', self.decoder_template)

    with tf.name_scope('training_graph'):
        net_unwrap = []
        for i in range(self.config.truncated_steps):
            encoder_state, lstm_state = encoder_network_template(self.sequences[:, i], lstm_state)
            step_out = decoder_network_template(encoder_state, self.sequences[:, i + 1])
            net_unwrap.append(step_out)

        self.final_lstm_state = lstm_state

    with tf.name_scope('wrap_out'):
        net_unwrap = tf.stack(net_unwrap)
        self.output = tf.transpose(net_unwrap, [1, 0, 2, 3, 4])

        for i in range(self.config.truncated_steps):
            Logger.summarize_images(
                tf.expand_dims(tf.cast(tf.arg_max(self.output[:, i], 3), tf.float32), 3),
                'frame_{0}'.format(i), 'vpn', 1)

    with tf.name_scope('loss'):
        labels = tf.one_hot(tf.cast(tf.squeeze(self.sequences[:, 1:]), tf.int32),
                            256, axis=-1, dtype=tf.float32)
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.output, labels=labels))

    self.optimizer = tf.train.RMSPropOptimizer(
        learning_rate=self.config.learning_rate).minimize(self.loss)

    with tf.name_scope('inference_graph'):
        lstm_state = tf.contrib.rnn.LSTMStateTuple(self.initial_lstm_state[0],
                                                   self.initial_lstm_state[1])

        self.encoder_state, lstm_state = encoder_network_template(self.inference_prev_frame,
                                                                  lstm_state)
        self.inference_lstm_state = lstm_state
        self.inference_output = decoder_network_template(self.inference_encoder_state,
                                                         self.inference_current_frame)

    with tf.name_scope('test_frames'):
        self.test_summaries = []
        for i in range(self.config.truncated_steps):
            Logger.summarize_images(
                tf.expand_dims(tf.cast(tf.arg_max(self.inference_output, 3), tf.float32), 3),
                'test_frame_{0}'.format(i), 'vpn_test_{0}'.format(i), 1)
            self.test_summaries.append(tf.summary.merge_all('vpn_test_{0}'.format(i)))

    self.summaries = tf.summary.merge_all('vpn')