我们从Python开源项目中，提取了以下50个代码示例，用于说明如何使用tensorflow.__version__（注意：__version__ 是属性而非函数，不应加括号调用）。
def _upload_metrics(current_model):
    """Upload benchmark metrics for *current_model* to BigQuery.

    Gathers run metadata from the model object, the active Keras backend,
    and the host ``config`` dict, then forwards everything in one call.
    """
    metrics = dict(
        test_name=current_model.test_name,
        total_time=current_model.total_time,
        epochs=current_model.epochs,
        batch_size=current_model.batch_size,
        backend_type=keras.backend.backend(),
        backend_version=get_backend_version(),
        cpu_num_cores=config['cpu_num_cores'],
        cpu_memory=config['cpu_memory'],
        cpu_memory_info=config['cpu_memory_info'],
        gpu_count=config['gpus'],
        gpu_platform=config['gpu_platform'],
        platform_type=config['platform_type'],
        platform_machine_type=config['platform_machine_type'],
        keras_version=keras.__version__,
        sample_type=current_model.sample_type,
    )
    bq.upload_metrics_to_bq(**metrics)

# MNIST MLP
def save_run(config, environment=None, comment=None, extra_config=None,
             base_path=DEFAULT_BASE_PATH, filename=DEFAULT_FILENAME):
    """Append a JSON record describing this training run to the run log.

    Records the environment, a UTC timestamp, the repo diff, library
    versions, and the (extra) config. No-op when running in the cloud.
    """
    if environment == 'cloud':
        # We don't write runs inside Google Cloud, we run it before.
        return

    experiment = {
        'environment': environment,
        'datetime': str(datetime.datetime.utcnow()) + 'Z',
        'diff': get_diff(),
        'luminoth_version': get_luminoth_version(),
        'tensorflow_version': get_tensorflow_version(),
        'config': config,
        'extra_config': extra_config,
    }

    tf.gfile.MakeDirs(base_path)
    file_path = os.path.join(base_path, filename)
    # One JSON object per line, appended, so the log accumulates runs.
    with tf.gfile.Open(file_path, 'a') as log:
        log.write(json.dumps(experiment) + '\n')
def main(choice):
    """Validate the TensorFlow version, then dispatch on *choice*.

    choice 1 trains (GPU required), 2 translates, 3 plots; anything else
    is reported as a wrong choice. All errors are caught and printed.
    """
    try:
        supported = [LooseVersion('1.0.0'), LooseVersion('1.0.1')]
        assert LooseVersion(tf.__version__) in supported, \
            'This project requires TensorFlow version 1.0 You are using {}' \
            .format(tf.__version__)
        print('TensorFlow Version: {}'.format(tf.__version__))
        print('*****Author: Satyaki Sanyal*****')
        print('***This project must only be used for educational purpose***')
        if choice == 1:
            # Training needs a GPU; bail out with a message otherwise.
            if not tf.test.gpu_device_name():
                print('*** ERROR: No GPU found. Please use a GPU to train your neural network. ***')
            else:
                print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
                Main().main()
        elif choice == 2:
            Translate().translate()
        elif choice == 3:
            Plot().plot()
        else:
            print('*** Error: Wrong choice ***')
    except Exception as exc:
        print('*** Error: ' + str(exc) + ' ***')
def main(_):
    """Configure logging, parse CLI args, and print a prediction as JSON."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(name)-7s %(levelname)-7s %(message)s')
    logger.info('tf version: {}'.format(tf.__version__))

    parser = argparse.ArgumentParser(description='Run Dobot WebAPI.')
    parser.add_argument('--data_dir', type=str, default='data',
                        help="Directory for training data.")
    parser.add_argument('--train_dir', type=str, default='train',
                        help="Directory for checkpoints.")
    args = parser.parse_args()

    reader = FeaturesDataReader(args.data_dir)
    # Model hyper-parameters live next to the checkpoints as params.json.
    predictor = Predictor(reader, args.train_dir, args.train_dir + '/params.json')
    print(predictor.predict_to_json())
def create_model(session, forward_only):
    """Create model and initialize or load parameters.

    Restores from gConfig['pretrained_model'] when given, otherwise from
    the latest checkpoint in the working directory, otherwise initializes
    fresh variables.
    """
    model = seq2seq_model.Seq2SeqModel(
        gConfig['enc_vocab_size'], gConfig['dec_vocab_size'], _buckets,
        gConfig['layer_size'], gConfig['num_layers'],
        gConfig['max_gradient_norm'], gConfig['batch_size'],
        gConfig['learning_rate'], gConfig['learning_rate_decay_factor'],
        forward_only=forward_only)
    if 'pretrained_model' in gConfig:
        model.saver.restore(session, gConfig['pretrained_model'])
        return model

    ckpt = tf.train.get_checkpoint_state(gConfig['working_directory'])

    # The checkpoint filename gained an ".index" companion file after
    # TensorFlow 0.12.
    # BUG FIX: the original compared version STRINGS lexicographically
    # (tf.__version__ > "0.12"), which is wrong for e.g. "0.9" (string
    # "0.9" > "0.12" is True, but 0.9 < 0.12 as a version? no — and "0.2"
    # compares above "0.12" even though 0.2 < 0.12 as a release). Compare
    # the leading numeric (major, minor) tuple instead.
    def _version_key(version_string):
        # Extract the leading digits of the first two dot components.
        key = []
        for part in version_string.split('.')[:2]:
            digits = ''
            for ch in part:
                if not ch.isdigit():
                    break
                digits += ch
            key.append(int(digits or 0))
        return tuple(key)

    checkpoint_suffix = ".index" if _version_key(tf.__version__) > (0, 12) else ""

    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + checkpoint_suffix):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        session.run(tf.initialize_all_variables())
    return model
def get_inception_layer(inputs, conv11_size, conv33_11_size, conv33_size,
                        conv55_11_size, conv55_size, pool11_size):
    """Build a GoogLeNet-style inception block over *inputs*.

    Four parallel branches (1x1 conv, 1x1->3x3, 1x1->5x5, 3x3 maxpool->1x1
    projection) are concatenated along the channel axis.
    """
    with tf.variable_scope("conv_1x1"):
        conv11 = layers.conv2d(inputs, conv11_size, [1, 1])
    with tf.variable_scope("conv_3x3"):
        reduce33 = layers.conv2d(inputs, conv33_11_size, [1, 1])
        conv33 = layers.conv2d(reduce33, conv33_size, [3, 3])
    with tf.variable_scope("conv_5x5"):
        reduce55 = layers.conv2d(inputs, conv55_11_size, [1, 1])
        conv55 = layers.conv2d(reduce55, conv55_size, [5, 5])
    with tf.variable_scope("pool_proj"):
        pooled = layers.max_pool2d(inputs, [3, 3], stride=1)
        pool11 = layers.conv2d(pooled, pool11_size, [1, 1])

    branches = [conv11, conv33, conv55, pool11]
    # tf.concat swapped its (axis, values) argument order after 0.11.0rc0.
    if tf.__version__ == '0.11.0rc0':
        return tf.concat(3, branches)
    return tf.concat(branches, 3)
def main(_):
    """Load Assistments data, build a DKT model, and train it."""
    print('tf version', tf.__version__)
    topics, answers, num_topics = read_assistments_data(DATA_LOC)
    full_data = load_data(topics, answers, num_topics)
    model = DKTModel(num_topics, HIDDEN_SIZE, MAX_LENGTH)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # Local variables must also be initialized explicitly because
        # TensorFlow's AUC metric keeps its accumulators there.
        session.run(tf.local_variables_initializer())
        train_model(model, session, full_data)
def _model_deploy(args, cell):
    """Deploy a model version to Cloud ML Engine.

    args['name'] must look like "model.version". The parent model is
    created on demand; the version is then deployed from args['path']
    with args['runtime_version'] (defaulting to the local TF version).

    Raises:
        ValueError: if the name is not of the form "model.version".
    """
    parts = args['name'].split('.')
    if len(parts) != 2:
        # Guard clause instead of the original trailing else branch.
        raise ValueError('Name must be like "model.version".')
    model_name, version_name = parts[0], parts[1]

    model_exists = False
    try:
        # If describe() works, the model already exists.
        datalab_ml.Models(project_id=args['project']).get_model_details(model_name)
        model_exists = True
    except Exception:
        # BUG FIX: the original used a bare "except:", which also swallows
        # SystemExit/KeyboardInterrupt; only ordinary exceptions should be
        # treated as "model does not exist yet".
        pass
    if not model_exists:
        datalab_ml.Models(project_id=args['project']).create(model_name)

    versions = datalab_ml.ModelVersions(model_name, project_id=args['project'])
    runtime_version = args['runtime_version']
    if not runtime_version:
        runtime_version = tf.__version__
    versions.deploy(version_name, args['path'], runtime_version=runtime_version)
def setup(tf, order=None):
    """
    Sets up global variables (currently only the tensorflow version) to adapt
    to peculiarities of different tensorflow versions. This function should
    only be called before :py:class:`Model` creation, not for evaluation, so
    the tensorflow module *tf* must be passed in:

    .. code-block:: python

       import tensorflow as tf
       import tfdeploy as td

       td.setup(tf)

       # ...

    When *order* is not *None*, it is forwarded to :py:func:`optimize` for
    convenience.
    """
    global _tf_version_string, _tf_version

    version_string = tf.__version__
    _tf_version_string = version_string
    _tf_version = _parse_tf_version(version_string)

    if order is not None:
        optimize(order)
def main(unused_argv):
    """Log the TensorFlow version and run evaluation."""
    logging.set_verbosity(tf.logging.INFO)
    print("tensorflow version: {}".format(tf.__version__))
    evaluate()
def main(unused_argv):
    """Dispatch this process to the role described by TF_CONFIG.

    Masters/workers run the Trainer; "ps" tasks run a ParameterServer.
    Without a cluster spec the process runs standalone as a master.
    """
    # Environment and cluster layout come from the TF_CONFIG variable.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))
    cluster_data = env.get("cluster")
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Fall back to a standalone master when no task info is present.
    task_data = env.get("task") or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    if not cluster or task.type in ("master", "worker"):
        Trainer(cluster, task, FLAGS.train_dir, FLAGS.log_device_placement).run(
            start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type))
def main(unused_argv):
    """Log the TensorFlow version and run the video-id check."""
    logging.set_verbosity(tf.logging.INFO)
    print("tensorflow version: {}".format(tf.__version__))
    check_video_id()
def main(unused_argv):
    """Log the TensorFlow version and run inference."""
    logging.set_verbosity(tf.logging.INFO)
    print("tensorflow version: {}".format(tf.__version__))
    inference()
def tensorflow_version_tuple(version=None):
    """Return the TF version as a ``(major, minor, patch)`` tuple.

    Args:
        version: optional version string to parse; defaults to
            ``tf.__version__`` (backward-compatible: existing zero-argument
            callers are unaffected).

    Returns:
        ``(int, int, str)`` — major and minor as ints, patch as the raw
        remainder string (it may carry suffixes such as "0-rc1").

    BUG FIX: the original ``v.split('.')`` unpacked into exactly three
    names and raised ValueError for versions with more than two dots
    (e.g. "1.15.0.post1"); splitting at most twice keeps any extra
    components inside the patch string.
    """
    v = tf.__version__ if version is None else version
    major, minor, patch = v.split('.', 2)
    return (int(major), int(minor), patch)
def get_backend_version():
    """Return the version string of the active Keras backend.

    Falls back to "undefined" for backends other than tensorflow,
    theano, or cntk.
    """
    backend_name = keras.backend.backend()
    if backend_name == "tensorflow":
        return tf.__version__
    if backend_name == "theano":
        return theano.__version__
    if backend_name == "cntk":
        return cntk.__version__
    return "undefined"
def main(unused_argv):
    """Dispatch this process to the role described by TF_CONFIG.

    Masters/workers build the model, reader and exporter and run the
    Trainer; "ps" tasks run a ParameterServer.
    """
    # Environment and cluster layout come from the TF_CONFIG variable.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))
    cluster_data = env.get("cluster")
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Fall back to a standalone master when no task info is present.
    task_data = env.get("task") or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    if not cluster or task.type in ("master", "worker"):
        model = find_class_by_name(
            FLAGS.model, [frame_level_models, video_level_models])()
        reader = get_reader()
        model_exporter = export_model.ModelExporter(
            frame_features=FLAGS.frame_features, model=model, reader=reader)
        Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
                FLAGS.log_device_placement, FLAGS.max_steps,
                FLAGS.export_model_steps).run(
                    start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type))
def main(unused_argv):
    """Benchmark plain TF against Loom and report timing comparisons."""
    tf.logging.set_verbosity(tf.logging.INFO)
    _logger.info("Tensorflow Version: %s", str(tf.__version__))

    tf_results = test_model(TfModel)
    loom_results = test_model(LoomModel, False)
    loom_results_proper = test_model(LoomModel, True)

    # FLAGS.tree_lstm selects between the GRU and fully-connected cell.
    model_type = "GRU" if FLAGS.tree_lstm else "FC"

    _logger.info("====================================================")
    _logger.info("Num epochs: %d; repeats per epoch %d",
                 FLAGS.num_epochs, FLAGS.num_repeats)
    _logger.info("Model type: %s, %s", model_type, FLAGS.tree_type)
    _logger.info("Vector size: %d", FLAGS.vector_size)
    _logger.info("Tree size: %d", FLAGS.tree_size)

    print_results(tf_results, "TensorFlow")
    print_results(loom_results, "Loom")
    print_results(loom_results_proper, "Loom with random trees")
    compare_results(tf_results, loom_results, "TensorFlow", "Loom")
    compare_total_speedup(loom_results, tf_results[1])
    _logger.info("Finished benchmarks.")
def main(_):
    """Log TF version information, then start training."""
    logging.info('Current tf version: %s', tf.__version__)
    logging.info('Current tf git version: %s', tf.__git_version__)
    run_training()
def _set_model(self, model):
    """Attach *model* and build TensorBoard summary ops for its weights.

    Creates histogram (and optionally image) summaries for every layer
    weight, merges all summaries, and opens a SummaryWriter for log_dir.
    Uses the pre-1.0 TF summary API (tf.histogram_summary etc.).
    """
    # Imported lazily so the callback is importable without TF installed.
    import tensorflow as tf
    import keras.backend.tensorflow_backend as KTF
    self.model = model
    self.sess = KTF.get_session()
    # Build summaries only once (self.merged is None) and only when
    # histogram logging was requested.
    if self.histogram_freq and self.merged is None:
        for layer in self.model.layers:
            for weight in layer.weights:
                tf.histogram_summary(weight.name, weight)
                if self.write_images:
                    # Reshape the weight into a 4-D (1, h, w, 1) image so
                    # it can be logged with image_summary.
                    w_img = tf.squeeze(weight)
                    shape = w_img.get_shape()
                    if len(shape) > 1 and shape[0] > shape[1]:
                        # Transpose so the wider dimension is horizontal.
                        w_img = tf.transpose(w_img)
                    if len(shape) == 1:
                        # 1-D vectors become a single-row image.
                        w_img = tf.expand_dims(w_img, 0)
                    w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
                    tf.image_summary(weight.name, w_img)
            if hasattr(layer, 'output'):
                tf.histogram_summary('{}_out'.format(layer.name),
                                     layer.output)
    self.merged = tf.merge_all_summaries()
    if self.write_graph:
        # The SummaryWriter graph argument changed in TF 0.8:
        # sess.graph (>= 0.8.0) vs. sess.graph_def (older releases).
        if parse_version(tf.__version__) >= parse_version('0.8.0'):
            self.writer = tf.train.SummaryWriter(self.log_dir,
                                                 self.sess.graph)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir,
                                                 self.sess.graph_def)
    else:
        self.writer = tf.train.SummaryWriter(self.log_dir)
def main(unused_argv):
    """Dispatch this process to the role described by TF_CONFIG.

    Masters/workers build the model, reader and exporter and run the
    Trainer; "ps" tasks run a ParameterServer.
    """
    # Environment and cluster layout come from the TF_CONFIG variable.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))
    cluster_data = env.get("cluster")
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Fall back to a standalone master when no task info is present.
    task_data = env.get("task") or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    if not cluster or task.type in ("master", "worker"):
        model = find_class_by_name(FLAGS.model, [models])()
        reader = get_reader()
        model_exporter = export_model.ModelExporter(
            model=model, reader=reader)
        Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
                FLAGS.log_device_placement, FLAGS.max_steps,
                FLAGS.export_model_steps).run(
                    start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type))
def main(unused_argv):
    """Dispatch this process to the role described by TF_CONFIG.

    Masters/workers build the model, reader and exporter and run the
    Trainer; "ps" tasks run a ParameterServer.
    """
    # Environment and cluster layout come from the TF_CONFIG variable.
    env = json.loads(os.environ.get("TF_CONFIG", "{}"))
    cluster_data = env.get("cluster")
    cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None

    # Fall back to a standalone master when no task info is present.
    task_data = env.get("task") or {"type": "master", "index": 0}
    task = type("TaskSpec", (object,), task_data)

    logging.set_verbosity(tf.logging.INFO)
    logging.info("%s: Tensorflow version: %s.",
                 task_as_string(task), tf.__version__)

    if not cluster or task.type in ("master", "worker"):
        model = find_class_by_name(FLAGS.model, [cvd_models])()
        reader = get_reader()
        model_exporter = export_model.ModelExporter(
            model=model, reader=reader)
        Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
                FLAGS.log_device_placement, FLAGS.max_steps,
                FLAGS.export_model_steps).run(
                    start_new_model=FLAGS.start_new_model)
    elif task.type == "ps":
        ParameterServer(cluster, task).run()
    else:
        raise ValueError("%s: Invalid task_type: %s." %
                         (task_as_string(task), task.type))
def get_tensorflow_version():
    """Return the installed TensorFlow version string, or None if absent.

    TensorFlow is imported lazily so callers can run without it installed.
    """
    try:
        import tensorflow
    except ImportError:
        # TF not installed: fall through to the implicit None, matching
        # the original best-effort behaviour.
        return None
    return tensorflow.__version__
def _assert_tensorflow_version():
    """Fail with a clear error unless TF satisfies >= 1.4, < 2.

    Raises:
        RuntimeError: when the installed TensorFlow is outside 1.4..1.x.
    """
    # BUG FIX: the original unpacked split('.') into exactly three names,
    # which itself raises ValueError for version strings without exactly
    # two dots (e.g. "2.1" or "1.15.0.post1") instead of the intended
    # RuntimeError; split at most twice and take the first two components.
    major, minor = tf.__version__.split('.', 2)[:2]
    if int(major) != 1 or int(minor) < 4:
        raise RuntimeError(
            'Tensorflow version >= 1.4, < 2 is required. Found (%s). Please '
            'install the latest 1.x version from '
            'https://github.com/tensorflow/tensorflow. ' % tf.__version__)
def __init__(self):
    """Set up a chatbot: paths, hyper-parameters, the seq2seq model, and
    the encoder/decoder vocabularies loaded from disk."""
    print("tensorflow version: ", tf.__version__)
    # Start from a clean default graph before building the model.
    tf.reset_default_graph()

    # Preprocessed corpus vectors and vocabulary files.
    self.encoder_vec_file = "./preprocessing/enc.vec"
    self.decoder_vec_file = "./preprocessing/dec.vec"
    self.encoder_vocabulary = "./preprocessing/enc.vocab"
    self.decoder_vocabulary = "./preprocessing/dec.vocab"
    self.dictFile = './word_dict.txt'
    # Training/inference knobs: batch of 1 (interactive use).
    self.batch_size = 1
    self.max_batches = 10000
    self.show_epoch = 100
    self.model_path = './model/'
    # Load the custom word dictionary for jieba segmentation
    # (original comment here was garbled non-English text).
    jieba.load_userdict(self.dictFile)
    self.model = dynamicSeq2seq(encoder_cell=LSTMCell(20),
                                decoder_cell=LSTMCell(40),
                                encoder_vocab_size=540,
                                decoder_vocab_size=1600,
                                embedding_size=20,
                                attention=True,
                                bidirectional=True,
                                debug=False,
                                time_major=True)
    # NOTE(review): the "??" literals below look mojibake-garbled in this
    # source; they presumably held non-ASCII (Chinese) place names and
    # defaults — confirm against the original repository.
    self.location = ["??", "??", "??", "??","??"]
    self.user_info = {"__username__":"Stephen", "__location__":"??"}
    self.robot_info = {"__robotname__":"JiJi"}
    # index <-> word maps filled from the vocabulary files below.
    self.dec_vocab = {}
    self.enc_vocab = {}
    tag_location = ''
    # Encoder vocab maps word -> index; decoder vocab maps index -> word.
    with open(self.encoder_vocabulary, "r") as enc_vocab_file:
        for index, word in enumerate(enc_vocab_file.readlines()):
            self.enc_vocab[word.strip()] = index
    with open(self.decoder_vocabulary, "r") as dec_vocab_file:
        for index, word in enumerate(dec_vocab_file.readlines()):
            self.dec_vocab[index] = word.strip()