The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.GPUOptions().
def __init__(self, cluster, task, train_dir, log_device_placement=True):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed. None otherwise.
      task: A TaskSpec describing the job type and the task index.
      train_dir: Directory where training artifacts are written.
      log_device_placement: Whether to log op/device placement.
    """
    self.cluster = cluster
    self.task = task
    # The master is the single replica of the "master" job (index 0).
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    # Cap this process at 20% of GPU memory so several jobs can share a GPU.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    self.config = tf.ConfigProto(log_device_placement=log_device_placement,
                                 gpu_options=gpu_options)
    if self.is_master and self.task.index > 0:
        # NOTE(review): is_master already implies index == 0, so this branch
        # looks unreachable -- confirm the intended guard condition.
        # BUG FIX: the message argument was passed as a second positional arg
        # and never interpolated into "%s"; use % formatting.
        raise StandardError("%s: Only one replica of master expected" %
                            task_as_string(self.task))
def __init__(self, cluster, task, train_dir, log_device_placement=True):
    """Creates a Trainer.

    Args:
      cluster: A tf.train.ClusterSpec if the execution is distributed. None otherwise.
      task: A TaskSpec describing the job type and the task index.
      train_dir: Directory where training artifacts are written.
      log_device_placement: Whether to log op/device placement.
    """
    self.cluster = cluster
    self.task = task
    # The master is the single replica of the "master" job (index 0).
    self.is_master = (task.type == "master" and task.index == 0)
    self.train_dir = train_dir
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    # BUG FIX: gpu_options was created but never attached to the config,
    # so the FLAGS.gpu memory-fraction limit had no effect.
    self.config = tf.ConfigProto(log_device_placement=log_device_placement,
                                 gpu_options=gpu_options)
    if self.is_master and self.task.index > 0:
        # BUG FIX: format the message with % instead of passing the argument
        # as an unused second positional parameter.
        raise StandardError("%s: Only one replica of master expected" %
                            task_as_string(self.task))
def configure_gpu_settings(gpu_cfg=None):
    """Build a tf.ConfigProto from a JSON GPU-configuration file.

    Args:
        gpu_cfg: Path to a JSON file holding session/GPU settings, or None.

    Returns:
        A configured tf.ConfigProto, or None when no config file is given.
    """
    if not gpu_cfg:
        return None
    with open(gpu_cfg) as fp:
        settings = json.load(fp)
    mem_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=settings['per_process_gpu_memory_fraction'])
    return tf.ConfigProto(
        allow_soft_placement=settings['allow_soft_placement'],
        log_device_placement=settings['log_device_placement'],
        inter_op_parallelism_threads=settings['inter_op_parallelism_threads'],
        intra_op_parallelism_threads=settings['intra_op_parallelism_threads'],
        gpu_options=mem_opts)
def setUp(self):
    """Set up class before _each_ test method is executed.

    Creates a tensorflow session and instantiates a dbinterface.
    """
    self.setup_model()
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True),
        log_device_placement=self.params['log_device_placement'],
        inter_op_parallelism_threads=self.params['inter_op_parallelism_threads'])
    self.sess = tf.Session(config=sess_config)
    # TODO: Determine whether this should be called here or
    # in dbinterface.initialize()
    self.sess.run(tf.global_variables_initializer())
    self.dbinterface = base.DBInterface(sess=self.sess,
                                        params=self.params,
                                        cache_dir=self.CACHE_DIR,
                                        save_params=self.save_params,
                                        load_params=self.load_params)
    self.step = 0
def predict():
    """Interactive REPL: print all predicted replies for each stdin line."""
    # Only allocate part of the gpu memory when predicting.
    session_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.2))
    with tf.Session(config=session_config) as sess:
        predictor = EasyPredictor(sess)
        sys.stdout.write("> ")
        sys.stdout.flush()
        while True:
            line = sys.stdin.readline()
            if not line:
                break  # EOF ends the loop
            for idx, reply in enumerate(predictor.predict(line)):
                print(idx, reply)
            print("> ", end="")
            sys.stdout.flush()
def main():
    """Toy training loop: drive a+b toward a constant target under a Supervisor."""
    # Graph
    with tf.device('/cpu:0'):
        a = tf.Variable(tf.truncated_normal(shape=[2]), dtype=tf.float32)
        b = tf.Variable(tf.truncated_normal(shape=[2]), dtype=tf.float32)
        c = a + b
        target = tf.constant(100., shape=[2], dtype=tf.float32)
        loss = tf.reduce_mean(tf.square(c - target))
        opt = tf.train.GradientDescentOptimizer(.0001).minimize(loss)
    # Session
    supervisor = tf.train.Supervisor(logdir='/tmp/mydir')
    gpu_opts = tf.GPUOptions(allow_growth=True,
                             allocator_type="BFC",
                             visible_device_list="%d" % FLAGS.gpu_id)
    sess_config = tf.ConfigProto(gpu_options=gpu_opts,
                                 allow_soft_placement=False,
                                 device_count={'GPU': 1},
                                 log_device_placement=True)
    sess = supervisor.prepare_or_wait_for_session(config=sess_config)
    for step in range(1000):
        sess.run(opt)
        if step % 10 == 0:
            # Print the current value of c every 10 steps.
            print(sess.run(c))
        time.sleep(.1)
def main(_):
    """Entry point: build a session, gym environment and agent; train or play."""
    gpu_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
        config = get_config(FLAGS) or FLAGS
        # 'simple' selects the lightweight environment wrapper.
        env_cls = SimpleGymEnvironment if config.env_type == 'simple' else GymEnvironment
        env = env_cls(config)
        if FLAGS.use_gpu and not tf.test.is_gpu_available():
            raise Exception("use_gpu flag is true when no GPUs are available")
        if not FLAGS.use_gpu:
            # CPU kernels expect channels-last data.
            config.cnn_format = 'NHWC'
        agent = Agent(config, env, sess)
        if FLAGS.is_train:
            agent.train()
        else:
            agent.play()
def get_session():
    """Reset the default graph and return a session capped at 50% of GPU memory.

    Returns:
        A new tf.Session on a fresh default graph.
    """
    tf.reset_default_graph()
    # Cleanup: the original built an unused thread-limited ConfigProto and
    # kept several commented-out session variants; both removed.
    # Use this for limiting memory allocated for the GPU.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    print("AVAILABLE GPUS: ", get_available_gpus())
    return session
def process(input_dir, output_dir, model_dir, resizing_size, gpu):
    """Run a saved model over every image in input_dir and save label maps.

    Args:
        input_dir: Directory scanned for *.jpg/*.png/*.tif/*.jp2 images.
        output_dir: Destination directory (created if missing).
        model_dir: Directory of the exported model to load.
        resizing_size: Size every image is resized to before inference.
        gpu: Visible GPU device list string, e.g. "0".
    """
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.3,
                             visible_device_list=gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)).as_default():
        model = loader.LoadedModel(model_dir)
        os.makedirs(output_dir, exist_ok=True)
        input_filenames = []
        for pattern in ('*.jpg', '*.png', '*.tif', '*.jp2'):
            input_filenames += glob(os.path.join(input_dir, pattern))
        for path in tqdm(input_filenames):
            img = Image.open(path).resize(resizing_size)
            mat = np.asarray(img)
            if len(mat.shape) == 2:
                # Grayscale input: replicate the single channel to RGB.
                mat = np.stack([mat, mat, mat], axis=2)
            predictions = model.predict(mat[None], prediction_key='labels')[0]
            plt.imsave(os.path.join(output_dir, os.path.relpath(path, input_dir)),
                       predictions)
def main(args):
    """Restore the matting model and predict an alpha matte for one RGB image.

    Args:
        args: Namespace with gpu_fraction, rgb (image path) and alpha (path).

    Side effects: writes the predicted matte to ./alpha.png.
    """
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
        # Restore graph + weights and grab the I/O tensors by collection name.
        saver = tf.train.import_meta_graph('./meta_graph/my-model.meta')
        saver.restore(sess, tf.train.latest_checkpoint('./model'))
        image_batch = tf.get_collection('image_batch')[0]
        GT_trimap = tf.get_collection('GT_trimap')[0]
        pred_mattes = tf.get_collection('pred_mattes')[0]
        rgb = misc.imread(args.rgb)
        alpha = misc.imread(args.alpha, 'L')
        # Trimap derived from the ground-truth alpha (single channel kept).
        trimap = generate_trimap(np.expand_dims(np.copy(alpha), 2),
                                 np.expand_dims(alpha, 2))[:, :, 0]
        origin_shape = alpha.shape
        # Network expects mean-subtracted 320x320 inputs with a batch axis.
        rgb = np.expand_dims(
            misc.imresize(rgb.astype(np.uint8), [320, 320, 3]).astype(np.float32) - g_mean, 0)
        trimap = np.expand_dims(np.expand_dims(
            misc.imresize(trimap.astype(np.uint8), [320, 320],
                          interp='nearest').astype(np.float32), 2), 0)
        pred_alpha = sess.run(pred_mattes,
                              feed_dict={image_batch: rgb, GT_trimap: trimap})
        # Resize the prediction back to the original image size and save.
        final_alpha = misc.imresize(np.squeeze(pred_alpha), origin_shape)
        misc.imsave('./alpha.png', final_alpha)
def main(argv):
    """Dispatch to the training/testing routine selected by --train."""
    pprint.pprint(tf.app.flags.FLAGS.__flags)
    flags = tf.app.flags.FLAGS
    graph = tf.Graph()
    # Pin the process to the requested GPU before any session is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(flags.gpu)
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.22,
                             allow_growth=True)
    with graph.as_default():
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts),
                        graph=graph) as session:
            all_tests = test_list.tests
            tasks = task_list.tasks
            # Map the --train mode to its handler; unknown modes do nothing
            # (same as the original if/elif chain).
            dispatch = {
                'optimizer': lambda: train_optimizer(tasks[flags.task]),
                'optimizee': lambda: train_optimizee(all_tests[flags.task]),
                'optimizer_train_optimizee':
                    lambda: optimizer_train_optimizee(tasks[flags.task]),
                'test': lambda: test(tasks[flags.task]),
            }
            action = dispatch.get(flags.train)
            if action is not None:
                action()
def main(args):
    """Export pretrained variables from a TF meta-graph/checkpoint to a .npy file.

    Args:
        args: Namespace with meta_file, ckpt_file and save_path attributes.
    """
    # FIX: compare to None with "is" (identity), per PEP 8, not "==".
    if args.meta_file is None or not os.path.exists(args.meta_file):
        print("Invalid tensorflow meta-graph file:", args.meta_file)
        return
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess = tf.Session(config=tf.ConfigProto(
        gpu_options=gpu_options,
        log_device_placement=False,
        allow_soft_placement=True))
    with sess.as_default():
        # ---- load pretrained parameters ---- #
        saver = tf.train.import_meta_graph(args.meta_file, clear_devices=True)
        saver.restore(tf.get_default_session(), args.ckpt_file)
        pretrained = {}
        model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)
        print("total:", len(model_vars))
        for v in model_vars:
            print("process:", v.name)
            # [notice: the name of parameter is like 'Resnet/conv2d/bias:0',
            # here we should remove the prefix name, and get '/conv2d/bias:0']
            # NOTE(review): sess.run([v]) stores a one-element list per key;
            # if a bare array is wanted downstream, use sess.run(v) -- confirm.
            pretrained[v.name] = sess.run([v])
        np.save(args.save_path, pretrained)
        print("done:", len(pretrained.keys()))
def predict():
    """Interactive loop: read a line from stdin, print the top predicted reply."""
    session_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(visible_device_list="0"))
    with tf.Session(config=session_config) as sess:
        predictor = EasyPredictor(sess)
        sys.stdout.write("> ")
        sys.stdout.flush()
        while True:
            line = sys.stdin.readline()
            if not line:
                break  # EOF
            replies = predictor.predict(line)
            #for i, text in enumerate(replies):
            #print(i, text)
            # Only the best-ranked reply is shown.
            print(replies[0])
            print("> ", end="")
            sys.stdout.flush()
def main(_):
    """Load training parameters from an INI file and run the selected phase.

    Phases: 'train' trains the 3D U-Net; 'test' generates label maps;
    'crsv' runs cross-validation testing.
    """
    # load training parameter #
    ini_file = '../outcome/model/ini/tr_param.ini'
    param_sets = load_train_ini(ini_file)
    # Only the first parameter set in the file is used.
    param_set = param_sets[0]
    print '====== Phase >>> %s <<< ======' % param_set['phase']
    # Make sure checkpoint/output directories exist before any writes.
    if not os.path.exists(param_set['chkpoint_dir']):
        os.makedirs(param_set['chkpoint_dir'])
    if not os.path.exists(param_set['labeling_dir']):
        os.makedirs(param_set['labeling_dir'])
    # GPU setting, per_process_gpu_memory_fraction means 95% GPU MEM ,allow_growth means unfixed memory
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95, allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
        model = unet_3D_xy(sess, param_set)
        if param_set['phase'] == 'train':
            model.train()
        elif param_set['phase'] == 'test':
            # model.test()
            model.test_generate_map()
        elif param_set['phase'] == 'crsv':
            model.test4crsv()
def __init__(self, env):
    """Build the agent around *env*: session, baseline, storage and policy distribution.

    Args:
        env: Environment exposing observation_space and a continuous action_space.
    """
    self.env = env
    print("Observation Space", env.observation_space)
    print("Action Space", env.action_space)
    print("Action area, high:%f, low%f" % (env.action_space.high, env.action_space.low))
    # Share the GPU: claim only a third of a 10% slice of device memory.
    memory_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.1 / 3.0)
    self.session = tf.Session(config=tf.ConfigProto(gpu_options=memory_opts))
    self.end_count = 0
    self.paths = []
    self.train = True
    self.baseline = Baseline()
    self.storage = Storage(self, self.env, self.baseline)
    self.distribution = DiagonalGaussian(pms.action_shape)
    # The policy network is attached later.
    self.net = None
def main(_):
    """Run the agent in a Universe environment, restarting it whenever it crashes."""
    gpu_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
        config = get_config(FLAGS) or FLAGS
        env = UniverseEnvironment(config)
        if FLAGS.use_gpu and not tf.test.is_gpu_available():
            raise Exception("use_gpu flag is true when no GPUs are available")
        if not FLAGS.use_gpu:
            # CPU kernels expect channels-last data.
            config.cnn_format = 'NHWC'
        agent = Agent(config, env, sess)
        while True:
            try:
                if FLAGS.is_train:
                    agent.train()
                else:
                    agent.play()
            except universe.error.Error:
                # The remote environment occasionally dies; recreate and continue.
                print("Environment crashed, restarting.")
                agent.env = UniverseEnvironment(config)
def main(_):
    """Configure visible GPUs, build an Estimator and launch training."""
    # set up TF environment
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus
    gpus_list = FLAGS.gpus.split(',')
    # Save prefix layout: <root>/<dataset>/<model>/<try_num>
    prefix = '%s/%s/%s/%d' % (FLAGS.working_root, FLAGS.dataset_name,
                              FLAGS.model_name, FLAGS.try_num)
    if not os.path.exists(prefix):
        os.makedirs(prefix)
    model_params = {"num_classes": 10, "gpus_list": gpus_list}
    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True))
    run_config = tf.estimator.RunConfig().replace(
        model_dir=prefix,
        log_step_count_steps=100,
        save_checkpoints_secs=600,
        session_config=session_config)
    estimator = tf.estimator.Estimator(
        model_fn=model_fn, params=model_params, config=run_config)
    estimator.train(input_fn=lambda: input_fn(len(gpus_list)),
                    steps=None, max_steps=None)
def visualize(conf):
    """Restore a trained model and run the ball-position visualisation.

    Args:
        conf: Mutable config dict; data_dir/visualize/batch_size entries are
            rewritten in place before the model is built.
    """
    # Point the data dir at the sibling 'test' split.
    conf['data_dir'] = '/'.join(str.split(conf['data_dir'], '/')[:-1] + ['test'])
    conf['visualize'] = conf['output_dir'] + '/' + FLAGS.visualize
    conf['event_log_dir'] = '/tmp'
    conf['batch_size'] = 1
    conf['train_val_split'] = 1
    with tf.variable_scope('model', reuse=None) as training_scope:
        model = Model(conf)
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.VARIABLES),
                           max_to_keep=0)
    memory_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=memory_opts))
    sess.run(tf.initialize_all_variables())
    saver.restore(sess, conf['visualize'])
    # vis_different_goalpos(conf, model, sess)
    vis_different_ballpos(conf, model, sess)
def train(self):
    """Create a monitored session from the latest checkpoint and train all agents."""
    self.training = True
    util.log('Creating session and loading checkpoint')
    session = tf.train.MonitoredTrainingSession(
        checkpoint_dir=self.config.run_dir,
        save_summaries_steps=0,  # Summaries will be saved with train_op only
        config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
    with session:
        # A single agent trains inline; multiple agents train in threads.
        if len(self.agents) == 1:
            self.train_agent(session, self.agents[0])
        else:
            self.train_threaded(session)
    util.log('Training complete')
def classify(net, in_im, net_name, im_list, gt_labels):
    """Measure top-1 accuracy and fooling rate of a network on an image list.

    Args:
        net: Dict of network tensors; net['prob'] is the softmax output.
        in_im: Input image placeholder.
        net_name: Network id; 'caffenet' uses 227x227 preprocessing.
        im_list: Path to a text file with one image path per line.
        gt_labels: Path to a text file with one ground-truth label per line.
    """
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    imgs = open(im_list).readlines()
    gt_labels = open(gt_labels).readlines()
    fool_rate = 0
    top_1 = 0
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for i,name in enumerate(imgs):
            if net_name == 'caffenet':
                im = img_preprocess(name.strip(), size=227)
            else:
                im = img_preprocess(name.strip())
            # NOTE(review): rows 0 and 1 of the softmax output are compared
            # below, so the preprocessed batch presumably holds a clean and a
            # perturbed copy of the image -- confirm in img_preprocess.
            softmax_scores = sess.run(net['prob'], feed_dict={in_im: im})
            # Progress report every 1000 images.
            if i!=0 and i%1000 == 0:
                print 'iter: {:5d}\ttop-1: {:04.2f}\tfooling-rate: {:04.2f}'.format(i, (top_1/float(i))*100, (fool_rate)/float(i)*100)
            if np.argmax(softmax_scores[0]) == int(gt_labels[i].strip()):
                top_1 += 1
            if np.argmax(softmax_scores[0]) != np.argmax(softmax_scores[1]):
                fool_rate += 1
    # NOTE(review): final rates divide by 500, i.e. assume exactly 500 images
    # in im_list -- confirm.
    print 'Top-1 Accuracy = {:.2f}'.format(top_1/500.0)
    print 'Fooling Rate = {:.2f}'.format(fool_rate/500.0)
def setup(self, setup_options=None):
    """Initialise the TF session and reset game/statistics state.

    Args:
        setup_options: Optional kwargs dict forwarded to tf.ConfigProto; when
            None a config limiting GPU memory to params['gpu_fraction'] is built.
    """
    super(DeepQ, self).setup(setup_options=setup_options)
    with self.G.as_default():
        fraction = self.params['gpu_fraction']
        if setup_options is None:
            self.setup_config = tf.ConfigProto(
                gpu_options=tf.GPUOptions(
                    per_process_gpu_memory_fraction=fraction))
        else:
            self.setup_config = tf.ConfigProto(**setup_options)
            # Enforce the memory cap even on caller-supplied configs.
            self.setup_config.gpu_options.per_process_gpu_memory_fraction = fraction
        self.sess = tf.Session(config=self.setup_config)
        self.init = tf.global_variables_initializer()
        self.sess.run(self.init)
        # Sync the target network, then reset game and bookkeeping state.
        self.sess.run(self.cp_ops)
        self.reset_game()
        self.step = 0
        self.reset_statistics('all')
        self.train_cnt = self.sess.run(self.qnet.global_step)
def init_caffe_model(model_path):
    """Init caffe model for detect face.

    Loads the MTCNN P/R/O networks into module-level globals (_pnet, _rnet,
    _onet) so later detection calls can reuse them.
    """
    print('Creating networks and loading parameters')
    print('Load models path: ', model_path)
    start = time.time()  # measure load caffe model
    with tf.Graph().as_default():
        # TODO: GUI accelerate
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            networks = detect_face.create_mtcnn(sess, model_path)
    global _pnet, _rnet, _onet
    _pnet, _rnet, _onet = networks
    print('time used: ', time.time() - start)
def setup_tensorflow():
    """Create a memory-capped session, seed all RNGs, and open a summary writer.

    Returns:
        Tuple (sess, summary_writer).
    """
    # Create session
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction)
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement,
        gpu_options=gpu_opts))
    # Initialize rng with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)
        random.seed(FLAGS.random_seed)
        np.random.seed(FLAGS.random_seed)
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    return sess, summary_writer
def get_session_config():
    """Build kwargs for tf.ConfigProto from the global CONFIG dict.

    Returns:
        Dict of session arguments; includes 'gpu_options' when ngpu > 0.
    """
    import tensorflow as tf
    session_args = {
        'intra_op_parallelism_threads': CONFIG['nthread'],
        'inter_op_parallelism_threads': CONFIG['ncpu'],
        'allow_soft_placement': True,
        'log_device_placement': CONFIG['debug'],
    }
    if CONFIG['ngpu'] > 0:
        # cnmem > 0 pins a fixed memory fraction; otherwise grow on demand.
        if CONFIG['cnmem'] > 0:
            options = tf.GPUOptions(
                per_process_gpu_memory_fraction=CONFIG['cnmem'],
                allow_growth=False)
        else:
            options = tf.GPUOptions(allow_growth=True)
        session_args['gpu_options'] = options
    return session_args
def __init__(self, image_size=24, num_classes=10, batch_size=50, channels=3):
    """Build the classifier graph, its train/eval ops, and a live session.

    Args:
        image_size: Height/width of the (square) input images.
        num_classes: Number of output classes.
        batch_size: Mini-batch size recorded for callers.
        channels: Number of image channels (3 = RGB).
    """
    self._image_size = image_size
    self._num_classes = num_classes
    self._batch_size = batch_size
    self._channels = channels
    # Cap GPU memory so roughly three such processes can share one device.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    self._session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    # Placeholders: NHWC float32 images, integer class labels, dropout keep-prob.
    self._images = tf.placeholder(tf.float32, shape=[None, self._image_size, self._image_size, self._channels])
    self._labels = tf.placeholder(tf.int64, shape=[None])
    self._keep_prob = tf.placeholder(tf.float32)
    self._global_step = tf.Variable(0, tf.int64, name="global_step")
    # Forward pass, loss, optimiser and accuracy metric.
    self._logits = self._inference(self._images, self._keep_prob)
    self._avg_loss = self._loss(self._labels, self._logits)
    self._train_op = self._train(self._avg_loss)
    self._accuracy = F.accuracy_score(self._labels, self._logits)
    # Legacy (pre-1.0) TF saver/init API.
    self._saver = tf.train.Saver(tf.all_variables())
    self._session.run(tf.initialize_all_variables())
def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session (unused; a new session is created and returned)
    gpu_fraction : a float
        Fraction of GPU memory, (0 ~ 1]

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
    """
    print(" tensorlayer: GPU MEM Fraction %f" % gpu_fraction)
    capped = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=capped))
    return sess
def debug(self, processor):
    """Smoke-test the graph: run self.observe on random data and print its shape.

    Args:
        processor: Provides train_labels consumed by process_labels_cpu.
    """
    # Derive label tensors on the CPU from the processor's training labels.
    train_class_labels, train_object_masks, train_nobject_masks, \
        train_box_labels, train_box_masks = \
        self.process_labels_cpu(processor.train_labels)
    # Session with a small GPU memory cap.
    memory_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=memory_opts))
    self.sess.run(tf.global_variables_initializer())
    # Feed random inputs just to exercise the forward pass.
    feed = {self.images: numpy.random.random(size=[128, 384, 384, 3]),
            self.labels: numpy.random.randint(low=0, high=1, size=[128, 20, 5]),
            self.keep_prob: 1.0}
    [temp] = self.sess.run(fetches=[self.observe], feed_dict=feed)
    print(temp.shape)
    self.sess.close()
def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session (unused; a fresh session is created and returned)
    gpu_fraction : a float
        Fraction of GPU memory, (0 ~ 1]

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
    """
    print("[TL]: GPU MEM Fraction %f" % gpu_fraction)
    config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction))
    sess = tf.Session(config=config)
    return sess
def build_session(self):
    """Create the Supervisor-managed session, saver and summary writer."""
    self.saver = tf.train.Saver()
    self.summary_writer = tf.summary.FileWriter(self.model_dir)
    supervisor = tf.train.Supervisor(
        logdir=self.model_dir,
        is_chief=True,
        saver=self.saver,
        summary_op=None,  # summaries are written explicitly, not by sv
        summary_writer=self.summary_writer,
        save_summaries_secs=300,
        save_model_secs=self.checkpoint_secs,
        global_step=self.model.global_step)
    gpu_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=self.gpu_memory_fraction,
        allow_growth=True)  # seems to be not working
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 gpu_options=gpu_opts)
    self.sess = supervisor.prepare_or_wait_for_session(config=sess_config)
def _session_config(self):
    """Creates the session config with t2t default parameters.

    Returns:
        A tf.ConfigProto: fully serial CPU execution when _single_cpu_thread
        is set, otherwise a GPU config capped at 95% of device memory.
    """
    graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))
    if self._single_cpu_thread:
        return tf.ConfigProto(
            intra_op_parallelism_threads=1,
            inter_op_parallelism_threads=1,
            allow_soft_placement=True,
            graph_options=graph_options,
            log_device_placement=False)
    return tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.95),
        log_device_placement=False)
def __init__(self):
    """Load the YOLOv2-COCO frozen graph and start a growth-enabled session."""
    model_name = 'yolov2-coco'
    model_dir = './models'
    gpu_id = 4  # NOTE(review): unused -- device is hard-coded to /gpu:1 below.
    self.gpu_utility = 0.9
    self.pb_file = '{}/{}.pb'.format(model_dir, model_name)
    self.meta_file = '{}/{}.meta'.format(model_dir, model_name)
    self.batch = 4
    self.graph = tf.Graph()
    with tf.device('/gpu:1'):
        with self.graph.as_default() as g:
            self.build_from_pb()
            sess_config = tf.ConfigProto(
                gpu_options=tf.GPUOptions(allow_growth=True),
                log_device_placement=False)
            self.sess = tf.Session(config=sess_config)
            self.sess.run(tf.global_variables_initializer())
    return
def setup_meta_ops(self):
    """Create the session: on the GPU when gpu_utility > 0, else CPU only."""
    cfg = {'allow_soft_placement': False, 'log_device_placement': False}
    utility = min(self.gpu_utility, 1.0)  # clamp to a valid memory fraction
    if utility > 0.0:
        print('GPU model with {} usage'.format(utility))
        cfg['gpu_options'] = tf.GPUOptions(
            per_process_gpu_memory_fraction=utility)
        cfg['allow_soft_placement'] = True
    else:
        print('Run totally on CPU')
        cfg['device_count'] = {'GPU': 0}  # hide all GPUs
    self.sess = tf.Session(config=tf.ConfigProto(**cfg))
    self.sess.run(tf.global_variables_initializer())
def test(self, dataloader, backup_path, epoch, batch_size=128):
    """Restore the checkpoint for *epoch* and report test-set precision.

    Args:
        dataloader: Provides test_images/test_labels, n_test and data_augmentation.
        backup_path: Directory containing model_<epoch>.ckpt checkpoints.
        epoch: Which checkpoint to restore.
        batch_size: Evaluation batch size.
    """
    memory_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=memory_opts))
    # Restore model weights from the requested epoch.
    self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
    model_path = os.path.join(backup_path, 'model_%d.ckpt' % (epoch))
    assert(os.path.exists(model_path+'.index'))
    self.saver.restore(self.sess, model_path)
    print('read model from %s' % (model_path))
    # Evaluate batch by batch over the whole test set.
    accuracy_list = []
    test_images = dataloader.data_augmentation(
        dataloader.test_images, flip=False, crop=True,
        crop_shape=(24,24,3), whiten=True, noise=False)
    test_labels = dataloader.test_labels
    for start in range(0, dataloader.n_test, batch_size):
        batch_images = test_images[start: start+batch_size]
        batch_labels = test_labels[start: start+batch_size]
        [avg_accuracy] = self.sess.run(
            fetches=[self.accuracy],
            feed_dict={self.images: batch_images,
                       self.labels: batch_labels,
                       self.keep_prob: 1.0})
        accuracy_list.append(avg_accuracy)
    print('test precision: %.4f' % (numpy.mean(accuracy_list)))
    self.sess.close()
def test(self, backup_path, epoch, batch_size=128):
    """Restore the checkpoint for *epoch* and print CIFAR-10 test precision.

    Args:
        backup_path: Directory containing model_<epoch>.ckpt checkpoints.
        epoch: Which checkpoint to restore.
        batch_size: Evaluation batch size.
    """
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
    memory_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.45)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=memory_opts))
    # Restore model weights for the requested epoch.
    model_path = os.path.join(backup_path, 'model_%d.ckpt' % (epoch))
    assert(os.path.exists(model_path+'.index'))
    saver.restore(sess, model_path)
    print('read model from %s' % (model_path))
    # Average per-batch accuracy over the full test set.
    precision = []
    n_batches = int(cifar10.test.num_examples / batch_size)
    for _ in range(n_batches):
        batch_image, batch_label = cifar10.test.next_batch(batch_size)
        [precision_onebatch] = sess.run(
            fetches=[self.accuracy],
            feed_dict={self.image: batch_image,
                       self.label: batch_label,
                       self.keep_prob: 1.0})
        precision.append(precision_onebatch)
    print('test precision: %.4f' % (numpy.mean(precision)))
def main(_):
    """Load VQA data, then train the question generator or run inference."""
    attrs = conf.__dict__['__flags']
    pp(attrs)
    dataset, img_feature, train_data = get_data(
        conf.input_json, conf.input_img_h5, conf.input_ques_h5, conf.img_norm)
    gpu_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=calc_gpu_fraction(conf.gpu_fraction))
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
        model = question_generator.Question_Generator(
            sess, conf, dataset, img_feature, train_data)
        if conf.is_train:
            model.build_model()
            model.train()
        else:
            # Inference: generate questions up to 26 tokens long.
            model.build_generator()
            model.test(test_image_path=conf.test_image_path,
                       model_path=conf.test_model_path,
                       maxlen=26)
def main():
    """Restore a conditional-VAE voice model and convert each input file.

    Reads frames via the VCC2016 reader, re-synthesises them with the original
    label (xh) and with the --target_id label (x_conv), then plots spectra.

    Raises:
        ValueError: If --logdir is missing or holds no checkpoint.
    """
    if args.logdir is None:
        raise ValueError('Please specify the logdir file')
    ckpt = get_checkpoint(args.logdir)
    if ckpt is None:
        raise ValueError('No checkpoints in {}'.format(args.logdir))
    # The network architecture is stored next to the checkpoints.
    with open(os.path.join(args.logdir, 'architecture.json')) as f:
        arch = json.load(f)
    reader = VCC2016TFRManager()
    features = reader.read_whole(args.file_pattern, num_epochs=1)
    x = features['frame']
    y = features['label']
    filename = features['filename']
    # Conversion target: replace every label with the requested speaker id.
    y_conv = y * 0 + args.target_id
    net = MLPcVAE(arch=arch, is_training=False)
    z = net.encode(x)
    xh = net.decode(z, y)  # reconstruction with the original label
    x_conv = net.decode(z, y_conv)  # conversion to the target label
    pre_train_saver = tf.train.Saver()
    def load_pretrain(sess):
        # Supervisor init hook: restore pretrained weights before running.
        pre_train_saver.restore(sess, ckpt)
    sv = tf.train.Supervisor(init_fn=load_pretrain)
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=gpu_options)
    with sv.managed_session(config=sess_config) as sess:
        # One fetch per input file; stop early if the supervisor asks.
        for _ in range(reader.n_files):
            if sv.should_stop():
                break
            fetch_dict = {'x': x, 'xh': xh, 'x_conv': x_conv, 'f': filename}
            results = sess.run(fetch_dict)
            plot_spectra(results)
def train(self, nIter, machine=None, summary_op=None):
    """Run the main optimisation loop under a checkpointing Supervisor.

    Args:
        nIter: Unused; loop length comes from arch['training']['max_iter'].
        machine: Unused (kept for interface compatibility).
        summary_op: Unused (kept for interface compatibility).
    """
    run_metadata = tf.RunMetadata()
    sv = tf.train.Supervisor(
        logdir=self.dirs['logdir'],
        save_model_secs=300,  # checkpoint every 5 minutes
        global_step=self.opt['global_step'])
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        gpu_options=tf.GPUOptions(allow_growth=True))
    with sv.managed_session(config=sess_config) as sess:
        # Refresh the status line every 60 seconds in a supervisor loop.
        sv.loop(60, self._refresh_status, (sess,))
        for step in range(self.arch['training']['max_iter']):
            if sv.should_stop():
                break
            # main loop: one generator update per iteration.
            sess.run(self.opt['g'])
def serialize_cifar_pool3(X,filename):
    """Extract pool3 features for a batch of CIFAR images and save to .npy.

    Args:
        X: Batch of input images accepted by batch_pool3_features.
        filename: Destination .npy path for the extracted features.
    """
    print 'About to generate file: %s' % filename
    # Small memory fraction: feature extraction needs little GPU memory.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
    X_pool3 = batch_pool3_features(sess,X)
    np.save(filename,X_pool3)
def restrict_gpu_memory(per_process_gpu_memory_fraction: float = 0.9):
    """Install a Keras TF session limited to a fraction of GPU memory.

    Args:
        per_process_gpu_memory_fraction: Share of each GPU's memory (0-1]
            the process may allocate.
    """
    import os
    import tensorflow as tf
    import keras
    thread_count = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=per_process_gpu_memory_fraction)
    if thread_count:
        # BUG FIX: environment variables are strings; ConfigProto expects an
        # integer thread count, so convert explicitly.
        config = tf.ConfigProto(gpu_options=gpu_options,
                                allow_soft_placement=True,
                                intra_op_parallelism_threads=int(thread_count))
    else:
        config = tf.ConfigProto(gpu_options=gpu_options,
                                allow_soft_placement=True)
    keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def main(_):
    """Build environment, CNN prediction network and policy; train or test."""
    # preprocess: the flag arrives as a string literal, e.g. "[80, 80]".
    conf.observation_dims = eval(conf.observation_dims)
    # start
    gpu_opts = tf.GPUOptions(
        per_process_gpu_memory_fraction=calc_gpu_fraction(conf.gpu_fraction))
    dataset = data_loader(conf.source_path, conf.target_path)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as sess:
        env = Curve()
        pred_network = CNN(sess=sess,
                           observation_dims=conf.observation_dims,
                           name='pred_network',
                           trainable=True)
        policy = Policy(sess=sess,
                        pred_network=pred_network,
                        env=env,
                        dataset=dataset,
                        conf=conf)
        if conf.is_train:
            policy.train()
        else:
            policy.test(conf.test_image_path)
def twitter_bot():
    """Poll tweets and post a seq2seq-predicted reply for each unprocessed one.

    Twitter credentials are read from environment variables; each handled
    tweet is marked processed so it is not answered twice.
    """
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)
    # API credentials come from the environment.
    consumer_key = os.getenv("consumer_key")
    consumer_secret = os.getenv("consumer_secret")
    access_token = os.getenv("access_token")
    access_token_secret = os.getenv("access_token_secret")
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    with tf.Session(config=tf_config) as sess:
        predictor = predict.EasyPredictor(sess)
        for tweet in tweets():
            status_id, status, bot_flag = tweet
            print("Processing {0}...".format(status.text))
            screen_name = status.author.screen_name
            replies = predictor.predict(status.text)
            if not replies:
                print("no reply")
                continue
            # Use the top-ranked reply only.
            reply_body = replies[0]
            if reply_body is None:
                print("No reply predicted")
            else:
                try:
                    post_reply(api, bot_flag, reply_body, screen_name, status_id)
                except tweepy.TweepError as e:
                    # duplicate status
                    # Twitter API error 187 = duplicate tweet; safe to ignore.
                    if e.api_code == 187:
                        pass
                    else:
                        raise
            # Mark handled regardless of whether a reply was posted.
            mark_tweet_processed(status_id)
def evaluate(model, dataset, params):
    """Restore the latest checkpoint and run retrieval + loss evaluation.

    Args:
        model: Model object passed through to the evaluation routines.
        dataset: Dataset to evaluate on.
        params: Config with num_cores and the checkpoint directory in .model.
    """
    session_config = tf.ConfigProto(
        inter_op_parallelism_threads=params.num_cores,
        intra_op_parallelism_threads=params.num_cores,
        gpu_options=tf.GPUOptions(allow_growth=True))
    with tf.Session(config=session_config) as session:
        tf.local_variables_initializer().run()
        tf.global_variables_initializer().run()
        # Restore the most recent checkpoint from params.model.
        saver = tf.train.Saver(tf.global_variables())
        checkpoint = tf.train.get_checkpoint_state(params.model)
        saver.restore(session, checkpoint.model_checkpoint_path)
        evaluate_retrieval(model, dataset, params, session)
        evaluate_loss(model, dataset, params, session)