The following 37 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.client.device_lib.list_local_devices().
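Before the examples, a minimal sketch of the call itself may help (assuming a TensorFlow 1.x installation; device_lib is an internal module, not part of the public API, so its location may change between releases). list_local_devices() returns a list of DeviceAttributes protos, and the examples below are built almost entirely from the name, device_type, memory_limit, and physical_device_desc fields:

# Minimal sketch, assuming TensorFlow 1.x; device_lib is an internal
# module, so this interface is not part of the public API.
from tensorflow.python.client import device_lib

for dev in device_lib.list_local_devices():
    # Each entry is a DeviceAttributes proto.
    print(dev.name)                  # e.g. '/device:GPU:0'
    print(dev.device_type)           # e.g. 'CPU' or 'GPU'
    print(dev.memory_limit)          # memory limit in bytes
    print(dev.physical_device_desc)  # free-form hardware description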
def sg_gpus():
    r""" Gets the number of GPUs currently available.
    Returns:
      An integer: total number of GPUs available.
    """
    global _gpus

    if _gpus is None:
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos
                     if x.device_type == 'GPU'])

    return max(_gpus, 1)


#
# context helpers
#
def has_gpu() -> bool:
    """Check if TensorFlow can access GPU.

    The test is based on
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/test.py
    ...but we are interested only in CUDA GPU devices.

    Returns:
        True, if TF can access the GPU
    """
    # pylint: disable=global-statement
    global __HAS_GPU_RESULT
    # pylint: enable=global-statement
    if __HAS_GPU_RESULT is None:
        __HAS_GPU_RESULT = any((x.device_type == 'GPU')
                               for x in _device_lib.list_local_devices())
    return __HAS_GPU_RESULT
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    values = ""
    counter = 0
    length = len(local_device_protos)
    for device in local_device_protos:
        if device.device_type == "GPU":
            description = "Found " + device.physical_device_desc
            values += description
            if counter < length - 1:
                values += "\n"
        counter += 1
    # return [x.name for x in local_device_protos if x.device_type == 'GPU']
    return values


# this is taken from the C++ header file
# const int INFO = 0;            // base_logging::INFO;
# const int WARNING = 1;         // base_logging::WARNING;
# const int ERROR = 2;           // base_logging::ERROR;
# const int FATAL = 3;           // base_logging::FATAL;
# const int NUM_SEVERITIES = 4;  // base_logging::NUM_SEVERITIES;
def is_gpu_available(cuda_only=True):
    """
    code from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/test.py

    Returns whether TensorFlow can access a GPU.

    Args:
      cuda_only: limit the search to CUDA gpus.

    Returns:
      True iff a gpu device of the requested kind is available.
    """
    from tensorflow.python.client import device_lib as _device_lib

    if cuda_only:
        return any((x.device_type == 'GPU')
                   for x in _device_lib.list_local_devices())
    else:
        return any((x.device_type == 'GPU' or x.device_type == 'SYCL')
                   for x in _device_lib.list_local_devices())
def get_available_gpus(): r""" Returns the number of GPUs available on this system. """ local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.physical_device_desc
            for x in local_device_protos if x.device_type == 'GPU']
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_available_gpus(): """ The function does what its name says. Simple as that. """ local_device_protos = device_lib.list_local_devices() return len([x.name for x in local_device_protos if x.device_type == 'GPU'])
def get_available_gpus():
    try:
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return [x.name for x in local_device_protos
                if x.device_type == 'GPU']
    except Exception as ex:
        logging.error(
            'Error while trying to list available GPUs: %s' % str(ex))
        return list()
def get_tf_session():
    """ Returning a session. Set options here (e.g. for GPUs) if desired. """
    tf.reset_default_graph()
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1)
    # Set the GPU memory cap on the same config; otherwise the
    # thread-parallelism settings above would be silently discarded.
    tf_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    session = tf.Session(config=tf_config)

    def get_available_gpus():
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return [x.physical_device_desc
                for x in local_device_protos if x.device_type == 'GPU']

    print("AVAILABLE GPUS: ", get_available_gpus())
    return session
def get_tf_session():
    """ Returning a session. Set options here if desired. """
    tf.reset_default_graph()
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1)
    # As above, attach the GPU memory cap to the config that is
    # actually passed to the session.
    tf_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    session = tf.Session(config=tf_config)

    def get_available_gpus():
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return [x.physical_device_desc
                for x in local_device_protos if x.device_type == 'GPU']

    print("AVAILABLE GPUS: ", get_available_gpus())
    return session
def get_available_gpus(): """ ??GPU????nvidia-smi ?????????ps aux | grep PID :return: GPU?? """ local_device_protos = device_lib.list_local_devices() print "all: %s" % [x.name for x in local_device_protos] print "gpu: %s" % [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
def get_available_devices(gpu_only=False):
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    if gpu_only:
        return [x.name for x in local_device_protos
                if x.device_type == 'GPU']
    else:
        return [x.name for x in local_device_protos]
def get_available_gpus(ngpus=-1):
    '''
    :param int ngpus: maximum number of GPUs to use. Default -1 means all GPUs.
    :returns: list of GPU devices, e.g. ['/gpu:0', '/gpu:1', ...]
    '''
    local_device_protos = device_lib.list_local_devices()
    gpus_list = [x.name for x in local_device_protos
                 if x.device_type == 'GPU']
    return gpus_list[:ngpus] if ngpus > -1 else gpus_list
def __init__(self):
    self.devices = device_lib.list_local_devices()
    self.cpus = [x.name for x in self.devices if x.device_type == 'CPU']
    self.gpus = [x.name for x in self.devices if x.device_type == 'GPU']
    self.iterate_cpus = it.cycle(self.cpus)
    self.iterate_gpus = it.cycle(self.gpus)
def get_num_gpu():
    """Get the number of available GPUs.

    Returns:
        an `int`: the number of GPUs in CUDA_VISIBLE_DEVICES, or in the system.
    """
    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env is not None:
        return len(env.split(','))
    from tensorflow.python.client import device_lib
    device_protos = device_lib.list_local_devices()
    gpus = [x.name for x in device_protos if x.device_type == 'GPU']
    return len(gpus)
def _get_available_gpus(self):
    """Get the indices of the available GPUs."""
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    # Device names end in the device index (e.g. '/device:GPU:0'), so
    # taking the last character assumes fewer than 10 GPUs.
    return [int(x.name[-1]) for x in local_device_protos
            if x.device_type == 'GPU']
def tf_is_gpu():
    local_devices = device_lib.list_local_devices()
    return len([x for x in local_devices if x.device_type == 'GPU']) > 0
def count_gpus():
    from tensorflow.python.client import device_lib
    count = 0
    for device in device_lib.list_local_devices():
        if device.device_type == "GPU":
            count += 1
    return count
def get_available_gpus(): """ Get the number of available gpus """ local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU']
def haveGpu0(self):
    device_names = [d.name for d in device_lib.list_local_devices()]
    return "/gpu:0" in device_names
def get_available_gpus(num_gpus=None):
    """
    Modified from http://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow

    However, the original code will occupy all available GPU memory.
    The modified code takes a parameter, num_gpus, and does nothing but
    return the device handle names.
    It works well for single-machine training, but I don't know whether
    it will work well on a cluster.
    """
    if num_gpus is None:
        from tensorflow.python.client import device_lib as _device_lib
        local_device_protos = _device_lib.list_local_devices()
        return [x.name for x in local_device_protos
                if x.device_type == 'GPU']
    else:
        return ['/gpu:%d' % idx for idx in range(num_gpus)]
def testGradientColocation(self):
    """Tests a particular device (e.g. gpu, cpu) placement.

    This test ensures that the following device placement is possible:
    * The Linear module is on the gpu,
    * the optimizer is declared to be on the cpu,
    * but when calling minimize on the optimizer, we pass True to
      colocate_gradients_with_ops.

    The test exists because while one may expect tf.matmul(X, w) + b to be
    equivalent to tf.nn.xw_plus_b(X, w, b), with the latter this placement
    results in an InvalidArgumentError.

    Warning: if there is no gpu available to tensorflow this test will be
    skipped with just a warning! This is because the test requires that
    tensorflow has access to a gpu, but often this is not the case.
    """
    if not any(x.device_type == "GPU"
               for x in device_lib.list_local_devices()):
        tf.logging.warn("Skipping the gradient colocation test as there is no "
                        "gpu available to tensorflow.")
        return
    n_outputs = 5
    n_inputs = 3
    batch_size = 7
    linear = snt.Linear(n_outputs)
    with tf.device("/cpu:*"):
        # Set up data.
        inputs = tf.placeholder(tf.float32, [batch_size, n_inputs])
        labels = tf.to_int64(np.ones((batch_size)))
        # Predictions.
        with tf.device("/gpu:*"):
            outputs = linear(inputs)
        # Calculate the loss.
        cross_entropy = tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(  # pylint: disable=line-too-long
            outputs, labels, name="xentropy")
        loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
        # Optimizer.
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        optimizer.minimize(loss, colocate_gradients_with_ops=True)
        init = tf.global_variables_initializer()
    try:
        with self.test_session(force_gpu=True) as sess:
            sess.run(init)
    except tf.errors.InvalidArgumentError as e:
        self.fail("Cannot start the session. Details:\n" + e.message)
def _query_gpu_info():
    """ This function queries GPU information:
        ngpu
        [device_name, device_compute_capability, device_total_memory]

    Note
    ----
    this function uses the deviceQuery command, so you had better have it
    in your path
    """
    dev = {'ngpu': 1,
           # deviceName: [cardName, computeCapability, mem(MB)]
           'dev0': ['Unknown', 3.0, 1024]}
    temp_dir = tempfile.mkdtemp()
    p = os.path.join(temp_dir, 'tmp.txt')
    queried = subprocess.call('deviceQuery > ' + p,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) == 0
    dev = {}
    if queried:  # found deviceQuery
        info = open(p, 'r').read()
        devNames = re.compile(r'Device \d: ".*"').findall(info)
        devNames = [i.strip().split(':')[-1].replace('"', '')
                    for i in devNames]
        ngpu = len(devNames)
        comCap = re.compile(
            r'CUDA Capability Major\/Minor version number:\s*.*').findall(info)
        comCap = [float(i.strip().split(':')[-1]) for i in comCap]
        totalMems = re.compile(
            r'Total amount of global memory:\s*\d*').findall(info)
        totalMems = [int(i.strip().split(':')[-1]) for i in totalMems]
        # ====== create dev ====== #
        dev['ngpu'] = ngpu
        for i, (name, com, mem) in enumerate(zip(devNames, comCap, totalMems)):
            dev['dev%d' % i] = [name, com, mem]
    else:
        _warning('Cannot use "deviceQuery" to get GPU information for '
                 'configuration.')
        # Fall back to TensorFlow, which at least reports the device names.
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        dev['ngpu'] = 0
        for i, name in enumerate(x.name for x in local_device_protos
                                 if x.device_type == 'GPU'):
            dev['dev%d' % i] = [name, None, None]
            dev['ngpu'] += 1
    # remove temp-dir
    shutil.rmtree(temp_dir)
    return dev