This article collects typical usage examples of the Python method tensorflow.compat.v1.GPUOptions. If you have been wondering what exactly v1.GPUOptions does, how to use it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the module this method belongs to, tensorflow.compat.v1.
Four code examples of the v1.GPUOptions method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
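Before the full examples, here is a minimal sketch of the pattern they all share: build a GPUOptions message, wrap it in a ConfigProto, and pass that to a Session. The 0.4 fraction is an illustrative value, not taken from the examples below.

import tensorflow.compat.v1 as tf

# Cap this process at roughly 40% of each visible GPU's memory and
# let the allocation grow on demand instead of reserving it upfront.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4,
                            allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))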
Example 1: set_gpu_fraction
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import GPUOptions [as alias]
import tensorflow.compat.v1 as tf


def set_gpu_fraction(sess=None, gpu_fraction=0.3):
    """Set the GPU memory fraction for the application.

    Parameters
    ----------
    sess : a session instance of TensorFlow
        TensorFlow session
    gpu_fraction : a float
        Fraction of GPU memory, (0 ~ 1]

    References
    ----------
    - `TensorFlow using GPU <https://www.tensorflow.org/versions/r0.9/how_tos/using_gpu/index.html>`_
    """
    print(" tensorlayer: GPU MEM Fraction %f" % gpu_fraction)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    # Note: the incoming `sess` argument is ignored; a new session with the
    # requested memory fraction is created and returned.
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    return sess
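A usage sketch, assuming a CUDA-capable GPU is visible and graph mode is active:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

sess = set_gpu_fraction(gpu_fraction=0.5)  # reserve ~50% of GPU memory
print(sess.list_devices())
sess.close()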
Example 2: create_session_config
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import GPUOptions [as alias]
import tensorflow.compat.v1 as tf
from tensorflow.core.protobuf import rewriter_config_pb2


def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
    """The TensorFlow Session config to use."""
    if use_tpu:
        graph_options = tf.GraphOptions()
    else:
        if enable_graph_rewriter:
            rewrite_options = rewriter_config_pb2.RewriterConfig()
            rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
            graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
        else:
            graph_options = tf.GraphOptions(
                optimizer_options=tf.OptimizerOptions(
                    opt_level=tf.OptimizerOptions.L1,
                    do_function_inlining=False,
                    global_jit_level=xla_jit_level))

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_mem_fraction)

    config = tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=gpu_options,
        log_device_placement=log_device_placement,
        inter_op_parallelism_threads=inter_op_parallelism_threads,
        intra_op_parallelism_threads=intra_op_parallelism_threads,
        isolate_session_state=True)
    return config
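For instance, one might enable XLA JIT compilation like this (ON_1 is one level of the tf.OptimizerOptions global JIT enum; the values here are illustrative):

import tensorflow.compat.v1 as tf

config = create_session_config(
    gpu_mem_fraction=0.8,
    xla_jit_level=tf.OptimizerOptions.ON_1)
sess = tf.Session(config=config)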
Example 3: build_config
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import GPUOptions [as alias]
import os

from tensorflow.compat.v1 import ConfigProto, GPUOptions


def build_config(limit_gpu_fraction=0.2, limit_cpu_fraction=10):
    if limit_gpu_fraction > 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        gpu_options = GPUOptions(
            allow_growth=True,
            per_process_gpu_memory_fraction=limit_gpu_fraction)
        config = ConfigProto(gpu_options=gpu_options)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        config = ConfigProto(device_count={'GPU': 0})
    if limit_cpu_fraction is not None:
        if limit_cpu_fraction == 0:
            # 0 falls back to a single CPU
            cpu_count = 1
        elif limit_cpu_fraction < 0:
            # -2 gives all CPUs except 1
            cpu_count = max(
                1, int(os.cpu_count() + limit_cpu_fraction + 1))
        elif limit_cpu_fraction < 1:
            # 0.5 gives 50% of available CPUs
            cpu_count = max(
                1, int(os.cpu_count() * limit_cpu_fraction))
        else:
            # 2 gives 2 CPUs
            cpu_count = int(limit_cpu_fraction)
        config.inter_op_parallelism_threads = cpu_count
        config.intra_op_parallelism_threads = cpu_count
        os.environ['OMP_NUM_THREADS'] = str(1)
        os.environ['MKL_NUM_THREADS'] = str(cpu_count)
    return config
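A usage sketch with illustrative values: GPU 0 capped at 20% of its memory, using half of the machine's CPU cores:

import tensorflow.compat.v1 as tf

config = build_config(limit_gpu_fraction=0.2, limit_cpu_fraction=0.5)
sess = tf.Session(config=config)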
Example 4: get_tf_config
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import GPUOptions [as alias]
from tensorflow.compat.v1 import ConfigProto, GPUOptions


def get_tf_config(gpus=None, gpu_fraction=1, horovod=None,
                  allow_parallel_threads=True):
    intra_op_parallelism_threads = 0  # tf determines automatically
    inter_op_parallelism_threads = 0  # tf determines automatically
    if not allow_parallel_threads:
        # single-threaded execution is needed for reproducibility
        intra_op_parallelism_threads = 1
        inter_op_parallelism_threads = 1

    if gpus is not None:
        if 0 < gpu_fraction < 1:
            # this is the source of freezing in tensorflow 1.3.1
            gpu_options = GPUOptions(
                per_process_gpu_memory_fraction=gpu_fraction,
                allow_growth=True)
        else:
            # allow_growth=True is needed for a weird behavior with CUDA 10
            # https://github.com/tensorflow/tensorflow/issues/24828
            gpu_options = GPUOptions(allow_growth=True)
        if isinstance(gpus, int):
            gpus = [gpus]
        gpu_options.visible_device_list = ','.join(str(g) for g in gpus)
        tf_config = ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False,
            intra_op_parallelism_threads=intra_op_parallelism_threads,
            inter_op_parallelism_threads=inter_op_parallelism_threads,
            gpu_options=gpu_options)
    else:
        tf_config = ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False,
            intra_op_parallelism_threads=intra_op_parallelism_threads,
            inter_op_parallelism_threads=inter_op_parallelism_threads,
            gpu_options=GPUOptions(allow_growth=True))

    if horovod is not None:
        tf_config.gpu_options.visible_device_list = str(horovod.local_rank())
    return tf_config
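A usage sketch assuming two visible GPUs (indices 0 and 1) and no Horovod; the 0.4 fraction is an illustrative value:

import tensorflow.compat.v1 as tf

config = get_tf_config(gpus=[0, 1], gpu_fraction=0.4,
                       allow_parallel_threads=False)
sess = tf.Session(config=config)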