This article collects typical usage examples of the Python method tensorflow.GraphOptions. If you are wondering what exactly tensorflow.GraphOptions does, how to call it, or what real-world usages look like, the curated code examples here may help. You can also explore further usage examples from the tensorflow module that this method belongs to.
A total of 15 code examples of tensorflow.GraphOptions are shown below, sorted by popularity by default.
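For orientation before the examples, here is a minimal, self-contained sketch (not taken from any example below, TF 1.x API assumed) of the common pattern: build an OptimizerOptions proto, wrap it in GraphOptions, and pass it to a ConfigProto used to create the Session.

import tensorflow as tf  # TF 1.x API assumed

# Turn graph optimizations down to L0 via GraphOptions.
graph_options = tf.GraphOptions(
    optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0))
config = tf.ConfigProto(graph_options=graph_options)

with tf.Session(config=config) as sess:
  a = tf.constant(2.0)
  b = tf.constant(3.0)
  print(sess.run(a * b))  # 6.0, evaluated with optimizations disabled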
Example 1: native_op_vs_composed_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
  np.random.seed(1618)  # Make it reproducible.
  shape = [batch_size, num_classes]
  logits_np = np.random.randn(*shape).astype(np.float32)

  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

  with tf.Session(config=config) as sess:
    logits = tf.constant(logits_np, shape=shape)
    native_op = tf.group(native_sampler(logits, num_samples))
    composed_op = tf.group(composed_sampler(logits, num_samples))
    native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
    composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
  return native_dt, composed_dt
Example 2: parameterized_vs_naive
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))

  with tf.Session(config=config) as sess:
    with tf.device("/cpu:0" if not use_gpu else None):
      param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
      naive_op = tf.group(random_ops.truncated_normal(shape))

      # Burn-in to avoid session setup costs in the timing.
      sess.run(param_op)
      sess.run(param_op)
      param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
      sess.run(naive_op)
      sess.run(naive_op)
      naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters)
  return param_dt, naive_dt
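A hypothetical driver for the benchmark above; the shape and iteration count are illustrative values, not taken from the original source.

# Hypothetical call; assumes parameterized_vs_naive is importable from this module.
param_dt, naive_dt = parameterized_vs_naive(shape=[10000], num_iters=50, use_gpu=False)
print("parameterized: %.4fs  naive: %.4fs" % (param_dt, naive_dt))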
Example 3: testTanhSymGrad
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def testTanhSymGrad(self):
  @function.Defun(tf.float32)
  def Forward(x):
    return tf.reduce_sum(tf.tanh(x))

  g = tf.Graph()
  with g.as_default():
    x = tf.placeholder(tf.float32)
    y = Forward(x)
    dx = tf.gradients([y], [x])

  inp = np.array([-1, 1, 2, -2], dtype=np.float32)
  feed = {x: inp}
  # Enable function inlining so the Defun gradient is computed symbolically.
  cfg = tf.ConfigProto(graph_options=tf.GraphOptions(
      optimizer_options=tf.OptimizerOptions(
          opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)))
  with tf.Session(graph=g, config=cfg) as sess:
    out, = sess.run(dx, feed)
  self.assertAllClose(1 - np.square(np.tanh(inp)), out)
Example 4: _session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def _session_config(self):
  """Creates the session config with t2t default parameters."""
  graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
      opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))

  if self._single_cpu_thread:
    config = tf.ConfigProto(
        intra_op_parallelism_threads=1,
        inter_op_parallelism_threads=1,
        allow_soft_placement=True,
        graph_options=graph_options,
        log_device_placement=False)
  else:
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.95)
    config = tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_options,
        gpu_options=gpu_options,
        log_device_placement=False)
  return config
Example 5: create_session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """The TensorFlow Session config to use."""
  if use_tpu:
    # On TPU, keep the default GraphOptions.
    graph_options = tf.GraphOptions()
  else:
    if enable_graph_rewriter:
      rewrite_options = rewriter_config_pb2.RewriterConfig()
      rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
      graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
    else:
      graph_options = tf.GraphOptions(
          optimizer_options=tf.OptimizerOptions(
              opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))

  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)

  config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_options,
      gpu_options=gpu_options,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads)
  return config
Example 6: _add_infer_shapes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def _add_infer_shapes(graph_def):
  with tf.Graph().as_default():
    # infer_shapes=True annotates nodes with statically inferred output shapes.
    with tf.Session(
        config=tf.ConfigProto(
            graph_options=tf.GraphOptions(infer_shapes=True))) as sess:
      tf.import_graph_def(graph_def, name="")
  return sess.graph_def
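A hedged usage sketch for the helper above: it assumes you already hold a serialized GraphDef (the "model.pb" path is hypothetical); the returned GraphDef carries the shape annotations produced by infer_shapes.

# Hypothetical usage; "model.pb" is an illustrative path.
with tf.gfile.GFile("model.pb", "rb") as f:
  graph_def = tf.GraphDef()
  graph_def.ParseFromString(f.read())
graph_def_with_shapes = _add_infer_shapes(graph_def)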
Example 7: create_session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """The TensorFlow Session config to use."""
  if use_tpu:
    graph_options = tf.GraphOptions()
  else:
    if enable_graph_rewriter:
      rewrite_options = rewriter_config_pb2.RewriterConfig()
      rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
      graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
    else:
      graph_options = tf.GraphOptions(
          optimizer_options=tf.OptimizerOptions(
              opt_level=tf.OptimizerOptions.L1,
              do_function_inlining=False,
              global_jit_level=xla_jit_level))

  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)

  config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_options,
      gpu_options=gpu_options,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads,
      isolate_session_state=True)
  return config
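As a usage illustration for the helper above (argument values are assumptions, not from the source), it can be asked for a config with the Grappler layout rewriter on, or with XLA auto-jit enabled via the global_jit_level field:

# Hypothetical calls to create_session_config as defined above.
rewriter_config = create_session_config(enable_graph_rewriter=True)
xla_config = create_session_config(xla_jit_level=tf.OptimizerOptions.ON_1)
sess = tf.Session(config=xla_config)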
Example 8: session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def session_config(params):
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                          do_function_inlining=True)
  graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
  config = tf.ConfigProto(allow_soft_placement=True,
                          graph_options=graph_options)

  if distribute.is_distributed_training_mode():
    # In distributed training, pin each process to its local GPU.
    config.gpu_options.visible_device_list = str(distribute.local_rank())
  elif params.device_list:
    device_str = ",".join([str(i) for i in params.device_list])
    config.gpu_options.visible_device_list = device_str

  return config
Example 9: session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def session_config(params):
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                          do_function_inlining=False)
  graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
  config = tf.ConfigProto(allow_soft_placement=True,
                          graph_options=graph_options,
                          intra_op_parallelism_threads=16,
                          inter_op_parallelism_threads=16)

  if params.device_list:
    device_str = ",".join([str(i) for i in params.device_list])
    config.gpu_options.visible_device_list = device_str

  return config
Example 10: session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def session_config(params):
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                          do_function_inlining=False)
  graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
  config = tf.ConfigProto(allow_soft_placement=True,
                          graph_options=graph_options)

  if params.device_list:
    device_str = ",".join([str(i) for i in params.device_list])
    config.gpu_options.visible_device_list = device_str

  return config
Example 11: session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def session_config(params):
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                                          do_function_inlining=True)
  graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
  config = tf.ConfigProto(allow_soft_placement=True,
                          graph_options=graph_options)

  if params.device_list:
    device_str = ",".join([str(i) for i in params.device_list])
    config.gpu_options.visible_device_list = device_str

  return config
Example 12: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def __init__(self, iterations):
  tf.logging.info("TrainLowLevelRunner: constructor")

  self.feature_structure = {}
  self.loss = None
  self.infeed_queue = []
  self.enqueue_ops = []
  self.dataset_initializer = []
  self.iterations = iterations
  self.num_hosts = FLAGS.num_shards // FLAGS.num_shards_per_host
  self.scaffold_fn = None
  # Having two separate sessions and graphs to make the initialization faster.
  self.input_sess = None
  self.train_sess = None
  self.input_graph = tf.Graph()
  self.train_graph = None
  self.tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  # Disable grappler for better performance.
  self.session_config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=tf.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              disable_meta_optimizer=True)),
      isolate_session_state=True)
  cluster_spec = self.tpu_cluster_resolver.cluster_spec()
  if cluster_spec:
    self.session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
  self.tpu_init = [tpu.initialize_system()]
  self.tpu_shutdown = tpu.shutdown_system()
  self.init_sess = tf.Session(self.tpu_cluster_resolver.get_master(),
                              config=self.session_config)
  self.init_sess.run(self.tpu_init)
  self.queue = Queue.Queue()
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def __init__(self, iterations, train_steps):
  tf.logging.info("TrainRunner: constructor")

  self.feature_structure = {}
  self.loss = None
  self.infeed_queue = []
  self.enqueue_ops = []
  self.dataset_initializer = []
  self.iterations = iterations
  self.sess = None
  self.input_sess = None
  self.infeed_thread = None
  # Round train_steps up to a whole number of infeed iterations.
  if train_steps % iterations != 0:
    train_steps = iterations * int(math.ceil(train_steps / iterations))
  self.train_steps = train_steps
  self.input_graph = tf.Graph()
  tpu_init = [tpu.initialize_system()]
  self.tpu_shutdown = tpu.shutdown_system()
  self.cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  # Disable the Grappler meta-optimizer for this session.
  self.config = tf.ConfigProto(
      operation_timeout_in_ms=600 * 60 * 1000,
      graph_options=tf.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              disable_meta_optimizer=True)),
      isolate_session_state=True)
  cluster_spec = self.cluster_resolver.cluster_spec()
  if cluster_spec:
    self.config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
  self.init_sess = tf.Session(self.cluster_resolver.get_master(),
                              config=self.config)
  self.init_sess.run(tpu_init)
Example 14: create_session_config
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """The TensorFlow Session config to use."""
  if use_tpu:
    graph_options = tf.GraphOptions()
  else:
    if enable_graph_rewriter:
      rewrite_options = rewriter_config_pb2.RewriterConfig()
      rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
      graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
    else:
      graph_options = tf.GraphOptions(
          optimizer_options=tf.OptimizerOptions(
              opt_level=tf.OptimizerOptions.L1,
              do_function_inlining=False,
              global_jit_level=xla_jit_level))

  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)

  config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_options,
      gpu_options=gpu_options,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads)
  return config
Example 15: _OptimizerOptions
# Required import: import tensorflow [as alias]
# Or: from tensorflow import GraphOptions [as alias]
def _OptimizerOptions():
  # Enumerate all 8 combinations of CSE, function inlining, and constant folding.
  for cse in [False, True]:
    for inline in [False, True]:
      for cfold in [False, True]:
        yield tf.ConfigProto(graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(
                opt_level=tf.OptimizerOptions.L0,
                do_common_subexpression_elimination=cse,
                do_function_inlining=inline,
                do_constant_folding=cfold)))
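A sketch of how such a generator is typically consumed in a test (the op being checked is an assumption for illustration): the same graph is run under every yielded ConfigProto, i.e. under each combination of CSE, inlining, and constant folding.

# Hypothetical consumption of the _OptimizerOptions generator above.
for cfg in _OptimizerOptions():
  with tf.Graph().as_default(), tf.Session(config=cfg) as sess:
    x = tf.constant([1.0, 2.0, 3.0])
    result = sess.run(tf.reduce_sum(x * x))
    assert abs(result - 14.0) < 1e-6  # 1 + 4 + 9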