本文整理汇总了Python中tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig方法的典型用法代码示例。如果您正苦于以下问题:Python rewriter_config_pb2.RewriterConfig方法的具体用法?Python rewriter_config_pb2.RewriterConfig怎么用?Python rewriter_config_pb2.RewriterConfig使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.core.protobuf.rewriter_config_pb2
的用法示例。
在下文中一共展示了rewriter_config_pb2.RewriterConfig方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """Build the tf.ConfigProto to use for TensorFlow sessions.

  Args:
    log_device_placement: If True, log the device every op is placed on.
    enable_graph_rewriter: If True (ignored on TPU), enable Grappler's
      layout optimizer.
    gpu_mem_fraction: Fraction of GPU memory the process may claim.
    use_tpu: If True, use default graph options with no rewriter tweaks.
    inter_op_parallelism_threads: Thread pool size for independent ops;
      0 lets TensorFlow choose.
    intra_op_parallelism_threads: Thread pool size within a single op;
      0 lets TensorFlow choose.

  Returns:
    A tf.ConfigProto instance.
  """
  if use_tpu:
    graph_opts = tf.GraphOptions()
  elif enable_graph_rewriter:
    rewriter_opts = rewriter_config_pb2.RewriterConfig()
    rewriter_opts.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
    graph_opts = tf.GraphOptions(rewrite_options=rewriter_opts)
  else:
    # Keep L1 optimizations but turn off function inlining.
    optimizer_opts = tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L1, do_function_inlining=False)
    graph_opts = tf.GraphOptions(optimizer_options=optimizer_opts)
  gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
  return tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_opts,
      gpu_options=gpu_opts,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads)
示例2: print_info
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def print_info(self):
  """Print basic information about the benchmark configuration.

  Logs the model, dataset, batch sizing, devices and variable-update
  settings through the module-level log_fn helper.
  """
  benchmark_info = self._get_params_info()
  log_fn('Model: %s' % self.model.get_model_name())
  log_fn('Dataset: %s' % benchmark_info['dataset_name'])
  log_fn('Mode: %s' % self.mode)
  log_fn('SingleSess: %s' % benchmark_info['single_session'])
  log_fn('Batch size: %s global' % (self.batch_size * self.num_workers))
  log_fn(' %s per device' % (self.batch_size //
                             len(self.raw_devices)))
  if self.batch_group_size > 1:
    # Fixed typo in log message: 'prepocessing' -> 'preprocessing'.
    log_fn(' %d batches per preprocessing group' %
           self.batch_group_size)
  log_fn('Num batches: %d' % self.num_batches)
  log_fn('Num epochs: %.2f' % self.num_epochs)
  log_fn('Devices: %s' % benchmark_info['device_list'])
  log_fn('NUMA bind: %s' % self.params.use_numa_affinity)
  log_fn('Data format: %s' % self.params.data_format)
  if self.rewriter_config:
    log_fn('RewriterConfig: %s' % self.rewriter_config)
  log_fn('Optimizer: %s' % self.params.optimizer)
  log_fn('Variables: %s' % self.params.variable_update)
  if (self.params.variable_update == 'replicated' or
      self.params.variable_update == 'distributed_all_reduce'
      or self.params.variable_update == 'collective_all_reduce'):
    log_fn('AllReduce: %s' % self.params.all_reduce_spec)
  if self.job_name:
    log_fn('Sync: %s' % self.params.cross_replica_sync)
  if self.params.staged_vars:
    log_fn('Staged vars: %s' % self.params.staged_vars)
  if self.params.variable_update == 'horovod' and self.params.horovod_device:
    log_fn('Horovod on: %s' % self.params.horovod_device)
  log_fn('==========')
示例3: create_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """Build the tf.ConfigProto to use for TensorFlow sessions.

  Args:
    log_device_placement: If True, log the device every op is placed on.
    enable_graph_rewriter: If True (ignored on TPU), enable Grappler's
      layout optimizer.
    gpu_mem_fraction: Fraction of GPU memory the process may claim.
    use_tpu: If True, use default graph options with no rewriter tweaks.
    xla_jit_level: Global XLA JIT level for the optimizer options.
    inter_op_parallelism_threads: Thread pool size for independent ops;
      0 lets TensorFlow choose.
    intra_op_parallelism_threads: Thread pool size within a single op;
      0 lets TensorFlow choose.

  Returns:
    A tf.ConfigProto with per-session state isolation enabled.
  """
  if use_tpu:
    graph_opts = tf.GraphOptions()
  elif enable_graph_rewriter:
    rewriter_opts = rewriter_config_pb2.RewriterConfig()
    rewriter_opts.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
    graph_opts = tf.GraphOptions(rewrite_options=rewriter_opts)
  else:
    # Keep L1 optimizations, disable function inlining, and apply the
    # requested XLA JIT level.
    optimizer_opts = tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L1,
        do_function_inlining=False,
        global_jit_level=xla_jit_level)
    graph_opts = tf.GraphOptions(optimizer_options=optimizer_opts)
  gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
  return tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_opts,
      gpu_options=gpu_opts,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads,
      isolate_session_state=True)
示例4: auto_parallel
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def auto_parallel(metagraph, model):
  """Rewrite `metagraph` in place using Grappler's AutoParallel optimizer.

  Replicates the graph across FLAGS.num_gpus replicas, then refreshes the
  metagraph's collections via UpdateCollection.
  """
  from tensorflow.python.grappler import tf_optimizer
  rewriter = rewriter_config_pb2.RewriterConfig()
  rewriter.optimizers.append("autoparallel")
  rewriter.auto_parallel.enable = True
  rewriter.auto_parallel.num_replicas = FLAGS.num_gpus
  optimized = tf_optimizer.OptimizeGraph(rewriter, metagraph)
  metagraph.graph_def.CopyFrom(optimized)
  UpdateCollection(metagraph, model)
示例5: __init__
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def __init__(self, iterations):
  """Set up graphs, session config and TPU system initialization.

  Args:
    iterations: Number of steps to run per TPU training loop.
  """
  tf.logging.info("TrainLowLevelRunner: constructor")
  self.feature_structure = {}
  self.loss = None
  self.infeed_queue = []
  self.enqueue_ops = []
  self.dataset_initializer = []
  self.iterations = iterations
  self.num_hosts = FLAGS.num_shards // FLAGS.num_shards_per_host
  self.scaffold_fn = None
  # Input and train pipelines get separate sessions and graphs so that
  # initialization is faster.
  self.input_sess = None
  self.train_sess = None
  self.input_graph = tf.Graph()
  self.train_graph = None
  self.tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  # Grappler's meta-optimizer is disabled for better performance.
  self.session_config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=tf.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              disable_meta_optimizer=True)),
      isolate_session_state=True)
  spec = self.tpu_cluster_resolver.cluster_spec()
  if spec:
    self.session_config.cluster_def.CopyFrom(spec.as_cluster_def())
  self.tpu_init = [tpu.initialize_system()]
  self.tpu_shutdown = tpu.shutdown_system()
  self.init_sess = tf.Session(
      self.tpu_cluster_resolver.get_master(), config=self.session_config)
  self.init_sess.run(self.tpu_init)
  self.queue = Queue.Queue()
示例6: __init__
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def __init__(self, iterations, train_steps):
  """Set up graphs, session config and TPU system initialization.

  Args:
    iterations: Number of steps to run per TPU training loop.
    train_steps: Total number of training steps; rounded up to a whole
      multiple of `iterations` when necessary.
  """
  tf.logging.info("TrainRunner: constructor")
  self.feature_structure = {}
  self.loss = None
  self.infeed_queue = []
  self.enqueue_ops = []
  self.dataset_initializer = []
  self.iterations = iterations
  self.sess = None
  self.input_sess = None
  self.infeed_thread = None
  if train_steps % iterations != 0:
    # NOTE(review): under Python 2 integer semantics `train_steps /
    # iterations` truncates before math.ceil is applied, which would round
    # down to the previous multiple rather than up -- confirm the intended
    # Python version.
    train_steps = iterations * int(math.ceil(train_steps / iterations))
  self.train_steps = train_steps
  self.input_graph = tf.Graph()
  tpu_init = [tpu.initialize_system()]
  self.tpu_shutdown = tpu.shutdown_system()
  self.cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  # Grappler's meta-optimizer is disabled; sessions keep isolated state.
  self.config = tf.ConfigProto(
      operation_timeout_in_ms=600 * 60 * 1000,
      graph_options=tf.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              disable_meta_optimizer=True)),
      isolate_session_state=True)
  spec = self.cluster_resolver.cluster_spec()
  if spec:
    self.config.cluster_def.CopyFrom(spec.as_cluster_def())
  self.init_sess = tf.Session(
      self.cluster_resolver.get_master(), config=self.config)
  self.init_sess.run(tpu_init)
示例7: create_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def create_session_config(log_device_placement=False,
                          enable_graph_rewriter=False,
                          gpu_mem_fraction=0.95,
                          use_tpu=False,
                          xla_jit_level=tf.OptimizerOptions.OFF,
                          inter_op_parallelism_threads=0,
                          intra_op_parallelism_threads=0):
  """Build the tf.ConfigProto to use for TensorFlow sessions.

  Args:
    log_device_placement: If True, log the device every op is placed on.
    enable_graph_rewriter: If True (ignored on TPU), enable Grappler's
      layout optimizer.
    gpu_mem_fraction: Fraction of GPU memory the process may claim.
    use_tpu: If True, use default graph options with no rewriter tweaks.
    xla_jit_level: Global XLA JIT level for the optimizer options.
    inter_op_parallelism_threads: Thread pool size for independent ops;
      0 lets TensorFlow choose.
    intra_op_parallelism_threads: Thread pool size within a single op;
      0 lets TensorFlow choose.

  Returns:
    A tf.ConfigProto instance.
  """
  if use_tpu:
    graph_opts = tf.GraphOptions()
  elif enable_graph_rewriter:
    rewriter_opts = rewriter_config_pb2.RewriterConfig()
    rewriter_opts.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
    graph_opts = tf.GraphOptions(rewrite_options=rewriter_opts)
  else:
    # Keep L1 optimizations, disable function inlining, and apply the
    # requested XLA JIT level.
    optimizer_opts = tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L1,
        do_function_inlining=False,
        global_jit_level=xla_jit_level)
    graph_opts = tf.GraphOptions(optimizer_options=optimizer_opts)
  gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
  return tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_opts,
      gpu_options=gpu_opts,
      log_device_placement=log_device_placement,
      inter_op_parallelism_threads=inter_op_parallelism_threads,
      intra_op_parallelism_threads=intra_op_parallelism_threads)
示例8: build_graph_options
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def build_graph_options(cls, disable_optimizations):
  """Return tf.GraphOptions, optionally with graph optimizations disabled.

  Args:
    disable_optimizations: If True, turn off common-subexpression
      elimination, constant folding, function inlining, and Grappler's
      arithmetic optimization.

  Returns:
    A tf.GraphOptions instance.
  """
  if disable_optimizations:
    optimizer_opts = tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L0,
        do_common_subexpression_elimination=False,
        do_constant_folding=False,
        do_function_inlining=False,
    )
    rewriter_opts = rewriter_config_pb2.RewriterConfig(
        arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF
    )
    return tf.GraphOptions(
        optimizer_options=optimizer_opts, rewrite_options=rewriter_opts)
  return tf.GraphOptions()
示例9: create_config_proto
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def create_config_proto(params):
  """Returns session config proto.

  Args:
    params: Params tuple, typically created by make_params or
      make_params_from_flags.
  """
  config = tf.ConfigProto(
      allow_soft_placement=True,
      intra_op_parallelism_threads=params.num_intra_threads,
      inter_op_parallelism_threads=params.num_inter_threads)
  config.gpu_options.force_gpu_compatible = params.force_gpu_compatible
  if params.gpu_memory_frac_for_testing > 0:
    config.gpu_options.per_process_gpu_memory_fraction = (
        params.gpu_memory_frac_for_testing)
  if params.xla:
    config.graph_options.optimizer_options.global_jit_level = (
        tf.OptimizerOptions.ON_1)
  if params.enable_layout_optimizer:
    config.graph_options.rewrite_options.layout_optimizer = (
        rewriter_config_pb2.RewriterConfig.ON)
  if params.rewriter_config:
    # A textual RewriterConfig override replaces everything set above.
    override = rewriter_config_pb2.RewriterConfig()
    text_format.Merge(params.rewriter_config, override)
    config.graph_options.rewrite_options.CopyFrom(override)
  if params.variable_update == 'horovod':
    import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
    config.gpu_options.visible_device_list = str(hvd.local_rank())
  return config
示例10: _assert_equal_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def _assert_equal_session_config(self, session_config,
                                 expected_device_filters):
  """Asserts `session_config` equals the expected default config.

  The expected config has soft placement enabled, a single
  meta-optimizer iteration, and the given device filters.
  """
  expected = config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              meta_optimizer_iterations=(
                  rewriter_config_pb2.RewriterConfig.ONE))),
      device_filters=expected_device_filters)
  self.assertEqual(session_config, expected)
示例11: get_default_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def get_default_session_config():
  """Returns tf.ConfigProto instance.

  Soft placement is enabled and the Grappler meta-optimizer is limited
  to a single iteration.
  """
  return config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(
          rewrite_options=rewriter_config_pb2.RewriterConfig(
              meta_optimizer_iterations=(
                  rewriter_config_pb2.RewriterConfig.ONE))))
示例12: _get_default_session_config_distributed
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def _get_default_session_config_distributed(self):
  """Returns None or tf.ConfigProto instance with default device_filters set.

  Device filters are set such that chief/master and worker communicate
  with only ps. session_config=None for evaluators or any other TaskType.
  """
  task_type = self._task_type
  if task_type == TaskType.MASTER:
    device_filters = ['/job:ps', '/job:master']
  elif task_type == TaskType.CHIEF:
    device_filters = ['/job:ps', '/job:chief']
  elif task_type == TaskType.WORKER:
    device_filters = ['/job:ps', '/job:worker/task:%d' % self._task_id]
  elif task_type == TaskType.PS:
    device_filters = ['/job:ps', '/job:worker', '/job:chief', '/job:master']
  else:
    # EVALUATOR or any unrecognized task type: no device filters at all.
    return None
  rewrite_opts = rewriter_config_pb2.RewriterConfig(
      meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
  return config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(rewrite_options=rewrite_opts),
      device_filters=device_filters)
示例13: no_rewrite_session_config
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def no_rewrite_session_config():
  """Returns a ConfigProto with Grappler's model pruning disabled."""
  graph_opts = config_pb2.GraphOptions(
      rewrite_options=rewriter_config_pb2.RewriterConfig(
          disable_model_pruning=True))
  return config_pb2.ConfigProto(graph_options=graph_opts)
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:7,代码来源:session_debug_testlib.py
示例14: export
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def export(model_params, checkpoint_file, config=None):
  """Freeze a trained model into a GraphDef file plus a JSON params file.

  Builds the network with batch size 1, restores weights from
  `checkpoint_file`, converts variables to constants, and writes
  `<checkpoint_file>.graph.pb` and `<checkpoint_file>.graph.pb.json`.

  Args:
    model_params: Model parameter object (im_size, base_model,
      use_visual_modulator, ...); its `output_names` attribute is set as a
      side effect.
    checkpoint_file: Path prefix of the checkpoint to restore.
    config: Unused; a fresh session config is built internally.
  """
  # Input placeholders; batch size is fixed to 1 for export.
  batch_size = 1
  im_size = model_params.im_size
  guide_image = tf.placeholder(tf.float32, [batch_size, 224, 224, 3])
  gb_image = tf.placeholder(tf.float32, [batch_size, im_size[1], im_size[0], 1])
  input_image = tf.placeholder(tf.float32, [batch_size, im_size[1], im_size[0], 3])
  # Create model.
  model_func = get_model_func(model_params.base_model)
  # Split the model into the visual modulator and the rest; the visual
  # modulator only needs to run once per guide image.
  if model_params.use_visual_modulator:
    if model_params.base_model == 'lite':
      v_m_params = visual_modulator_lite(guide_image, model_params, is_training=False)
    else:
      v_m_params = visual_modulator(guide_image, model_params, is_training=False)
  else:
    v_m_params = None
  net, end_points = model_func([guide_image, gb_image, input_image], model_params, visual_modulator_params=v_m_params, is_training=False)
  probabilities = tf.nn.sigmoid(net, name='prob')
  global_step = tf.Variable(0, name='global_step', trainable=False)
  rewrite_options = rewriter_config_pb2.RewriterConfig()
  rewrite_options.optimizers.append('pruning')
  rewrite_options.optimizers.append('constfold')
  rewrite_options.optimizers.append('layout')
  graph_options = tf.GraphOptions(
      rewrite_options=rewrite_options, infer_shapes=True)
  config = tf.ConfigProto(
      graph_options=graph_options,
      allow_soft_placement=True,
  )
  output_names = ['prob']
  # Bug fix: v_m_params is None when use_visual_modulator is False; the
  # original iterated it unconditionally and would raise TypeError.
  if v_m_params:
    for i, v_m_param in enumerate(v_m_params):
      visual_mod_name = 'visual_mod_params_%d' % (i + 1)
      tf.identity(v_m_param, name=visual_mod_name)
      output_names.append(visual_mod_name)
  # Create a saver to load the network.
  saver = tf.train.Saver([v for v in tf.global_variables()])
  save_name = checkpoint_file + '.graph.pb'
  with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, checkpoint_file)
    if not model_params.base_model == 'lite':
      sess.run(interp_surgery(tf.global_variables()))
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        sess.graph_def,
        output_names)
    with open(save_name, 'wb') as writer:
      writer.write(output_graph_def.SerializeToString())
  model_params.output_names = output_names
  with open(save_name + '.json', 'w') as writer:
    json.dump(vars(model_params), writer)
  # Concatenation keeps output identical on Python 2 and Python 3
  # (replaces a Python-2-only print statement).
  print('Model saved in ' + save_name)
示例15: print_info
# 需要导入模块: from tensorflow.core.protobuf import rewriter_config_pb2 [as 别名]
# 或者: from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig [as 别名]
def print_info(self):
  """Print basic information about the benchmark configuration.

  Logs the model, dataset, batch sizing, devices and variable-update
  settings through the module-level log_fn helper.
  """
  log_fn('Model: %s' % self.model.get_model())
  dataset_name = self.dataset.name
  if self.dataset.use_synthetic_gpu_images():
    dataset_name += ' (synthetic)'
  log_fn('Dataset: %s' % dataset_name)
  log_fn('Mode: %s' % get_mode_from_params(self.params))
  single_session = self.params.variable_update == 'distributed_all_reduce'
  log_fn('SingleSess: %s' % single_session)
  if single_session:
    device_list = self.raw_devices_across_tasks()
  elif self.params.variable_update == 'horovod':
    device_list = ['horovod/%s:%d' % (self.params.device, idx)
                   for idx in range(self.num_workers)]
  else:
    device_list = self.raw_devices
  log_fn('Batch size: %s global' % (self.batch_size * self.num_workers))
  # NOTE(review): '/' yields a float under Python 3; the sibling
  # print_info uses '//' -- confirm whether integer division is intended.
  log_fn(' %s per device' % (self.batch_size /
                             len(self.raw_devices)))
  if self.batch_group_size > 1:
    # Fixed typo in log message: 'prepocessing' -> 'preprocessing'.
    log_fn(' %d batches per preprocessing group' %
           self.batch_group_size)
  log_fn('Num batches: %d' % self.num_batches)
  log_fn('Num epochs: %.2f' % self.num_epochs)
  log_fn('Devices: %s' % device_list)
  log_fn('Data format: %s' % self.data_format)
  log_fn('Layout optimizer: %s' % self.enable_layout_optimizer)
  if self.rewriter_config:
    log_fn('RewriterConfig: %s' % self.rewriter_config)
  log_fn('Optimizer: %s' % self.params.optimizer)
  log_fn('Variables: %s' % self.params.variable_update)
  if (self.params.variable_update == 'replicated' or
      self.params.variable_update == 'distributed_all_reduce'):
    log_fn('AllReduce: %s' % self.params.all_reduce_spec)
  if self.job_name:
    log_fn('Sync: %s' % self.params.cross_replica_sync)
  if self.params.staged_vars:
    log_fn('Staged vars: %s' % self.params.staged_vars)
  if self.params.variable_update == 'horovod' and self.params.horovod_device:
    log_fn('Horovod on: %s' % self.params.horovod_device)
  if self.model.get_model() in model_config.model_titles:
    print("__exp.model_title__=\"%s\"" % (model_config.model_titles[self.model.get_model()]))
  log_fn('==========')