This article surveys typical usage of the Python method tensorflow.python.estimator.run_config.RunConfig. If you have been wondering what run_config.RunConfig does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore the enclosing module, tensorflow.python.estimator.run_config, for more on how the method is used.
The following presents 14 code examples of run_config.RunConfig, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code examples.
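Before the examples, here is the pattern nearly all of them share: build a tf.ConfigProto, wrap it in a RunConfig via session_config, and hand the RunConfig to an Estimator. The sketch below is an illustration of that pattern under TF 1.x APIs, not code from any of the listed projects; my_model_fn is a hypothetical placeholder.

import tensorflow as tf
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.run_config import RunConfig

def my_model_fn(features, labels, mode, params):
    # Trivial pass-through "model", just enough to make the sketch self-contained.
    return EstimatorSpec(mode=mode, predictions={'echo': features['x']})

session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True  # allocate GPU memory on demand

estimator = Estimator(model_fn=my_model_fn,
                      config=RunConfig(session_config=session_config))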
Example 1: _get_model_dir
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _get_model_dir(model_dir):
    """Returns `model_dir` based on the user-provided `model_dir` or `TF_CONFIG`."""
    model_dir_in_tf_config = json.loads(
        os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)
    if model_dir_in_tf_config is not None:
        if model_dir is not None and model_dir_in_tf_config != model_dir:
            raise ValueError(
                '`model_dir` provided in RunConfig construct, if set, '
                'must have the same value as the model_dir in TF_CONFIG. '
                'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
                    model_dir, model_dir_in_tf_config))

        logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)

    return model_dir or model_dir_in_tf_config
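A hypothetical usage sketch for _get_model_dir (not part of the original source): when TF_CONFIG carries a model_dir and no explicit one is given, the value from the environment wins; conflicting values raise ValueError.

import json
import os

os.environ['TF_CONFIG'] = json.dumps({'model_dir': '/tmp/from_tf_config'})
print(_get_model_dir(None))                   # -> '/tmp/from_tf_config'
print(_get_model_dir('/tmp/from_tf_config'))  # matching values are fine
# _get_model_dir('/tmp/other') would raise ValueError: the two values disagree.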
Example 2: __init__
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def __init__(self, id, args, worker_address, sink_address):
    super().__init__()
    self.model_dir = args.model_dir
    self.config_fp = os.path.join(self.model_dir, 'bert_config.json')
    self.checkpoint_fp = os.path.join(self.model_dir, 'bert_model.ckpt')
    self.vocab_fp = os.path.join(args.model_dir, 'vocab.txt')
    self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_fp)
    self.max_seq_len = args.max_seq_len
    self.worker_id = id
    self.daemon = True
    self.model_fn = model_fn_builder(
        bert_config=modeling.BertConfig.from_json_file(self.config_fp),
        init_checkpoint=self.checkpoint_fp,
        pooling_strategy=args.pooling_strategy,
        pooling_layer=args.pooling_layer
    )
    os.environ['CUDA_VISIBLE_DEVICES'] = str(self.worker_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
    self.estimator = Estimator(self.model_fn, config=RunConfig(session_config=config))
    self.exit_flag = multiprocessing.Event()
    self.logger = set_logger('WORKER-%d' % self.worker_id)
    self.worker_address = worker_address
    self.sink_address = sink_address
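A note on the constructor above: setting CUDA_VISIBLE_DEVICES to the worker id before the first session is created pins each worker process to its own GPU, while allow_growth and per_process_gpu_memory_fraction keep the worker from claiming the whole card. A minimal sketch of the same idea, with the device id and memory fraction as illustrative assumptions:

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # this process only sees GPU 1

import tensorflow as tf  # import after pinning, before any session exists

config = tf.ConfigProto()
config.gpu_options.allow_growth = True                     # allocate on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.5   # cap at half the GPU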
Example 3: _get_replica_device_setter
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _get_replica_device_setter(config):
    """Creates a replica device setter if required, as a default device_fn.

    `Estimator` uses ReplicaDeviceSetter as the default device placer. It sets
    distribution-related arguments, such as the number of ps_replicas, based on
    the given config.

    Args:
        config: A `RunConfig` instance.

    Returns:
        A replica device setter, or None.
    """
    ps_ops = [
        'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
        'MutableHashTableOfTensors', 'MutableDenseHashTable'
    ]

    if config.task_type:
        worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
    else:
        worker_device = '/job:worker'

    if config.num_ps_replicas > 0:
        return training.replica_device_setter(
            ps_tasks=config.num_ps_replicas,
            worker_device=worker_device,
            merge_devices=True,
            ps_ops=ps_ops,
            cluster=config.cluster_spec)
    else:
        return None
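A hedged usage sketch, not from the original source (TF_CONFIG validation details vary across TF versions): RunConfig parses the cluster from the TF_CONFIG environment variable, and the helper above turns it into a device function that pins variable ops onto the ps job.

import json
import os
import tensorflow as tf
from tensorflow.python.estimator import run_config

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'chief': ['host0:2222'], 'worker': ['host1:2222'], 'ps': ['host2:2222']},
    'task': {'type': 'chief', 'index': 0},
})
config = run_config.RunConfig()                 # reads TF_CONFIG automatically
device_fn = _get_replica_device_setter(config)  # num_ps_replicas == 1, so not None
with tf.device(device_fn):
    v = tf.Variable(0.0)                        # variable op lands on /job:ps/task:0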
Example 4: uid
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def uid(self, whitelist=None):
    """Generates a 'Unique Identifier' based on all internal fields.

    Callers should use the uid string to check `RunConfig` instance integrity
    within one session, but should not rely on the implementation details,
    which are subject to change.

    Args:
        whitelist: A list of the string names of the properties uid should not
            include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
            includes most properties a user is allowed to change.

    Returns:
        A uid string.
    """
    if whitelist is None:
        whitelist = _DEFAULT_UID_WHITE_LIST

    state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
    # Pop out the keys in the whitelist.
    for k in whitelist:
        state.pop('_' + k, None)

    ordered_state = collections.OrderedDict(
        sorted(state.items(), key=lambda t: t[0]))
    # Class instances without __repr__ need special care; otherwise the
    # object address would be used.
    if '_cluster_spec' in ordered_state:
        ordered_state['_cluster_spec'] = ordered_state['_cluster_spec'].as_dict()
    return ', '.join(
        '%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
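An illustrative sketch of how uid() behaves (not from the original source):

from tensorflow.python.estimator import run_config

config = run_config.RunConfig(tf_random_seed=42)
assert config.uid() == config.uid()  # deterministic while the config is unchanged
# Whitelisted properties are popped before serialization, so two configs that
# differ only in a whitelisted field map to the same uid string.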
Example 5: get_estimator
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def get_estimator(self, tf):
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig
    from tensorflow.python.estimator.model_fn import EstimatorSpec

    def model_fn(features, labels, mode, params):
        with tf.gfile.GFile(self.graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        input_names = ['input_ids', 'input_mask', 'input_type_ids']
        output = tf.import_graph_def(graph_def,
                                     input_map={k + ':0': features[k] for k in input_names},
                                     return_elements=['final_encodes:0'])
        return EstimatorSpec(mode=mode, predictions={
            'client_id': features['client_id'],
            'encodes': output[0]
        })

    config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
    config.log_device_placement = False
    # session-wise XLA doesn't seem to work on tf 1.10
    # if args.xla:
    #     config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config))
Example 6: get_estimator
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def get_estimator(self):
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig
    from tensorflow.python.estimator.model_fn import EstimatorSpec

    def model_fn(features, labels, mode, params):
        with tf.gfile.GFile(self.graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        input_names = ['input_ids', 'input_mask', 'input_type_ids']
        output = tf.import_graph_def(graph_def,
                                     input_map={k + ':0': features[k] for k in input_names},
                                     return_elements=['final_encodes:0'])
        return EstimatorSpec(mode=mode, predictions={
            'encodes': output[0]
        })

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
    config.log_device_placement = False
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                     params={'batch_size': self.batch_size}, model_dir='../tmp')
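A note on the global_jit_level line above: it enables session-wide XLA JIT compilation, which Example 5 deliberately leaves commented out because it reportedly misbehaved on tf 1.10. A minimal sketch of gating it behind a flag (the use_xla parameter is an illustrative assumption):

import tensorflow as tf

def make_session_config(use_xla=False):
    # Build a ConfigProto, optionally enabling session-wide XLA JIT (TF 1.x).
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    if use_xla:
        config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    return config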
Example 7: get_estimator
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def get_estimator(self):
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig

    bert_config = modeling.BertConfig.from_json_file(args.config_name)
    label_list = self.processor.get_labels()
    train_examples = self.processor.get_train_examples(args.data_dir)
    num_train_steps = int(
        len(train_examples) / self.batch_size * args.num_train_epochs)
    num_warmup_steps = int(num_train_steps * 0.1)

    if self.mode == tf.estimator.ModeKeys.TRAIN:
        init_checkpoint = args.ckpt_name
    else:
        init_checkpoint = args.output_dir

    model_fn = self.model_fn_builder(
        bert_config=bert_config,
        num_labels=len(label_list),
        init_checkpoint=init_checkpoint,
        learning_rate=args.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_one_hot_embeddings=False)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
    config.log_device_placement = False

    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                     model_dir=args.output_dir, params={'batch_size': self.batch_size})
Example 8: create_estimator_and_tokenizer
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def create_estimator_and_tokenizer(self):
    config_fp = os.path.join(self.model_dir, 'bert_config.json')
    checkpoint_fp = os.path.join(self.model_dir, 'bert_model.ckpt')
    vocab_fp = os.path.join(self.model_dir, 'vocab.txt')

    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_fp)

    model_fn = model_fn_builder(
        bert_config=modeling.BertConfig.from_json_file(config_fp),
        init_checkpoint=checkpoint_fp,
        pooling_strategy=PoolingStrategy.NONE,
        pooling_layer=[-2]
    )

    config = tf.ConfigProto(
        device_count={
            'CPU': cpu_count()
        },
        inter_op_parallelism_threads=0,
        intra_op_parallelism_threads=0,
        gpu_options={
            'allow_growth': True
        }
    )

    estimator = Estimator(model_fn, config=RunConfig(
        session_config=config), model_dir=None)

    return estimator, tokenizer
Example 9: get_estimator
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def get_estimator(self):
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig
    from tensorflow.python.estimator.model_fn import EstimatorSpec

    def model_fn(features, labels, mode, params):
        with tf.gfile.GFile(self.graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        input_names = ['input_ids', 'input_mask', 'input_type_ids']
        output = tf.import_graph_def(graph_def,
                                     input_map={k + ':0': features[k] for k in input_names},
                                     return_elements=['final_encodes:0'])
        return EstimatorSpec(mode=mode, predictions={
            'encodes': output[0]
        })

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
    config.log_device_placement = False
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                     params={'batch_size': self.batch_size})
Example 10: get_estimator
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def get_estimator(self):
    from tensorflow.python.estimator.estimator import Estimator
    from tensorflow.python.estimator.run_config import RunConfig

    bert_config = modeling.BertConfig.from_json_file(self.config_name)
    label_list = self.processor.get_labels()
    train_examples = self.processor.get_train_examples(self.data_dir)
    num_train_steps = int(
        len(train_examples) / self.batch_size * self.num_train_epochs)
    num_warmup_steps = int(num_train_steps * 0.1)

    if self.mode == tf.estimator.ModeKeys.TRAIN:
        init_checkpoint = self.ckpt_name
    else:
        init_checkpoint = self.output_dir

    model_fn = self.model_fn_builder(
        bert_config=bert_config,
        num_labels=len(label_list),
        init_checkpoint=init_checkpoint,
        learning_rate=self.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_one_hot_embeddings=False)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
    config.log_device_placement = False

    return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                     model_dir=self.output_dir, params={'batch_size': self.batch_size})
Example 11: _call_model_fn
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _call_model_fn(self, features, labels, mode, config):
    """Calls the model function.

    Args:
        features: features dict.
        labels: labels dict.
        mode: ModeKeys.
        config: RunConfig.

    Returns:
        An `EstimatorSpec` object.

    Raises:
        ValueError: if model_fn returns invalid objects.
    """
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}
    if 'labels' in model_fn_args:
        kwargs['labels'] = labels
    else:
        if labels is not None:
            raise ValueError(
                'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
        kwargs['mode'] = mode
    if 'params' in model_fn_args:
        kwargs['params'] = self.params
    if 'config' in model_fn_args:
        kwargs['config'] = config

    model_fn_results = self._model_fn(features=features, **kwargs)

    if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
        raise ValueError('model_fn should return an EstimatorSpec.')

    return model_fn_results
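An illustrative sketch of the dispatch logic above, using the stdlib inspect module in place of the estimator-internal util.fn_args (which behaves similarly for plain functions):

import inspect

def minimal_model_fn(features, mode):  # declares neither labels, params, nor config
    pass

model_fn_args = inspect.signature(minimal_model_fn).parameters
assert 'labels' not in model_fn_args  # labels must not be forwarded; an input_fn
                                      # that yields labels would raise ValueError
assert 'mode' in model_fn_args        # mode is forwarded as a keyword argument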
Example 12: _get_replica_device_setter
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _get_replica_device_setter(config):
    """Creates a replica device setter if required, as a default device_fn.

    `Estimator` uses ReplicaDeviceSetter as the default device placer. It sets
    distribution-related arguments, such as the number of ps_replicas, based on
    the given config.

    Args:
        config: A `RunConfig` instance.

    Returns:
        A replica device setter, or None.
    """
    ps_ops = [
        'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
        'MutableHashTableV2', 'MutableHashTableOfTensors',
        'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
        'MutableDenseHashTableV2'
    ]

    if config.task_type:
        worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
    else:
        worker_device = '/job:worker'

    if config.num_ps_replicas > 0:
        return training.replica_device_setter(
            ps_tasks=config.num_ps_replicas,
            worker_device=worker_device,
            merge_devices=True,
            ps_ops=ps_ops,
            cluster=config.cluster_spec)
    else:
        return None
Example 13: _call_model_fn
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _call_model_fn(self, features, labels, mode, config):
    """Calls the model function.

    Args:
        features: features dict.
        labels: labels dict.
        mode: ModeKeys.
        config: RunConfig.

    Returns:
        An `EstimatorSpec` object.

    Raises:
        ValueError: if model_fn returns invalid objects.
    """
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}
    if 'labels' in model_fn_args:
        kwargs['labels'] = labels
    else:
        if labels is not None:
            raise ValueError(
                'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
        kwargs['mode'] = mode
    if 'params' in model_fn_args:
        kwargs['params'] = self.params
    if 'config' in model_fn_args:
        kwargs['config'] = config

    model_fn_results = self._model_fn(features=features, **kwargs)

    if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
        raise ValueError('model_fn should return an EstimatorSpec.')

    return model_fn_results
Developer: PacktPublishing; Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda; Lines of code: 37; Source file: estimator.py
Example 14: _get_replica_device_setter
# Required import: from tensorflow.python.estimator import run_config [as alias]
# Or: from tensorflow.python.estimator.run_config import RunConfig [as alias]
def _get_replica_device_setter(config):
    """Creates a replica device setter if required, as a default device_fn.

    `Estimator` uses ReplicaDeviceSetter as the default device placer. It sets
    distribution-related arguments, such as the number of ps_replicas, based on
    the given config.

    Args:
        config: A `RunConfig` instance.

    Returns:
        A replica device setter, or None.
    """
    ps_ops = [
        'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
        'MutableHashTableV2', 'MutableHashTableOfTensors',
        'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
        'MutableDenseHashTableV2'
    ]

    if config.task_type:
        worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
    else:
        worker_device = '/job:worker'

    if config.num_ps_replicas > 0:
        return training.replica_device_setter(
            ps_tasks=config.num_ps_replicas,
            worker_device=worker_device,
            merge_devices=True,
            ps_ops=ps_ops,
            cluster=config.cluster_spec)
    else:
        return None
Developer: PacktPublishing; Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda; Lines of code: 36; Source file: estimator.py