This page collects typical usage examples of the Python method tensorflow.contrib.training.python.training.hparam.HParams. If you are unsure what hparam.HParams does or how to use it, the curated code samples below may help. You can also explore other members of the module tensorflow.contrib.training.python.training.hparam.
Six code examples of hparam.HParams are shown below, sorted by popularity by default.
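All of the examples rely on the HParams container from tf.contrib.training, which exists only in TensorFlow 1.x. As a quick orientation, here is a minimal, self-contained sketch of the API calls that recur below; the hyperparameter names are illustrative:

from tensorflow.contrib.training.python.training import hparam

# Create an HParams object with some initial hyperparameters.
params = hparam.HParams(learning_rate=0.001, batch_size=32)
params.add_hparam('num_layers', 4)      # register a new hyperparameter
params.set_hparam('batch_size', 64)     # overwrite an existing one
params.parse('learning_rate=0.01')      # override from a CLI-style string
print(params.learning_rate)             # attribute-style access -> 0.01
print(params.values())                  # dict view of all hyperparameters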
Example 1: _add_item_to_params
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _add_item_to_params(params, key, value):
    """Adds a new item into `params`."""
    if isinstance(params, hparam.HParams):
        # For HParams, we need to use special API.
        if key in params:
            params.set_hparam(key, value)
        else:
            params.add_hparam(key, value)
    else:
        # Now params is Python dict.
        params[key] = value
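An illustrative call pattern (not part of the original source) showing that the helper behaves the same way for an HParams object and for a plain dict:

hp = hparam.HParams(batch_size=32)
_add_item_to_params(hp, 'batch_size', 64)   # existing key -> set_hparam
_add_item_to_params(hp, 'dropout', 0.1)     # new key -> add_hparam

d = {'batch_size': 32}
_add_item_to_params(d, 'dropout', 0.1)      # plain dict -> item assignment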
Example 2: __init__
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def __init__(self, num_experts, args):
    """Creates the computation graph of the MoE network and loads
    the checkpoint file. Following fields are fetched from ``args``

        moe_config: Comma-separated <key>=<value> pairs specifying
            the MoE network. See the command line arguments of
            sgnmt_moe for a full description. Available keys:
            vocab_size, embed_size, activation, hidden_layer_size,
            preprocessing.
        moe_checkpoint_dir (string): Checkpoint directory
        n_cpu_threads (int): Number of CPU threads for TensorFlow

    Args:
        num_experts (int): Number of predictors under the MoE model
        args (object): SGNMT configuration object
    """
    super(MoEInterpolationStrategy, self).__init__()
    config = dict(el.split("=", 1) for el in args.moe_config.split(";"))
    self._create_hparams(num_experts, config)
    self.model = MOEModel(self.params)
    logging.info("MoE HParams: %s" % self.params)
    moe_graph = tf.Graph()
    with moe_graph.as_default() as g:
        self.model.initialize()
        self.sess = tf_utils.create_session(args.moe_checkpoint_dir,
                                            args.n_cpu_threads)
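The moe_config string is parsed into a plain dict before being turned into HParams (see the next example). A small sketch of that parsing with a hypothetical configuration value; note that the code splits the pairs on ';':

moe_config = "vocab_size=30003;embed_size=512;activation=relu"  # hypothetical value
config = dict(el.split("=", 1) for el in moe_config.split(";"))
# config == {'vocab_size': '30003', 'embed_size': '512', 'activation': 'relu'}
# All values stay strings at this point; _create_hparams() casts them as needed.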
Example 3: _create_hparams
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _create_hparams(self, num_experts, config):
    """Creates self.params."""
    self.params = hparam.HParams(
        vocab_size=int(config.get("vocab_size", "30003")),
        learning_rate=0.001,  # Not used
        batch_size=1,
        num_experts=num_experts,
        embed_filename="",
        embed_size=int(config.get("embed_size", "512")),
        activation=config.get("activation", "relu"),
        loss_strategy="rank",  # Not used
        hidden_layer_size=int(config.get("hidden_layer_size", "64")),
        preprocessing=config.get("preprocessing", "")
    )
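A hedged sketch of what the constructed object exposes; the config dict below is hypothetical, and any key missing from it falls back to the defaults used above:

config = {"vocab_size": "32000", "activation": "tanh"}    # hypothetical user config
params = hparam.HParams(
    vocab_size=int(config.get("vocab_size", "30003")),    # 32000, taken from config
    embed_size=int(config.get("embed_size", "512")),      # 512, the default
    activation=config.get("activation", "relu"),          # 'tanh', taken from config
)
print(params.vocab_size)   # -> 32000
print(params.to_json())    # JSON string with all hyperparameters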
Example 4: train_and_evaluate
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def train_and_evaluate(hparams):
    """Helper function: Trains and evaluates the model.

    Args:
        hparams: (hparam.HParams) Command line parameters passed from task.py
    """
    # Load data.
    (train_data, train_labels), (test_data, test_labels) = \
        utils.preprocess(train_data_file=hparams.train_file,
                         word_index_file=hparams.word_index_file,
                         num_words=model.TOP_K)
    # Training steps.
    train_steps = hparams.num_epochs * len(train_data) / hparams.batch_size
    # Change numpy array shape.
    train_labels = np.asarray(train_labels).astype('int').reshape((-1, 1))
    # Create TrainSpec.
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: model.input_fn(
            train_data,
            train_labels,
            hparams.batch_size,
            mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=train_steps)
    # Create exporter information.
    exporter = tf.estimator.LatestExporter('exporter', model.serving_input_fn)
    # Change numpy array shape.
    test_labels = np.asarray(test_labels).astype('int').reshape((-1, 1))
    # Create EvalSpec.
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: model.input_fn(
            test_data,
            test_labels,
            hparams.batch_size,
            mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        exporters=exporter,
        start_delay_secs=10,
        throttle_secs=10)
    # Generate configuration.
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
    # Create estimator.
    estimator = model.keras_estimator(model_dir=hparams.job_dir,
                                      config=run_config,
                                      learning_rate=hparams.learning_rate,
                                      vocab_size=model.VOCAB_SIZE)
    # Start training.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
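The docstring says the hparams object comes from a task.py entry point. A hedged sketch of that wiring, where command-line flags are converted into HParams and handed to train_and_evaluate(); the flag defaults are hypothetical:

import argparse
from tensorflow.contrib.training.python.training import hparam

parser = argparse.ArgumentParser()
parser.add_argument('--train-file', dest='train_file', default='train.npz')
parser.add_argument('--word-index-file', dest='word_index_file', default='word_index.json')
parser.add_argument('--job-dir', dest='job_dir', default='output/')
parser.add_argument('--num-epochs', dest='num_epochs', type=int, default=5)
parser.add_argument('--batch-size', dest='batch_size', type=int, default=128)
parser.add_argument('--learning-rate', dest='learning_rate', type=float, default=0.001)
args = parser.parse_args()

# Turn the parsed namespace into an HParams object and start training.
hparams = hparam.HParams(**args.__dict__)
train_and_evaluate(hparams)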
Example 5: _wrapped_experiment_fn_with_uid_check
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _wrapped_experiment_fn_with_uid_check(experiment_fn, require_hparams=False):
    """Wraps the `RunConfig` uid check with `experiment_fn`.

    For `experiment_fn` which takes `run_config`, it is expected that the
    `run_config` is passed to the Estimator correctly. Toward that, the wrapped
    `experiment_fn` compares the `uid` of the `RunConfig` instance.

    Args:
        experiment_fn: The original `experiment_fn` which takes `run_config` and
            `hparams`.
        require_hparams: If True, the `hparams` passed to `experiment_fn` cannot
            be `None`.

    Returns:
        An experiment_fn with the same signature.
    """
    def wrapped_experiment_fn(run_config, hparams):
        """Calls experiment_fn and checks the uid of `RunConfig`."""
        if not isinstance(run_config, run_config_lib.RunConfig):
            raise ValueError('`run_config` must be `RunConfig` instance')
        if not run_config.model_dir:
            raise ValueError(
                'Must specify a model directory `model_dir` in `run_config`.')
        if hparams is not None and not isinstance(hparams, hparam_lib.HParams):
            raise ValueError('`hparams` must be `HParams` instance')
        if require_hparams and hparams is None:
            raise ValueError('`hparams` cannot be `None`.')
        expected_uid = run_config.uid()
        experiment = experiment_fn(run_config, hparams)
        if not isinstance(experiment, Experiment):
            raise TypeError('Experiment builder did not return an Experiment '
                            'instance, got %s instead.' % type(experiment))
        if experiment.estimator.config.uid() != expected_uid:
            raise RuntimeError(
                '`RunConfig` instance is expected to be used by the `Estimator` '
                'inside the `Experiment`. expected {}, but got {}'.format(
                    expected_uid, experiment.estimator.config.uid()))
        return experiment
    return wrapped_experiment_fn
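A hedged usage sketch. `run_config_lib`, `hparam_lib`, and `Experiment` are the module-level aliases assumed by the wrapper above, while `build_estimator` and the input functions are hypothetical helpers:

def my_experiment_fn(run_config, hparams):
    # Build the Estimator from the *same* run_config so that the uid check
    # in the wrapper passes; `build_estimator` is a hypothetical helper.
    estimator = build_estimator(config=run_config, hparams=hparams)
    return Experiment(estimator,
                      train_input_fn=my_train_input_fn,
                      eval_input_fn=my_eval_input_fn)

checked_fn = _wrapped_experiment_fn_with_uid_check(my_experiment_fn,
                                                   require_hparams=True)
experiment = checked_fn(run_config_lib.RunConfig(model_dir='output/'),
                        hparam_lib.HParams(learning_rate=0.01))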
Example 6: generate_experiment_fn
# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def generate_experiment_fn(**experiment_args):
    """Create an experiment function.

    See command line help text for description of args.

    Args:
        experiment_args: keyword arguments to be passed through to experiment
            See `tf.contrib.learn.Experiment` for full args.

    Returns:
        A function:
            (tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment

        This function is used by learn_runner to create an Experiment which
        executes model code provided in the form of an Estimator and
        input functions.
    """
    def _experiment_fn(run_config, hparams):
        # num_epochs can control duration if train_steps isn't
        # passed to Experiment.
        train_input = lambda: model.generate_input_fn(
            [os.path.join(os.environ['PIPELINE_INPUT_PATH'], train_file)
             for train_file in hparams.train_files],
            num_epochs=hparams.num_epochs,
            batch_size=hparams.train_batch_size,
        )
        # Don't shuffle evaluation data.
        eval_input = lambda: model.generate_input_fn(
            [os.path.join(os.environ['PIPELINE_INPUT_PATH'], eval_file)
             for eval_file in hparams.eval_files],
            batch_size=hparams.eval_batch_size,
            shuffle=False
        )
        return tf.contrib.learn.Experiment(
            tf.estimator.Estimator(
                model.generate_model_fn(
                    embedding_size=hparams.embedding_size,
                    # Construct layer sizes with exponential decay.
                    hidden_units=[
                        max(2, int(hparams.first_layer_size *
                                   hparams.scale_factor**i))
                        for i in range(hparams.num_layers)
                    ],
                    learning_rate=hparams.learning_rate
                ),
                config=run_config
            ),
            train_input_fn=train_input,
            eval_input_fn=eval_input,
            # export_fn
            # checkpoint_and_export=True,
            **experiment_args
        )
    return _experiment_fn
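A hedged sketch of how such a generated experiment function is typically handed to learn_runner together with an HParams object. The hyperparameter values, the model_dir, and the Experiment arguments below are hypothetical, and the input functions above expect PIPELINE_INPUT_PATH to be set in the environment:

from tensorflow.contrib.learn.python.learn import learn_runner

hparams = hparam.HParams(            # hypothetical hyperparameter values
    train_files=['train.tfrecord'],
    eval_files=['eval.tfrecord'],
    num_epochs=5,
    train_batch_size=64,
    eval_batch_size=64,
    embedding_size=8,
    first_layer_size=100,
    scale_factor=0.7,
    num_layers=4,
    learning_rate=0.003,
)
learn_runner.run(
    generate_experiment_fn(train_steps=1000, eval_steps=100),
    run_config=tf.contrib.learn.RunConfig(model_dir='output/'),  # hypothetical model_dir
    hparams=hparams,
)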