

Python hparam.HParams Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.training.python.training.hparam.HParams. If you are wondering how hparam.HParams works, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.contrib.training.python.training.hparam.


A total of 6 code examples of the hparam.HParams method are shown below, sorted by popularity by default.
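Before the project-specific examples, here is a minimal sketch of the core HParams API that all of them rely on. The values are purely illustrative and not taken from any of the projects below.

from tensorflow.contrib.training.python.training import hparam

params = hparam.HParams(learning_rate=0.01, batch_size=32, activation="relu")

print(params.learning_rate)          # attribute-style access -> 0.01
params.set_hparam("batch_size", 64)  # update an existing hyperparameter
params.add_hparam("num_layers", 3)   # add a new hyperparameter
params.parse("learning_rate=0.001")  # override from a "name=value" string
print(params.values())               # plain dict view of all hyperparameters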

Example 1: _add_item_to_params

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _add_item_to_params(params, key, value):
  """Adds a new item into `params`."""
  if isinstance(params, hparam.HParams):
    # For HParams, we need to use its dedicated API.
    if key in params:
      params.set_hparam(key, value)
    else:
      params.add_hparam(key, value)
  else:
    # Now params is Python dict.
    params[key] = value 
Developer: ymcui, Project: Chinese-XLNet, Lines: 13, Source: tpu_estimator.py
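A brief, hypothetical call site (not from the original project) showing both branches of the helper above: an HParams instance goes through set_hparam/add_hparam, while a plain dict is updated directly.

from tensorflow.contrib.training.python.training import hparam

params = hparam.HParams(batch_size=32)
_add_item_to_params(params, "batch_size", 64)      # existing key -> set_hparam
_add_item_to_params(params, "use_tpu", True)       # new key -> add_hparam

dict_params = {"batch_size": 32}
_add_item_to_params(dict_params, "use_tpu", True)  # plain dict: direct assignment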

Example 2: __init__

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def __init__(self, num_experts, args):
    """Creates the computation graph of the MoE network and loads
    the checkpoint file. The following fields are fetched from ``args``:

        moe_config: Semicolon-separated <key>=<value> pairs specifying
          the MoE network. See the command line arguments of
          sgnmt_moe for a full description. Available keys:
          vocab_size, embed_size, activation, hidden_layer_size,
          preprocessing.
        moe_checkpoint_dir (string): Checkpoint directory
        n_cpu_threads (int): Number of CPU threads for TensorFlow

    Args:
        num_experts (int): Number of predictors under the MoE model
        args (object): SGNMT configuration object
    """
    super(MoEInterpolationStrategy, self).__init__()
    config = dict(el.split("=", 1) for el in args.moe_config.split(";"))
    self._create_hparams(num_experts, config)
    self.model = MOEModel(self.params)
    logging.info("MoE HParams: %s" % self.params)
    moe_graph = tf.Graph()
    with moe_graph.as_default() as g:
        self.model.initialize()
        self.sess = tf_utils.create_session(args.moe_checkpoint_dir,
                                            args.n_cpu_threads)
Developer: ucam-smt, Project: sgnmt, Lines: 28, Source: interpolation.py

Example 3: _create_hparams

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _create_hparams(self, num_experts, config):
    """Creates self.params."""
    self.params = hparam.HParams(
        vocab_size=int(config.get("vocab_size", "30003")),
        learning_rate=0.001,  # Not used
        batch_size=1,
        num_experts=num_experts,
        embed_filename="",
        embed_size=int(config.get("embed_size", "512")),
        activation=config.get("activation", "relu"),
        loss_strategy="rank",  # Not used
        hidden_layer_size=int(config.get("hidden_layer_size", "64")),
        preprocessing=config.get("preprocessing", "")
    )
Developer: ucam-smt, Project: sgnmt, Lines: 16, Source: interpolation.py
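The two methods above work together: the semicolon-separated moe_config string from Example 2 is parsed into a plain dict, and any keys it omits fall back to the defaults hard-coded in _create_hparams. The config string below is purely illustrative.

moe_config = "vocab_size=32000;embed_size=256;activation=tanh"
config = dict(el.split("=", 1) for el in moe_config.split(";"))
# config == {'vocab_size': '32000', 'embed_size': '256', 'activation': 'tanh'}
# Missing keys keep their defaults, e.g. hidden_layer_size stays 64 and
# preprocessing stays "".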

Example 4: train_and_evaluate

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def train_and_evaluate(hparams):
    """Helper function: trains and evaluates the model.

    Args:
        hparams: (hparam.HParams) Command line parameters passed from task.py
    """
    # Load data.
    (train_data, train_labels), (test_data, test_labels) = utils.preprocess(
        train_data_file=hparams.train_file,
        word_index_file=hparams.word_index_file,
        num_words=model.TOP_K)

    # Training steps (integer division keeps max_steps an int).
    train_steps = hparams.num_epochs * len(train_data) // hparams.batch_size
    # Change numpy array shape.
    train_labels = np.asarray(train_labels).astype('int').reshape((-1, 1))
    # Create TrainSpec.
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: model.input_fn(
            train_data,
            train_labels,
            hparams.batch_size,
            mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=train_steps)
    # Create exporter information.
    exporter = tf.estimator.LatestExporter('exporter', model.serving_input_fn)
    # Change numpy array shape.
    test_labels = np.asarray(test_labels).astype('int').reshape((-1, 1))
    # Create EvalSpec.
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: model.input_fn(
            test_data,
            test_labels,
            hparams.batch_size,
            mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        exporters=exporter,
        start_delay_secs=10,
        throttle_secs=10)

    # Generate configuration.
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
    # Create estimator.
    estimator = model.keras_estimator(model_dir=hparams.job_dir,
                                      config=run_config,
                                      learning_rate=hparams.learning_rate,
                                      vocab_size=model.VOCAB_SIZE)
    # Start training.
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines: 51, Source: task.py
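For context, here is a sketch of how the hparams object consumed above is typically built from command-line flags in task.py. The flag names and defaults are assumptions inferred from the fields used in train_and_evaluate, not necessarily the project's actual task.py.

import argparse

from tensorflow.contrib.training.python.training import hparam

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Hypothetical flags matching the hparams fields accessed above.
    parser.add_argument('--train-file', dest='train_file', required=True)
    parser.add_argument('--word-index-file', dest='word_index_file', required=True)
    parser.add_argument('--job-dir', dest='job_dir', required=True)
    parser.add_argument('--num-epochs', dest='num_epochs', type=int, default=10)
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=128)
    parser.add_argument('--learning-rate', dest='learning_rate', type=float,
                        default=0.001)
    args = parser.parse_args()

    # Wrap the parsed flags in an HParams object and hand it to the trainer.
    hparams = hparam.HParams(**args.__dict__)
    train_and_evaluate(hparams)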

Example 5: _wrapped_experiment_fn_with_uid_check

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def _wrapped_experiment_fn_with_uid_check(experiment_fn, require_hparams=False):
  """Wraps the `RunConfig` uid check with `experiment_fn`.

  For `experiment_fn` which takes `run_config`, it is expected that the
  `run_config` is passed to the Estimator correctly. Toward that, the wrapped
  `experiment_fn` compares the `uid` of the `RunConfig` instance.

  Args:
    experiment_fn: The original `experiment_fn` which takes `run_config` and
      `hparams`.
    require_hparams: If True, the `hparams` passed to `experiment_fn` cannot be
      `None`.

  Returns:
    An experiment_fn with the same signature.
  """
  def wrapped_experiment_fn(run_config, hparams):
    """Calls experiment_fn and checks the uid of `RunConfig`."""
    if not isinstance(run_config, run_config_lib.RunConfig):
      raise ValueError('`run_config` must be `RunConfig` instance')
    if not run_config.model_dir:
      raise ValueError(
          'Must specify a model directory `model_dir` in `run_config`.')
    if hparams is not None and not isinstance(hparams, hparam_lib.HParams):
      raise ValueError('`hparams` must be `HParams` instance')
    if require_hparams and hparams is None:
      raise ValueError('`hparams` cannot be `None`.')

    expected_uid = run_config.uid()
    experiment = experiment_fn(run_config, hparams)

    if not isinstance(experiment, Experiment):
      raise TypeError('Experiment builder did not return an Experiment '
                      'instance, got %s instead.' % type(experiment))

    if experiment.estimator.config.uid() != expected_uid:
      raise RuntimeError(
          '`RunConfig` instance is expected to be used by the `Estimator` '
          'inside the `Experiment`. expected {}, but got {}'.format(
              expected_uid, experiment.estimator.config.uid()))
    return experiment
  return wrapped_experiment_fn 
Developer: ryfeus, Project: lambda-packs, Lines: 44, Source: learn_runner.py

Example 6: generate_experiment_fn

# Required import: from tensorflow.contrib.training.python.training import hparam [as alias]
# Or alternatively: from tensorflow.contrib.training.python.training.hparam import HParams [as alias]
def generate_experiment_fn(**experiment_args):
  """Create an experiment function.

  See the command line help text for a description of the arguments.

  Args:
    experiment_args: keyword arguments to be passed through to the Experiment.
      See `tf.contrib.learn.Experiment` for the full list of arguments.
  Returns:
    A function:
      (tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment

    This function is used by learn_runner to create an Experiment which
    executes model code provided in the form of an Estimator and
    input functions.
  """
  def _experiment_fn(run_config, hparams):
    # num_epochs can control duration if train_steps isn't
    # passed to Experiment
    train_input = lambda: model.generate_input_fn(
        [os.path.join(os.environ['PIPELINE_INPUT_PATH'], train_file) for train_file in hparams.train_files],
        num_epochs=hparams.num_epochs,
        batch_size=hparams.train_batch_size,
    )
    # Don't shuffle evaluation data
    eval_input = lambda: model.generate_input_fn(
        [os.path.join(os.environ['PIPELINE_INPUT_PATH'], eval_file) for eval_file in hparams.eval_files],
        batch_size=hparams.eval_batch_size,
        shuffle=False
    )
    return tf.contrib.learn.Experiment(
        tf.estimator.Estimator(
            model.generate_model_fn(
                embedding_size=hparams.embedding_size,
                # Construct layers sizes with exponential decay
                hidden_units=[
                    max(2, int(hparams.first_layer_size *
                               hparams.scale_factor**i))
                    for i in range(hparams.num_layers)
                ],
                learning_rate=hparams.learning_rate
            ),
            config=run_config
        ),
        train_input_fn=train_input,
        eval_input_fn=eval_input,
#       export_fn
#        checkpoint_and_export=True,
        **experiment_args
    )
  return _experiment_fn 
Developer: PipelineAI, Project: models, Lines: 52, Source: pipeline_train.py
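For context, a sketch (not taken from the project) of how such an experiment_fn is typically handed to learn_runner together with an HParams object; internally, learn_runner applies the uid check shown in Example 5 when a run_config is supplied. The hyperparameter values and the model directory are assumptions, and the model code above additionally expects the PIPELINE_INPUT_PATH environment variable to point at the input data directory.

from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.training.python.training import hparam

learn_runner.run(
    generate_experiment_fn(train_steps=10000, eval_steps=100),
    run_config=run_config_lib.RunConfig(model_dir='/tmp/example_model'),
    hparams=hparam.HParams(
        train_files=['train.csv'],
        eval_files=['eval.csv'],
        num_epochs=5,
        train_batch_size=40,
        eval_batch_size=40,
        embedding_size=8,
        first_layer_size=100,
        scale_factor=0.7,
        num_layers=4,
        learning_rate=0.01))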


Note: The tensorflow.contrib.training.python.training.hparam.HParams method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before using or redistributing the code; do not reproduce this article without permission.