

Python training.HParams method code examples

This article collects typical usage examples of the Python method tensorflow.contrib.training.HParams. If you are wondering how training.HParams is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.training.


A total of 15 code examples of training.HParams are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
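
Before the collected examples, here is a minimal sketch of the typical HParams workflow: defaults are declared as keyword arguments, read and reassigned as attributes, and overridden from a comma-separated name=value string via parse(). The hyperparameter names below are illustrative only and are not taken from any of the projects cited later.

from tensorflow.contrib.training import HParams

# Defaults are given as keyword arguments; each becomes an attribute.
hparams = HParams(learning_rate=0.001, batch_size=32, num_layers=2)

print(hparams.learning_rate)   # 0.001
hparams.num_layers = 4         # attributes can be reassigned directly

# Override any subset from a comma-separated "name=value" string,
# e.g. the value of a --hparams command-line flag.
hparams.parse('learning_rate=0.01,batch_size=64')
print(hparams.values())        # dict of all current hyperparameter values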

Example 1: _setup

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _setup(self):
    super(GymSimulatedDiscreteProblem, self)._setup()

    environment_spec = self.environment_spec
    hparams = HParams(
        video_num_input_frames=environment_spec.video_num_input_frames,
        video_num_target_frames=environment_spec.video_num_target_frames,
        environment_spec=environment_spec)

    initial_frames_problem = environment_spec.initial_frames_problem
    dataset = initial_frames_problem.dataset(
        tf.estimator.ModeKeys.TRAIN,
        FLAGS.data_dir,
        shuffle_files=False,
        hparams=hparams)
    dataset = dataset.map(lambda x: x["input_action"]).take(1)
    input_data_iterator = (dataset.batch(1).make_initializable_iterator())
    self._session.run(input_data_iterator.initializer)

    res = self._session.run(input_data_iterator.get_next())
    self._initial_actions = res[0, :, 0][:-1]
    self._reset_real_env() 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source: gym_problems.py

Example 2: main

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def main():

    hparams = HParams(**vars(args))
    hparams.hidden_size = 512
    hparams.num_classes = 10
    hparams.num_features = 100
    hparams.num_epochs = 200
    hparams.num_samples = 1234

    dataset = tf.data.Dataset.from_tensor_slices((
        np.random.random(size=(hparams.num_samples, hparams.num_features)),
        np.random.randint(0, hparams.num_classes, size=hparams.num_samples)))
    dataset = dataset.batch(hparams.batch_size)

    print('\n\nRunning SimpleNN model.')
    model = SimpleNN(hparams)
    for epoch_idx in range(hparams.num_epochs):
        num_correct_total = model.run_train_epoch(dataset)
        if epoch_idx % 5 == 0:
            print('Epoch {}: accuracy={:.3%}'.format(
                epoch_idx, float(num_correct_total) / hparams.num_samples)) 
Developer: PacktPublishing, Project: -Learn-Artificial-Intelligence-with-TensorFlow, Lines: 23, Source: eager.py

Example 3: large_imagenet_config

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def large_imagenet_config():
  """Large ImageNet configuration based on PNASNet-5."""
  return contrib_training.HParams(
      stem_multiplier=3.0,
      dense_dropout_keep_prob=0.5,
      num_cells=12,
      filter_scaling_rate=2.0,
      num_conv_filters=216,
      drop_path_keep_prob=0.6,
      use_aux_head=1,
      num_reduction_layers=2,
      data_format='NHWC',
      skip_reduction_layer_input=1,
      total_training_steps=250000,
      use_bounded_activation=False,
  ) 
Developer: tensorflow, Project: models, Lines: 18, Source: pnasnet.py

Example 4: mobile_imagenet_config

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def mobile_imagenet_config():
  """Mobile ImageNet configuration based on PNASNet-5."""
  return contrib_training.HParams(
      stem_multiplier=1.0,
      dense_dropout_keep_prob=0.5,
      num_cells=9,
      filter_scaling_rate=2.0,
      num_conv_filters=54,
      drop_path_keep_prob=1.0,
      use_aux_head=1,
      num_reduction_layers=2,
      data_format='NHWC',
      skip_reduction_layer_input=1,
      total_training_steps=250000,
      use_bounded_activation=False,
  ) 
Developer: tensorflow, Project: models, Lines: 18, Source: pnasnet.py
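
The configurations returned in Examples 3 and 4 are ordinary HParams objects, so individual fields can be adjusted before the network is built. Below is a minimal sketch, assuming mobile_imagenet_config from Example 4 is in scope; the overrides are purely illustrative and are not values used in pnasnet.py.

# Tweak a returned configuration with set_hparam (raises if the name is unknown).
hparams = mobile_imagenet_config()
hparams.set_hparam('data_format', 'NCHW')   # e.g. a GPU-friendly layout
hparams.set_hparam('num_cells', 6)          # hypothetical smaller network
print(hparams.num_cells, hparams.data_format)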

Example 5: testNewMomentumOptimizerValue

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def testNewMomentumOptimizerValue(self):
    """Tests that new momentum value is updated appropriately."""
    original_momentum_value = 0.4
    hparams = contrib_training.HParams(momentum_optimizer_value=1.1)
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer
    optimizer_config.momentum_optimizer_value = original_momentum_value
    _write_config(pipeline_config, pipeline_config_path)

    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    configs = config_util.merge_external_params_with_configs(configs, hparams)
    optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
    new_momentum_value = optimizer_config.momentum_optimizer_value
    self.assertAlmostEqual(1.0, new_momentum_value)  # Clipped to 1.0. 
Developer: tensorflow, Project: models, Lines: 18, Source: config_util_test.py

Example 6: testNewClassificationLocalizationWeightRatio

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def testNewClassificationLocalizationWeightRatio(self):
    """Tests that the loss weight ratio is updated appropriately."""
    original_localization_weight = 0.1
    original_classification_weight = 0.2
    new_weight_ratio = 5.0
    hparams = contrib_training.HParams(
        classification_localization_weight_ratio=new_weight_ratio)
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.ssd.loss.localization_weight = (
        original_localization_weight)
    pipeline_config.model.ssd.loss.classification_weight = (
        original_classification_weight)
    _write_config(pipeline_config, pipeline_config_path)

    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    configs = config_util.merge_external_params_with_configs(configs, hparams)
    loss = configs["model"].ssd.loss
    self.assertAlmostEqual(1.0, loss.localization_weight)
    self.assertAlmostEqual(new_weight_ratio, loss.classification_weight) 
Developer: tensorflow, Project: models, Lines: 23, Source: config_util_test.py

Example 7: testNewFocalLossParameters

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def testNewFocalLossParameters(self):
    """Tests that the loss weight ratio is updated appropriately."""
    original_alpha = 1.0
    original_gamma = 1.0
    new_alpha = 0.3
    new_gamma = 2.0
    hparams = contrib_training.HParams(
        focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    classification_loss = pipeline_config.model.ssd.loss.classification_loss
    classification_loss.weighted_sigmoid_focal.alpha = original_alpha
    classification_loss.weighted_sigmoid_focal.gamma = original_gamma
    _write_config(pipeline_config, pipeline_config_path)

    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    configs = config_util.merge_external_params_with_configs(configs, hparams)
    classification_loss = configs["model"].ssd.loss.classification_loss
    self.assertAlmostEqual(new_alpha,
                           classification_loss.weighted_sigmoid_focal.alpha)
    self.assertAlmostEqual(new_gamma,
                           classification_loss.weighted_sigmoid_focal.gamma) 
Developer: tensorflow, Project: models, Lines: 25, Source: config_util_test.py

Example 8: create_hparams

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def create_hparams(hparams_overrides=None):
  """Returns hyperparameters, including any flag value overrides.

  Args:
    hparams_overrides: Optional hparams overrides, represented as a
      string containing comma-separated hparam_name=value pairs.

  Returns:
    The hyperparameters as a tf.HParams object.
  """
  hparams = contrib_training.HParams(
      # Whether a fine tuning checkpoint (provided in the pipeline config)
      # should be loaded for training.
      load_pretrained=True)
  # Override any of the preceding hyperparameter values.
  if hparams_overrides:
    hparams = hparams.parse(hparams_overrides)
  return hparams 
Developer: tensorflow, Project: models, Lines: 20, Source: model_hparams.py
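
A brief usage sketch for the function above: overrides arrive as a single comma-separated string, typically taken from a command-line flag; the override shown here is just an illustration.

# With no overrides, the default defined above is kept.
hparams = create_hparams()
print(hparams.load_pretrained)        # True

# Override from a comma-separated hparam_name=value string.
hparams = create_hparams('load_pretrained=false')
print(hparams.load_pretrained)        # False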

Example 9: standard_atari_env_spec

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def standard_atari_env_spec(env):
  """Parameters of environment specification."""
  standard_wrappers = [[tf_atari_wrappers.StackAndSkipWrapper, {"skip": 4}]]
  env_lambda = None
  if isinstance(env, str):
    env_lambda = lambda: gym.make(env)
  if callable(env):
    env_lambda = env
  assert env is not None, "Unknown specification of environment"

  return tf.contrib.training.HParams(
      env_lambda=env_lambda, wrappers=standard_wrappers, simulated_env=False) 
Developer: akzaidi, Project: fine-lm, Lines: 14, Source: gym_problems.py

Example 10: standard_atari_ae_env_spec

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def standard_atari_ae_env_spec(env):
  """Parameters of environment specification."""
  standard_wrappers = [[tf_atari_wrappers.StackAndSkipWrapper, {"skip": 4}],
                       [tf_atari_wrappers.AutoencoderWrapper, {}]]
  env_lambda = None
  if isinstance(env, str):
    env_lambda = lambda: gym.make(env)
  if callable(env):
    env_lambda = env
  assert env is not None, "Unknown specification of environment"

  return tf.contrib.training.HParams(env_lambda=env_lambda,
                                     wrappers=standard_wrappers,
                                     simulated_env=False) 
Developer: akzaidi, Project: fine-lm, Lines: 16, Source: gym_problems.py

Example 11: _cifar_config

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _cifar_config(is_training=True, data_format=None, total_steps=None):
  drop_path_keep_prob = 1.0 if not is_training else 0.6
  return contrib_training.HParams(
      stem_multiplier=3.0,
      drop_path_keep_prob=drop_path_keep_prob,
      num_cells=18,
      use_aux_head=1,
      num_conv_filters=32,
      dense_dropout_keep_prob=1.0,
      filter_scaling_rate=2.0,
      num_reduction_layers=2,
      skip_reduction_layer_input=0,
      data_format=data_format or 'NHWC',
      # 600 epochs with a batch size of 32
      # This is used for the drop path probabilities since it needs to increase
      # the drop out probability over the course of training.
      total_training_steps=total_steps or 937500,
  )


# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10 
Developer: tensorflow, Project: benchmarks, Lines: 32, Source: nasnet_model.py

Example 12: _large_imagenet_config

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _large_imagenet_config(is_training=True, data_format=None,
                           total_steps=None):
  drop_path_keep_prob = 1.0 if not is_training else 0.7
  return contrib_training.HParams(
      stem_multiplier=3.0,
      dense_dropout_keep_prob=0.5,
      num_cells=18,
      filter_scaling_rate=2.0,
      num_conv_filters=168,
      drop_path_keep_prob=drop_path_keep_prob,
      use_aux_head=1,
      num_reduction_layers=2,
      skip_reduction_layer_input=1,
      data_format=data_format or 'NHWC',
      total_training_steps=total_steps or 250000,
  )


# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10 
Developer: tensorflow, Project: benchmarks, Lines: 30, Source: nasnet_model.py

Example 13: _mobile_imagenet_config

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _mobile_imagenet_config(data_format=None, total_steps=None):
  return contrib_training.HParams(
      stem_multiplier=1.0,
      dense_dropout_keep_prob=0.5,
      num_cells=12,
      filter_scaling_rate=2.0,
      drop_path_keep_prob=1.0,
      num_conv_filters=44,
      use_aux_head=1,
      num_reduction_layers=2,
      skip_reduction_layer_input=0,
      data_format=data_format or 'NHWC',
      total_training_steps=total_steps or 250000,
  ) 
Developer: tensorflow, Project: benchmarks, Lines: 16, Source: nasnet_model.py

Example 14: _get_default_hparams

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _get_default_hparams(self):
    default_dict = {'shuffle': True,
                    'num_epochs': None,
                    'buffer_size': 512,
                    'compressed': True,
                    'sequence_length': None,  # read from manifest if None
                    }
    return HParams(**default_dict)
Developer: SudeepDasari, Project: visual_foresight, Lines: 10, Source: dataset_reader.py

Example 15: _default_hparams

# Required import: from tensorflow.contrib import training [as alias]
# Alternatively: from tensorflow.contrib.training import HParams [as alias]
def _default_hparams(self):
    return HParams()
Developer: SudeepDasari, Project: visual_foresight, Lines: 4, Source: policy.py
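
Example 15 returns an empty HParams, which a subclass or caller can populate later with add_hparam. A minimal sketch with hypothetical field names:

from tensorflow.contrib.training import HParams

hparams = HParams()                    # empty, as returned by _default_hparams
hparams.add_hparam('nactions', 4)      # hypothetical policy-specific setting
hparams.add_hparam('repeat', 3)
print(hparams.values())                # {'nactions': 4, 'repeat': 3}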


Note: The tensorflow.contrib.training.HParams examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.