

Python utils.logged_timer Method Code Examples

This article collects typical usage examples of the Python method utils.logged_timer. If you are unsure what utils.logged_timer does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the utils module it belongs to.


The sections below show 15 code examples of the utils.logged_timer method, ordered by popularity by default.
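For reference: in the Minigo-derived projects these examples come from, logged_timer is a small context manager that logs how long the wrapped block of code took to run. Implementations differ slightly between projects, so the following is only a minimal sketch consistent with how the examples below use it; the exact message format is an assumption:

import time
from contextlib import contextmanager

@contextmanager
def logged_timer(message):
    # Record the start time, run the wrapped block, then log the elapsed time.
    tick = time.time()
    yield
    tock = time.time()
    print('{}: {:.3f} seconds'.format(message, tock - tick))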

Example 1: validate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def validate(
        working_dir: 'tf.estimator working directory',
        *tf_record_dirs: 'Directories where holdout data are',
        checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,
        validate_name: 'Name for validation set (i.e., selfplay or human)'=None):
    qmeas.start_time('validate')
    tf_records = []
    with timer("Building lists of holdout files"):
        for record_dir in tf_record_dirs:
            tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))

    first_record = os.path.basename(tf_records[0])
    last_record = os.path.basename(tf_records[-1])
    with timer("Validating from {} to {}".format(first_record, last_record)):
        dual_net.validate(
            working_dir, tf_records, checkpoint_name=checkpoint_name,
            name=validate_name)
    qmeas.stop_time('validate') 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 20, Source file: main.py
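Note that this example calls timer(...) rather than utils.logged_timer(...): as the import comment above indicates, many of the snippets on this page assume the aliased form of the import. A minimal sketch of that setup:

from utils import logged_timer as timer

with timer("Building lists of holdout files"):
    pass  # the file-globbing work would go here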

Example 2: evaluate_both

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def evaluate_both(
        prev_model: 'The path to previous model',
        cur_model: 'The path to current model',
        output_dir: 'Where to write the evaluation results'='sgf/evaluate',
        readouts: 'How many readouts to make per move.'=200,
        games: 'the number of games to play'=20,
        verbose: 'How verbose the players should be (see selfplay)' = 1):
    qmeas.start_time('evaluate')
    _ensure_dir_exists(output_dir)

    winners = []
    with timer("%d games" % games):
        winners = evaluation.play_match_many_instance_both(
            prev_model, cur_model, games, readouts, output_dir, verbose)
    qmeas.stop_time('evaluate')
    white_count = 0
    for win in winners:
        if 'W' in win or 'w' in win:
            white_count += 1
    # Fraction of the games * 2 results won by White.
    return white_count * 1.0 / (games * 2)

    # qmeas.report_profiler() 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 24, Source file: main.py

Example 3: train

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def train(trained_models_dir, estimator_model_dir, training_chunk_dir, params):
  """Train the latest model from gathered data.

  Args:
    trained_models_dir: Where to export the completed generation.
    estimator_model_dir: tf.estimator model directory.
    training_chunk_dir: Directory where gathered training chunks are.
    params: An object of hyperparameters for the model.
  """
  model_num, model_name = utils.get_latest_model(trained_models_dir)
  print('Initializing from model {}'.format(model_name))

  new_model_name = utils.generate_model_name(model_num + 1)
  print('New model will be {}'.format(new_model_name))
  save_file = os.path.join(trained_models_dir, new_model_name)

  tf_records = sorted(
      tf.gfile.Glob(os.path.join(training_chunk_dir, '*'+_TF_RECORD_SUFFIX)))
  tf_records = tf_records[
      -(params.train_window_size // params.examples_per_chunk):]

  print('Training from: {} to {}'.format(tf_records[0], tf_records[-1]))
  with utils.logged_timer('Training'):
    dualnet.train(estimator_model_dir, tf_records, model_num + 1, params)
    dualnet.export_model(estimator_model_dir, save_file) 
Developer: itsamitgoel, Project: Gun-Detector, Lines of code: 27, Source file: minigo.py

Example 4: train

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def train():
    model_version, model_name = get_latest_model()
    logger.info("Training on gathered game data, initializing from {}".format(model_name))
    new_model_name = generate(model_version + 1)
    logger.info("New model will be {}".format(new_model_name))
    save_file = os.path.join(PATHS.MODELS_DIR, new_model_name)

    try:
        logger.info("Getting tf_records")
        tf_records = sorted(gfile.Glob(os.path.join(PATHS.TRAINING_CHUNK_DIR, '*.tfrecord.zz')))
        tf_records = tf_records[
                     -1 * (GLOBAL_PARAMETER_STORE.WINDOW_SIZE // GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD):]

        print("Training from:", tf_records[0], "to", tf_records[-1])

        with timer("Training"):
            network.train(PATHS.ESTIMATOR_WORKING_DIR, tf_records, model_version+1)
            network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR, save_file)

    except Exception:  # avoid a bare except; still log any training failure
        logger.info("Got an error training")
        logging.exception("Train error") 
Developer: PacktPublishing, Project: Python-Reinforcement-Learning-Projects, Lines of code: 24, Source file: controller.py

Example 5: validate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def validate(*tf_records):
    """Validate a model's performance on a set of holdout data."""
    if FLAGS.use_tpu:
        def _input_fn(params):
            return preprocessing.get_tpu_input_tensors(
                params['train_batch_size'], params['input_layout'], tf_records,
                filter_amount=1.0)
    else:
        def _input_fn():
            return preprocessing.get_input_tensors(
                FLAGS.train_batch_size, FLAGS.input_layout, tf_records,
                filter_amount=1.0, shuffle_examples=False)

    steps = FLAGS.examples_to_validate // FLAGS.train_batch_size
    if FLAGS.use_tpu:
        steps //= FLAGS.num_tpu_cores

    estimator = dual_net.get_estimator()
    with utils.logged_timer("Validating"):
        estimator.evaluate(_input_fn, steps=steps, name=FLAGS.validate_name) 
Developer: mlperf, Project: training, Lines of code: 22, Source file: validate.py

Example 6: main

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def main(argv):
    """Validate a model's performance on a set of holdout data."""
    _, *validation_paths = argv
    if FLAGS.expand_validation_dirs:
        tf_records = []
        with utils.logged_timer("Building lists of holdout files"):
            dirs = validation_paths
            while dirs:
                d = dirs.pop()
                for path, newdirs, files in os.walk(d):
                    tf_records.extend(os.path.join(path, f) for f in files if f.endswith('.zz'))
                    dirs.extend(os.path.join(path, d) for d in newdirs)

    else:
        tf_records = validation_paths

    if not tf_records:
        print("Validation paths:", validation_paths)
        print(["{}:\n\t{}".format(p, os.listdir(p)) for p in validation_paths])
        raise RuntimeError("Did not find any holdout files for validating!")
    validate(*tf_records) 
Developer: mlperf, Project: training, Lines of code: 23, Source file: validate.py

Example 7: train

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def train(trained_models_dir, estimator_model_dir, training_chunk_dir,
          generation, params):
  """Train the latest model from gathered data.

  Args:
    trained_models_dir: Where to export the completed generation.
    estimator_model_dir: tf.estimator model directory.
    training_chunk_dir: Directory where gathered training chunks are.
    generation: Which generation you are training.
    params: A MiniGoParams instance of hyperparameters for the model.
  """
  new_model_name = utils.generate_model_name(generation)
  print('New model will be {}'.format(new_model_name))
  new_model = os.path.join(trained_models_dir, new_model_name)

  print('Training on gathered game data...')
  tf_records = sorted(
      tf.gfile.Glob(os.path.join(training_chunk_dir, '*'+_TF_RECORD_SUFFIX)))
  tf_records = tf_records[
      -(params.train_window_size // params.examples_per_chunk):]

  print('Training from: {} to {}'.format(tf_records[0], tf_records[-1]))
  with utils.logged_timer('Training'):
    dualnet.train(estimator_model_dir, tf_records, generation, params)
    dualnet.export_model(estimator_model_dir, new_model) 
Developer: generalized-iou, Project: g-tensorflow-models, Lines of code: 27, Source file: minigo.py

Example 8: load_player

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def load_player(model_path):
    print("Loading weights from %s ... " % model_path)
    with timer("Loading weights from %s ... " % model_path):
        network = dual_net.DualNetwork(model_path)
        network.name = os.path.basename(model_path)
    player = MCTSPlayer(network, verbosity=2)
    return player 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 9, Source file: training_curve.py

Example 9: train

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def train(
        working_dir: 'tf.estimator working directory.',
        chunk_dir: 'Directory where gathered training chunks are.',
        model_save_path: 'Where to export the completed generation.',
        generation_num: 'Which generation you are training.'=0):
    qmeas.start_time('train')
    tf_records = sorted(gfile.Glob(os.path.join(chunk_dir, '*.tfrecord.zz')))
    tf_records = tf_records[-1 * (WINDOW_SIZE // EXAMPLES_PER_RECORD):]

    print("Training from:", tf_records[0], "to", tf_records[-1])

    with timer("Training"):
        dual_net.train(working_dir, tf_records, generation_num)
        dual_net.export_model(working_dir, model_save_path)
    qmeas.stop_time('train') 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 17, Source file: main.py

Example 10: evaluate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def evaluate(
        black_model: 'The path to the model to play black',
        white_model: 'The path to the model to play white',
        output_dir: 'Where to write the evaluation results'='sgf/evaluate',
        readouts: 'How many readouts to make per move.'=200,
        games: 'the number of games to play'=20,
        verbose: 'How verbose the players should be (see selfplay)' = 1):
    qmeas.start_time('evaluate')
    _ensure_dir_exists(output_dir)

    with timer("Loading weights"):
        black_net = dual_net.DualNetwork(black_model)
        white_net = dual_net.DualNetwork(white_model)

    winners = []
    with timer("%d games" % games):
        winners = evaluation.play_match(
            black_net, white_net, games, readouts, output_dir, verbose)
    qmeas.stop_time('evaluate')
    white_count = 0
    for win in winners:
        if 'W' in win or 'w' in win:
            white_count += 1
    return white_count * 1.0 / games

    # qmeas.report_profiler() 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 28, Source file: main.py

Example 11: selfplay_cache_model

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def selfplay_cache_model(
        network: "The path to the network model files",
        output_dir: "Where to write the games"="data/selfplay",
        holdout_dir: "Where to write held-out games"="data/holdout",
        output_sgf: "Where to write the sgfs"="sgf/",
        readouts: 'How many simulations to run per move'=100,
        verbose: '>=2 will print debug info, >=3 will print boards' = 1,
        resign_threshold: 'absolute value of threshold to resign at' = 0.95,
        holdout_pct: 'how many games to hold out for validation' = 0.05):
    qmeas.start_time('selfplay')
    clean_sgf = os.path.join(output_sgf, 'clean')
    full_sgf = os.path.join(output_sgf, 'full')
    _ensure_dir_exists(clean_sgf)
    _ensure_dir_exists(full_sgf)
    _ensure_dir_exists(output_dir)
    _ensure_dir_exists(holdout_dir)

    with timer("Playing game"):
        player = selfplay_mcts.play(
            network, readouts, resign_threshold, verbose)

    output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
    game_data = player.extract_data()
    with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf(use_comments=False))
    with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf())

    tf_examples = preprocessing.make_dataset_from_selfplay(game_data)

    # Hold out 5% of games for evaluation.
    if random.random() < holdout_pct:
        fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
    else:
        fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))

    preprocessing.write_tf_examples(fname, tf_examples)
    qmeas.stop_time('selfplay') 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 40, Source file: main.py

Example 12: selfplay

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def selfplay(
        load_file: "The path to the network model files",
        output_dir: "Where to write the games"="data/selfplay",
        holdout_dir: "Where to write held-out games"="data/holdout",
        output_sgf: "Where to write the sgfs"="sgf/",
        readouts: 'How many simulations to run per move'=100,
        verbose: '>=2 will print debug info, >=3 will print boards' = 1,
        resign_threshold: 'absolute value of threshold to resign at' = 0.95,
        holdout_pct: 'how many games to hold out for validation' = 0.05):
    qmeas.start_time('selfplay')
    clean_sgf = os.path.join(output_sgf, 'clean')
    full_sgf = os.path.join(output_sgf, 'full')
    _ensure_dir_exists(clean_sgf)
    _ensure_dir_exists(full_sgf)
    _ensure_dir_exists(output_dir)
    _ensure_dir_exists(holdout_dir)

    with timer("Loading weights from %s ... " % load_file):
        network = dual_net.DualNetwork(load_file)

    with timer("Playing game"):
        player = selfplay_mcts.play(
            network, readouts, resign_threshold, verbose)

    output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
    game_data = player.extract_data()
    with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf(use_comments=False))
    with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf())

    tf_examples = preprocessing.make_dataset_from_selfplay(game_data)

    # Hold out 5% of games for evaluation.
    if random.random() < holdout_pct:
        fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
    else:
        fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))

    preprocessing.write_tf_examples(fname, tf_examples)
    qmeas.stop_time('selfplay') 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 43, Source file: main.py

Example 13: validate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def validate(trained_models_dir, holdout_dir, estimator_model_dir, params):
  """Validate the latest model on the holdout dataset.

  Args:
    trained_models_dir: Directories where the completed generations/models are.
    holdout_dir: Directories where holdout data are.
    estimator_model_dir: tf.estimator model directory.
    params: An object of hyperparameters for the model.
  """
  model_num, _ = utils.get_latest_model(trained_models_dir)

  # Get the holdout game data
  nums_names = utils.get_models(trained_models_dir)

  # Model N was trained on games up through model N-1, so the validation set
  # should only be for models through N-1 as well, thus the (model_num) term.
  models = [num_name for num_name in nums_names if num_name[0] < model_num]

  # pair is a tuple of (model_num, model_name), like (13, 000013-modelname)
  holdout_dirs = [os.path.join(holdout_dir, pair[1])
                  for pair in models[-params.holdout_generation:]]
  tf_records = []
  with utils.logged_timer('Building lists of holdout files'):
    for record_dir in holdout_dirs:
      if os.path.exists(record_dir):  # make sure holdout dir exists
        tf_records.extend(
            tf.gfile.Glob(os.path.join(record_dir, '*'+_TF_RECORD_SUFFIX)))

  print('The length of tf_records is {}.'.format(len(tf_records)))
  first_tf_record = os.path.basename(tf_records[0])
  last_tf_record = os.path.basename(tf_records[-1])
  with utils.logged_timer('Validating from {} to {}'.format(
      first_tf_record, last_tf_record)):
    dualnet.validate(estimator_model_dir, tf_records, params) 
Developer: itsamitgoel, Project: Gun-Detector, Lines of code: 36, Source file: minigo.py

Example 14: validate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def validate(model_version=None, validate_name=None):
    if model_version is None:
        model_version, model_name = get_latest_model()
    else:
        model_version = int(model_version)
        model_name = get_model(model_version)

    models = list(
        filter(lambda num_name: num_name[0] < (model_version - 1), get_models()))

    if len(models) == 0:
        logger.info('Not enough models, including model N for validation')
        models = list(
            filter(lambda num_name: num_name[0] <= model_version, get_models()))
    else:
        logger.info('Validating using data from following models: {}'.format(models))

    tf_record_dirs = [os.path.join(PATHS.HOLDOUT_DIR, pair[1])
                    for pair in models[-5:]]

    working_dir = PATHS.ESTIMATOR_WORKING_DIR
    checkpoint_name = os.path.join(PATHS.MODELS_DIR, model_name)

    tf_records = []
    with timer("Building lists of holdout files"):
        for record_dir in tf_record_dirs:
            tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))

    with timer("Validating from {} to {}".format(os.path.basename(tf_records[0]), os.path.basename(tf_records[-1]))):
        network.validate(working_dir, tf_records, checkpoint_path=checkpoint_name, name=validate_name) 
Developer: PacktPublishing, Project: Python-Reinforcement-Learning-Projects, Lines of code: 32, Source file: controller.py

Example 15: evaluate

# Required module import: import utils [as alias]
# Or: from utils import logged_timer [as alias]
def evaluate(black_model, white_model):
    os.makedirs(PATHS.SGF_DIR, exist_ok=True)

    with timer("Loading weights"):
        black_net = network.PolicyValueNetwork(black_model)
        white_net = network.PolicyValueNetwork(white_model)

    with timer("Playing {} games".format(GLOBAL_PARAMETER_STORE.EVALUATION_GAMES)):
        play_match(black_net, white_net, GLOBAL_PARAMETER_STORE.EVALUATION_GAMES,
                   GLOBAL_PARAMETER_STORE.EVALUATION_READOUTS, PATHS.SGF_DIR) 
Developer: PacktPublishing, Project: Python-Reinforcement-Learning-Projects, Lines of code: 12, Source file: controller.py
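Across all fifteen examples the pattern is the same: wrap one expensive pipeline stage (loading weights, self-play, training, validation) in the timer so its duration is logged alongside a descriptive message. A self-contained usage sketch, assuming logged_timer behaves like the context manager sketched near the top of this article:

import time
import utils

def slow_stage():
    time.sleep(0.5)  # stand-in for a training or validation step

with utils.logged_timer('Running slow stage'):
    slow_stage()
# Logs something like: Running slow stage: 0.500 seconds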


Note: The utils.logged_timer method examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce without permission.