This page collects typical usage examples of the Python method tensorpack.QueueInput. If you have been wondering what exactly tensorpack.QueueInput does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore other usage examples from the tensorpack module, where this method lives.
Six code examples of tensorpack.QueueInput are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
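All six examples share one basic pattern: QueueInput wraps a DataFlow so the trainer pulls batches from a prefetching queue instead of being fed synchronously. Here is a minimal sketch of that pattern on its own; MyModel is a hypothetical ModelDesc subclass and FakeData stands in for a real DataFlow, neither taken from the examples that follow.

# A minimal sketch, not one of the collected examples below.
from tensorpack import QueueInput, TrainConfig
from tensorpack.dataflow import FakeData

df = FakeData([[32, 224, 224, 3], [32]], size=100, dtype='uint8')  # stand-in DataFlow
config = TrainConfig(
    model=MyModel(),      # hypothetical ModelDesc subclass
    data=QueueInput(df),  # batches are prefetched through a queue
    steps_per_epoch=100,
    max_epoch=1,
)
# launch_train_with_config(config, SimpleTrainer())  # typical way to start training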
Example 1: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model, fake=False, data_aug=True):
    nr_tower = max(get_nr_gpu(), 1)
    batch = TOTAL_BATCH_SIZE // nr_tower
    if fake:
        logger.info("For benchmark, batch size is fixed to 64 per tower.")
        dataset_train = FakeData(
            [[64, 224, 224, 3], [64]], 1000, random=False, dtype='uint8')
        callbacks = []
    else:
        logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
        dataset_train = get_data('train', batch, data_aug)
        dataset_val = get_data('val', batch, data_aug)
        callbacks = [
            ModelSaver(),
        ]
        if data_aug:
            callbacks.append(ScheduledHyperParamSetter(
                'learning_rate',
                [(30, 1e-2), (60, 1e-3), (85, 1e-4), (95, 1e-5), (105, 1e-6)]))
        callbacks.append(HumanHyperParamSetter('learning_rate'))
        infs = [ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]
        if nr_tower == 1:
            # single-GPU inference with queue prefetch
            callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
        else:
            # multi-GPU inference (with mandatory queue prefetch)
            callbacks.append(DataParallelInferenceRunner(
                dataset_val, infs, list(range(nr_tower))))
    return AutoResumeTrainConfig(
        model=model,
        dataflow=dataset_train,
        callbacks=callbacks,
        steps_per_epoch=5000 if TOTAL_BATCH_SIZE == 256 else 10000,
        max_epoch=110 if data_aug else 64,
        nr_tower=nr_tower
    )
Example 2: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model, option):
    dataset_train = get_data('train', option)
    dataset_val = get_data('val', option)
    callbacks = get_callbacks(dataset_val, option)
    steps_per_epoch = get_steps_per_epoch(option)
    return TrainConfig(
        model=model,
        data=StagingInput(QueueInput(dataset_train), nr_stage=1),
        callbacks=callbacks,
        steps_per_epoch=steps_per_epoch,
        max_epoch=option.epoch,
    )
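Note the extra wrapper in Example 2: StagingInput adds a GPU-side staging area on top of the CPU-side queue, so the next batch is already copied to the device while the current step runs. The pattern in isolation (a sketch; dataset_train is any DataFlow):

data = StagingInput(QueueInput(dataset_train), nr_stage=1)  # prefetch queue + 1 staged batch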
Example 3: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model, fake=False):
    nr_tower = max(get_nr_gpu(), 1)
    assert args.batch % nr_tower == 0
    batch = args.batch // nr_tower
    if fake:
        logger.info("For benchmark, batch size is fixed to 64 per tower.")
        dataset_train = FakeData(
            [[64, 224, 224, 3], [64]], 1000, random=False, dtype='uint8')
        callbacks = []
    else:
        logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
        dataset_train = get_data('train', batch)
        dataset_val = get_data('val', batch)
        BASE_LR = 0.1 * (args.batch / 256.0)
        callbacks = [
            ModelSaver(),
            ScheduledHyperParamSetter(
                'learning_rate', [(0, BASE_LR), (30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2),
                                  (90, BASE_LR * 1e-3)]),
        ]
        if BASE_LR > 0.1:
            callbacks.append(
                ScheduledHyperParamSetter(
                    'learning_rate', [(0, 0.1), (3, BASE_LR)], interp='linear'))
        infs = [ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]
        if nr_tower == 1:
            # single-GPU inference with queue prefetch
            callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
        else:
            # multi-GPU inference (with mandatory queue prefetch)
            callbacks.append(DataParallelInferenceRunner(
                dataset_val, infs, list(range(nr_tower))))
    return TrainConfig(
        model=model,
        dataflow=dataset_train,
        callbacks=callbacks,
        steps_per_epoch=100 if args.fake else 1280000 // args.batch,
        max_epoch=110,
    )
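Examples 3 through 6 all scale the base learning rate linearly with the global batch size and prepend a warmup phase whenever the scaled rate exceeds the default. As a worked instance of the arithmetic in Example 3, assuming args.batch = 512:

BASE_LR = 0.1 * (512 / 256.0)  # = 0.2
# 0.2 > 0.1, so a linear ramp from 0.1 (epoch 0) to 0.2 (epoch 3) is added.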
Example 4: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model, fake=False):
    nr_tower = max(get_num_gpu(), 1)
    assert args.batch % nr_tower == 0
    batch = args.batch // nr_tower
    if fake:
        logger.info("For benchmark, batch size is fixed to 64 per tower.")
        dataset_train = FakeData(
            [[64, 224, 224, 3], [64]], 1000, random=False, dtype='uint8')
        callbacks = []
        steps_per_epoch = 100
    else:
        logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
        dataset_train = get_imagenet_dataflow(args.data, 'train', batch)
        dataset_val = get_imagenet_dataflow(args.data, 'val', min(64, batch))
        steps_per_epoch = 1281167 // args.batch
        BASE_LR = 0.1 * args.batch / 256.0
        logger.info("BASELR: {}".format(BASE_LR))
        callbacks = [
            ModelSaver(),
            EstimatedTimeLeft(),
            GPUUtilizationTracker(),
            ScheduledHyperParamSetter(
                'learning_rate', [(0, BASE_LR), (30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2),
                                  (90, BASE_LR * 1e-3)]),
        ]
        if BASE_LR > 0.1:
            callbacks.append(
                ScheduledHyperParamSetter(
                    'learning_rate', [(0, 0.1), (5 * steps_per_epoch, BASE_LR)],
                    interp='linear', step_based=True))
        infs = [ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]
        if nr_tower == 1:
            # single-GPU inference with queue prefetch
            callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
        else:
            # multi-GPU inference (with mandatory queue prefetch)
            callbacks.append(DataParallelInferenceRunner(
                dataset_val, infs, list(range(nr_tower))))
    return TrainConfig(
        model=model,
        dataflow=dataset_train,
        callbacks=callbacks,
        steps_per_epoch=steps_per_epoch,
        max_epoch=100,
    )
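Example 4 expresses the same warmup in steps rather than epochs: with step_based=True, the schedule endpoint 5 * steps_per_epoch ramps the learning rate at per-step granularity over the first five epochs, which gives a smoother increase than the epoch-based schedules in the other examples.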
Example 5: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model, fake=False):
    nr_tower = max(get_num_gpu(), 1)
    assert args.batch % nr_tower == 0
    batch = args.batch // nr_tower
    logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
    if batch < 32 or batch > 64:
        logger.warn("Batch size per tower not in [32, 64]. This probably will lead to worse accuracy than reported.")
    if fake:
        data = QueueInput(FakeData(
            [[batch, 224, 224, 3], [batch], [batch, 224, 224, 3], [batch]],
            1000, random=False, dtype='uint8'))
        callbacks = []
    else:
        data = QueueInput(get_data('train', batch))
        START_LR = 0.1
        BASE_LR = START_LR * (args.batch / 256.0)
        callbacks = [
            ModelSaver(),
            EstimatedTimeLeft(),
            ScheduledHyperParamSetter(
                'learning_rate', [
                    (0, min(START_LR, BASE_LR)), (30, BASE_LR * 1e-1), (45, BASE_LR * 1e-2),
                    (55, BASE_LR * 1e-3)]),
        ]
        if BASE_LR > START_LR:
            callbacks.append(
                ScheduledHyperParamSetter(
                    'learning_rate', [(0, START_LR), (5, BASE_LR)], interp='linear'))
        infs = [ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]
        dataset_val = get_data('val', batch)
        if nr_tower == 1:
            # single-GPU inference with queue prefetch
            callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
        else:
            # multi-GPU inference (with mandatory queue prefetch)
            callbacks.append(DataParallelInferenceRunner(
                dataset_val, infs, list(range(nr_tower))))
    return AutoResumeTrainConfig(
        model=model,
        data=data,
        callbacks=callbacks,
        steps_per_epoch=100 if args.fake else 1280000 // args.batch,
        max_epoch=60,
    )
Example 6: get_config
# Required import: import tensorpack [as alias]
# Or: from tensorpack import QueueInput [as alias]
def get_config(model):
    nr_tower = max(get_num_gpu(), 1)
    assert args.batch % nr_tower == 0
    batch = args.batch // nr_tower
    logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
    if batch < 32 or batch > 64:
        logger.warn("Batch size per tower not in [32, 64]. This probably will lead to worse accuracy than reported.")
    if args.fake:
        data = QueueInput(FakeData(
            [[batch, 224, 224, 3], [batch]], 1000, random=False, dtype='uint8'))
        callbacks = []
    else:
        if args.symbolic:
            data = TFDatasetInput(get_imagenet_tfdata(args.data, 'train', batch))
        else:
            data = QueueInput(get_imagenet_dataflow(args.data, 'train', batch))
        START_LR = 0.1
        BASE_LR = START_LR * (args.batch / 256.0)
        callbacks = [
            ModelSaver(),
            EstimatedTimeLeft(),
            ScheduledHyperParamSetter(
                'learning_rate', [
                    (0, min(START_LR, BASE_LR)), (30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2),
                    (90, BASE_LR * 1e-3), (100, BASE_LR * 1e-4)]),
        ]
        if BASE_LR > START_LR:
            callbacks.append(
                ScheduledHyperParamSetter(
                    'learning_rate', [(0, START_LR), (5, BASE_LR)], interp='linear'))
        infs = [ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]
        dataset_val = get_imagenet_dataflow(args.data, 'val', batch)
        if nr_tower == 1:
            # single-GPU inference with queue prefetch
            callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
        else:
            # multi-GPU inference (with mandatory queue prefetch)
            callbacks.append(DataParallelInferenceRunner(
                dataset_val, infs, list(range(nr_tower))))
    if get_num_gpu() > 0:
        callbacks.append(GPUUtilizationTracker())
    return TrainConfig(
        model=model,
        data=data,
        callbacks=callbacks,
        steps_per_epoch=100 if args.fake else 1281167 // args.batch,
        max_epoch=105,
    )
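Example 6 is the only one that makes the input pipeline switchable: with --symbolic it feeds a tf.data pipeline through TFDatasetInput, and otherwise it wraps the Python-side DataFlow in QueueInput. As a rough rule of thumb (not stated in the source), QueueInput keeps preprocessing in Python, which is flexible and easy to debug, while TFDatasetInput moves it into the TensorFlow graph, trading that flexibility for lower Python overhead.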