This article collects typical usage examples of the Python method neon.callbacks.callbacks.Callbacks.add_callback. If you have been wondering what exactly Callbacks.add_callback does and how to use it, the curated examples below should help. You can also read up on the containing class, neon.callbacks.callbacks.Callbacks, for further context.
The following shows 11 code examples of Callbacks.add_callback, sorted by popularity by default.
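Before diving into the examples, here is the shared pattern in miniature: build a Callbacks container around a model, register extra Callback objects with add_callback, and pass the container to Model.fit. The sketch below is only illustrative: the model, train_iter, opt, and cost objects are assumed to exist already, and the on_epoch_end signature follows the older form used in Example 4 (newer neon releases pass callback_data and the model as extra arguments).

from neon.callbacks.callbacks import Callbacks, Callback

class EpochLogger(Callback):
    # hypothetical callback that prints a line at the end of each epoch
    def __init__(self, epoch_freq=1):
        super(EpochLogger, self).__init__(epoch_freq=epoch_freq)

    def on_epoch_end(self, epoch):
        # signature as in Example 4; newer neon versions use
        # on_epoch_end(self, callback_data, model, epoch)
        print('finished epoch %d' % epoch)

# model, train_iter, opt, and cost are assumed to be set up elsewhere
callbacks = Callbacks(model)
callbacks.add_callback(EpochLogger())                  # appended after the built-in callbacks
# callbacks.add_callback(EpochLogger(), insert_pos=0)  # ...or run it before them
model.fit(train_iter, optimizer=opt, num_epochs=10, cost=cost, callbacks=callbacks)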
Example 1: train
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
def train(args, hyper_params, model, opt, data_set):
    # set up the cost function as cross entropy
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    callbacks = Callbacks(model, **args.callback_args)
    callbacks.add_callback(EpochEndCallback())
    data_set.set_mode('train')
    model.fit(data_set, optimizer=opt,
              num_epochs=hyper_params.num_epochs, cost=cost, callbacks=callbacks)
    return
Example 2: range
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
layers.append(Conv((2, 2, nchan), strides=2, **common))
for idx in range(16):
    layers.append(Conv((3, 3, nchan), **common))
    if nchan > 16:
        nchan //= 2  # integer division keeps the channel count an int under Python 3
for idx in range(15):
    layers.append(Deconv((3, 3, nchan), **common))
layers.append(Deconv((4, 4, nchan), strides=2, **common))
layers.append(Deconv((3, 3, 1), init=init, activation=Logistic(shortcut=True)))
cost = GeneralizedCost(costfunc=SumSquared())
mlp = Model(layers=layers)

callbacks = Callbacks(mlp, train, **args.callback_args)
evaluator = Evaluator(callbacks.callback_data, mlp, test, imwidth, args.epochs,
                      args.data_dir, point_num)
callbacks.add_callback(evaluator)
mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost,
        callbacks=callbacks)
train.exit_batch_provider()

preds = evaluator.get_outputs()
paths = np.genfromtxt(os.path.join(args.test_data_dir, 'val_file.csv'),
                      dtype=str)[1:]
basenames = [os.path.basename(path) for path in paths]
filenames = [path.split(',')[0] for path in basenames]
filenames.sort()
content = []
for i, filename in enumerate(filenames):
    item = {
        "annotations":
        [
Example 3: StepSchedule
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
weights=[1, 1, 1])

# setup optimizer
schedule_w = StepSchedule(step_config=[5], change=[0.001 / 10])
schedule_b = StepSchedule(step_config=[5], change=[0.002 / 10])
opt_w = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005, schedule=schedule_w)
opt_b = GradientDescentMomentum(0.002, 0.9, wdecay=0.0005, schedule=schedule_b)
opt_skip = GradientDescentMomentum(0.0, 0.0)
optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b,
                            'skip': opt_skip, 'skip_bias': opt_skip})

# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
    util.load_vgg_all_weights(model, args.data_dir)

callbacks = Callbacks(model, eval_set=train_set, **args.callback_args)
callbacks.add_callback(TrainMulticostCallback())

# model.benchmark(train_set, optimizer=optimizer, cost=cost)
model.fit(train_set, optimizer=optimizer, cost=cost, num_epochs=args.epochs, callbacks=callbacks)

# Scale the bbox regression branch linear layer weights before saving the model
model = util.scale_bbreg_weights(model, [0.0, 0.0, 0.0, 0.0],
                                 [0.1, 0.1, 0.2, 0.2], train_set.num_classes)

if args.save_path is not None:
    save_obj(model.serialize(keep_states=True), args.save_path)
Example 4: Callbacks
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print(", ".join(allmetrics.metric_names) + ": " +
                  ", ".join(map(str, stats.flatten())))

    callbacks.add_callback(TopKMetrics(test))

if args.save_path:
    checkpoint_schedule = range(1, args.epochs)
    callbacks.add_serialize_callback(checkpoint_schedule, args.save_path, history=2)

mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
test.exit_batch_provider()
train.exit_batch_provider()
Example 5: Pooling
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
Pooling(3, strides=2)]

# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths
# of 64, 128, 256, 512
nfms = list(itt.chain.from_iterable(
    [itt.repeat(2**(x + 6), r) for x, r in enumerate(stages)]))
strides = [-1] + [1 if cur == prev else 2
                  for cur, prev in zip(nfms[1:], nfms[:-1])]

for nfm, stride in zip(nfms, strides):
    layers.append(module_factory(nfm, stride))

layers.append(Pooling('all', op='avg'))
layers.append(Conv(**conv_params(1, train.nclass, relu=False)))
layers.append(Activation(Softmax()))
model = Model(layers=layers)

weight_sched = Schedule([30, 60], 0.1)
opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001, schedule=weight_sched)

# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=test, metric=valmetric, **args.callback_args)
callbacks.add_callback(BatchNormTuneCallback(tune), insert_pos=0)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
Example 6: MNIST
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
random_seed = args.rng_seed if args.rng_seed else 0

# load up the mnist data set, padding images to size 32
dataset = MNIST(path=args.data_dir, sym_range=True, size=32, shuffle=True)
train = dataset.train_iter

# create a GAN
model, cost = create_model(dis_model=args.dmodel, gen_model=args.gmodel,
                           cost_type='wasserstein', noise_type='normal',
                           im_size=32, n_chan=1, n_noise=128,
                           n_gen_ftr=args.n_gen_ftr, n_dis_ftr=args.n_dis_ftr,
                           depth=4, n_extra_layers=4,
                           batch_norm=True, dis_iters=5,
                           wgan_param_clamp=0.01, wgan_train_sched=True)

# setup optimizer
optimizer = RMSProp(learning_rate=2e-4, decay_rate=0.99, epsilon=1e-8)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
fdir = ensure_dirs_exist(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results/'))
fname = os.path.splitext(os.path.basename(__file__))[0] + \
        '_[' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ']'
im_args = dict(filename=os.path.join(fdir, fname), hw=32,
               num_samples=args.batch_size, nchan=1, sym_range=True)
callbacks.add_callback(GANPlotCallback(**im_args))
callbacks.add_callback(GANCostCallback())

# model fit
model.fit(train, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Example 7: Model
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
model = Model(layers=SSD(ssd_config=train_config['ssd_config'], dataset=train_set))
cost = MBoxLoss(num_classes=train_set.num_classes)

if args.model_file is None:
    load_vgg_weights(model, cache_dir)
else:
    model.load_params(args.model_file)

if args.lr_step is None:
    args.lr_step = [40, 80, 120]

base_lr = 0.0001 * be.bsz * args.lr_scale
schedule = Schedule(args.lr_step, 0.1)
opt_w = GradientDescentMomentum(base_lr, momentum_coef=0.9, wdecay=0.0005, schedule=schedule)
opt_b = GradientDescentMomentum(base_lr, momentum_coef=0.9, schedule=schedule)
opt = MultiOptimizer({'default': opt_w, 'Bias': opt_b})

# hijack the eval callback arg here
eval_freq = args.callback_args.pop('eval_freq')
callbacks = Callbacks(model, **args.callback_args)
callbacks.add_callback(MAP_Callback(eval_set=val_set, epoch_freq=eval_freq))

if args.image_sample_dir is not None:
    callbacks.add_callback(ssd_image_callback(eval_set=val_set, image_dir=args.image_sample_dir,
                                              epoch_freq=eval_freq, num_images=args.num_images,
                                              classes=val_config['class_names']))

model.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Example 8: Model
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
# final model
mlp = Model(layers=layers)
logger.info("model construction complete...")

"""
model training and classification accuracy
"""
# model training and results
callbacks = Callbacks(mlp, train, args, eval_set=valid, metric=Misclassification())
# add loss and metric callbacks to aid diagnostics
callbacks.add_callback(MetricCallback(mlp, eval_set=train, metric=Misclassification(),
                                      epoch_freq=args.evaluation_freq))
callbacks.add_callback(MetricCallback(mlp, eval_set=valid, metric=Misclassification(),
                                      epoch_freq=args.evaluation_freq))

# run the model
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

# final classification accuracy
t_mis_rate = mlp.eval(train, metric=Misclassification()) * 100
v_mis_rate = mlp.eval(valid, metric=Misclassification()) * 100
# test_mis_rate = mlp.eval(test, metric=Misclassification()) * 100
print('Train misclassification error = %.1f%%' % t_mis_rate)
print('Valid misclassification error = %.1f%%' % v_mis_rate)
# print('Test misclassification error = %.1f%%' % test_mis_rate)
Example 9: main
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
def main():
    # setup the model and run for num_epochs saving the last state only
    # this is at the top so that the be is generated
    model = gen_model(args.backend)

    # setup data iterators
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
    NN = batch_size * 5  # avoid partial minibatches
    if args.backend == 'nervanacpu' or args.backend == 'cpu':
        # limit data since cpu backend runs slower
        train = ArrayIterator(X_train[:NN], y_train[:NN],
                              nclass=nclass, lshape=(1, 28, 28))
        valid = ArrayIterator(X_test[:NN], y_test[:NN],
                              nclass=nclass, lshape=(1, 28, 28))
    else:
        train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
        valid = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

    # serialization related
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    checkpoint_model_path = os.path.join('./', 'test_oneshot.pkl')
    checkpoint_schedule = 1  # save at every step
    callbacks = Callbacks(model)
    callbacks.add_callback(SerializeModelCallback(checkpoint_model_path,
                                                  checkpoint_schedule,
                                                  history=2))

    # run the fit all the way through, saving a checkpoint every epoch
    model.fit(train,
              optimizer=opt_gdm,
              num_epochs=num_epochs,
              cost=cost,
              callbacks=callbacks)

    # setup model with same random seed run epoch by epoch
    # serializing and deserializing at each step
    model = gen_model(args.backend)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    # reset data iterators
    train.reset()
    valid.reset()

    checkpoint_model_path = os.path.join('./', 'test_manyshot.pkl')
    checkpoint_schedule = 1  # save at every step
    for epoch in range(num_epochs):
        # _0 points to state at end of epoch 0
        callbacks = Callbacks(model)
        callbacks.add_callback(SerializeModelCallback(checkpoint_model_path,
                                                      checkpoint_schedule,
                                                      history=num_epochs))
        model.fit(train,
                  optimizer=opt_gdm,
                  num_epochs=epoch + 1,
                  cost=cost,
                  callbacks=callbacks)

        # load saved file
        prts = os.path.splitext(checkpoint_model_path)
        fn = prts[0] + '_%d' % epoch + prts[1]
        model.load_params(fn)  # load the saved weights

    # compare test_oneshot_<num_epochs>.pkl to test_manyshot_<num_epochs>.pkl
    if not compare_model_pickles('test_oneshot_%d.pkl' % (num_epochs - 1),
                                 'test_manyshot_%d.pkl' % (num_epochs - 1)):
        print('No Match')
        sys.exit(1)
    else:
        print('Match')
Example 10: Conv
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
Conv((1, 1, 16), **conv),
Pooling(8, op="avg"),
Activation(Softmax())]
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)

if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train_set, eval_set=valid_set, **args.callback_args)
if args.deconv:
    callbacks.add_deconv_callback(train_set, valid_set)
callbacks.add_callback(MetricCallback(valid_set, Misclassification()))

mlp.fit(train_set, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

import logging
logger = logging.getLogger(__name__)
logger.critical('Misclassification error = %.1f%%' %
                (mlp.eval(valid_set, metric=Misclassification()) * 100))
Example 11: GradientDescentMomentum
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_callback [as alias]
    opt_biases = GradientDescentMomentum(args.rate_init[1], args.momentum[1],
                                         schedule=weight_sched, stochastic_round=args.rounding)
    opt_fixed = GradientDescentMomentum(0.0, 1.0, wdecay=0.0)
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases, 'DOG': opt_fixed})

    # configure cost and test metrics
    cost = GeneralizedCost(costfunc=(CrossEntropyBinary()
                                     if train.parser.independent_labels else CrossEntropyMulti()))
    metric = EMMetric(oshape=test.parser.oshape,
                      use_softmax=not train.parser.independent_labels) if test else None

    # configure callbacks
    if not args.neon_progress:
        args.callback_args['progress_bar'] = False
    callbacks = Callbacks(model, eval_set=test, metric=metric, **args.callback_args)
    if not args.neon_progress:
        callbacks.add_callback(EMEpochCallback(args.callback_args['eval_freq'],
                                               train.nmacrobatches), insert_pos=None)
    # xxx - thought of making this an option but not clear that it slows anything down?
    # callbacks.add_hist_callback()  # xxx - not clear what information this conveys
    if args.save_best_path:
        callbacks.add_save_best_state_callback(args.save_best_path)

    model.fit(train, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
    print('Model training complete for %d epochs!' % (args.epochs,))
    # test.stop(); train.stop()

elif args.write_output:
    # write_output mode, must have model loaded
    if args.data_config:
        test = EMDataIterator(args.data_config, write_output=args.write_output,
                              chunk_skip_list=args.chunk_skip_list, dim_ordering=args.dim_ordering,