This article collects typical usage examples of the Callbacks class from the Python module neon.callbacks.callbacks. If you are unsure what the Callbacks class does or how to use it, the curated examples below should help.
The 15 code examples below are ordered roughly by popularity.
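Before the examples, here is a minimal, self-contained sketch of the typical Callbacks workflow. It is not taken from the examples below: the toy data, layer sizes and hyperparameters are made up, and the keyword style (ArrayIterator, eval_set, eval_freq) assumes a relatively recent neon release (roughly 1.2+), so it may differ from the older DataIterator/valid_set style seen in some examples.

# Minimal Callbacks sketch (assumed neon >= 1.2 style API); data and sizes are made up.
import numpy as np
from neon.backends import gen_backend
from neon.data import ArrayIterator
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
from neon.optimizers import GradientDescentMomentum
from neon.models import Model
from neon.callbacks.callbacks import Callbacks

be = gen_backend(backend='cpu', batch_size=32)

# toy data: 256 samples, 20 features, 3 classes
X = np.random.rand(256, 20)
y = np.random.randint(3, size=256)
train_set = ArrayIterator(X, y, nclass=3)

model = Model(layers=[Affine(nout=64, init=Gaussian(scale=0.01), activation=Rectlin()),
                      Affine(nout=3, init=Gaussian(scale=0.01), activation=Softmax())])

# Callbacks bundles the progress bar, periodic evaluation and any extra callbacks
# registered via callbacks.add_callback(...); here the training set doubles as the eval set.
callbacks = Callbacks(model, eval_set=train_set, eval_freq=1)

model.fit(train_set,
          cost=GeneralizedCost(costfunc=CrossEntropyMulti()),
          optimizer=GradientDescentMomentum(0.1, momentum_coef=0.9),
          num_epochs=2,
          callbacks=callbacks)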
Example 1: train
def train(self, dataset, model=None):
    """Trains the passed model on the given dataset. If no model is passed, `generate_default_model` is used."""
    print("[%s] Starting training..." % self.model_name)
    start = time.time()

    # The training will be run on the CPU. If a GPU is available it should be used instead.
    backend = gen_backend(backend='cpu',
                          batch_size=self.batch_size,
                          rng_seed=self.random_seed,
                          stochastic_round=False)

    cost = GeneralizedCost(
        name='cost',
        costfunc=CrossEntropyMulti())

    optimizer = GradientDescentMomentum(
        learning_rate=self.lrate,
        momentum_coef=0.9)

    # set up the model and experiment
    if not model:
        model = self.generate_default_model(dataset.num_labels)

    args = NeonCallbackParameters()
    args.output_file = os.path.join(self.root_path, self.Callback_Store_Filename)
    args.evaluation_freq = 1
    args.progress_bar = False
    args.epochs = self.max_epochs
    args.save_path = os.path.join(self.root_path, self.Intermediate_Model_Filename)
    args.serialize = 1
    args.history = 100
    args.model_file = None

    callbacks = Callbacks(model, dataset.train(), args, eval_set=dataset.test())

    # add a callback that saves the best model state
    callbacks.add_save_best_state_callback(self.model_path)

    # Uncomment line below to run on GPU using cudanet backend
    # backend = gen_backend(rng_seed=0, gpu='cudanet')
    model.fit(
        dataset.train(),
        optimizer=optimizer,
        num_epochs=self.max_epochs,
        cost=cost,
        callbacks=callbacks)

    print("[%s] Misclassification error = %.1f%%"
          % (self.model_name, model.eval(dataset.test(), metric=Misclassification()) * 100))
    print("[%s] Finished training!" % self.model_name)
    end = time.time()
    print("[%s] Duration in seconds: %.1f" % (self.model_name, end - start))
    return model
Example 2: train
def train(args, hyper_params, model, opt, data_set):
    # setup cost function as CrossEntropy
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    callbacks = Callbacks(model, **args.callback_args)
    callbacks.add_callback(EpochEndCallback())
    data_set.set_mode('train')
    model.fit(data_set, optimizer=opt,
              num_epochs=hyper_params.num_epochs, cost=cost, callbacks=callbacks)
    return
Example 3: deserialize
def deserialize(fn, datasets=None, inference=False):
    """
    Helper function to load all objects from a serialized file,
    this includes callbacks and datasets as well as the model, layers,
    etc.

    Arguments:
        datasets (DataSet, optional): If the dataset is not serialized
                                      in the file it can be passed in
                                      as an argument.  This will also
                                      override any dataset in the serialized
                                      file
        inference (bool, optional): if true only the weights will be loaded, not
                                    the states

    Returns:
        Model: the model object
        Dataset: the data set object
        Callback: the callbacks
    """
    config_dict = load_obj(fn)

    if datasets is not None:
        logger.warn('Ignoring datasets serialized in archive file %s' % fn)
    elif 'datasets' in config_dict:
        ds_cls = load_class(config_dict['datasets']['type'])
        dataset = ds_cls.gen_class(config_dict['datasets']['config'])
        datasets = dataset.gen_iterators()

    if 'train' in datasets:
        data_iter = datasets['train']
    else:
        key = datasets.keys()[0]
        data_iter = datasets[key]
        logger.warn('Could not find training set iterator, '
                    'using %s instead' % key)

    model = Model(config_dict, data_iter)

    callbacks = None
    if 'callbacks' in config_dict:
        # run through the callbacks looking for dataset objects
        # replace them with the corresponding data set above
        cbs = config_dict['callbacks']['callbacks']
        for cb in cbs:
            if 'config' not in cb:
                cb['config'] = {}
            for arg in cb['config']:
                if type(cb['config'][arg]) is dict and 'type' in cb['config'][arg]:
                    if cb['config'][arg]['type'] == 'Data':
                        key = cb['config'][arg]['name']
                        if key in datasets:
                            cb['config'][arg] = datasets[key]
                        else:
                            cb['config'][arg] = None
        # now we can generate the callbacks
        callbacks = Callbacks.load_callbacks(config_dict['callbacks'], model)
    return (model, dataset, callbacks)
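A brief usage sketch for the helper above; the archive filename here is hypothetical:

# reload the model, data iterators and callbacks from a previously saved archive
model, dataset, callbacks = deserialize('trained_model_archive.prm')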
Example 4: main
def main():
    # setup the model and run for num_epochs saving the last state only
    # this is at the top so that the be is generated
    mlp = gen_model(args.backend)

    # setup data iterators
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
    if args.backend == 'nervanacpu' or args.backend == 'cpu':
        # limit data since cpu backend runs slower
        train = DataIterator(X_train[:1000], y_train[:1000], nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test[:1000], y_test[:1000], nclass=nclass, lshape=(1, 28, 28))
    else:
        train = DataIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

    # serialization related
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    checkpoint_model_path = os.path.join('./', 'test_oneshot.pkl')
    checkpoint_schedule = 1  # save at every step
    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path, history=2)

    # run the fit all the way through, saving a checkpoint each epoch
    mlp.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

    # setup model with same random seed run epoch by epoch
    # serializing and deserializing at each step
    mlp = gen_model(args.backend)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    # reset data iterators
    train.reset()
    valid.reset()

    checkpoint_model_path = os.path.join('./', 'test_manyshot.pkl')
    checkpoint_schedule = 1  # save at every step
    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule,
                                     checkpoint_model_path,
                                     history=num_epochs)
    for epoch in range(num_epochs):
        # _0 points to state at end of epoch 0
        mlp.fit(train, optimizer=opt_gdm, num_epochs=epoch + 1, cost=cost, callbacks=callbacks)

        # load saved file
        prts = os.path.splitext(checkpoint_model_path)
        fn = prts[0] + '_%d' % epoch + prts[1]
        mlp.load_weights(fn)  # load the saved weights

    # compare test_oneshot_<num_epochs>.pkl to test_manyshot_<num_epochs>.pkl
    try:
        compare_model_pickles('test_oneshot_%d.pkl' % (num_epochs - 1),
                              'test_manyshot_%d.pkl' % (num_epochs - 1))
    except:
        print('test failed....')
        sys.exit(1)
Example 5: GeneralizedCost
layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=1000, init=init1, bias=Constant(-7), activation=Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

mlp = Model(layers=layers)

if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print(", ".join(allmetrics.metric_names) + ": " + ", ".join(map(str, stats.flatten())))

    callbacks.add_callback(TopKMetrics(test))
Example 6: str
    print('Training specialist: ', i)
    path = EXPERIMENT_DIR + confusion_matrix_name + '_' + clustering_name + '_' + str(num_clusters) + 'clusters/' + 'specialist' + '_' + str(i) + '.prm'

    # Create datasets
    X_spec, y_spec, spec_out = filter_dataset(X_train, y_train, cluster)
    X_spec_test, y_spec_test, spec_out = filter_dataset(
        X_test, y_test, cluster)
    spec_out = nout
    spec_set = DataIterator(
        X_spec, y_spec, nclass=spec_out, lshape=(3, 32, 32))
    spec_test = DataIterator(
        X_spec_test, y_spec_test, nclass=spec_out, lshape=(3, 32, 32))

    # Train the specialist
    specialist, opt, cost = spec_net(nout=spec_out, archive_path=gene_path)
    callbacks = Callbacks(specialist, spec_set, args, eval_set=spec_test)
    callbacks.add_early_stop_callback(early_stop)
    callbacks.add_save_best_state_callback(path)
    specialist.fit(spec_set, optimizer=opt,
                   num_epochs=specialist.epoch_index + num_epochs, cost=cost, callbacks=callbacks)

    # Print results
    print('Specialist Train misclassification error: ', specialist.eval(spec_set, metric=Misclassification()))
    print('Specialist Test misclassification error: ', specialist.eval(spec_test, metric=Misclassification()))
    print('Generalist Train misclassification error: ', generalist.eval(spec_set, metric=Misclassification()))
    print('Generalist Test misclassification error: ', generalist.eval(spec_test, metric=Misclassification()))

    # specialists.append(specialist)
    save_obj(specialist.serialize(), path)
except:
    path = confusion_matrix_name + '_' + clustering_name + '_' + str(num_clusters) + 'clusters/'
    print('Failed for ', path)
Example 7: GRU
output_size = 8
N = 120  # number of memory locations
M = 8  # size of a memory location

# model initialization
layers = [
    GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic()),
    Affine(train_set.nout, init, bias=init, activation=Logistic())
]
cost = GeneralizedCostMask(costfunc=CrossEntropyBinary())
model = Model(layers=layers)
optimizer = RMSProp(gradient_clip_value=gradient_clip_value,
                    stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)

# we can use the training set as the validation set,
# since the data is synthetically generated
callbacks.add_watch_ticker_callback(train_set)

# train model
model.fit(train_set,
          optimizer=optimizer,
          num_epochs=args.epochs,
          cost=cost,
          callbacks=callbacks)
Example 8: RBM
# setup optimizer
optimizer = {'momentum': [0],
             'step_config': 1,
             'learning_rate': 0.1,
             'weight_decay': 0}

# initialize model object
rbm = RBM(layers=layers)
if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    rbm.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(rbm, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

# add a callback to calculate
if args.serialize > 0:
    # add callback for saving checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

rbm.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)

for mb_idx, (x_val, y_val) in enumerate(valid_set):
    hidden = rbm.fprop(x_val)
    break
Example 9: Pooling
          Pooling(3, strides=2)]

# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths
# of 64, 128, 256, 512
nfms = list(itt.chain.from_iterable(
    [itt.repeat(2**(x + 6), r) for x, r in enumerate(stages)]))
strides = [-1] + [1 if cur == prev else 2 for cur,
                  prev in zip(nfms[1:], nfms[:-1])]

for nfm, stride in zip(nfms, strides):
    layers.append(module_factory(nfm, stride))
layers.append(Pooling('all', op='avg'))
layers.append(Conv(**conv_params(1, train.nclass, relu=False)))
layers.append(Activation(Softmax()))
model = Model(layers=layers)

weight_sched = Schedule([30, 60], 0.1)
opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001, schedule=weight_sched)

# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=test, metric=valmetric, **args.callback_args)
callbacks.add_callback(BatchNormTuneCallback(tune), insert_pos=0)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
Example 10: GeneralizedCost
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())

# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)

# initialize model object
mlp = Model(layers=layers)
if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    mlp.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

# add a callback to calculate the validation error
if args.validation_freq:
    # setup validation trial callbacks
    callbacks.add_validation_callback(valid_set, args.validation_freq)

if args.serialize > 0:
    # add callback for saving checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

# run fit
Example 11: MNIST
random_seed = args.rng_seed if args.rng_seed else 0

# load up the mnist data set, padding images to size 32
dataset = MNIST(path=args.data_dir, sym_range=True, size=32, shuffle=True)
train = dataset.train_iter

# create a GAN
model, cost = create_model(dis_model=args.dmodel, gen_model=args.gmodel,
                           cost_type='wasserstein', noise_type='normal',
                           im_size=32, n_chan=1, n_noise=128,
                           n_gen_ftr=args.n_gen_ftr, n_dis_ftr=args.n_dis_ftr,
                           depth=4, n_extra_layers=4,
                           batch_norm=True, dis_iters=5,
                           wgan_param_clamp=0.01, wgan_train_sched=True)

# setup optimizer
optimizer = RMSProp(learning_rate=2e-4, decay_rate=0.99, epsilon=1e-8)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
fdir = ensure_dirs_exist(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results/'))
fname = os.path.splitext(os.path.basename(__file__))[0] + \
    '_[' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ']'
im_args = dict(filename=os.path.join(fdir, fname), hw=32,
               num_samples=args.batch_size, nchan=1, sym_range=True)
callbacks.add_callback(GANPlotCallback(**im_args))
callbacks.add_callback(GANCostCallback())

# model fit
model.fit(train, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Example 12: StepSchedule
                 weights=[1, 1, 1])

# setup optimizer
schedule_w = StepSchedule(step_config=[5], change=[0.001 / 10])
schedule_b = StepSchedule(step_config=[5], change=[0.002 / 10])

opt_w = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005, schedule=schedule_w)
opt_b = GradientDescentMomentum(0.002, 0.9, wdecay=0.0005, schedule=schedule_b)
opt_skip = GradientDescentMomentum(0.0, 0.0)

optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b,
                            'skip': opt_skip, 'skip_bias': opt_skip})

# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
    util.load_vgg_all_weights(model, args.data_dir)

callbacks = Callbacks(model, eval_set=train_set, **args.callback_args)
callbacks.add_callback(TrainMulticostCallback())

# model.benchmark(train_set, optimizer=optimizer, cost=cost)
model.fit(train_set, optimizer=optimizer, cost=cost, num_epochs=args.epochs, callbacks=callbacks)

# Scale the bbox regression branch linear layer weights before saving the model
model = util.scale_bbreg_weights(model, [0.0, 0.0, 0.0, 0.0],
                                 [0.1, 0.1, 0.2, 0.2], train_set.num_classes)

if args.save_path is not None:
    save_obj(model.serialize(keep_states=True), args.save_path)
Example 13: train_mlp
def train_mlp():
    """
    Train data and save scaling and network weights and biases to file
    to be used by forward prop phase on test data
    """
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # hyperparameters
    num_epochs = args.epochs

    # preprocessor
    std_scale = preprocessing.StandardScaler(with_mean=True, with_std=True)
    # std_scale = feature_scaler(type='Standardizer', with_mean=True, with_std=True)

    # number of non one-hot encoded features, including ground truth
    num_feat = 4

    # load data from csv files and rescale
    # training
    traindf = pd.DataFrame.from_csv('data/train.csv')
    ncols = traindf.shape[1]
    # tmpmat = std_scale.fit_transform(traindf.as_matrix())
    # print std_scale.scale_
    # print std_scale.mean_
    tmpmat = traindf.as_matrix()
    # print tmpmat[:, 1:num_feat]
    tmpmat[:, :num_feat] = std_scale.fit_transform(tmpmat[:, :num_feat])
    X_train = tmpmat[:, 1:]
    y_train = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # validation
    validdf = pd.DataFrame.from_csv('data/validate.csv')
    ncols = validdf.shape[1]
    tmpmat = validdf.as_matrix()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_valid = tmpmat[:, 1:]
    y_valid = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # test
    testdf = pd.DataFrame.from_csv('data/test.csv')
    ncols = testdf.shape[1]
    tmpmat = testdf.as_matrix()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_test = tmpmat[:, 1:]
    y_test = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # setup a training set iterator
    train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
    # setup a validation data set iterator
    valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
    # setup a test data set iterator
    test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)

    # setup weight initialization function
    init_norm = Xavier()

    # setup model layers
    layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
              Dropout(keep=0.5),
              Affine(nout=X_train.shape[1] // 2, init=init_norm, activation=Rectlin()),
              Linear(nout=1, init=init_norm)]

    # setup cost function as Smooth L1 loss
    cost = GeneralizedCost(costfunc=SmoothL1Loss())

    # setup optimizer
    # schedule = ExpSchedule(decay=0.3)
    # optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
    optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)

    # initialize model object
    mlp = Model(layers=layers)

    # configure callbacks
    if args.callback_args['eval_freq'] is None:
        args.callback_args['eval_freq'] = 1
    callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
    callbacks.add_early_stop_callback(stop_func)
    callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))

    # run fit
    mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    # evaluate model
    print('Evaluation Error = %.4f' % (mlp.eval(valid_set, metric=SmoothL1Metric())))
    print('Test set error = %.4f' % (mlp.eval(test_set, metric=SmoothL1Metric())))
# ......... part of the code is omitted here .........
Example 14: Uniform
init = Uniform(low=-0.08, high=0.08)

# model initialization
layers = [
    LSTM(hidden_size, init, Logistic(), Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = RMSProp(clip_gradients=clip_gradients, stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar,
                      valid_set=valid_set, valid_freq=1)
callbacks.add_serialize_callback(1, args.save_path)

# fit and validate
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost, callbacks=callbacks)


def sample(prob):
    """
    Sample index from probability distribution
    """
    prob = prob / (prob.sum() + 1e-6)
    return np.argmax(np.random.multinomial(1, prob, 1))

# Set batch size and time_steps to 1 for generation and reset buffers
Example 15: HDF5Iterator
                     validation=False,
                     remove_history=False,
                     minimal_set=False,
                     next_N=3)
valid = HDF5Iterator(filenames,
                     ndata=(16 * 2014),
                     validation=True,
                     remove_history=False,
                     minimal_set=False,
                     next_N=1)

out1, out2, out3 = model.layers.get_terminal()

cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))])

schedule = ExpSchedule(decay=(1.0 / 50))  # halve the learning rate every 50 epochs
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                  momentum_coef=0.9,
                                  stochastic_round=args.rounding,
                                  gradient_clip_value=1,
                                  gradient_clip_norm=5,
                                  wdecay=0.0001,
                                  schedule=schedule)

callbacks = Callbacks(model, eval_set=valid, metric=TopKMisclassification(5), **args.callback_args)
callbacks.add_save_best_state_callback(os.path.join(args.workspace_dir, "best_state_h5resnet.pkl"))

model.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
model.save_params(os.path.join(args.workspace_dir, "final_state_h5resnet.pkl"))