This page collects typical usage examples of the Python method neon.callbacks.callbacks.Callbacks.add_save_best_state_callback. If you are wondering what Callbacks.add_save_best_state_callback does, how to call it, and what real uses look like, the curated examples below should help. You can also read more about the class it belongs to, neon.callbacks.callbacks.Callbacks.
Six code examples of Callbacks.add_save_best_state_callback are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python examples.
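As the examples below suggest, the callback re-evaluates the model on the eval_set passed to Callbacks and snapshots the model state to the given path whenever the monitored validation result improves. Here is a minimal sketch of the common call pattern (model, train_set, valid_set, optimizer, and cost are hypothetical placeholders for objects built elsewhere; details depend on your neon version):

from neon.callbacks.callbacks import Callbacks

# Evaluate on valid_set periodically and save the best-so-far model state.
callbacks = Callbacks(model, eval_set=valid_set)
callbacks.add_save_best_state_callback('./best_state.pkl')
model.fit(train_set, optimizer=optimizer, num_epochs=10, cost=cost, callbacks=callbacks)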
Example 1: train
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
def train(self, dataset, model=None):
    """Trains the passed model on the given dataset. If no model is passed, `generate_default_model` is used."""
    print("[%s] Starting training..." % self.model_name)
    start = time.time()
    # The training will be run on the CPU. If a GPU is available it should be used instead.
    backend = gen_backend(backend='cpu',
                          batch_size=self.batch_size,
                          rng_seed=self.random_seed,
                          stochastic_round=False)
    cost = GeneralizedCost(
        name='cost',
        costfunc=CrossEntropyMulti())
    optimizer = GradientDescentMomentum(
        learning_rate=self.lrate,
        momentum_coef=0.9)
    # set up the model and experiment
    if not model:
        model = self.generate_default_model(dataset.num_labels)
    args = NeonCallbackParameters()
    args.output_file = os.path.join(self.root_path, self.Callback_Store_Filename)
    args.evaluation_freq = 1
    args.progress_bar = False
    args.epochs = self.max_epochs
    args.save_path = os.path.join(self.root_path, self.Intermediate_Model_Filename)
    args.serialize = 1
    args.history = 100
    args.model_file = None
    callbacks = Callbacks(model, dataset.train(), args, eval_set=dataset.test())
    # add a callback that saves the best model state
    callbacks.add_save_best_state_callback(self.model_path)
    # Uncomment the line below to run on a GPU using the cudanet backend
    # backend = gen_backend(rng_seed=0, gpu='cudanet')
    model.fit(
        dataset.train(),
        optimizer=optimizer,
        num_epochs=self.max_epochs,
        cost=cost,
        callbacks=callbacks)
    print("[%s] Misclassification error = %.1f%%"
          % (self.model_name, model.eval(dataset.test(), metric=Misclassification()) * 100))
    print("[%s] Finished training!" % self.model_name)
    end = time.time()
    print("[%s] Duration in seconds: %.1f" % (self.model_name, end - start))
    return model
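A natural follow-up question, not shown in the excerpt, is how to get the saved state back. A sketch assuming neon's Model.load_params API and a model of the same topology; self.model_path is the path handed to add_save_best_state_callback above:

# Rebuild the architecture, then restore the best validation-time weights.
model = self.generate_default_model(dataset.num_labels)
model.load_params(self.model_path)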
Example 2: train_mlp
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
def train_mlp():
    """
    Train data and save scaling and network weights and biases to file
    to be used by forward prop phase on test data
    """
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # hyperparameters
    num_epochs = args.epochs

    # preprocessor
    std_scale = preprocessing.StandardScaler(with_mean=True, with_std=True)
    # std_scale = feature_scaler(type='Standardizer', with_mean=True, with_std=True)

    # number of non one-hot encoded features, including ground truth
    num_feat = 4

    # load data from csv-files and rescale
    # training
    traindf = pd.DataFrame.from_csv('data/train.csv')
    ncols = traindf.shape[1]
    # tmpmat = std_scale.fit_transform(traindf.as_matrix())
    # print(std_scale.scale_)
    # print(std_scale.mean_)
    tmpmat = traindf.as_matrix()
    # print(tmpmat[:, 1:num_feat])
    tmpmat[:, :num_feat] = std_scale.fit_transform(tmpmat[:, :num_feat])
    X_train = tmpmat[:, 1:]
    y_train = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # validation
    validdf = pd.DataFrame.from_csv('data/validate.csv')
    ncols = validdf.shape[1]
    tmpmat = validdf.as_matrix()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_valid = tmpmat[:, 1:]
    y_valid = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # test
    testdf = pd.DataFrame.from_csv('data/test.csv')
    ncols = testdf.shape[1]
    tmpmat = testdf.as_matrix()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_test = tmpmat[:, 1:]
    y_test = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # set up the training set iterator
    train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
    # set up the validation set iterator
    valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
    # set up the test set iterator
    test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)

    # set up the weight initialization function
    init_norm = Xavier()

    # set up the model layers
    layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
              Dropout(keep=0.5),
              Affine(nout=X_train.shape[1] // 2, init=init_norm, activation=Rectlin()),
              Linear(nout=1, init=init_norm)]

    # set up the cost function as smooth L1 loss
    cost = GeneralizedCost(costfunc=SmoothL1Loss())

    # set up the optimizer (an SGD alternative with an exponential schedule is commented out below)
    # schedule = ExpSchedule(decay=0.3)
    # optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
    optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)

    # initialize the model object
    mlp = Model(layers=layers)

    # configure callbacks
    if args.callback_args['eval_freq'] is None:
        args.callback_args['eval_freq'] = 1
    callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
    callbacks.add_early_stop_callback(stop_func)
    callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))

    # run fit
    mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    # evaluate model
    print('Evaluation Error = %.4f' % (mlp.eval(valid_set, metric=SmoothL1Metric())))
    print('Test set error = %.4f' % (mlp.eval(test_set, metric=SmoothL1Metric())))
# ... part of this example's code is omitted here ...
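One detail worth noting in Example 2: the docstring promises to save the scaling for the forward-prop phase, but the excerpt never persists std_scale. A conventional way to do that is an ordinary pickle dump (a sketch; the filename is hypothetical):

import pickle

# Persist the fitted mean/scale so test-time code can apply the exact same transform.
with open('std_scale.pkl', 'wb') as f:
    pickle.dump(std_scale, f)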
Example 3: tuple
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
# define stopping function
# it takes as input a tuple (State, val[t])
# which describes the cumulative validation state (generated by this function)
# and the validation error at time t
# and returns as output a tuple (State', Bool),
# which represents the new state and whether to stop
# Stop if validation error ever increases from epoch to epoch
def stop_func(s, v):
    if s is None:
        return (v, False)
    return (min(v, s), v > s)

# fit and validate
optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

# configure callbacks
if args.callback_args['eval_freq'] is None:
    args.callback_args['eval_freq'] = 1
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
callbacks.add_early_stop_callback(stop_func)
callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))
mlp.fit(train_set,
        optimizer=optimizer,
        num_epochs=args.epochs,
        cost=cost,
        callbacks=callbacks)
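Because stop_func is a pure function of (state, value), its contract is easy to check outside neon. The loop below (plain Python, no neon required) feeds it a made-up sequence of validation errors and stops at the first epoch-to-epoch increase:

state = None
for err in [0.50, 0.40, 0.35, 0.37]:  # hypothetical validation errors
    state, stop = stop_func(state, err)
    if stop:
        print('stopping: error rose to %.2f' % err)  # fires at 0.37
        break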
Example 4: filter_dataset
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
    # (this excerpt sits inside an enclosing try-block; the matching `try:` is above the excerpt)
    # Create datasets
    X_spec, y_spec, spec_out = filter_dataset(X_train, y_train, cluster)
    X_spec_test, y_spec_test, spec_out = filter_dataset(
        X_test, y_test, cluster)
    spec_out = nout
    spec_set = DataIterator(
        X_spec, y_spec, nclass=spec_out, lshape=(3, 32, 32))
    spec_test = DataIterator(
        X_spec_test, y_spec_test, nclass=spec_out, lshape=(3, 32, 32))

    # Train the specialist
    specialist, opt, cost = spec_net(nout=spec_out, archive_path=gene_path)
    callbacks = Callbacks(specialist, spec_set, args, eval_set=spec_test)
    callbacks.add_early_stop_callback(early_stop)
    callbacks.add_save_best_state_callback(path)
    specialist.fit(spec_set, optimizer=opt,
                   num_epochs=specialist.epoch_index + num_epochs, cost=cost, callbacks=callbacks)

    # Print results
    print('Specialist Train misclassification error: ', specialist.eval(spec_set, metric=Misclassification()))
    print('Specialist Test misclassification error: ', specialist.eval(spec_test, metric=Misclassification()))
    print('Generalist Train misclassification error: ', generalist.eval(spec_set, metric=Misclassification()))
    print('Generalist Test misclassification error: ', generalist.eval(spec_test, metric=Misclassification()))
    # specialists.append(specialist)
    save_obj(specialist.serialize(), path)
except:
    path = confusion_matrix_name + '_' + clustering_name + '_' + str(num_clusters) + 'clusters/'
    print('Failed for ', path)
    failed.append(path)
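Example 4 persists the trained specialist with save_obj(specialist.serialize(), path). Restoring it later would look roughly like this (a sketch assuming neon's neon.util.persist.load_obj and Model.deserialize; the specialist must already be built with the same topology):

from neon.util.persist import load_obj

# Load the pickled state dict and push the weights back into the model.
state = load_obj(path)
specialist.deserialize(state)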
Example 5: HDF5Iterator
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
# (the excerpt opens mid-call: these are the trailing arguments of the training-set
#  HDF5Iterator, assigned to the `train` variable used by model.fit below)
                     validation=False,
                     remove_history=False,
                     minimal_set=False,
                     next_N=3)
valid = HDF5Iterator(filenames,
                     ndata=(16 * 2014),
                     validation=True,
                     remove_history=False,
                     minimal_set=False,
                     next_N=1)
out1, out2, out3 = model.layers.get_terminal()
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))])
schedule = ExpSchedule(decay=(1.0 / 50))  # exponentially decay the learning rate over epochs
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                  momentum_coef=0.9,
                                  stochastic_round=args.rounding,
                                  gradient_clip_value=1,
                                  gradient_clip_norm=5,
                                  wdecay=0.0001,
                                  schedule=schedule)
callbacks = Callbacks(model, eval_set=valid, metric=TopKMisclassification(5), **args.callback_args)
callbacks.add_save_best_state_callback(os.path.join(args.workspace_dir, "best_state_h5resnet.pkl"))
model.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
model.save_params(os.path.join(args.workspace_dir, "final_state_h5resnet.pkl"))
Example 6: GeneralizedCost
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_save_best_state_callback [as alias]
    # (this excerpt sits inside an if/elif over the run mode: the lines above train the
    #  model; the elif branch below handles write_output mode)
    # configure cost and test metrics
    cost = GeneralizedCost(costfunc=(CrossEntropyBinary()
                                     if train.parser.independent_labels else CrossEntropyMulti()))
    metric = EMMetric(oshape=test.parser.oshape, use_softmax=not train.parser.independent_labels) if test else None

    # configure callbacks
    if not args.neon_progress:
        args.callback_args['progress_bar'] = False
    callbacks = Callbacks(model, eval_set=test, metric=metric, **args.callback_args)
    if not args.neon_progress:
        callbacks.add_callback(EMEpochCallback(args.callback_args['eval_freq'], train.nmacrobatches), insert_pos=None)
    # xxx - thought of making this an option but not clear that it slows anything down?
    # callbacks.add_hist_callback()  # xxx - not clear what information this conveys
    if args.save_best_path:
        callbacks.add_save_best_state_callback(args.save_best_path)

    model.fit(train, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
    print('Model training complete for %d epochs!' % (args.epochs,))
    # test.stop(); train.stop()
elif args.write_output:
    # write_output mode, must have model loaded
    if args.data_config:
        test = EMDataIterator(args.data_config, write_output=args.write_output,
                              chunk_skip_list=args.chunk_skip_list, dim_ordering=args.dim_ordering,
                              batch_range=args.test_range, name='test', isTest=True, concatenate_batches=False,
                              NBUF=args.nbebuf, image_in_size=args.image_in_size)
    if hasattr(model, 'batch_meta'):
        test.parser.batch_meta['prior_train_count'] = model.batch_meta['prior_train_count']