本文整理汇总了Python中trainer.Trainer.train方法的典型用法代码示例。如果您正苦于以下问题:Python Trainer.train方法的具体用法?Python Trainer.train怎么用?Python Trainer.train使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类trainer.Trainer的用法示例。
在下文中一共展示了Trainer.train方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Test_Trainer
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
class Test_Trainer(unittest.TestCase):
    """Integration test: build an SSD300 model and run one training epoch."""

    def setUp(self):
        # 20 VOC object classes + 1 background class.
        self.class_number = 21
        self.input_shape = (300, 300, 3)
        self.model = SSD300v2(self.input_shape, num_classes=self.class_number)

    def test_train(self):
        base_lr = 3e-4
        # Freeze the early VGG layers so only the SSD head is updated.
        self.trainer = Trainer(class_number=self.class_number,
                               input_shape=self.input_shape,
                               priors_file='prior_boxes_ssd300.pkl',
                               train_file='VOC2007_test.pkl',
                               path_prefix='./VOCdevkit/VOC2007/JPEGImages/',
                               model=self.model,
                               weight_file='weights_SSD300.hdf5',
                               freeze=('input_1', 'conv1_1', 'conv1_2', 'pool1',
                                       'conv2_1', 'conv2_2', 'pool2',
                                       'conv3_1', 'conv3_2', 'conv3_3', 'pool3'),
                               save_weight_file='./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',  # noqa
                               optim=keras.optimizers.Adam(lr=base_lr),
                               )
        self.trainer.train(nb_epoch=1)

    def tearDown(self):
        # BUG FIX: the method was named ``teardown`` (lowercase d), so
        # unittest never invoked it and the log directory leaked.  Also,
        # subprocess.call() never raises CalledProcessError (only
        # check_call does), so the old except clause was dead code.
        import shutil
        trainer = getattr(self, 'trainer', None)
        if trainer is not None:
            shutil.rmtree(trainer.log_dir, ignore_errors=True)
示例2: main
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def main(config):
    """Entry point: run training or testing depending on ``config.is_train``.

    The BatchManager implementation is chosen lazily from the dataset name;
    an unrecognized dataset leaves ``BatchManager`` unbound (NameError), as
    before.
    """
    prepare_dirs_and_logger(config)
    save_config(config)

    dataset = config.dataset
    if config.is_train:
        from trainer import Trainer
        if dataset == 'line':
            from data_line import BatchManager
        elif dataset == 'ch':
            from data_ch import BatchManager
        elif dataset == 'kanji':
            from data_kanji import BatchManager
        elif dataset in ('baseball', 'cat'):
            from data_qdraw import BatchManager
        trainer = Trainer(config, BatchManager(config))
        trainer.train()
    else:
        from tester import Tester
        if dataset == 'line':
            from data_line import BatchManager
        elif dataset == 'ch':
            from data_ch import BatchManager
        elif dataset == 'kanji':
            from data_kanji import BatchManager
        elif dataset in ('baseball', 'cat'):
            from data_qdraw import BatchManager
        tester = Tester(config, BatchManager(config))
        tester.test()
示例3: main
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def main(_):
    """Train or evaluate a pointer network on a TSP task.

    Reads flags from the module-level ``config``; the positional argument
    (supplied by tf.app.run) is ignored.
    """
    prepare_dirs_and_logger(config)

    if not config.task.lower().startswith('tsp'):
        raise Exception("[!] Task should starts with TSP")

    # Default both sequence-length caps to the data length when unset.
    for attr in ('max_enc_length', 'max_dec_length'):
        if getattr(config, attr) is None:
            setattr(config, attr, config.max_data_length)

    # Seed both the NumPy RNG handed to the trainer and TensorFlow's graph RNG.
    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)

    trainer = Trainer(config, rng)
    save_config(config.model_dir, config)

    if config.is_train:
        trainer.train()
    elif not config.load_path:
        raise Exception("[!] You should specify `load_path` to load a pretrained model")
    else:
        trainer.test()

    tf.logging.info("Run finished.")
示例4: tesT_TrainingOnSentances
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def test_TrainingOnSentances(self):
    """Smoke-test: training an RNN on the corpus sentences runs to completion.

    BUG FIX: the method was named ``tesT_TrainingOnSentances`` -- the
    mis-cased ``tesT_`` prefix hid it from unittest/nose test discovery,
    so the test never actually ran.  No other code referenced the old name.
    """
    corpus = Corpus(self.txt)
    # 100 hidden units, vocabulary-sized output, 50-dim embedding --
    # TODO(review): confirm the RNN(...) argument order against its definition.
    rnn = RNN(100, corpus.V, 50)
    trainer = Trainer(corpus, rnn, nepochs=50, alpha=1.8)
    trainer.train()
示例5: TrainerTest
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
class TrainerTest(unittest.TestCase):
    """Unit tests for Trainer over empty and populated databases."""

    def setUp(self):
        from trainer import Trainer
        from database import TrainingDataBase, WordDataBase, WordRecord

        # A trainer backed by completely empty databases.
        self.tr_empty = Trainer(WordDataBase(), TrainingDataBase())

        # A trainer backed by a three-word vocabulary and two samples.
        word_db = WordDataBase()
        for token in ("aaa", "bbb", "ccc"):
            word_db.addWord(WordRecord(token))

        training_db = TrainingDataBase()
        training_db.add([WordRecord("aaa"), WordRecord("bbb"), WordRecord("ccc")],
                        [WordRecord("ccc"), WordRecord("bbb")])
        training_db.add([WordRecord("aaa"), WordRecord("ccc")],
                        [WordRecord("ccc"), WordRecord("ccc")])
        self.tr_notempty = Trainer(word_db, training_db)

    def test_init_invalidinput(self):
        from trainer import Trainer, TrainerException
        with self.assertRaises(TrainerException):
            Trainer(None, None)

    def test_train_invalidinput(self):
        from trainer import TrainerException
        with self.assertRaises(TrainerException):
            self.tr_empty.train(None)

    def test_train_validinput_empty_neuralbrain(self):
        from neural import NeuralBrain
        from trainer import TrainerException
        with self.assertRaises(TrainerException):
            self.tr_empty.train(NeuralBrain())

    def test_train_validinput_neuralbrain(self):
        from neural import NeuralBrain
        from trainer import TrainerException
        self.tr_notempty.train(NeuralBrain())

    def test_train_validinput_empty_lookuptablebrain(self):
        from neural import LookUpTableBrain
        from trainer import TrainerException
        with self.assertRaises(TrainerException):
            self.tr_empty.train(LookUpTableBrain())

    def test_train_validinput_lookuptablebrain(self):
        from neural import LookUpTableBrain
        from trainer import TrainerException
        self.tr_notempty.train(LookUpTableBrain())

    def test_prepareDataSet(self):
        # Words index in insertion order: aaa=0, bbb=1, ccc=2 -- presumably;
        # the expected tuples below encode the two samples added in setUp.
        dataset = self.tr_notempty._prepareDataSet()
        self.assertIn(((0, 1, 2), (2, 1)), dataset.items())
        self.assertIn(((0, 2), (2, 2)), dataset.items())
示例6: train
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def train(self,
          training_set_x,
          training_set_y,
          hyper_parameters,
          regularization_methods,
          activation_method,
          top=50,
          print_verbose=False,
          validation_set_x=None,
          validation_set_y=None):
    """Greedily build and train a StackedDoubleEncoder on paired (x, y) data.

    Parameters
    ----------
    training_set_x, training_set_y : array-like, samples are rows
        (shape[1] is used as the input width) -- TODO confirm layout.
    hyper_parameters : object carrying batch_size, layer_sizes, method_in,
        method_out (and whatever Trainer.train reads).
    regularization_methods : passed through to Trainer.train.
    activation_method : activation used by the stacked encoder.
    top : unused in this body -- NOTE(review): confirm before removing.
    print_verbose, validation_set_x, validation_set_y : forwarded to
        Trainer.train.

    Returns
    -------
    The trained StackedDoubleEncoder.
    """
    # need to convert the input into tensor variable
    training_set_x = shared(training_set_x, 'training_set_x', borrow=True)
    training_set_y = shared(training_set_y, 'training_set_y', borrow=True)

    # Start from an empty stack; layers are appended one at a time below.
    symmetric_double_encoder = StackedDoubleEncoder(hidden_layers=[],
                                                    numpy_range=self._random_range,
                                                    input_size_x=training_set_x.get_value(borrow=True).shape[1],
                                                    input_size_y=training_set_y.get_value(borrow=True).shape[1],
                                                    batch_size=hyper_parameters.batch_size,
                                                    activation_method=activation_method)

    params = []

    # In this phase we train the stacked encoder one layer at a time;
    # once a layer was added, weights not belonging to the new layer are
    # not changed.
    for layer_size in hyper_parameters.layer_sizes:
        self._add_cross_encoder_layer(layer_size,
                                      symmetric_double_encoder,
                                      hyper_parameters.method_in,
                                      hyper_parameters.method_out)

        # Rebuild the trainable-parameter list for the enlarged stack.
        params = []
        for layer in symmetric_double_encoder:
            params.append(layer.Wx)
            params.append(layer.bias_x)
            params.append(layer.bias_y)

        # Reconstruction biases at the two ends, plus the last layer's Wy.
        params.append(symmetric_double_encoder[0].bias_x_prime)
        params.append(symmetric_double_encoder[-1].bias_y_prime)
        params.append(symmetric_double_encoder[-1].Wy)

        # NOTE(review): indentation was lost in this source; Trainer.train is
        # assumed to run after each added layer (cascaded training, matching
        # the sibling implementation elsewhere in this file) -- confirm.
        Trainer.train(train_set_x=training_set_x,
                      train_set_y=training_set_y,
                      hyper_parameters=hyper_parameters,
                      symmetric_double_encoder=symmetric_double_encoder,
                      params=params,
                      regularization_methods=regularization_methods,
                      print_verbose=print_verbose,
                      validation_set_x=validation_set_x,
                      validation_set_y=validation_set_y)

    return symmetric_double_encoder
示例7: train
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def train(*args):
    """
    trains the model based on files in the input folder

    ``args[0]`` is the positional-argument list from the CLI parser; its
    first entry is the directory of model files.  ``options`` is a
    module-level namespace providing the output path -- TODO confirm.
    """
    input_folder = args[0][0]
    if not input_folder:
        # FIX: use the print() function -- the original used a Python-2-only
        # print statement; this spelling works on both Python 2 and 3.
        print("Must specify a directory of models")
        return
    trainer = Trainer(input_folder, options.output)
    trainer.train()
示例8: main_train
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def main_train(featureSet, options, input=sys.stdin):
    """Train a tagger model: collect events, apply the feature cutoff, train, save.

    featureSet: feature definitions handed to Trainer.
    options: parsed CLI namespace; usedFeats / inFeatFile / outFeatFile
        select the input mode.
    input: raw-input stream (default stdin) used when no featurized file
        is given.  (The parameter name shadows the builtin; kept for
        interface compatibility.)
    """
    optionsDict = vars(options)
    if options.usedFeats:
        # FIX: the builtin ``file`` was removed in Python 3; ``open`` is the
        # equivalent call on both Python 2 and 3.
        # NOTE(review): the handle is intentionally handed to Trainer still
        # open; it is never closed here -- confirm Trainer consumes it.
        optionsDict['usedFeats'] = open(options.usedFeats)
    trainer = Trainer(featureSet, optionsDict)
    if options.inFeatFile:
        # Use pre-featurized input.
        trainer.getEventsFromFile(options.inFeatFile)
    else:
        # Featurize the raw stream (optionally echoing features to a file).
        trainer.getEvents(input, options.outFeatFile)
    trainer.cutoffFeats()
    trainer.train()
    trainer.save()
示例9: train
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def train(self):
    """
    Trains Jarvis brain

    Input:
        Nothing
    Returns:
        Nothing
    Raises:
        JarvisException: if the word dictionary or training database is unset.
    """
    from trainer import Trainer
    # FIX: compare against None by identity (``is None``), not equality
    # (``== None``) -- equality can be overridden and is non-idiomatic.
    if self._word_db is None:
        raise JarvisException("Don't have dictionary.")
    if self._traning_db is None:
        raise JarvisException("Don't have traning database.")
    trainer = Trainer(self._word_db, self._traning_db)
    trainer.train(self._brain)
示例10: GeneralizedBoltzmann
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
class GeneralizedBoltzmann(GeneralizedModel):
    """Generalized Boltzmann machine trained by contrastive-divergence variants.

    ``trainfn`` selects the gradient estimator: 'cdn' (CD-n), 'pcd'
    (persistent CD) or 'fpcd' (fast-weight persistent CD).  ``propup`` /
    ``propdown`` / ``grad`` and the persistent-chain buffer ``self.p`` are
    provided elsewhere (presumably by subclasses) -- confirm.
    """

    # Hyper-parameter attribute names, presumably consumed by
    # GeneralizedModel for serialization/cloning -- confirm.
    attrs_ = ['trainfn', 'n', 'batch_size', 'epochs', 'learn_rate', 'beta', 'momentum', 'verbose']

    def __init__(self, trainfn='cdn', n=1, batch_size=10, epochs=1, learn_rate=0.1,
                 beta=0.0001, momentum=0., verbose=False):
        self.trainfn = trainfn        # gradient estimator: 'cdn' | 'pcd' | 'fpcd'
        self.epochs = epochs
        self.n = n                    # Gibbs steps per gradient estimate
        self.learn_rate = learn_rate
        self.beta = beta              # weight decay -- presumably; confirm
        self.batch_size = batch_size
        self.momentum = momentum
        self.trainer = Trainer()      # generic optimizer driving train()
        self.verbose = verbose

    def gibbs_hvh(self, h, mf=False, **args):
        """One Gibbs step h -> v -> h; mf=True uses mean-field values."""
        v_samples = self.propdown(h, **args)
        # [0][1] looks like the mean-field value, [0][0] the stochastic
        # sample -- confirm against propdown's return convention.
        v = v_samples[0][1] if mf else v_samples[0][0]
        h_samples = self.propup(v, **args)
        return v_samples, h_samples

    def gibbs_vhv(self, v, mf=False, **args):
        """One Gibbs step v -> h -> v; mf=True uses mean-field values."""
        h_samples = self.propup(v, **args)
        h = h_samples[-1][1] if mf else h_samples[-1][0]
        v_samples = self.propdown(h, **args)
        return v_samples, h_samples

    def cost(self, v):
        """Return (reconstruction cost, gradients) for a batch of visibles.

        Runs a positive phase on ``v`` and ``self.n`` negative-phase Gibbs
        steps, starting from the persistent chain when trainfn is
        'pcd'/'fpcd'.  Assumes self.n >= 1 (neg_v_samples would otherwise
        be unbound) -- NOTE(review): confirm.
        """
        # Promote a single sample to a 1-row batch in place.
        if len(np.shape(v)) == 1: v.shape = (1,len(v))
        use_fw = self.trainfn == 'fpcd'           # fast weights only for FPCD
        use_persist = use_fw or self.trainfn == 'pcd'
        num_points = v.shape[0]
        # positive phase
        pos_h_samples = self.propup(v)
        # negative phase: seed from the persistent chain, or from the data's
        # hidden sample for plain CD-n.
        nh0 = self.p[:num_points] if use_persist else pos_h_samples[-1][0]
        for i in range(self.n):
            neg_v_samples, neg_h_samples = self.gibbs_hvh(nh0, fw=use_fw)
            nh0 = neg_h_samples[-1][0]
        # compute gradients
        grads = self.grad(v, pos_h_samples, neg_v_samples, neg_h_samples)
        # Persist the chain state for the next call (no-op for plain CD).
        self.p[:num_points] = nh0
        # compute reconstruction error
        if self.trainfn=='cdn':
            cost = np.sum(np.square(v - neg_v_samples[0][1])) / self.batch_size
        else:
            # Persistent variants measure error via a fresh v->h->v pass.
            cost = np.sum(np.square(v - self.gibbs_vhv(v)[0][0][1])) / self.batch_size
        return cost, grads

    def train(self, data, max_iter=1):
        """Delegate optimization of this model on ``data`` to self.trainer."""
        args = { 'epochs': self.epochs,
                 'batch_size': self.batch_size,
                 'max_iter': max_iter,
                 'verbose': self.verbose }
        return self.trainer.train(self, data, **args)
示例11: mainTrain
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def mainTrain(featureSet, options):
    """Train a tagger, or emit feature diagnostics, depending on options['task'].

    options is a dict carrying input/output streams and mode flags:
    'most-informative-features' lists ranked features; 'toCRFsuite' exports
    the featurized data; otherwise a model is trained and saved.
    """
    trainer = Trainer(featureSet, options)
    if 'inFeatFile' in options and options['inFeatFile']:
        # Use with featurized input
        trainer.getEventsFromFile(options['inFeatFile'])
    else:  # Use with raw input
        trainer.getEvents(options['inputStream'])
    if options['task'] == 'most-informative-features':
        trainer.cutoffFeats()
        trainer.mostInformativeFeatures(options['outputStream'])
    elif 'toCRFsuite' in options and options['toCRFsuite']:
        trainer.cutoffFeats()
        trainer.toCRFsuite(options['outputStream'])
        # NOTE(review): indentation was lost in this source; this save() is
        # assumed to belong to the toCRFsuite branch (it precedes the else
        # in the original line order) -- confirm against upstream.
        trainer.save()
    else:
        trainer.cutoffFeats()
        trainer.train()
        trainer.save()
示例12: route_command
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def route_command(args):
    """
    Routes the different commands out to the respective module that will handle the classification

    args: argv-style list; args[1] is the command flag and args[2] its
    argument (only --train uses one).  Returns None; output is printed.
    """
    print(args)
    if len(args) <= 1:
        print("Enter the flag --help to get a list of commands")
    else:
        command = args[1]
        if command == "--train":
            trainer = Trainer()
            trainer.train(args[2])
        elif command == "--classify":
            # TODO: Implement the testing phase of the classifier
            pass
        elif command == "--help":
            # TODO: List all the commands once implemented
            pass
        else:
            # FIX: corrected the user-facing typo "recongize" -> "recognize".
            print("Could not recognize the command " + command)
            print("Please try help for more commands.")
示例13: train_and_test
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def train_and_test(image_loader, feature_extractor):
    """
    Simple implementation of train and test function
    :param image_loader:
    :param feature_extractor:
    """
    # Per-class train/test splits for the two classes under comparison.
    train_a, test_a = get_train_and_test_data(params.first_class_params)
    train_b, test_b = get_train_and_test_data(params.second_class_params)

    # Pool and shuffle the training halves before fitting the SVM.
    pooled_train = list(train_a) + list(train_b)
    random.shuffle(pooled_train)
    solver = Trainer(image_loader, feature_extractor).train(pooled_train, params.svm_params)

    # Evaluate the fitted solver on the pooled test halves.
    pooled_test = list(test_a) + list(test_b)
    return Tester(image_loader, solver).test(pooled_test)
示例14: train
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
def train(self,
          training_set_x,
          training_set_y,
          hyper_parameters,
          regularization_methods,
          activation_method,
          print_verbose=False,
          validation_set_x=None,
          validation_set_y=None,
          dir_name=None,
          import_net=False,
          import_path=''):
    """Build (or resume) a StackedDoubleEncoder and train it layer by layer.

    When ``import_net`` is True an existing encoder is loaded from
    ``import_path`` and only the remaining layers from
    hyper_parameters.layer_sizes are added.  With
    hyper_parameters.cascade_train the network is (re)trained after each
    added layer; otherwise a single full training pass runs at the end.
    ``dir_name``, when given, receives an exported snapshot after training.
    Returns the trained StackedDoubleEncoder.

    NOTE(review): indentation was lost in this source; the loop/branch
    nesting below is a reconstruction -- confirm against upstream.
    """
    if not import_net:
        # Fresh encoder sized from the training data.
        symmetric_double_encoder = StackedDoubleEncoder(hidden_layers=[],
                                                        numpy_range=self._random_range,
                                                        input_size_x=training_set_x.shape[1],
                                                        input_size_y=training_set_y.shape[1],
                                                        batch_size=hyper_parameters.batch_size,
                                                        activation_method=activation_method)
    else:
        # Shell encoder; weights and activation come from the imported file.
        symmetric_double_encoder = StackedDoubleEncoder(hidden_layers=[],
                                                        numpy_range=self._random_range,
                                                        input_size_x=training_set_x.shape[1],
                                                        input_size_y=training_set_y.shape[1],
                                                        batch_size=hyper_parameters.batch_size,
                                                        activation_method=None)
        symmetric_double_encoder.import_encoder(import_path, hyper_parameters)

    self._moving_average = []

    # In this phase we train the stacked encoder one layer at a time;
    # once a layer was added, weights not belonging to the new layer are
    # not changed.  Skip layers the imported network already has.
    layer_sizes = hyper_parameters.layer_sizes[len(symmetric_double_encoder):]
    for idx, layer_size in enumerate(layer_sizes):
        OutputLog().write('--------Adding Layer of Size - %d--------' % layer_size)
        self._add_cross_encoder_layer(layer_size,
                                      symmetric_double_encoder,
                                      hyper_parameters.method_in,
                                      hyper_parameters.method_out)

        # Only the newly added layer's parameters are trained in cascade mode.
        params = []
        if idx == 0:
            params.extend(symmetric_double_encoder[0].x_params)
        else:
            params.extend(symmetric_double_encoder[-1].x_hidden_params)
        params.extend(symmetric_double_encoder[-1].y_params)

        if hyper_parameters.cascade_train:
            OutputLog().write('--------Starting Training Network-------')
            Trainer.train(train_set_x=training_set_x,
                          train_set_y=training_set_y,
                          hyper_parameters=hyper_parameters,
                          symmetric_double_encoder=symmetric_double_encoder,
                          params=params,
                          regularization_methods=regularization_methods,
                          print_verbose=print_verbose,
                          validation_set_x=validation_set_x,
                          validation_set_y=validation_set_y,
                          moving_averages=self._moving_average)

            # Snapshot after each cascaded stage.
            if dir_name is not None:
                symmetric_double_encoder.export_encoder(dir_name, 'layer_{0}'.format(len(symmetric_double_encoder) + 1))

    if not hyper_parameters.cascade_train:
        # Single end-to-end pass over all parameters of the full stack.
        params = symmetric_double_encoder.getParams()
        OutputLog().write('--------Starting Training Network-------')
        Trainer.train(train_set_x=training_set_x,
                      train_set_y=training_set_y,
                      hyper_parameters=hyper_parameters,
                      symmetric_double_encoder=symmetric_double_encoder,
                      params=params,
                      regularization_methods=regularization_methods,
                      print_verbose=print_verbose,
                      validation_set_x=validation_set_x,
                      validation_set_y=validation_set_y)

        if dir_name is not None:
            symmetric_double_encoder.export_encoder(dir_name, 'layer_{0}'.format(len(symmetric_double_encoder) + 1))

    return symmetric_double_encoder
示例15: Nonlinear
# 需要导入模块: from trainer import Trainer [as 别名]
# 或者: from trainer.Trainer import train [as 别名]
# Demo script: fit a Nonlinear autoregressive model on a toy sequence and
# print the training result.
from sys import path
path.append("src/")
from trainer import Trainer
from models import Nonlinear

size = 5        # training-window size -- presumably; confirm against Trainer
lookback = 2    # number of past samples fed to the model
hidden = 10     # hidden-layer width
delta = 0.0
lmb = 0.1       # regularization strength -- presumably; confirm

data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
model = Nonlinear(hidden=hidden, lookback=lookback, delta=delta, lmb=lmb)
trainer = Trainer(data, size=size, lookback=lookback, model=model)

# FIX: use the print() function -- the original used a Python-2-only print
# statement; this spelling runs under both Python 2 and 3.
print(trainer.train(maxiter=20))