本文整理汇总了Python中pylearn2.utils.timing.log_timing函数的典型用法代码示例。如果您正苦于以下问题:Python log_timing函数的具体用法?Python log_timing怎么用?Python log_timing使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log_timing函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_epoch
def run_epoch(trainobj):
    """
    Runs an epoch. Returns True to continue or
    False to terminate.
    """
    # The very first call only performs the monitoring/callback pass so the
    # model's initial state is recorded before any training happens.
    if trainobj.first_callbacks_and_monitoring:
        trainobj.run_callbacks_and_monitoring()
        trainobj.first_callbacks_and_monitoring = False
        return True

    if trainobj.algorithm is None:
        # No training algorithm: the model trains itself on the dataset.
        outcome = trainobj.model.train_all(dataset=trainobj.dataset)
        if outcome is not None:
            raise ValueError("Model.train_all should not return " +
                             "anything. Use Model.continue_learning " +
                             "to control whether learning continues.")
        return post_epoch(trainobj)

    # Nested timers: the outer one accumulates total seconds via a callback,
    # the inner one logs the per-epoch training time.
    with log_timing(logger, None, level=logging.DEBUG,
                    callbacks=[trainobj.total_seconds.set_value]):
        with log_timing(logger, None, final_msg='Time this epoch:',
                        callbacks=[trainobj.training_seconds.set_value]):
            outcome = trainobj.algorithm.train(dataset=trainobj.dataset)
    if outcome is not None:
        raise ValueError("TrainingAlgorithm.train should not "
                         "return anything. Use "
                         "TrainingAlgorithm.continue_learning "
                         "to control whether learning "
                         "continues.")
    return post_epoch(trainobj)
示例2: process_dataset
def process_dataset(model, dataset, data_specs=None, output_fn=None, batch_size=128):
    """
    Runs ``model`` over ``dataset`` in sequential minibatches.

    Returns a tuple ``(y_real, y_pred, output)``: the argmax class indices
    of the targets, the argmax class indices of the model output, and the
    stacked raw model outputs.
    """
    if data_specs is None:
        data_specs = (CompositeSpace((model.get_input_space(),
                                      model.get_output_space())),
                      ("features", "targets"))

    if output_fn is None:
        # Compile a theano forward-pass function on demand.
        with log_timing(log, 'compiling output_fn'):
            input_batch = model.get_input_space().make_theano_batch()
            output_fn = theano.function(inputs=[input_batch],
                                        outputs=model.fprop(input_batch))

    iterator = dataset.iterator(mode='sequential',
                                batch_size=batch_size,
                                data_specs=data_specs)

    predicted = []
    actual = []
    raw_outputs = []
    for features, target in iterator:
        batch_out = output_fn(features)  # NOTE: this hangs for convnet on Jeep2
        raw_outputs.append(batch_out)
        predicted.append(np.argmax(batch_out, axis=1))
        actual.append(np.argmax(target, axis=1))

    return np.hstack(actual), np.hstack(predicted), np.vstack(raw_outputs)
示例3: on_monitor
def on_monitor(self, model, dataset, algorithm):
    """Serialize ``model`` to an epoch-numbered pickle after each monitoring step."""
    epoch = algorithm.monitor._epochs_seen
    model_file = '{}{}{}.pkl'.format(self.save_path, self.save_prefix, epoch)
    with log_timing(log, 'saving model to {}'.format(model_file)):
        serial.save(model_file, model, on_overwrite='backup')
示例4: on_monitor
def on_monitor(self, model, dataset, algorithm):
    """
    Looks whether the model performs better than earlier
    - or equally good (modification).
    If it's the case, saves the model.

    Parameters
    ----------
    model : pylearn2.models.model.Model
        model.monitor must contain a channel with name given by
        self.channel_name
    dataset : pylearn2.datasets.dataset.Dataset
        Not used
    algorithm : TrainingAlgorithm
        Not used
    """
    monitor = model.monitor
    channel = monitor.channels[self.channel_name]
    new_cost = channel.val_record[-1]

    # self.coeff flips the comparison direction (minimize vs maximize);
    # "<=" (rather than "<") also accepts a tie with the best cost so far.
    improved = self.coeff * new_cost <= self.coeff * self.best_cost
    if not improved or monitor._epochs_seen < self.start_epoch:
        return

    self.best_cost = new_cost
    # Update the tag of the model object before saving it.
    self._update_tag(model)
    if self.store_best_model:
        self.best_model = deepcopy(model)
    if self.save_path is not None:
        with log_timing(log, 'Saving to ' + self.save_path):
            serial.save(self.save_path, model, on_overwrite='backup')
示例5: load_results
def load_results(experiment_root):
    """
    Loads the best model and its Train object from ``experiment_root``.

    Returns ``(train, model)``.
    """
    # load the model (mlp_best.pkl)
    model_file = os.path.join(experiment_root, 'mlp_best.pkl')
    with log_timing(log, 'loading model from {}'.format(model_file)):
        model = serial.load(model_file)

    # load the train object; dataset paths in the yaml are rewritten for
    # the local machine before parsing
    train_yaml_file = os.path.join(experiment_root, 'train.yaml')
    yaml_template = load_yaml_template(train_yaml_file)
    yaml_template = PathLocalizer().localize_yaml(yaml_template)
    with log_timing(log, 'loading train from {}'.format(train_yaml_file)):
        train = load_yaml(yaml_template)[0]
    return train, model
示例6: extract_output
def extract_output(experiment_root):
    """Runs the trained model over every monitoring dataset and caches the results."""
    train, model = load_results(experiment_root)
    # get the datasets with their names from the monitor
    for name, dataset in train.algorithm.monitoring_dataset.items():
        # process each dataset
        with log_timing(log, 'processing dataset \'{}\''.format(name)):
            real, pred, out = process_dataset(model, dataset)
            save(os.path.join(experiment_root, 'cache', name + '_output.pklz'),
                 (real, pred, out))
示例7: train_mlp
def train_mlp(params):
    """Instantiates the cross-trial MLP yaml template with ``params`` and trains it."""
    template_path = os.path.join(os.path.dirname(__file__),
                                 'cross_trial_template.yaml')
    train, yaml_str = load_yaml_file(template_path, params=params)
    # keep a copy of the fully instantiated yaml next to the experiment
    save_yaml_file(yaml_str, os.path.join(params.experiment_root, 'settings.yaml'))
    with log_timing(log, 'training network'):
        train.main_loop()
示例8: save_yaml_file
def save_yaml_file(yaml_str, yaml_file_path):
    """
    Writes ``yaml_str`` to ``yaml_file_path``, creating the target
    directory first if necessary.

    Does nothing when ``yaml_str`` is None.
    """
    # BUG FIX: the original tested "save_yaml_file is not None", i.e. the
    # function object itself, which is always true. The intent is clearly
    # to skip writing when there is no yaml string to save.
    if yaml_str is not None:
        with log_timing(log, 'saving yaml to {}'.format(yaml_file_path)):
            save_dir = os.path.dirname(yaml_file_path)
            if save_dir == '':
                save_dir = '.'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            # The with-block closes the file; the original's explicit
            # close() inside it was redundant and has been dropped.
            with open(yaml_file_path, 'w') as yaml_file:
                yaml_file.write(yaml_str)
示例9: train_convnet
def train_convnet(config):
    """Instantiates the convnet yaml template with ``config`` and trains it."""
    template_path = os.path.join(os.path.dirname(__file__),
                                 'train_convnet_template.yaml')
    train, yaml_str = load_yaml_file(template_path, params=config)
    # keep a copy of the fully instantiated yaml next to the experiment
    save_yaml_file(yaml_str, os.path.join(config.experiment_root, 'settings.yaml'))
    with log_timing(log, 'training network'):
        train.main_loop()
示例10: __init__
def __init__(self, filepath):
    """
    Loads a pickled dataset from ``filepath``.

    The pickle must hold either (data, metadata) or
    (data, metadata, targets); with two elements, targets is set to None.
    """
    self.filepath = filepath
    with log_timing(log, 'loading data from {}'.format(filepath)):
        tmp = load(filepath)

    if len(tmp) not in (2, 3):
        raise ValueError('got {} objects instead of 2 or 3.'.format(len(tmp)))
    self.targets = None
    if len(tmp) == 2:
        self.data, self.metadata = tmp
    else:
        self.data, self.metadata, self.targets = tmp
示例11: load_data_file
def load_data_file(filename):
    """Reads a space-separated numeric data file, skipping the header row."""
    with log_timing(log, 'loading data from {}'.format(filename)):
        data = np.genfromtxt(filename,
                             dtype=theano.config.floatX,
                             delimiter=' ',
                             skip_header=1,
                             autostrip=True)
    log.info('loaded {}'.format(data.shape))
    return data
示例12: load_yaml
def load_yaml(yaml_template, params=None):
    """
    Instantiates ``yaml_template`` (optionally %-formatted with ``params``)
    and parses it with pylearn2's yaml_parse.

    Returns ``(parsed_object, yaml_string)``.
    """
    # FIX: the original used Python-2-only "print x" statements, which are
    # a syntax error under Python 3; converted to print() calls (same
    # output for a single argument) and dropped the stray semicolons.
    print(params)
    if params is not None:
        yaml_str = yaml_template % params
    else:
        yaml_str = yaml_template
    print(yaml_str)
    with log_timing(log, 'parsing yaml'):
        obj = yaml_parse.load(yaml_str)
    return obj, yaml_str
示例13: load_yaml
def load_yaml(yaml_template, params=None):
    """
    Fills ``yaml_template`` with ``params`` (when given), parses the result
    with pylearn2's yaml_parse, and returns ``(parsed_object, yaml_string)``.
    """
    log.debug('params: {}'.format(params))
    yaml_str = yaml_template if params is None else yaml_template % params
    log.debug(yaml_str)
    with log_timing(log, 'parsing yaml'):
        parsed = yaml_parse.load(yaml_str)
    return parsed, yaml_str
示例14: main_loop
def main_loop(self):
    """
    Repeatedly runs an epoch of the training algorithm, runs any
    epoch-level callbacks, and saves the model.
    """
    if self.algorithm is None:
        # Self-training model: it owns its Monitor and its stopping rule.
        self.model.monitor = Monitor.get_monitor(self.model)
        self.setup_extensions()
        self.run_callbacks_and_monitoring()
        while True:
            epoch_rval = self.model.train_all(dataset=self.dataset)
            if epoch_rval is not None:
                raise ValueError("Model.train_all should not return anything. Use Model.continue_learning to control whether learning continues.")
            self.model.monitor.report_epoch()
            if self.save_freq > 0 and self.model.monitor.epochs_seen % self.save_freq == 0:
                self.save()
            keep_going = self.model.continue_learning()
            assert keep_going in [True, False, 0, 1]
            if not keep_going:
                break
    else:
        self.algorithm.setup(model=self.model, dataset=self.dataset)
        self.setup_extensions()
        if not hasattr(self.model, 'monitor'):
            # TODO: is this really necessary? I just put this error here
            # to prevent an AttributeError later, but I think we could
            # rewrite to avoid the AttributeError
            raise RuntimeError("The algorithm is responsible for setting"
                               " up the Monitor, but failed to.")
        self.run_callbacks_and_monitoring()
        while True:
            with log_timing(log, None, final_msg='Time this epoch:'):
                epoch_rval = self.algorithm.train(dataset=self.dataset)
            if epoch_rval is not None:
                raise ValueError("TrainingAlgorithm.train should not return anything. Use TrainingAlgorithm.continue_learning to control whether learning continues.")
            self.model.monitor.report_epoch()
            self.run_callbacks_and_monitoring()
            if self.save_freq > 0 and self.model.monitor._epochs_seen % self.save_freq == 0:
                self.save()
            keep_going = self.algorithm.continue_learning(self.model)
            assert keep_going in [True, False, 0, 1]
            if not keep_going:
                break
    # Mark the run as successful and write a final checkpoint.
    self.model.monitor.training_succeeded = True
    if self.save_freq > 0:
        self.save()
示例15: __init__
def __init__(self, save_dir):
    """
    Copies the yaml file that launched this training run (located via the
    PYLEARN2_TRAIN_DIR / PYLEARN2_TRAIN_BASE_NAME environment variables)
    into ``save_dir``.
    """
    train_dir = preprocess('${PYLEARN2_TRAIN_DIR}')
    base_name = preprocess('${PYLEARN2_TRAIN_BASE_NAME}')
    source_path = os.path.join(train_dir, base_name)
    target_path = os.path.join(save_dir, base_name)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if os.path.exists(save_dir) and not os.path.isdir(save_dir):
        raise IOError("save path %s exists, not a directory" % save_dir)
    elif not os.access(save_dir, os.W_OK):
        raise IOError("permission error creating %s" % target_path)

    with log_timing(log, 'copying yaml from {} to {}'.format(source_path, target_path)):
        copyfile(source_path, target_path)