This article collects typical usage examples of the Trainer.summarize_training_progress method from the Python cntk package, covering what the method does, how to call it, and how it is used in practice. You can also explore the documentation of the containing class, cntk.Trainer, for further details.
The following presents 9 code examples of Trainer.summarize_training_progress, ordered by popularity.
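Before the individual examples, the sketch below shows the calling pattern they all share: attach one or more progress writers to a Trainer, train some minibatches, and call summarize_training_progress() once per epoch to print (and reset) the accumulated average loss and metric. This is a minimal sketch assuming the CNTK 2.x Python API; the one-layer network and the random toy data are invented for illustration and do not come from any example below.

import numpy as np
import cntk as C

input_dim, num_classes = 4, 2
feature = C.input_variable(input_dim, np.float32)
label = C.input_variable(num_classes, np.float32)

# Toy one-layer classifier (illustrative only)
z = C.layers.Dense(num_classes)(feature)
loss = C.cross_entropy_with_softmax(z, label)
metric = C.classification_error(z, label)

lr = C.learning_rate_schedule(0.1, C.UnitType.minibatch)
learner = C.sgd(z.parameters, lr)
progress = C.logging.ProgressPrinter(tag='Training')  # the writer the summary is sent to
trainer = C.Trainer(z, (loss, metric), [learner], [progress])

for epoch in range(2):
    for _ in range(10):  # 10 toy minibatches per "epoch"
        x = np.random.rand(8, input_dim).astype(np.float32)
        y = np.eye(num_classes, dtype=np.float32)[np.random.randint(num_classes, size=8)]
        trainer.train_minibatch({feature: x, label: y})
    # Prints the average loss/metric accumulated since the last summary, then resets
    # the accumulators; this is why the examples below call it once per epoch.
    trainer.summarize_training_progress()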
Example 1: entrenar
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def entrenar(checkpoint, entrRuedas, entrOperaciones, input_dim, num_output_classes, testRuedas, testOperaciones):
    minibatch_size = 100
    epocs = 900
    minibatchIteraciones = int(len(entrOperaciones) / minibatch_size)
    # Input variables denoting the features and label data
    feature = input((input_dim), np.float32)
    label = input((num_output_classes), np.float32)
    netout = crearRed(input_dim, num_output_classes, feature)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.25, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(log_to_file=checkpoint+".log", num_epochs=epocs)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    if os.path.isfile(checkpoint):
        trainer.restore_from_checkpoint(checkpoint)
    npentrRuedas = np.array(entrRuedas).astype(np.float32)
    npentrOperaciones = np.array(entrOperaciones).astype(np.float32)
    # iterate once per epoch
    for i in range(0, epocs):
        p = np.random.permutation(len(entrRuedas))
        npentrOperaciones = npentrOperaciones[p]
        npentrRuedas = npentrRuedas[p]
        # now split the data into minibatches and train
        for j in range(0, minibatchIteraciones):
            features = npentrRuedas[j*minibatch_size:(j+1)*minibatch_size]
            labels = npentrOperaciones[j*minibatch_size:(j+1)*minibatch_size]
            trainer.train_minibatch({feature: features, label: labels})
        trainer.summarize_training_progress()
        trainer.save_checkpoint(checkpoint)
    minibatchIteraciones = int(len(testOperaciones) / minibatch_size)
    avg_error = 0
    for j in range(0, minibatchIteraciones):
        test_features = np.array(testRuedas[j*minibatch_size:(j+1)*minibatch_size]).astype(np.float32)
        test_labels = np.array(testOperaciones[j*minibatch_size:(j+1)*minibatch_size]).astype(np.float32)
        # test_features = np.array(entrRuedas[0:minibatch_size]).astype(np.float32)
        # test_labels = np.array(entrOperaciones[0:minibatch_size]).astype(np.float32)
        avg_error = avg_error + (trainer.test_minibatch(
            {feature: test_features, label: test_labels}) / minibatchIteraciones)
    return avg_error
Example 2: train_fast_rcnn
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def train_fast_rcnn(debug_output=False):
    if debug_output:
        print("Storing graphs and intermediate models to %s." % os.path.join(abs_path, "Output"))
    # Create the minibatch source
    minibatch_source = create_mb_source(image_height, image_width, num_channels,
                                        num_classes, num_rois, base_path, "train")
    # Input variables denoting features, rois and label data
    image_input = input_variable((num_channels, image_height, image_width))
    roi_input = input_variable((num_rois, 4))
    label_input = input_variable((num_rois, num_classes))
    # define mapping from reader streams to network inputs
    input_map = {
        image_input: minibatch_source[features_stream_name],
        roi_input: minibatch_source[roi_stream_name],
        label_input: minibatch_source[label_stream_name]
    }
    # Instantiate the Fast R-CNN prediction model and loss function
    frcn_output = frcn_predictor(image_input, roi_input, num_classes)
    ce = cross_entropy_with_softmax(frcn_output, label_input, axis=1)
    pe = classification_error(frcn_output, label_input, axis=1)
    if debug_output:
        plot(frcn_output, os.path.join(abs_path, "Output", "graph_frcn.png"))
    # Set learning parameters
    l2_reg_weight = 0.0005
    lr_per_sample = [0.00001] * 10 + [0.000001] * 5 + [0.0000001]
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
    # Instantiate the trainer object
    learner = momentum_sgd(frcn_output.parameters, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight)
    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
    trainer = Trainer(frcn_output, (ce, pe), learner, progress_printer)
    # Get minibatches of images and perform model training
    print("Training Fast R-CNN model for %s epochs." % max_epochs)
    log_number_of_parameters(frcn_output)
    for epoch in range(max_epochs):       # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = minibatch_source.next_minibatch(min(mb_size, epoch_size-sample_count), input_map=input_map)
            trainer.train_minibatch(data)                            # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
        trainer.summarize_training_progress()
        if debug_output:
            frcn_output.save(os.path.join(abs_path, "Output", "frcn_py_%s.model" % (epoch+1)))
    return frcn_output
Example 3: train_model
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def train_model(base_model_file, feature_node_name, last_hidden_node_name,
                image_width, image_height, num_channels, num_classes, train_map_file,
                num_epochs, max_images=-1, freeze=False):
    epoch_size = sum(1 for line in open(train_map_file))
    if max_images > 0:
        epoch_size = min(epoch_size, max_images)
    # Create the minibatch source and input variables
    minibatch_source = create_mb_source(train_map_file, image_width, image_height, num_channels, num_classes)
    image_input = C.input_variable((num_channels, image_height, image_width))
    label_input = C.input_variable(num_classes)
    # Define mapping from reader streams to network inputs
    input_map = {
        image_input: minibatch_source[features_stream_name],
        label_input: minibatch_source[label_stream_name]
    }
    # Instantiate the transfer learning model and loss function
    tl_model = create_model(base_model_file, feature_node_name, last_hidden_node_name, num_classes, image_input, freeze)
    ce = cross_entropy_with_softmax(tl_model, label_input)
    pe = classification_error(tl_model, label_input)
    # Instantiate the trainer object
    lr_schedule = learning_rate_schedule(lr_per_mb, unit=UnitType.minibatch)
    mm_schedule = momentum_schedule(momentum_per_mb)
    learner = momentum_sgd(tl_model.parameters, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight)
    progress_printer = ProgressPrinter(tag='Training', num_epochs=num_epochs)
    trainer = Trainer(tl_model, (ce, pe), learner, progress_printer)
    # Get minibatches of images and perform model training
    print("Training transfer learning model for {0} epochs (epoch_size = {1}).".format(num_epochs, epoch_size))
    log_number_of_parameters(tl_model)
    for epoch in range(num_epochs):       # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = minibatch_source.next_minibatch(min(mb_size, epoch_size-sample_count), input_map=input_map)
            trainer.train_minibatch(data)                            # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            if sample_count % (100 * mb_size) == 0:
                print("Processed {0} samples".format(sample_count))
        trainer.summarize_training_progress()
    return tl_model
Example 4: ffnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def ffnet(data, labels):
    input_dim = 800
    num_output_classes = 3
    num_hidden_layers = 2
    hidden_layers_dim = 50
    # Input variables denoting the features and label data
    feature = input((input_dim), np.float32)
    label = input((num_output_classes), np.float32)
    netout = Sequential([For(range(num_hidden_layers), lambda i: Dense(hidden_layers_dim, activation=sigmoid)),
                         Dense(num_output_classes)])(feature)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.5, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(128)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    features, labels = generate_stock_data(minibatch_size)
    for i in range(1024):
        # features, labels = generate_random_data(
        #     minibatch_size, input_dim, num_output_classes)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        trainer.train_minibatch({feature: features, label: labels})
    trainer.summarize_training_progress()
    test_features, test_labels = generate_random_data(
        minibatch_size, input_dim, num_output_classes)
    avg_error = trainer.test_minibatch(
        {feature: test_features, label: test_labels})
    return avg_error
Example 5: ffnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def ffnet():
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    # Input variables denoting the features and label data
    input = input_variable((input_dim), np.float32)
    label = input_variable((num_output_classes), np.float32)
    # Instantiate the feedforward classification model
    netout = fully_connected_classifier_net(
        input, num_output_classes, hidden_layers_dim, num_hidden_layers, sigmoid)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.5, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(128)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    for i in range(1024):
        features, labels = generate_random_data(
            minibatch_size, input_dim, num_output_classes)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        trainer.train_minibatch({input: features, label: labels})
    trainer.summarize_training_progress()
    test_features, test_labels = generate_random_data(
        minibatch_size, input_dim, num_output_classes)
    avg_error = trainer.test_minibatch(
        {input: test_features, label: test_labels})
    return avg_error
Example 6: train_and_evaluate
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def train_and_evaluate(reader_train, reader_test, network_name, epoch_size, max_epochs, profiler_dir=None,
                       model_dir=None, log_dir=None, tensorboard_logdir=None, gen_heartbeat=False):
    set_computation_network_trace_level(0)
    # Input variables denoting the features and label data
    input_var = C.input_variable((num_channels, image_height, image_width), name='features')
    label_var = C.input_variable((num_classes))
    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0]*80 + [0.1]*40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1]*1 + [1.0]*80 + [0.1]*40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")
    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # shared training parameters
    minibatch_size = 128
    momentum_time_constant = -minibatch_size/np.log(0.9)
    l2_reg_weight = 0.0001
    # Set learning parameters
    lr_per_sample = [lr/minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
    # progress writers
    progress_writers = [ProgressPrinter(tag='Training', log_to_file=log_dir, num_epochs=max_epochs, gen_heartbeat=gen_heartbeat)]
    tensorboard_writer = None
    if tensorboard_logdir is not None:
        tensorboard_writer = TensorBoardProgressWriter(freq=10, log_dir=tensorboard_logdir, model=z)
        progress_writers.append(tensorboard_writer)
    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, (ce, pe), learner, progress_writers)
    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }
    log_number_of_parameters(z); print()
    # perform model training
    if profiler_dir:
        start_profiler(profiler_dir, True)
    for epoch in range(max_epochs):       # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size-sample_count), input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)                            # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
        trainer.summarize_training_progress()
        # Log the mean of each parameter tensor, so that we can confirm that the parameters do change.
        if tensorboard_writer:
            for parameter in z.parameters:
                tensorboard_writer.write_value(parameter.uid + "/mean", reduce_mean(parameter).eval(), epoch)
        if model_dir:
            z.save(os.path.join(model_dir, network_name + "_{}.dnn".format(epoch)))
        enable_profiler()  # begin to collect profiler data after first epoch
    if profiler_dir:
        stop_profiler()
    # Evaluation parameters
    test_epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    while sample_count < test_epoch_size:
        current_minibatch = min(minibatch_size, test_epoch_size - sample_count)
        # Fetch the next test minibatch.
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)
        # Evaluate the model on the minibatch
        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of samples processed so far.
        sample_count += data[label_var].num_samples
    print("")
    trainer.summarize_test_progress()
    print("")
#......... the rest of this code is omitted .........
Example 7: simple_mnist
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def simple_mnist():
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200
    # Input variables denoting the features and label data
    features = input_variable(input_dim, np.float32)
    label = input_variable(num_output_classes, np.float32)
    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), features)
    netout = fully_connected_classifier_net(
        scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, relu)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    try:
        rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Image/MNIST/v0/Train-28x28_cntk_text.txt".split("/"))
    except KeyError:
        rel_path = os.path.join(*"../Image/DataSets/MNIST/Train-28x28_cntk_text.txt".split("/"))
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    reader_train = create_reader(path, True, input_dim, num_output_classes)
    input_map = {
        features: reader_train.streams.features,
        label: reader_train.streams.labels
    }
    # Instantiate progress writers.
    logdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mnist_log")
    tensorboard_writer = TensorBoardProgressWriter(freq=1, log_dir=logdir, model=netout)
    progress_printer = ProgressPrinter(freq=10, tag='Training')
    # Instantiate the trainer object to drive the model training
    lr_per_minibatch = learning_rate_schedule(0.2, UnitType.minibatch)
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    trainer = Trainer(netout, (ce, pe), learner, [tensorboard_writer, progress_printer])
    # Get minibatches of images to train with and perform model training
    minibatch_size = 64
    num_samples_per_sweep = 6000
    num_sweeps_to_train_with = 2
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    for minibatch_idx in range(0, int(num_minibatches_to_train)):
        trainer.train_minibatch(reader_train.next_minibatch(minibatch_size, input_map=input_map))
        # Log max/min/mean of each parameter tensor, so that we can confirm that the parameters do change.
        # Don't want to do that very often though, otherwise will spend too much time computing min/max/mean.
        if minibatch_idx % 10 == 9:
            for p in netout.parameters:
                tensorboard_writer.write_value(p.uid + "/max", reduce_max(p).eval(), minibatch_idx)
                tensorboard_writer.write_value(p.uid + "/min", reduce_min(p).eval(), minibatch_idx)
                tensorboard_writer.write_value(p.uid + "/mean", reduce_mean(p).eval(), minibatch_idx)
    trainer.summarize_training_progress()
    # Load test data
    try:
        rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Image/MNIST/v0/Test-28x28_cntk_text.txt".split("/"))
    except KeyError:
        rel_path = os.path.join(*"../Image/DataSets/MNIST/Test-28x28_cntk_text.txt".split("/"))
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    reader_test = create_reader(path, False, input_dim, num_output_classes)
    input_map = {
        features: reader_test.streams.features,
        label: reader_test.streams.labels
    }
    # Test data for trained model
    test_minibatch_size = 1024
    num_samples = 10000
    num_minibatches_to_test = num_samples // test_minibatch_size
    test_result = 0.0
    for i in range(0, num_minibatches_to_test):
        mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
        test_result += trainer.test_minibatch(mb)
    # Average of evaluation errors of all test minibatches
    trainer.summarize_test_progress()
    return test_result / num_minibatches_to_test
Example 8: conv3d_ucf11
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def conv3d_ucf11(train_reader, test_reader, max_epochs=30):
    # Replace 0 with 1 to get detailed log.
    set_computation_network_trace_level(0)
    # These values must match for both train and test reader.
    image_height = train_reader.height
    image_width = train_reader.width
    num_channels = train_reader.channel_count
    sequence_length = train_reader.sequence_length
    num_output_classes = train_reader.label_count
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, sequence_length, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)
    # Instantiate simple 3D Convolution network inspired by VGG network
    # and http://vlg.cs.dartmouth.edu/c3d/c3d_video.pdf
    with default_options(activation=relu):
        z = Sequential([
            Convolution3D((3,3,3), 64, pad=True),
            MaxPooling((1,2,2), (1,2,2)),
            For(range(3), lambda i: [
                Convolution3D((3,3,3), [96, 128, 128][i], pad=True),
                Convolution3D((3,3,3), [96, 128, 128][i], pad=True),
                MaxPooling((2,2,2), (2,2,2))
            ]),
            For(range(2), lambda: [
                Dense(1024),
                Dropout(0.5)
            ]),
            Dense(num_output_classes, activation=None)
        ])(input_var)
    # loss and classification error.
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # training config
    epoch_size = 1322  # for now we manually specify epoch size
    minibatch_size = 4
    # Set learning parameters
    lr_per_sample = [0.01]*10 + [0.001]*10 + [0.0001]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    momentum_time_constant = 4096
    mm_schedule = momentum_as_time_constant_schedule([momentum_time_constant], epoch_size=epoch_size)
    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule, True)
    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
    trainer = Trainer(z, (ce, pe), learner, progress_printer)
    log_number_of_parameters(z); print()
    # Get minibatches of images to train with and perform model training
    for epoch in range(max_epochs):  # loop over epochs
        train_reader.reset()
        while train_reader.has_more():
            videos, labels, current_minibatch = train_reader.next_minibatch(minibatch_size)
            trainer.train_minibatch({input_var: videos, label_var: labels})
        trainer.summarize_training_progress()
    # Test data for trained model
    epoch_size = 332
    minibatch_size = 2
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    minibatch_index = 0
    test_reader.reset()
    while test_reader.has_more():
        videos, labels, current_minibatch = test_reader.next_minibatch(minibatch_size)
        # Evaluate the model on the minibatch
        metric_numer += trainer.test_minibatch({input_var: videos, label_var: labels}) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of minibatches processed so far.
        minibatch_index += 1
    print("")
print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))
print("")
return metric_numer/metric_denom
Example 9: train
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import summarize_training_progress [as alias]
def train(reader, model, max_epochs):
    # Input variables denoting the features and label data
    query = Input(input_dim, is_sparse=False)
    slot_labels = Input(num_labels, is_sparse=True)  # TODO: make sparse once it works
    # apply model to input
    z = model(query)
    # loss and metric
    ce = cross_entropy_with_softmax(z, slot_labels)
    pe = classification_error(z, slot_labels)
    # training config
    epoch_size = 36000
    minibatch_size = 70
    num_mbs_to_show_result = 100
    momentum_time_constant = momentum_as_time_constant_schedule(minibatch_size / -math.log(0.9))  # TODO: Change to round number. This is 664.39. 700?
    lr_schedule = [0.003]*2 + [0.0015]*12 + [0.0003]  # LR schedule over epochs (we don't run that many epochs, but if we did, these are good values)
    # trainer object
    lr_per_sample = learning_rate_schedule(lr_schedule, UnitType.sample, epoch_size)
    learner = adam_sgd(z.parameters,
                       lr=lr_per_sample, momentum=momentum_time_constant,
                       unit_gain=True,
                       low_memory=True,
                       gradient_clipping_threshold_per_sample=15, gradient_clipping_with_truncation=True)
    # more detailed logging
    progress_printer = ProgressPrinter(freq=100, first=10, tag='Training')
    #progress_printer = ProgressPrinter(tag='Training')
    tensorboard_writer = TensorBoardProgressWriter(freq=100, log_dir='atis_log', model=z)
    trainer = Trainer(z, (ce, pe), [learner], [progress_printer, tensorboard_writer])
    # define mapping from reader streams to network inputs
    input_map = {
        query: reader.streams.query,
        slot_labels: reader.streams.slot_labels
    }
    # process minibatches and perform model training
    log_number_of_parameters(z); print()
    t = 0
    for epoch in range(max_epochs):  # loop over epochs
        epoch_end = (epoch+1) * epoch_size
        while t < epoch_end:         # loop over minibatches in the epoch
            # BUGBUG? The change of minibatch_size parameter vv has no effect.
            data = reader.next_minibatch(min(minibatch_size, epoch_end-t), input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)                 # update model with it
            t += trainer.previous_minibatch_sample_count  # count samples processed so far
        #def trace_node(name):
        #    nl = [n for n in z.parameters if n.name() == name]
        #    if len(nl) > 0:
        #        print(name, np.asarray(nl[0].value))
        #trace_node('W')
        #trace_node('stabilizer_param')
        trainer.summarize_training_progress()
    tensorboard_writer.close()