This page collects typical usage examples of the Trainer.test_minibatch method from the Python cntk module. If you are unsure what Trainer.test_minibatch does, how to call it, or want to see it used in practice, the curated code samples below should help. You can also read further about the enclosing class, cntk.Trainer.
Below are 15 code examples of Trainer.test_minibatch, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
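For orientation before the examples: Trainer.test_minibatch takes a dict mapping the model's input variables to minibatch data and returns the average value of the trainer's evaluation criterion (typically the classification error) on that minibatch, without updating any model parameters. A minimal usage sketch, assuming a trainer and input variables built as in the examples below (the names are illustrative):

# Minimal sketch: evaluate one minibatch without updating the model.
# Assumes `trainer`, `feature` and `label` are set up as in the examples below.
eval_error = trainer.test_minibatch({feature: test_features, label: test_labels})
print("Average error on this minibatch: {:.2%}".format(eval_error))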
Example 1: entrenar
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def entrenar(checkpoint, entrRuedas, entrOperaciones, input_dim, num_output_classes, testRuedas, testOperaciones):
    minibatch_size = 100
    epocs = 900
    minibatchIteraciones = int(len(entrOperaciones) / minibatch_size)
    # Input variables denoting the features and label data
    feature = input((input_dim), np.float32)
    label = input((num_output_classes), np.float32)
    netout = crearRed(input_dim, num_output_classes, feature)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.25, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(log_to_file=checkpoint + ".log", num_epochs=epocs)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    if os.path.isfile(checkpoint):
        trainer.restore_from_checkpoint(checkpoint)
    npentrRuedas = np.array(entrRuedas).astype(np.float32)
    npentrOperaciones = np.array(entrOperaciones).astype(np.float32)
    # iterate once per epoch
    for i in range(0, epocs):
        # reshuffle features and labels with the same permutation
        p = np.random.permutation(len(entrRuedas))
        npentrOperaciones = npentrOperaciones[p]
        npentrRuedas = npentrRuedas[p]
        # now split the data into minibatches and train
        for j in range(0, minibatchIteraciones):
            features = npentrRuedas[j*minibatch_size:(j+1)*minibatch_size]
            labels = npentrOperaciones[j*minibatch_size:(j+1)*minibatch_size]
            trainer.train_minibatch({feature: features, label: labels})
        trainer.summarize_training_progress()
        trainer.save_checkpoint(checkpoint)
    minibatchIteraciones = int(len(testOperaciones) / minibatch_size)
    avg_error = 0
    for j in range(0, minibatchIteraciones):
        test_features = np.array(testRuedas[j*minibatch_size:(j+1)*minibatch_size]).astype(np.float32)
        test_labels = np.array(testOperaciones[j*minibatch_size:(j+1)*minibatch_size]).astype(np.float32)
        #test_features = np.array(entrRuedas[0:minibatch_size]).astype(np.float32)
        #test_labels = np.array(entrOperaciones[0:minibatch_size]).astype(np.float32)
        avg_error = avg_error + (trainer.test_minibatch(
            {feature: test_features, label: test_labels}) / minibatchIteraciones)
    return avg_error
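The helper crearRed (Spanish for "create network") is not part of this snippet. Below is a hypothetical stand-in, consistent with how its return value is used above (it must produce un-normalized logits for cross_entropy_with_softmax); the real implementation may differ:

# Hypothetical reconstruction of the missing `crearRed` helper; the actual
# network in the original project may differ. Assumes the layer API from
# cntk.layers: Sequential, Dense, default_options.
from cntk.layers import Sequential, Dense, default_options

def crearRed(input_dim, num_output_classes, feature):
    with default_options(activation=sigmoid):
        # Two hidden layers, linear output layer (logits).
        return Sequential([Dense(50),
                           Dense(50),
                           Dense(num_output_classes, activation=None)])(feature)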
Example 2: ffnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def ffnet(debug_output=False):
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    # Input variables denoting the features and label data
    input = input_variable((input_dim), np.float32)
    label = input_variable((num_output_classes), np.float32)
    # Instantiate the feedforward classification model
    netout = fully_connected_classifier_net(
        input, num_output_classes, hidden_layers_dim, num_hidden_layers, sigmoid)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    # Instantiate the trainer object to drive the model training
    trainer = Trainer(netout, ce, pe, [sgd(netout.parameters(), lr=0.02)])
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    num_samples_per_sweep = 10000
    num_sweeps_to_train_with = 2
    num_minibatches_to_train = (
        num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    training_progress_output_freq = 60
    if debug_output:
        training_progress_output_freq = training_progress_output_freq/3
    for i in range(0, int(num_minibatches_to_train)):
        features, labels = generate_random_data(
            minibatch_size, input_dim, num_output_classes)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        trainer.train_minibatch({input: features, label: labels})
        print_training_progress(trainer, i, training_progress_output_freq)
    test_features, test_labels = generate_random_data(
        minibatch_size, input_dim, num_output_classes)
    avg_error = trainer.test_minibatch(
        {input: test_features, label: test_labels})
    return avg_error
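generate_random_data is a small helper from the CNTK feed-forward samples rather than a library function. A common definition is reproduced below for self-containment (the exact variant paired with this snippet may differ): it draws random class labels, offsets the features by a class-dependent amount so the classes are learnable, and one-hot encodes the labels.

import numpy as np

def generate_random_data(sample_size, feature_dim, num_classes):
    # Random labels in [0, num_classes), shaped (sample_size, 1).
    Y = np.random.randint(size=(sample_size, 1), low=0, high=num_classes)
    # Features drawn around a class-dependent mean so classes are separable.
    X = (np.random.randn(sample_size, feature_dim) + 3) * (Y + 1)
    X = X.astype(np.float32)
    # One-hot encode the labels.
    class_ind = [Y == class_number for class_number in range(num_classes)]
    Y = np.asarray(np.hstack(class_ind), dtype=np.float32)
    return X, Y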
Example 3: ffnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def ffnet(data, labels):
    input_dim = 800
    num_output_classes = 3
    num_hidden_layers = 2
    hidden_layers_dim = 50
    # Input variables denoting the features and label data
    feature = input((input_dim), np.float32)
    label = input((num_output_classes), np.float32)
    netout = Sequential([For(range(num_hidden_layers), lambda i: Dense(hidden_layers_dim, activation=sigmoid)),
                         Dense(num_output_classes)])(feature)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.5, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(128)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    features, labels = generate_stock_data(minibatch_size)
    for i in range(1024):
        # features, labels = generate_random_data(
        #     minibatch_size, input_dim, num_output_classes)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        trainer.train_minibatch({feature: features, label: labels})
    trainer.summarize_training_progress()
    test_features, test_labels = generate_random_data(
        minibatch_size, input_dim, num_output_classes)
    avg_error = trainer.test_minibatch(
        {feature: test_features, label: test_labels})
    return avg_error
Example 4: ffnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def ffnet():
    input_dim = 2
    num_output_classes = 2
    num_hidden_layers = 2
    hidden_layers_dim = 50
    # Input variables denoting the features and label data
    input = input_variable((input_dim), np.float32)
    label = input_variable((num_output_classes), np.float32)
    # Instantiate the feedforward classification model
    netout = fully_connected_classifier_net(
        input, num_output_classes, hidden_layers_dim, num_hidden_layers, sigmoid)
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    lr_per_minibatch = learning_rate_schedule(0.5, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    learner = sgd(netout.parameters, lr=lr_per_minibatch)
    progress_printer = ProgressPrinter(128)
    trainer = Trainer(netout, (ce, pe), learner, progress_printer)
    # Get minibatches of training data and perform model training
    minibatch_size = 25
    for i in range(1024):
        features, labels = generate_random_data(
            minibatch_size, input_dim, num_output_classes)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        trainer.train_minibatch({input: features, label: labels})
    trainer.summarize_training_progress()
    test_features, test_labels = generate_random_data(
        minibatch_size, input_dim, num_output_classes)
    avg_error = trainer.test_minibatch(
        {input: test_features, label: test_labels})
    return avg_error
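print_training_progress, used in several of these examples, is likewise a sample-level helper rather than a cntk API. A plausible definition is sketched below, assuming the public Trainer properties previous_minibatch_loss_average and previous_minibatch_evaluation_average; the original helper may format its output differently:

def print_training_progress(trainer, mb, frequency):
    # Print loss/error every `frequency` minibatches.
    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        eval_error = trainer.previous_minibatch_evaluation_average
        print("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(
            mb, training_loss, eval_error * 100))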
Example 5: cifar_resnet_distributed
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def cifar_resnet_distributed(data_path, run_test, num_epochs, communicator=None, save_model_filename=None, load_model_filename=None, debug_output=False):
    image_height = 32
    image_width = 32
    num_channels = 3
    num_classes = 10
    feats_stream_name = 'features'
    labels_stream_name = 'labels'
    minibatch_source = create_reader(os.path.join(data_path, 'train_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), True,
                                     distributed_communicator=communicator)
    features_si = minibatch_source[feats_stream_name]
    labels_si = minibatch_source[labels_stream_name]
    # Instantiate the resnet classification model, or load from file
    if load_model_filename:
        print("Loading model:", load_model_filename)
        classifier_output = persist.load_model(load_model_filename)
        image_input = classifier_output.arguments[0]
    else:
        image_input = input_variable(
            (num_channels, image_height, image_width), features_si.m_element_type)
        classifier_output = create_resnet_model(image_input, num_classes)
    # Input variables denoting the features and label data
    label_var = input_variable((num_classes), features_si.m_element_type)
    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)
    # Instantiate the trainer object to drive the model training
    mb_size = 128
    num_mb_per_epoch = 100
    num_mbs = num_mb_per_epoch * num_epochs
    lr_per_sample = [1/mb_size]*80 + [0.1/mb_size]*40 + [0.01/mb_size]
    lr_schedule = learning_rate_schedule(lr_per_sample, units=mb_size * num_mb_per_epoch)
    momentum_time_constant = -mb_size/np.log(0.9)
    # create data parallel distributed trainer if needed
    dist_trainer = distributed.data_parallel_distributed_trainer(communicator, False) if communicator else None
    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, ce, pe,
                      [momentum_sgd(classifier_output.parameters, lr_schedule, momentum_time_constant, l2_regularization_weight=0.0001)],
                      distributed_trainer=dist_trainer)
    # Get minibatches of images to train with and perform model training
    training_progress_output_freq = 100 if communicator else 20
    if debug_output:
        training_progress_output_freq = training_progress_output_freq/4
    for i in range(0, num_mbs):
        # NOTE: depending on the network, the mb_size can be changed dynamically here
        mb = minibatch_source.next_minibatch(mb_size)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {
            image_input: mb[features_si],
            label_var: mb[labels_si]
        }
        trainer.train_minibatch(arguments)
        print_training_progress(trainer, i, training_progress_output_freq)
    if save_model_filename:
        print("Saving model:", save_model_filename)
        persist.save_model(classifier_output, save_model_filename)
    if run_test:
        test_minibatch_source = create_reader(os.path.join(data_path, 'test_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), False)
        features_si = test_minibatch_source[feats_stream_name]
        labels_si = test_minibatch_source[labels_stream_name]
        mb_size = 128
        num_mbs = 100
        total_error = 0.0
        for i in range(0, num_mbs):
            mb = test_minibatch_source.next_minibatch(mb_size)
            # Specify the mapping of input variables in the model to actual
            # minibatch data to be evaluated with
            arguments = {
                image_input: mb[features_si],
                label_var: mb[labels_si]
            }
            error = trainer.test_minibatch(arguments)
            total_error += error
        return total_error / num_mbs
    else:
        return 0
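create_reader here is another project-level helper. Below is a sketch of what it typically looks like for this CIFAR-10 setup, assuming the cntk.io reader API (MinibatchSource, ImageDeserializer, StreamDef/StreamDefs) and cntk.io.transforms; the distributed_communicator wiring from the old distributed-reader API is deliberately omitted, and the original helper may differ:

import cntk.io.transforms as xforms
from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs

def create_reader(map_file, mean_file, train, distributed_communicator=None):
    # Sketch only: `distributed_communicator` is accepted but not wired up here.
    transforms = []
    if train:
        # Random cropping as data augmentation during training.
        transforms += [xforms.crop(crop_type='randomside', side_ratio=0.8)]
    transforms += [xforms.scale(width=32, height=32, channels=3, interpolations='linear'),
                   xforms.mean(mean_file)]
    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=10))))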
Example 6: conv3d_ucf11
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def conv3d_ucf11(train_reader, test_reader, max_epochs=30):
    # Replace 0 with 1 to get detailed log.
    set_computation_network_trace_level(0)
    # These values must match for both train and test reader.
    image_height = train_reader.height
    image_width = train_reader.width
    num_channels = train_reader.channel_count
    sequence_length = train_reader.sequence_length
    num_output_classes = train_reader.label_count
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, sequence_length, image_height, image_width), np.float32)
    label_var = input_variable(num_output_classes, np.float32)
    # Instantiate simple 3D Convolution network inspired by VGG network
    # and http://vlg.cs.dartmouth.edu/c3d/c3d_video.pdf
    with default_options(activation=relu):
        z = Sequential([
            Convolution3D((3,3,3), 64, pad=True),
            MaxPooling((1,2,2), (1,2,2)),
            For(range(3), lambda i: [
                Convolution3D((3,3,3), [96, 128, 128][i], pad=True),
                Convolution3D((3,3,3), [96, 128, 128][i], pad=True),
                MaxPooling((2,2,2), (2,2,2))
            ]),
            For(range(2), lambda: [
                Dense(1024),
                Dropout(0.5)
            ]),
            Dense(num_output_classes, activation=None)
        ])(input_var)
    # loss and classification error
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # training config
    epoch_size = 1322  # for now we manually specify epoch size
    minibatch_size = 4
    # Set learning parameters
    lr_per_sample = [0.01]*10 + [0.001]*10 + [0.0001]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    momentum_time_constant = 4096
    mm_schedule = momentum_as_time_constant_schedule([momentum_time_constant], epoch_size=epoch_size)
    # Instantiate the trainer object to drive the model training
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule, True)
    progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)
    trainer = Trainer(z, (ce, pe), learner, progress_printer)
    log_number_of_parameters(z); print()
    # Get minibatches of images to train with and perform model training
    for epoch in range(max_epochs):  # loop over epochs
        train_reader.reset()
        while train_reader.has_more():
            videos, labels, current_minibatch = train_reader.next_minibatch(minibatch_size)
            trainer.train_minibatch({input_var: videos, label_var: labels})
        trainer.summarize_training_progress()
    # Test data for trained model
    epoch_size = 332
    minibatch_size = 2
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    minibatch_index = 0
    test_reader.reset()
    while test_reader.has_more():
        videos, labels, current_minibatch = test_reader.next_minibatch(minibatch_size)
        # weight each minibatch's error by the number of samples it contains
        metric_numer += trainer.test_minibatch({input_var: videos, label_var: labels}) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of minibatches processed so far.
        minibatch_index += 1
    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))
    print("")
    return metric_numer/metric_denom
Example 7: train_and_evaluate
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def train_and_evaluate(reader_train, reader_test, network_name):
    set_computation_network_trace_level(0)
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))
    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0]*80 + [0.1]*40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1]*1 + [1.0]*80 + [0.1]*40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")
    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # shared training parameters
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 128
    max_epochs = 160
    momentum_time_constant = -minibatch_size/np.log(0.9)
    l2_reg_weight = 0.0001
    # Set learning parameters
    lr_per_sample = [lr/minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)
    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }
    log_number_of_parameters(z); print()
    progress_printer = ProgressPrinter(tag='Training')
    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size-sample_count), input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)  # update model with it
            sample_count += data[label_var].num_samples  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, network_name + "_{}.dnn".format(epoch)))
    # Evaluation parameters
    epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0
    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)
        # Fetch next test minibatch.
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)
        # weight the minibatch error by the number of samples in it
        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of samples processed so far.
        sample_count += data[label_var].num_samples
        minibatch_index += 1
    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))
    print("")
    return metric_numer/metric_denom
Example 8: sequence_to_sequence_translator
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
# ......... part of the code omitted .........
    ce = cross_entropy_with_softmax(z, label_sequence)
    errs = classification_error(z, label_sequence)
    # Instantiate the trainer object to drive the model training
    lr = 0.007
    momentum_time_constant = 1100
    momentum_per_sample = momentums_per_sample(math.exp(-1.0 / momentum_time_constant))
    clipping_threshold_per_sample = 2.3
    gradient_clipping_with_truncation = True
    trainer = Trainer(
        z,
        ce,
        errs,
        [
            momentum_sgd(
                z.parameters(),
                lr,
                momentum_per_sample,
                clipping_threshold_per_sample,
                gradient_clipping_with_truncation,
            )
        ],
    )
    rel_path = r"../../../../Examples/SequenceToSequence/CMUDict/Data/cmudict-0.7b.train-dev-20-21.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    feature_stream_name = "features"
    labels_stream_name = "labels"
    mb_source = text_format_minibatch_source(
        path,
        [
            StreamConfiguration(feature_stream_name, input_vocab_dim, True, "S0"),
            StreamConfiguration(labels_stream_name, label_vocab_dim, True, "S1"),
        ],
        10000,
    )
    features_si = mb_source[feature_stream_name]
    labels_si = mb_source[labels_stream_name]
    # Get minibatches of sequences to train with and perform model training
    minibatch_size = 72
    training_progress_output_freq = 30
    if debug_output:
        training_progress_output_freq = training_progress_output_freq / 3
    while True:
        mb = mb_source.get_next_minibatch(minibatch_size)
        if len(mb) == 0:
            break
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {raw_input: mb[features_si], raw_labels: mb[labels_si]}
        trainer.train_minibatch(arguments)
        print_training_progress(trainer, i, training_progress_output_freq)
        i += 1
    rel_path = r"../../../../Examples/SequenceToSequence/CMUDict/Data/cmudict-0.7b.test.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    test_mb_source = text_format_minibatch_source(
        path,
        [
            StreamConfiguration(feature_stream_name, input_vocab_dim, True, "S0"),
            StreamConfiguration(labels_stream_name, label_vocab_dim, True, "S1"),
        ],
        10000,
        False,
    )
    features_si = test_mb_source[feature_stream_name]
    labels_si = test_mb_source[labels_stream_name]
    # choose this to be big enough for the longest sentence
    train_minibatch_size = 1024
    # Get minibatches of sequences to test and perform testing
    i = 0
    total_error = 0.0
    while True:
        mb = test_mb_source.get_next_minibatch(train_minibatch_size)
        if len(mb) == 0:
            break
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be tested with
        arguments = {raw_input: mb[features_si], raw_labels: mb[labels_si]}
        mb_error = trainer.test_minibatch(arguments)
        total_error += mb_error
        if debug_output:
            print("Minibatch {}, Error {} ".format(i, mb_error))
        i += 1
    # Average of evaluation errors of all test minibatches
    return total_error / i
Example 9: train_and_evaluate
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def train_and_evaluate(create_train_reader, test_reader, network_name, max_epochs, create_dist_learner, scale_up=False):
    set_computation_network_trace_level(0)
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))
    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0]*80 + [0.1]*40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1]*1 + [1.0]*80 + [0.1]*40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")
    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # shared training parameters
    epoch_size = 50000  # for now we manually specify epoch size
    # NOTE: scaling up minibatch_size increases sample throughput. On an 8-GPU machine,
    # ResNet110 samples-per-second is ~7x that of a single GPU, compared to ~3x without
    # scaling up. However, a bigger minibatch size over the same number of samples means
    # fewer updates, and thus leads to higher training error. This is a trade-off between
    # speed and accuracy.
    minibatch_size = 128 * (distributed.Communicator.num_workers() if scale_up else 1)
    momentum_time_constant = -minibatch_size/np.log(0.9)
    l2_reg_weight = 0.0001
    # Set learning parameters
    lr_per_sample = [lr/minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
    # trainer object
    learner = create_dist_learner(momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                                               l2_regularization_weight=l2_reg_weight))
    trainer = Trainer(z, ce, pe, learner)
    total_number_of_samples = max_epochs * epoch_size
    train_reader = create_train_reader(total_number_of_samples)
    # define mapping from reader streams to network inputs
    input_map = {
        input_var: train_reader.streams.features,
        label_var: train_reader.streams.labels
    }
    log_number_of_parameters(z); print()
    progress_printer = ProgressPrinter(tag='Training')
    # perform model training
    current_epoch = 0
    updated = True
    while updated:
        data = train_reader.next_minibatch(minibatch_size, input_map=input_map)  # fetch minibatch
        updated = trainer.train_minibatch(data)  # update model with it
        progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
        epoch_index = int(trainer.total_number_of_samples_seen/epoch_size)
        if current_epoch != epoch_index:  # new epoch reached
            progress_printer.epoch_summary(with_metric=True)
            current_epoch = epoch_index
            trainer.save_checkpoint(os.path.join(model_path, network_name + "_{}.dnn".format(current_epoch)))
    # Evaluation parameters
    epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0
    while True:
        data = test_reader.next_minibatch(minibatch_size, input_map=input_map)
        if not data:
            break
        local_mb_samples = data[label_var].num_samples
        metric_numer += trainer.test_minibatch(data) * local_mb_samples
        metric_denom += local_mb_samples
        minibatch_index += 1
    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))
    print("")
    return metric_numer/metric_denom
Example 10: train_and_evaluate
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def train_and_evaluate(reader_train, reader_test, network_name, epoch_size, max_epochs, profiler_dir=None,
                       model_dir=None, log_dir=None, tensorboard_logdir=None, gen_heartbeat=False):
    set_computation_network_trace_level(0)
    # Input variables denoting the features and label data
    input_var = C.input_variable((num_channels, image_height, image_width), name='features')
    label_var = C.input_variable((num_classes))
    # create model, and configure learning parameters
    if network_name == 'resnet20':
        z = create_cifar10_model(input_var, 3, num_classes)
        lr_per_mb = [1.0]*80 + [0.1]*40 + [0.01]
    elif network_name == 'resnet110':
        z = create_cifar10_model(input_var, 18, num_classes)
        lr_per_mb = [0.1]*1 + [1.0]*80 + [0.1]*40 + [0.01]
    else:
        raise RuntimeError("Unknown model name!")
    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # shared training parameters
    minibatch_size = 128
    momentum_time_constant = -minibatch_size/np.log(0.9)
    l2_reg_weight = 0.0001
    # Set learning parameters
    lr_per_sample = [lr/minibatch_size for lr in lr_per_mb]
    lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
    mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
    # progress writers
    progress_writers = [ProgressPrinter(tag='Training', log_to_file=log_dir, num_epochs=max_epochs, gen_heartbeat=gen_heartbeat)]
    tensorboard_writer = None
    if tensorboard_logdir is not None:
        tensorboard_writer = TensorBoardProgressWriter(freq=10, log_dir=tensorboard_logdir, model=z)
        progress_writers.append(tensorboard_writer)
    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, (ce, pe), learner, progress_writers)
    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }
    log_number_of_parameters(z); print()
    # perform model training
    if profiler_dir:
        start_profiler(profiler_dir, True)
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size-sample_count), input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
        trainer.summarize_training_progress()
        # Log mean of each parameter tensor, so that we can confirm that the parameters do change.
        if tensorboard_writer:
            for parameter in z.parameters:
                tensorboard_writer.write_value(parameter.uid + "/mean", reduce_mean(parameter).eval(), epoch)
        if model_dir:
            z.save(os.path.join(model_dir, network_name + "_{}.dnn".format(epoch)))
        enable_profiler()  # begin to collect profiler data after first epoch
    if profiler_dir:
        stop_profiler()
    # Evaluation parameters
    test_epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    while sample_count < test_epoch_size:
        current_minibatch = min(minibatch_size, test_epoch_size - sample_count)
        # Fetch next test minibatch.
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)
        # weight the minibatch error by the number of samples in it
        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of samples processed so far.
        sample_count += data[label_var].num_samples
    print("")
    trainer.summarize_test_progress()
    print("")
    # ......... part of the code omitted .........
Example 11: simple_mnist
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def simple_mnist(tensorboard_logdir=None):
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200
    # Input variables denoting the features and label data
    input = input_variable(input_dim, np.float32)
    label = input_variable(num_output_classes, np.float32)
    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), input)
    z = fully_connected_classifier_net(
        scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, relu)
    ce = cross_entropy_with_softmax(z, label)
    pe = classification_error(z, label)
    data_dir = os.path.join(abs_path, "..", "..", "..", "DataSets", "MNIST")
    path = os.path.normpath(os.path.join(data_dir, "Train-28x28_cntk_text.txt"))
    check_path(path)
    reader_train = create_reader(path, True, input_dim, num_output_classes)
    input_map = {
        input: reader_train.streams.features,
        label: reader_train.streams.labels
    }
    # Training config
    minibatch_size = 64
    num_samples_per_sweep = 60000
    num_sweeps_to_train_with = 10
    # Instantiate progress writers.
    #training_progress_output_freq = 100
    progress_writers = [ProgressPrinter(
        #freq=training_progress_output_freq,
        tag='Training',
        num_epochs=num_sweeps_to_train_with)]
    if tensorboard_logdir is not None:
        progress_writers.append(TensorBoardProgressWriter(freq=10, log_dir=tensorboard_logdir, model=z))
    # Instantiate the trainer object to drive the model training
    lr_per_minibatch = learning_rate_schedule(0.2, UnitType.minibatch)
    trainer = Trainer(z, (ce, pe), sgd(z.parameters, lr=lr_per_minibatch), progress_writers)
    training_session(
        trainer=trainer,
        mb_source=reader_train,
        mb_size=minibatch_size,
        var_to_stream=input_map,
        max_samples=num_samples_per_sweep * num_sweeps_to_train_with,
        progress_frequency=num_samples_per_sweep
    ).train()
    # Load test data
    path = os.path.normpath(os.path.join(data_dir, "Test-28x28_cntk_text.txt"))
    check_path(path)
    reader_test = create_reader(path, False, input_dim, num_output_classes)
    input_map = {
        input: reader_test.streams.features,
        label: reader_test.streams.labels
    }
    # Test data for trained model
    test_minibatch_size = 1024
    num_samples = 10000
    num_minibatches_to_test = num_samples / test_minibatch_size
    test_result = 0.0
    for i in range(0, int(num_minibatches_to_test)):
        mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
        eval_error = trainer.test_minibatch(mb)
        test_result = test_result + eval_error
    # Average of evaluation errors of all test minibatches
    return test_result / num_minibatches_to_test
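create_reader in the simple_mnist examples reads the CTF-format MNIST text files. A definition along the lines of the official CNTK MNIST samples is sketched below for self-containment; the variant behind this snippet may differ slightly:

from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT

def create_reader(path, is_training, input_dim, num_label_classes):
    # The CTF text files carry two streams, named 'features' and 'labels'.
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim),
        labels=StreamDef(field='labels', shape=num_label_classes))),
        randomize=is_training,
        max_sweeps=INFINITELY_REPEAT if is_training else 1)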
Example 12: simple_mnist
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def simple_mnist(debug_output=False):
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200
    # Input variables denoting the features and label data
    input = input_variable(input_dim, np.float32)
    label = input_variable(num_output_classes, np.float32)
    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), input)
    z = fully_connected_classifier_net(
        scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, relu)
    ce = cross_entropy_with_softmax(z, label)
    pe = classification_error(z, label)
    try:
        rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Image/MNIST/v0/Train-28x28_cntk_text.txt".split("/"))
    except KeyError:
        rel_path = os.path.join(abs_path, "..", "..", "..", "..", "..", "Examples", "Image", "DataSets", "MNIST", "Train-28x28_cntk_text.txt")
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    reader_train = create_reader(path, True, input_dim, num_output_classes)
    input_map = {
        input: reader_train.streams.features,
        label: reader_train.streams.labels
    }
    lr_per_minibatch = learning_rate_schedule(0.2, UnitType.minibatch)
    # Instantiate the trainer object to drive the model training
    trainer = Trainer(z, ce, pe, sgd(z.parameters, lr=lr_per_minibatch))
    # Get minibatches of images to train with and perform model training
    minibatch_size = 64
    num_samples_per_sweep = 60000
    num_sweeps_to_train_with = 10
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    training_progress_output_freq = 500
    if debug_output:
        training_progress_output_freq = training_progress_output_freq/4
    for i in range(0, int(num_minibatches_to_train)):
        mb = reader_train.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(mb)
        print_training_progress(trainer, i, training_progress_output_freq)
    # Load test data
    try:
        rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'],
                                *"Image/MNIST/v0/Test-28x28_cntk_text.txt".split("/"))
    except KeyError:
        rel_path = os.path.join(abs_path, "..", "..", "..", "..", "..", "Examples", "Image", "DataSets", "MNIST", "Test-28x28_cntk_text.txt")
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    reader_test = create_reader(path, False, input_dim, num_output_classes)
    input_map = {
        input: reader_test.streams.features,
        label: reader_test.streams.labels
    }
    # Test data for trained model
    test_minibatch_size = 1024
    num_samples = 10000
    num_minibatches_to_test = num_samples / test_minibatch_size
    test_result = 0.0
    for i in range(0, int(num_minibatches_to_test)):
        mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
        eval_error = trainer.test_minibatch(mb)
        test_result = test_result + eval_error
    # Average of evaluation errors of all test minibatches
    return test_result / num_minibatches_to_test
Example 13: cifar_resnet
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def cifar_resnet(base_path, debug_output=False):
    image_height = 32
    image_width = 32
    num_channels = 3
    num_classes = 10
    feats_stream_name = 'features'
    labels_stream_name = 'labels'
    minibatch_source = create_mb_source(feats_stream_name, labels_stream_name,
                                        image_height, image_width, num_channels, num_classes, base_path)
    features_si = minibatch_source[feats_stream_name]
    labels_si = minibatch_source[labels_stream_name]
    # Input variables denoting the features and label data
    image_input = input_variable(
        (num_channels, image_height, image_width), features_si.m_element_type)
    label_var = input_variable((num_classes), features_si.m_element_type)
    # Instantiate the resnet classification model
    classifier_output = resnet_classifer(image_input, num_classes)
    ce = cross_entropy_with_softmax(classifier_output, label_var)
    pe = classification_error(classifier_output, label_var)
    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, ce, pe,
                      [sgd(classifier_output.parameters(), lr=0.0078125)])
    # Get minibatches of images to train with and perform model training
    mb_size = 32
    training_progress_output_freq = 60
    num_mbs = 1000
    if debug_output:
        training_progress_output_freq = training_progress_output_freq/3
    for i in range(0, num_mbs):
        mb = minibatch_source.get_next_minibatch(mb_size)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {
            image_input: mb[features_si],
            label_var: mb[labels_si]
        }
        trainer.train_minibatch(arguments)
        print_training_progress(trainer, i, training_progress_output_freq)
    test_minibatch_source = create_test_mb_source(feats_stream_name, labels_stream_name,
                                                  image_height, image_width, num_channels, num_classes, base_path)
    features_si = test_minibatch_source[feats_stream_name]
    labels_si = test_minibatch_source[labels_stream_name]
    mb_size = 64
    num_mbs = 300
    total_error = 0.0
    for i in range(0, num_mbs):
        mb = test_minibatch_source.get_next_minibatch(mb_size)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be evaluated with
        arguments = {
            image_input: mb[features_si],
            label_var: mb[labels_si]
        }
        error = trainer.test_minibatch(arguments)
        total_error += error
    return total_error / num_mbs
Example 14: simple_mnist
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def simple_mnist(debug_output=False):
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200
    # Input variables denoting the features and label data
    input = input_variable(input_dim, np.float32)
    label = input_variable(num_output_classes, np.float32)
    # Instantiate the feedforward classification model
    scaled_input = element_times(constant((), 0.00390625), input)
    netout = fully_connected_classifier_net(
        scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, sigmoid
    )
    ce = cross_entropy_with_softmax(netout, label)
    pe = classification_error(netout, label)
    try:
        rel_path = os.path.join(
            os.environ["CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY"],
            *"Image/MNIST/v0/Train-28x28_cntk_text.txt".split("/")
        )
    except KeyError:
        rel_path = os.path.join(*"../../../../Examples/Image/Datasets/MNIST/Train-28x28_cntk_text.txt".split("/"))
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    feature_stream_name = "features"
    labels_stream_name = "labels"
    mb_source = text_format_minibatch_source(
        path,
        [
            StreamConfiguration(feature_stream_name, input_dim),
            StreamConfiguration(labels_stream_name, num_output_classes),
        ],
    )
    features_si = mb_source[feature_stream_name]
    labels_si = mb_source[labels_stream_name]
    # Instantiate the trainer object to drive the model training
    trainer = Trainer(netout, ce, pe, [sgd(netout.parameters(), lr=0.003125)])
    # Get minibatches of images to train with and perform model training
    minibatch_size = 32
    num_samples_per_sweep = 60000
    num_sweeps_to_train_with = 1
    num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size
    training_progress_output_freq = 80
    if debug_output:
        training_progress_output_freq = training_progress_output_freq / 4
    for i in range(0, int(num_minibatches_to_train)):
        mb = mb_source.get_next_minibatch(minibatch_size)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {input: mb[features_si], label: mb[labels_si]}
        trainer.train_minibatch(arguments)
        print_training_progress(trainer, i, training_progress_output_freq)
    # Load test data
    try:
        rel_path = os.path.join(
            os.environ["CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY"], *"Image/MNIST/v0/Test-28x28_cntk_text.txt".split("/")
        )
    except KeyError:
        rel_path = os.path.join(*"../../../../Examples/Image/Datasets/MNIST/Test-28x28_cntk_text.txt".split("/"))
    path = os.path.normpath(os.path.join(abs_path, rel_path))
    check_path(path)
    test_mb_source = text_format_minibatch_source(
        path,
        [
            StreamConfiguration(feature_stream_name, input_dim),
            StreamConfiguration(labels_stream_name, num_output_classes),
        ],
        randomize=False,
    )
    features_si = test_mb_source[feature_stream_name]
    labels_si = test_mb_source[labels_stream_name]
    # Test data for trained model
    test_minibatch_size = 512
    num_samples = 10000
    num_minibatches_to_test = num_samples / test_minibatch_size
    test_result = 0.0
    for i in range(0, int(num_minibatches_to_test)):
        mb = test_mb_source.get_next_minibatch(test_minibatch_size)
        # Specify the mapping of input variables in the model to actual
        # minibatch data to be tested with
        arguments = {input: mb[features_si], label: mb[labels_si]}
        eval_error = trainer.test_minibatch(arguments)
        test_result = test_result + eval_error
    # ......... part of the code omitted .........
Example 15: convnet_cifar10_dataaug
# Required import: from cntk import Trainer [as alias]
# Or: from cntk.Trainer import test_minibatch [as alias]
def convnet_cifar10_dataaug(reader_train, reader_test, max_epochs=80):
    set_computation_network_trace_level(0)
    # Input variables denoting the features and label data
    input_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))
    # apply model to input
    scaled_input = element_times(constant(0.00390625), input_var)
    with default_options(activation=relu, pad=True):
        z = Sequential([
            LayerStack(2, lambda: [
                Convolution((3,3), 64),
                Convolution((3,3), 64),
                MaxPooling((3,3), (2,2))
            ]),
            LayerStack(2, lambda i: [
                Dense([256,128][i]),
                Dropout(0.5)
            ]),
            Dense(num_classes, activation=None)
        ])(scaled_input)
    # loss and metric
    ce = cross_entropy_with_softmax(z, label_var)
    pe = classification_error(z, label_var)
    # training config
    epoch_size = 50000  # for now we manually specify epoch size
    minibatch_size = 64
    # Set learning parameters
    lr_per_sample = [0.0015625]*20 + [0.00046875]*20 + [0.00015625]*20 + [0.000046875]*10 + [0.000015625]
    lr_schedule = learning_rate_schedule(lr_per_sample, unit=UnitType.sample, epoch_size=epoch_size)
    mm_time_constant = [0]*20 + [600]*20 + [1200]
    mm_schedule = momentum_as_time_constant_schedule(mm_time_constant, epoch_size=epoch_size)
    l2_reg_weight = 0.002
    # trainer object
    learner = momentum_sgd(z.parameters, lr_schedule, mm_schedule,
                           l2_regularization_weight=l2_reg_weight)
    trainer = Trainer(z, ce, pe, learner)
    # define mapping from reader streams to network inputs
    input_map = {
        input_var: reader_train.streams.features,
        label_var: reader_train.streams.labels
    }
    log_number_of_parameters(z); print()
    progress_printer = ProgressPrinter(tag='Training')
    # perform model training
    for epoch in range(max_epochs):  # loop over epochs
        sample_count = 0
        while sample_count < epoch_size:  # loop over minibatches in the epoch
            data = reader_train.next_minibatch(min(minibatch_size, epoch_size-sample_count), input_map=input_map)  # fetch minibatch
            trainer.train_minibatch(data)  # update model with it
            sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
        progress_printer.epoch_summary(with_metric=True)
        persist.save_model(z, os.path.join(model_path, "ConvNet_CIFAR10_DataAug_{}.dnn".format(epoch)))
    ### Evaluation action
    epoch_size = 10000
    minibatch_size = 16
    # process minibatches and evaluate the model
    metric_numer = 0
    metric_denom = 0
    sample_count = 0
    minibatch_index = 0
    while sample_count < epoch_size:
        current_minibatch = min(minibatch_size, epoch_size - sample_count)
        # Fetch next test minibatch.
        data = reader_test.next_minibatch(current_minibatch, input_map=input_map)
        # weight the minibatch error by the number of samples in it
        metric_numer += trainer.test_minibatch(data) * current_minibatch
        metric_denom += current_minibatch
        # Keep track of the number of samples processed so far.
        sample_count += data[label_var].num_samples
        minibatch_index += 1
    print("")
    print("Final Results: Minibatch[1-{}]: errs = {:0.2f}% * {}".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))
    print("")
    return metric_numer/metric_denom