This article collects typical usage examples of the Python method cntk.Trainer.previous_minibatch_loss_average. If you are asking what Trainer.previous_minibatch_loss_average does, how to call it, or where to find an example, the curated code below should help. You can also browse further usage examples of the containing class, cntk.Trainer.
One code example of Trainer.previous_minibatch_loss_average is shown below.
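Before the full example, here is a minimal sketch of the call pattern. It is not taken from the example below; it assumes the same CNTK v2 beta-era API that the example uses (method-style parameters() and previous_minibatch_loss_average()), and the import paths cntk.ops and cntk.learner are assumptions of this sketch.

import numpy as np
from cntk import Trainer                      # same import as the example below
from cntk.ops import (input_variable, parameter, times,
                      cross_entropy_with_softmax, classification_error)  # assumed paths
from cntk.learner import sgd                  # assumed path for the beta-era learner

# A toy linear classifier: 2 dense features -> 2 classes.
features = input_variable(2)
label = input_variable(2)
z = times(features, parameter(shape=(2, 2)))

trainer = Trainer(z,
                  cross_entropy_with_softmax(z, label),
                  classification_error(z, label),
                  [sgd(z.parameters(), lr=0.01)])

# One random minibatch of 8 samples with one-hot labels.
x = np.random.rand(8, 2).astype(np.float32)
y = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, size=8)]
trainer.train_minibatch({features: x, label: y})

# previous_minibatch_loss_average() returns the training criterion
# (here: cross-entropy) averaged over the minibatch that the most
# recent train_minibatch() call processed.
print(trainer.previous_minibatch_loss_average())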
Example 1: train_sequence_classifier
# Module to import: from cntk import Trainer  [as alias]
# Or: from cntk.Trainer import previous_minibatch_loss_average  [as alias]
# The snippet also relies on names from the surrounding example module:
# input_variable, Axis, cross_entropy_with_softmax, classification_error,
# text_format_minibatch_source, StreamConfiguration, sgd, os, and the
# helpers LSTM_sequence_classifier_net and print_training_progress.
def train_sequence_classifier(debug_output=False):
    input_dim = 2000
    cell_dim = 25
    hidden_dim = 25
    embedding_dim = 50
    num_output_classes = 5

    # Input variables denoting the features and label data
    features = input_variable(shape=input_dim, is_sparse=True)
    label = input_variable(num_output_classes,
                           dynamic_axes=[Axis.default_batch_axis()])

    # Instantiate the sequence classification model
    classifier_output = LSTM_sequence_classifier_net(
        features, num_output_classes, embedding_dim, hidden_dim, cell_dim)

    ce = cross_entropy_with_softmax(classifier_output, label)
    pe = classification_error(classifier_output, label)

    rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
    feature_stream_name = 'features'
    labels_stream_name = 'labels'

    # Epoch size 0 reads the data once; the training loop below ends
    # when the source runs out of minibatches.
    mb_source = text_format_minibatch_source(path, [
        StreamConfiguration(feature_stream_name, input_dim, True, 'x'),
        StreamConfiguration(labels_stream_name, num_output_classes, False, 'y')], 0)
    features_si = mb_source[features]
    labels_si = mb_source[label]

    # Instantiate the trainer object to drive the model training
    trainer = Trainer(classifier_output, ce, pe,
                      [sgd(classifier_output.parameters(), lr=0.0005)])

    # Get minibatches of sequences to train with and perform model training
    minibatch_size = 200
    training_progress_output_freq = 10
    i = 0
    if debug_output:
        training_progress_output_freq = training_progress_output_freq // 3

    while True:
        mb = mb_source.get_next_minibatch(minibatch_size)
        if len(mb) == 0:
            break

        # Specify the mapping of input variables in the model to actual
        # minibatch data to be trained with
        arguments = {features: mb[features_si],
                     label: mb[labels_si]}
        trainer.train_minibatch(arguments)

        print_training_progress(trainer, i, training_progress_output_freq)
        i += 1

    # Copy the scalar aggregates of the last minibatch before returning,
    # since the trainer overwrites them on the next train_minibatch call.
    import copy
    evaluation_average = copy.copy(
        trainer.previous_minibatch_evaluation_average())
    loss_average = copy.copy(trainer.previous_minibatch_loss_average())
    return evaluation_average, loss_average
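To run the example end to end, a minimal driver might look like the sketch below. It assumes the helper functions named above are importable and that the Train.ctf data file exists at the relative path hard-coded in the function, so it is a sketch rather than a guaranteed-runnable script.

if __name__ == '__main__':
    # Hypothetical driver for the example above.
    error, loss = train_sequence_classifier()
    print("Last minibatch: evaluation error = {:.4f}, loss = {:.4f}".format(
        error, loss))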