This article collects typical usage examples of the Python method cntk.momentum_schedule. If you are wondering how cntk.momentum_schedule is called in practice, or what working code that uses it looks like, the selected examples below may help. You can also explore further usage examples from the cntk module that the method belongs to.
Two code examples of cntk.momentum_schedule are shown below; by default they are sorted by popularity.
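Before the two examples, here is a minimal self-contained sketch of how C.momentum_schedule is typically wired into a learner. The toy model and all dimensions are illustrative and not taken from the examples below; it assumes CNTK 2.x.

import cntk as C

# Toy regression model: a single dense layer over a 2-dimensional input.
x = C.input_variable(2)
y = C.input_variable(1)
z = C.layers.Dense(1)(x)
loss = C.squared_error(z, y)

# A constant momentum of 0.9, specified relative to a 32-sample reference minibatch,
# so CNTK can rescale it when the actual minibatch size differs.
momentum = C.momentum_schedule(0.9, minibatch_size=32)
lr = C.learning_parameter_schedule_per_sample(0.01)

# The schedule is passed to any momentum-based learner, e.g. momentum_sgd or adam.
learner = C.momentum_sgd(z.parameters, lr=lr, momentum=momentum)
trainer = C.Trainer(z, (loss, loss), [learner])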
Example 1: build_graph
# Required import: import cntk as C
# Or: from cntk import momentum_schedule
def build_graph(config):
    assert config["type"] in ["cnn", "lstm", "gru", "bilstm", "bigru"]

    if config["type"] == "cnn":
        # static model
        features = C.input_variable(input_dim_model, name="input")
        labels = C.input_variable(label_dim, name="label")
    else:
        # recurrent model
        features = C.sequence.input_variable(input_dim_model, name="input")
        labels = C.sequence.input_variable(label_dim, name="label")

    netoutput = create_model(features, config["type"], config["encoder"], config["pretrained_model"], config["e3_clone"])

    if config["l2_loss_type"] == 1:
        print("Use standard l2 loss")
        ce = l2_loss(netoutput, labels)
    elif config["l2_loss_type"] == 2:
        print("Use variance normalized l2 loss")
        ce = std_normalized_l2_loss(netoutput, labels)
    else:
        raise ValueError("Unsupported loss type")

    # enforce sparsity of the output
    if config["l1_reg"] > sys.float_info.epsilon:
        ce = ce + config["l1_reg"] * l1_reg_loss(netoutput)

    # performance metric
    pe = C.squared_error(netoutput, labels)

    if config["constlr"]:
        lr_schedule = config["lr"]
    else:
        if config["lr_list"] is not None:
            print("use learning rate schedule from file")
            lr_schedule = config["lr_list"]
        else:
            if config["type"] != "cnn":  # default learning rate for recurrent models
                lr_schedule = [0.005] + [0.0025]*2 + [0.001]*4 + [0.0005]*8 + [0.00025]*16 + [0.0001]*1000 + [0.00005]*1000 + [0.000025]
            elif config["lr_schedule"] == 1:  # learning rate for CNN
                lr_schedule = [0.005] + [0.0025]*2 + [0.00125]*3 + [0.0005]*4 + [0.00025]*5 + [0.0001]
            elif config["lr_schedule"] == 2:
                lr_schedule = [0.005] + [0.0025]*2 + [0.00125]*3 + [0.0005]*4 + [0.00025]*5 + [0.0001]*100 + [0.00005]*50 + [0.000025]*50 + [0.00001]
            else:
                raise ValueError("unknown learning rate")

    learning_rate = C.learning_parameter_schedule_per_sample(lr_schedule, epoch_size=config["epoch_size"])
    momentum_schedule = C.momentum_schedule(0.9, minibatch_size=300)
    learner = C.adam(netoutput.parameters, lr=learning_rate, momentum=momentum_schedule,
                     l2_regularization_weight=0.0001,
                     gradient_clipping_threshold_per_sample=3.0, gradient_clipping_with_truncation=True)
    trainer = C.Trainer(netoutput, (ce, pe), [learner])
    return features, labels, netoutput, trainer
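The config dictionary is project-specific. For orientation, here is a hypothetical example covering the keys that build_graph reads above; all values are placeholders, not taken from the original project.

# Hypothetical config; keys inferred from build_graph, values are illustrative.
config = {
    "type": "cnn",             # one of "cnn", "lstm", "gru", "bilstm", "bigru"
    "encoder": "base",         # forwarded to create_model (project-specific)
    "pretrained_model": None,  # forwarded to create_model
    "e3_clone": False,         # forwarded to create_model
    "l2_loss_type": 1,         # 1 = standard l2, 2 = variance-normalized l2
    "l1_reg": 0.0,             # > 0 adds an l1 sparsity penalty on the output
    "constlr": False,          # True: use the fixed rate in config["lr"]
    "lr": 0.001,
    "lr_list": None,           # optional per-epoch learning-rate list from file
    "lr_schedule": 1,          # selects one of the built-in CNN schedules
    "epoch_size": 50000,       # samples per epoch for the learning-rate schedule
}
features, labels, netoutput, trainer = build_graph(config)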
Example 2: train
# Required import: import cntk as C
# Or: from cntk import momentum_schedule
def train(self, x, y, reader, model_func, max_epochs=10, task="slot_tagging"):
    log.info("Training...")

    # Instantiate the model function; x is the input (feature) variable
    model = model_func(x)

    # Instantiate the loss and error functions
    loss, label_error = self.create_criterion_function_preferred(model, y)

    # training config
    epoch_size = 18000  # 18000 samples is half the dataset size
    minibatch_size = 70

    # LR schedule over epochs
    # In CNTK, an epoch is how often we get out of the minibatch loop to
    # do other things (e.g. checkpointing, adjusting the learning rate, etc.)
    lr_per_sample = [3e-4] * 4 + [1.5e-4]
    lr_per_minibatch = [lr * minibatch_size for lr in lr_per_sample]
    lr_schedule = C.learning_parameter_schedule(lr_per_minibatch, epoch_size=epoch_size)

    # Momentum schedule
    momentums = C.momentum_schedule(0.9048374180359595, minibatch_size=minibatch_size)

    # We use the Adam optimizer, which is known to work well on this dataset.
    # Feel free to try other optimizers from
    # https://www.cntk.ai/pythondocs/cntk.learner.html#module-cntk.learner
    learner = C.adam(
        parameters=model.parameters,
        lr=lr_schedule,
        momentum=momentums,
        gradient_clipping_threshold_per_sample=15,
        gradient_clipping_with_truncation=True)

    # Set up the progress updater
    progress_printer = C.logging.ProgressPrinter(tag="Training", num_epochs=max_epochs)

    # Instantiate the trainer
    trainer = C.Trainer(model, (loss, label_error), learner, progress_printer)

    # Process minibatches and perform model training
    C.logging.log_number_of_parameters(model)

    # Assign the data fields to be read from the input
    if task == "slot_tagging":
        data_map = {x: reader.streams.query, y: reader.streams.slot_labels}
    else:
        data_map = {x: reader.streams.query, y: reader.streams.intent}

    t = 0
    for epoch in range(max_epochs):           # loop over epochs
        epoch_end = (epoch + 1) * epoch_size
        while t < epoch_end:                  # loop over minibatches within the epoch
            data = reader.next_minibatch(minibatch_size, input_map=data_map)  # fetch minibatch
            trainer.train_minibatch(data)     # update the model with it
            t += data[y].num_samples          # samples seen so far
        trainer.summarize_training_progress()

    return model
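The x, y, and reader arguments are created outside of train. Below is a minimal sketch of what they might look like for the slot-tagging task, assuming a CTF-format data file; the field names, dimensions, and file name are illustrative and not part of the example above.

import cntk as C

vocab_size, num_intents, num_labels = 943, 26, 129   # hypothetical dimensions

# Sparse one-hot sequence inputs matching the streams that train() looks up.
x = C.sequence.input_variable(vocab_size, is_sparse=True)
y = C.sequence.input_variable(num_labels, is_sparse=True)

def create_reader(path, is_training):
    # CTF deserializer exposing reader.streams.query / .intent / .slot_labels.
    return C.io.MinibatchSource(
        C.io.CTFDeserializer(path, C.io.StreamDefs(
            query=C.io.StreamDef(field='S0', shape=vocab_size, is_sparse=True),
            intent=C.io.StreamDef(field='S1', shape=num_intents, is_sparse=True),
            slot_labels=C.io.StreamDef(field='S2', shape=num_labels, is_sparse=True))),
        randomize=is_training,
        max_sweeps=C.io.INFINITELY_REPEAT if is_training else 1)

reader = create_reader("train.ctf", is_training=True)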