This article collects typical usage examples of the Model.cost method from Python's neon.models module. If you are unsure what Model.cost does or how to call it, the curated examples below should help; you can also explore further usage examples of the enclosing class, neon.models.Model.
Four code examples of the Model.cost method are shown below, sorted by popularity by default.
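Before the full examples, it may help to see the core idiom in isolation: a GeneralizedCost object is assigned to the model's cost attribute, passed to initialize, and then consulted (directly or via fit) during training. The following minimal sketch assumes neon 1.x import paths and a placeholder data iterator named train_set; it is an illustration, not code taken from the examples below.

from neon.models import Model
from neon.layers import Affine, GeneralizedCost
from neon.initializers import Gaussian
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti

init = Gaussian(loc=0.0, scale=0.01)
layers = [Affine(nout=100, init=init, activation=Rectlin()),
          Affine(nout=10, init=init, activation=Softmax())]
model = Model(layers=layers)

# Attach the cost, then let the model allocate buffers for it
model.cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.initialize(train_set, cost=model.cost)  # train_set: any neon data iterator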
Example 1: test_model_serialize
# Required import: from neon.models import Model [as alias]
# Or alternatively: from neon.models.Model import cost [as alias]
def test_model_serialize(backend_default, data):
    dataset = MNIST(path=data)
    (X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
    train_set = ArrayIterator(
        [X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = Sequential([Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Pooling(2),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    path2 = Sequential([Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Dropout(keep=0.5),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
              Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3

    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    mlp.save_params(tmp_save, keep_states=True)

    # Load model
    mlp = Model(tmp_save)
    mlp.initialize(train_set)

    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert allclose_with_out(output.get(), output_exp.get())

    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert allclose_with_out(_s, _s_e)
            else:
                assert allclose_with_out(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            assert type(p) == type(p_e)
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert allclose_with_out(_p, _p_e)
            elif isinstance(p, np.ndarray):
                assert allclose_with_out(p, p_e)
            else:
                assert p == p_e

    os.remove(tmp_save)
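A note on Example 1: the manual epoch/batch loop unrolls what Model.fit normally does, so the test can stop after a few batches. In ordinary code the cost is handed to fit instead, roughly as in this hedged sketch (reusing mlp, train_set, and the optimizer from the example above; Callbacks is neon's standard progress helper):

from neon.callbacks.callbacks import Callbacks

callbacks = Callbacks(mlp, eval_set=train_set)
mlp.fit(train_set, optimizer=mlp.optimizer, num_epochs=3,
        cost=mlp.cost, callbacks=callbacks)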
Example 2: test_model_serialize
# Required import: from neon.models import Model [as alias]
# Or alternatively: from neon.models.Model import cost [as alias]
def test_model_serialize(backend):
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = [Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
             Pooling(2),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    path2 = [Dropout(keep=0.5),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    layers = [MergeConcat([path1, path2]),
              Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
              BatchNorm(),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    n_test = 3
    num_epochs = 3

    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    save_obj(mlp.serialize(keep_states=True), tmp_save)

    # Load model
    mlp = Model(layers=layers)
    mlp.load_weights(tmp_save)

    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert np.allclose(output.get(), output_exp.get())

    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert np.allclose(_s, _s_e)
            else:
                assert np.allclose(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert np.allclose(_p, _p_e)
            else:
                assert np.allclose(p, p_e)

    os.remove(tmp_save)
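Example 2 exercises the older neon API (DataIterator, save_obj plus Model.serialize, and a standalone BatchNorm layer), but the Model.cost usage is identical. Beyond get_errors, the attached cost can also report the scalar loss on a batch; a small hedged sketch, reusing mlp and train_set from above and GeneralizedCost's get_cost method:

for x, t in train_set:
    out = mlp.fprop(x, inference=True)
    batch_cost = mlp.cost.get_cost(out, t)  # backend tensor holding the mean cost
    print(float(batch_cost.get()))
    break  # one batch is enough for illustration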
Example 3: GeneralizedCost
# Required import: from neon.models import Model [as alias]
# Or alternatively: from neon.models.Model import cost [as alias]
layers.append(Conv((5, 5, 20), padding=0, strides=1, init=init_uni,
                   bias=Constant(0), activation=Tanh()))
layers.append(Pooling(2, strides=2, op='max'))
# cannot be 50!!! should be multiple of 4
layers.append(Conv((5, 5, 52), padding=0, strides=1, init=init_uni,
                   bias=Constant(0), activation=Tanh()))
layers.append(Pooling(2, strides=2, op='max'))
layers.append(Affine(nout=500, init=init_norm, activation=Rectlin()))
layers.append(Affine(nout=config.ydim, init=init_norm, activation=Softmax()))

# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

# initialize model object
network = Model(layers=layers)
network.cost = cost
network.initialize(data, cost)

if config.backend == 'gpu':
    start = drv.Event()
    end = drv.Event()

num_iterations = config.num_warmup_iters + config.num_timing_iters
forward_time = np.zeros(config.num_timing_iters)
backward_time = np.zeros(config.num_timing_iters)

iter = 0
flag = True
while flag:
    for (x, t) in data:
        iter += 1
        if iter > num_iterations:
            flag = False
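The excerpt stops just as the timing loop begins. The body of such a loop typically brackets fprop and the cost/bprop pass with the CUDA events created above; the continuation below is only an illustrative guess at those measurements (start, end, network, iter, and the timing arrays come from the snippet; record, synchronize, and time_till are PyCUDA's event API):

        # inside the for (x, t) loop, once past the warmup iterations:
        start.record()
        out = network.fprop(x)
        end.record()
        end.synchronize()
        forward_time[iter - config.num_warmup_iters - 1] = start.time_till(end)   # ms

        start.record()
        delta = network.cost.get_errors(out, t)
        network.bprop(delta)
        end.record()
        end.synchronize()
        backward_time[iter - config.num_warmup_iters - 1] = start.time_till(end)  # ms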
Example 4: Affine
# Required import: from neon.models import Model [as alias]
# Or alternatively: from neon.models.Model import cost [as alias]
                  name='decoder1')
encoder2 = Affine(nout=config.encoder_size[1], init=init_norm,
                  activation=Logistic(), name='encoder2')
decoder2 = Affine(nout=config.encoder_size[0], init=init_norm,
                  activation=Logistic(), name='decoder2')
encoder3 = Affine(nout=config.encoder_size[2], init=init_norm,
                  activation=Logistic(), name='encoder3')
decoder3 = Affine(nout=config.encoder_size[1], init=init_norm,
                  activation=Logistic(), name='decoder3')
classifier = Affine(nout=config.ydim, init=init_norm, activation=Softmax())

cost_reconst = GeneralizedCost(costfunc=SumSquared())
cost_classification = GeneralizedCost(costfunc=CrossEntropyMulti())

# Setting model layers for AE1
AE1 = Model([encoder1, decoder1])
AE1.cost = cost_reconst
AE1.initialize(data, cost_reconst)
# AE1.optimizer = optimizer_default
measure_time(data, AE1, config, 'AE1')

# Setting model layers for AE2
# It has an extra encoder layer compared to what an AE should really be. This
# is done to avoid saving the outputs for each AE.
AE2_mimic = Model([encoder1, encoder2, decoder2])
AE2_mimic.cost = cost_reconst
AE2_mimic.initialize(data, cost_reconst)
# Learning rates for extra layers that should not be updated are set to zero.
# opt = MultiOptimizer({'default': optimizer_default,
#                       'encoder1': optimizer_helper})
# AE2_mimic.optimizer = opt
measure_time(data, AE2_mimic, config, 'AE2', create_target=True)
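measure_time itself is not part of this excerpt. Purely as a hypothetical reading, a helper with that signature might drive one timed pass through the data using the model's attached cost; every detail below beyond Model's public API (fprop, cost.get_errors, bprop) is an assumption, including how the mimic target for create_target=True would be obtained:

import time

def measure_time(data, model, config, tag, create_target=False):
    t0 = time.time()
    for x, t in data:
        out = model.fprop(x)
        # Assumption: for the mimic AEs the reconstruction target is the first
        # encoder's activations; for AE1 it is simply the input batch.
        target = model.layers.layers[0].outputs if create_target else x
        delta = model.cost.get_errors(out, target)
        model.bprop(delta)
    print('%s: %.2f s per pass' % (tag, time.time() - t0))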