This page collects typical usage examples of the Python method neon.models.Model.eval. If you are wondering what Model.eval does or how to call it, the curated examples below may help. You can also explore the neon.models.Model class for further usage.
Fifteen code examples of Model.eval are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
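Before the examples, here is a minimal, self-contained sketch of the pattern they all share: build a Model from layers, fit it, then call Model.eval with a metric. It assumes a recent neon API (>= 1.2, where ArrayIterator and the argument-light Callbacks constructor exist) and a CPU backend; the toy data, layer sizes, and epoch count are placeholders, not taken from any example below.

import numpy as np
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Misclassification

be = gen_backend(backend='cpu', batch_size=128)

# toy data: 1024 samples, 20 features, 3 classes
X = np.random.rand(1024, 20)
y = np.random.randint(3, size=1024)
train = ArrayIterator(X, y, nclass=3)

init = Gaussian(scale=0.01)
model = Model(layers=[Affine(nout=64, init=init, activation=Rectlin()),
                      Affine(nout=3, init=init, activation=Softmax())])
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
opt = GradientDescentMomentum(0.1, momentum_coef=0.9)

model.fit(train, optimizer=opt, num_epochs=2, cost=cost,
          callbacks=Callbacks(model))

# Model.eval returns an ndarray of metric values, one per metric component
err = model.eval(train, metric=Misclassification())
print('Misclassification error = %.2f%%' % (err[0] * 100))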
Example 1: train_eval
# Required import: from neon.models import Model
# Method used: Model.eval
def train_eval(train_set, valid_set, args, hidden_size=100,
               clip_gradients=True, gradient_limit=5):
    # weight initialization
    init = Uniform(low=-0.08, high=0.08)

    # model initialization: two stacked LSTMs plus a softmax classifier
    layers = [
        LSTM(hidden_size, init, Logistic(), Tanh()),
        LSTM(hidden_size, init, Logistic(), Tanh()),
        Affine(2, init, bias=init, activation=Softmax())
    ]
    cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
    model = Model(layers=layers)
    optimizer = RMSProp(clip_gradients=clip_gradients,
                        gradient_limit=gradient_limit,
                        stochastic_round=args.rounding)

    # configure callbacks
    callbacks = Callbacks(model, train_set, progress_bar=args.progress_bar)

    # train model
    model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
              cost=cost, callbacks=callbacks)

    # positive-class scores and misclassification rate on the validation set
    pred = model.get_outputs(valid_set)
    pred_neg_rate = model.eval(valid_set, metric=Misclassification())
    return (pred[:, 1], pred_neg_rate)
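The helper returns the positive-class scores from get_outputs together with the validation error from Model.eval. A hypothetical caller might look like the sketch below; the data iterators and the args object are assumed to be built elsewhere in the script (e.g. by NeonArgparser):

scores, neg_rate = train_eval(train_set, valid_set, args, hidden_size=128)
# the metric comes back as an ndarray, hence the [0]
print('Validation misclassification = %.2f%%' % (neg_rate[0] * 100))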
Example 2: run
# Required import: from neon.models import Model
# Method used: Model.eval
def run(args, train, test):
    init_uni = Uniform(low=-0.1, high=0.1)
    opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                      momentum_coef=0.9,
                                      stochastic_round=args.rounding)
    layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
              Affine(nout=10, init=init_uni, activation=Softmax())]
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    mlp = Model(layers=layers)
    callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
    mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    err = mlp.eval(test, metric=Misclassification()) * 100
    print('Misclassification error = %.2f%%' % err)
    return err
Example 3: __init__
# Required import: from neon.models import Model
# Method used: Model.eval
class MostCommonWordSense:
    def __init__(self, rounding, callback_args, epochs):
        # setup weight initialization function
        self.init = Gaussian(loc=0.0, scale=0.01)

        # setup optimizer
        self.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9,
                                                 stochastic_round=rounding)

        # setup cost function (sum of squared errors)
        self.cost = GeneralizedCost(costfunc=SumSquared())

        self.epochs = epochs
        self.model = None
        self.callback_args = callback_args

    def build(self):
        # setup model layers
        layers = [Affine(nout=100, init=self.init, bias=self.init, activation=Rectlin()),
                  Affine(nout=2, init=self.init, bias=self.init, activation=Softmax())]

        # initialize model object
        self.model = Model(layers=layers)

    def fit(self, valid_set, train_set):
        # configure callbacks
        callbacks = Callbacks(self.model, eval_set=valid_set, **self.callback_args)
        self.model.fit(train_set, optimizer=self.optimizer, num_epochs=self.epochs,
                       cost=self.cost, callbacks=callbacks)

    def save(self, save_path):
        self.model.save_params(save_path)

    def load(self, model_path):
        self.model = Model(model_path)

    def eval(self, valid_set):
        eval_rate = self.model.eval(valid_set, metric=Misclassification())
        return eval_rate

    def get_outputs(self, valid_set):
        return self.model.get_outputs(valid_set)
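A minimal driver for this class might look like the sketch below; valid_set and train_set are assumed to be neon data iterators, and the constructor arguments and save path are placeholders:

wsd = MostCommonWordSense(rounding=False, callback_args={}, epochs=10)
wsd.build()                    # construct the two-layer MLP
wsd.fit(valid_set, train_set)  # train with validation callbacks
wsd.save('wsd.prm')            # hypothetical output path
error_rate = wsd.eval(valid_set)
print('Misclassification = %.2f%%' % (error_rate[0] * 100))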
Example 4: Recurrent
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: this excerpt begins inside a conditional that picks the recurrent
# layer type (see Example 15 for the full if/elif chain); the leading branch
# header is reconstructed here so the snippet parses. Variables such as
# g_uni, uni, vocab_size and hidden_size are defined earlier in the script.
if args.rlayer_type == 'rnn':
    rlayer = Recurrent(hidden_size, g_uni, activation=Tanh(), reset_cells=True)
elif args.rlayer_type == 'birnn':
    rlayer = DeepBiRNN(hidden_size, g_uni, activation=Tanh(),
                       depth=1, reset_cells=True)

layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=uni),
    rlayer,
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, g_uni, bias=g_uni, activation=Softmax())
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = Adagrad(learning_rate=0.01,
                    gradient_clip_value=gradient_clip_value)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer,
          num_epochs=args.epochs, cost=cost, callbacks=callbacks)

# eval model
neon_logger.display("Train Accuracy - {}".format(100 * model.eval(train_set, metric=Accuracy())))
neon_logger.display("Test Accuracy - {}".format(100 * model.eval(valid_set, metric=Accuracy())))
Example 5: Affine
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: this excerpt begins mid-way through the branch definitions of a tree
# network; the two lines below are the tail of one branch's layer list, and
# p1, p2, p3, normrelu and normsigm are defined earlier in the source script.
      Affine(nout=16, name="b2_l1", **normrelu),
      Affine(nout=10, name="b2_l2", **normsigm)]

# setup multi-branch cost: only the main path's cross entropy carries weight
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyBinary()),
                        GeneralizedCost(costfunc=CrossEntropyBinary())],
                 weights=[1, 0., 0.])

# setup optimizer
optimizer = GradientDescentMomentum(
    0.1, momentum_coef=0.9, stochastic_round=args.rounding)

# initialize model object
alphas = [1, 0.25, 0.25]
mlp = Model(layers=SingleOutputTree([p1, p2, p3], alphas=alphas))

# setup standard fit callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, multicost=True, **args.callback_args)

# run fit
mlp.fit(train_set, optimizer=optimizer,
        num_epochs=args.epochs, cost=cost, callbacks=callbacks)

# TODO: introduce Multicost metric support. The line below currently fails
# because the Misclassification metric expects a single Tensor, not a list
# of Tensors.
neon_logger.display('Misclassification error = %.1f%%' %
                    (mlp.eval(valid_set, metric=Misclassification()) * 100))
Example 6: Rectlin
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: init_uni, opt_gdm, num_epochs and the data iterators are defined
# earlier in the source script.
relu = Rectlin()

# all-convolutional network with dropout and batch norm
layers = []
layers.append(Dropout(keep=.8))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu, pad=1, strides=2))
layers.append(Dropout(keep=.5))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1, strides=2))
layers.append(Dropout(keep=.5))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((1, 1, 192), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((1, 1, 16), init=init_uni, activation=relu))
layers.append(Pooling(6, op="avg"))
layers.append(Activation(Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)

# configure callbacks (older Callbacks API with explicit validation args)
callbacks = Callbacks(mlp, train_set, output_file=args.output_file, valid_set=valid_set,
                      valid_freq=args.validation_freq, progress_bar=args.progress_bar)
mlp.fit(train_set, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print(mlp.eval(valid_set, metric=Misclassification()))
Example 7: HDF5Iterator
# Required import: from neon.models import Model
# Method used: Model.eval
# use the iterator that generates one-hot targets; other HDF5Iterator
# subclasses are available for different data layouts
train_set = HDF5IteratorOneHot('mnist_train.h5')
valid_set = HDF5IteratorOneHot('mnist_test.h5')

# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)

# setup model layers
layers = [Affine(nout=100, init=init_norm, activation=Rectlin()),
          Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())

# setup optimizer
optimizer = GradientDescentMomentum(
    0.1, momentum_coef=0.9, stochastic_round=args.rounding)

# initialize model object
mlp = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)

# run fit
mlp.fit(train_set, optimizer=optimizer,
        num_epochs=args.epochs, cost=cost, callbacks=callbacks)

error_rate = mlp.eval(valid_set, metric=Misclassification())
neon_logger.display('Misclassification error = %.1f%%' % (error_rate * 100))
Example 8: TopKMisclassification
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: model and the test iterator are assumed to be defined earlier in
# the source script.
valmetric = TopKMisclassification(k=5)

# dummy optimizer for benchmarking (no actual training happens here)
opt_gdm = GradientDescentMomentum(0.0, 0.0)
opt_biases = GradientDescentMomentum(0.0, 0.0)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

# setup cost function as CrossEntropy; only the CE of the main path counts
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti())],
                 weights=[1, 0., 0.])

assert os.path.exists(args.model_file), 'script requires the trained weights file'
model.load_params(args.model_file)
model.initialize(test, cost)

print('running speed benchmark...')
model.benchmark(test, cost, opt)

print('\nCalculating performance on validation set...')
test.reset()
mets = model.eval(test, metric=valmetric)
print('Validation set metrics:')
print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' %
      (mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))
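Unlike a single Misclassification metric, TopKMisclassification reports several values per evaluation, so the array returned by Model.eval is unpacked positionally. Matching the print statement above:

logloss, top1_err, top5_err = mets  # ordering follows the metric's components
print('Top-1 accuracy: %.1f%%' % ((1.0 - top1_err) * 100))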
Example 9: NeonArgparser
# Required import: from neon.models import Model
# Method used: Model.eval
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

dataset = CIFAR10(path=args.data_dir,
                  normalize=True,
                  contrast_normalize=False,
                  whiten=False)
train = dataset.train_iter
test = dataset.valid_iter

init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)

# set up the model layers
layers = [Affine(nout=200, init=init_uni, activation=Rectlin()),
          Affine(nout=10, init=init_uni, activation=Logistic(shortcut=True))]
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
mlp = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(mlp, eval_set=test, **args.callback_args)
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs,
        cost=cost, callbacks=callbacks)
neon_logger.display('Misclassification error = %.1f%%' %
                    (mlp.eval(test, metric=Misclassification()) * 100))
Example 10: LookupTable
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: the excerpt started mid-list; the opening `layers = [` is restored
# here, as implied by the closing bracket and the Model(layers=layers) call.
layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb),
    LSTM(hidden_size, init_glorot, activation=Tanh(),
         gate_activation=Logistic(), reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, init_glorot, bias=init_glorot, activation=Softmax())
]
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
metric = Accuracy()
model = Model(layers=layers)
optimizer = Adagrad(learning_rate=0.01, clip_gradients=clip_gradients)

# configure callbacks
callbacks = Callbacks(model, train_set, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set,
          optimizer=optimizer,
          num_epochs=num_epochs,
          cost=cost,
          callbacks=callbacks)

# eval model
print("Test Accuracy - ", 100 * model.eval(valid_set, metric=metric))
print("Train Accuracy - ", 100 * model.eval(train_set, metric=metric))
Example 11: create_index_files
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: parser is a NeonArgparser instance created earlier in the script.
args = parser.parse_args()
train_idx, val_idx = create_index_files(args.data_dir)

common_params = dict(sampling_freq=22050, clip_duration=31000, frame_duration=20)
train_params = AudioParams(random_scale_percent=5, **common_params)
val_params = AudioParams(**common_params)
common = dict(target_size=1, nclasses=10, repo_dir=args.data_dir)
train = DataLoader(set_name='genres-train', media_params=train_params,
                   index_file=train_idx, shuffle=True, **common)
val = DataLoader(set_name='genres-val', media_params=val_params,
                 index_file=val_idx, shuffle=False, **common)

init = Gaussian(scale=0.01)
layers = [Conv((5, 5, 64), init=init, activation=Rectlin(),
               strides=dict(str_h=2, str_w=4)),
          Pooling(2, strides=2),
          Conv((5, 5, 64), init=init, batch_norm=True, activation=Rectlin(),
               strides=dict(str_h=1, str_w=2)),
          BiRNN(256, init=init, activation=Rectlin(), reset_cells=True),
          RecurrentMean(),
          Affine(128, init=init, batch_norm=True, activation=Rectlin()),
          Affine(nout=common['nclasses'], init=init, activation=Softmax())]

model = Model(layers=layers)
opt = Adadelta()
metric = Misclassification()
callbacks = Callbacks(model, eval_set=val, metric=metric, **args.callback_args)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (model.eval(val, metric=metric) * 100))
Example 12: arguments
# Required import: from neon.models import Model
# Method used: Model.eval
import os

from neon.util.argparser import NeonArgparser
from neon.util.persist import load_obj
from neon.transforms import Misclassification, CrossEntropyMulti
from neon.optimizers import GradientDescentMomentum
from neon.layers import GeneralizedCost
from neon.models import Model
from neon.data import DataLoader, ImageParams

# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# setup data provider
test_dir = os.path.join(args.data_dir, 'val')
shape = dict(channel_count=3, height=32, width=32)
test_params = ImageParams(center=True, flip=False, **shape)
common = dict(target_size=1, nclasses=10)
test_set = DataLoader(set_name='val', repo_dir=test_dir, media_params=test_params, **common)

model = Model(load_obj(args.model_file))
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001)
model.initialize(test_set, cost=cost)

acc = 1.0 - model.eval(test_set, metric=Misclassification())[0]
print('Accuracy: %.1f %% (Top-1)' % (acc * 100.0))
model.benchmark(test_set, cost=cost, optimizer=opt)
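Example 3 above suggests an equivalent shortcut for this reload-and-evaluate pattern: the Model constructor also accepts a serialized parameter file directly, and eval initializes the model against the dataset on its own (Example 3's load/eval pair relies on exactly that). A condensed sketch, with the file name as a placeholder:

model = Model('trained_model.prm')  # hypothetical path to saved parameters
err = model.eval(test_set, metric=Misclassification())[0]
print('Accuracy: %.1f %% (Top-1)' % ((1.0 - err) * 100))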
Example 13: Seq2Seq
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: this excerpt begins inside a loop that stacks encoder/decoder GRU
# layers; the encoder.append(...) opening is reconstructed from the parallel
# decoder call so the snippet parses. encoder, decoder, decoder_connections,
# init, hidden_size and the loop variable ii are defined earlier.
encoder.append(GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic(),
                   reset_cells=True, name=name+"Enc"))
decoder.append(GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic(),
                   reset_cells=True, name=name+"Dec"))
decoder_connections.append(ii)

decoder.append(Affine(train_set.nout, init, bias=init, activation=Softmax(), name="AffOut"))

layers = Seq2Seq([encoder, decoder],
                 decoder_connections=decoder_connections,
                 name="Seq2Seq")
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
model = Model(layers=layers)
optimizer = RMSProp(gradient_clip_value=gradient_clip_value, stochastic_round=args.rounding)
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set,
          optimizer=optimizer,
          num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)

# misclassification rate on the validation set
error_rate = model.eval(valid_set, metric=Misclassification(steps=time_steps))
neon_logger.display('Misclassification error = %.2f%%' % (error_rate * 100))

# print some example predictions, converted back into text
prediction, groundtruth = get_predictions(model, valid_set, time_steps)
display_text(valid_set.index_to_token, groundtruth, prediction)
Example 14: RecurrentSum
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: the excerpt started mid-list; the opening lines are reconstructed to
# mirror the identical layer stack in Example 10.
layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb),
    LSTM(hidden_size, init_glorot, activation=Tanh(),
         gate_activation=Logistic(), reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, init_glorot, bias=init_glorot, activation=Softmax())
]
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
metric = Accuracy()
model = Model(layers=layers)
optimizer = Adagrad(learning_rate=0.01, clip_gradients=clip_gradients)

# configure callbacks (older Callbacks API with explicit validation args)
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      valid_set=test_set, valid_freq=args.validation_freq,
                      progress_bar=args.progress_bar)

# train model
model.fit(train_set,
          optimizer=optimizer,
          num_epochs=num_epochs,
          cost=cost,
          callbacks=callbacks)

# eval model
print("Test Accuracy - ", 100 * model.eval(test_set, metric=metric))
print("Train Accuracy - ", 100 * model.eval(train_set, metric=metric))
Example 15: DeepBiLSTM
# Required import: from neon.models import Model
# Method used: Model.eval
# NOTE: the excerpt begins inside the rlayer_type conditional; the first
# branch header is reconstructed so the snippet parses.
if args.rlayer_type == 'bilstm':
    rlayer = DeepBiLSTM(hidden_size, g_uni, activation=Tanh(), depth=1,
                        gate_activation=Logistic(), reset_cells=True)
elif args.rlayer_type == 'rnn':
    rlayer = Recurrent(hidden_size, g_uni, activation=Tanh(), reset_cells=True)
elif args.rlayer_type == 'birnn':
    rlayer = DeepBiRNN(hidden_size, g_uni, activation=Tanh(), depth=1, reset_cells=True)

layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=uni),
    rlayer,
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, g_uni, bias=g_uni, activation=Softmax())
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = Adagrad(learning_rate=0.01, gradient_clip_value=gradient_clip_value)

# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# train model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

# eval model
print("Train Accuracy - ", 100 * model.eval(train_set, metric=Accuracy()))
print("Test Accuracy - ", 100 * model.eval(valid_set, metric=Accuracy()))