This article collects typical usage examples of the Python method neon.callbacks.callbacks.Callbacks.add_deconv_callback. If you have been wondering how Callbacks.add_deconv_callback is called in Python and what it looks like in real code, the selected examples below may help. You can also read further about the class the method belongs to, neon.callbacks.callbacks.Callbacks.
Three code examples of Callbacks.add_deconv_callback are shown below, ordered by popularity by default.
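For orientation, here is a minimal sketch of the pattern all three examples share. It assumes a neon Model `mlp`, data iterators `train_set` and `valid_set`, a cost `cost`, and an optimizer `opt` have already been built elsewhere; those names are placeholders, not part of the examples below, and only the callback wiring is shown.

# Minimal sketch (assumed setup): wire the deconvolution-visualization callback into training.
# `mlp`, `train_set`, `valid_set`, `cost`, and `opt` are placeholders assumed to exist already.
from neon.callbacks.callbacks import Callbacks

callbacks = Callbacks(mlp, eval_set=valid_set)       # standard progress/evaluation callbacks
callbacks.add_deconv_callback(train_set, valid_set)  # additionally record deconv visualization data
mlp.fit(train_set, optimizer=opt, num_epochs=10, cost=cost, callbacks=callbacks)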
Example 1: Dropout
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_deconv_callback [as alias]
Dropout(keep=0.5),
Conv((3, 3, 192), **convp1),
Conv((3, 3, 192), **convp1),
Conv((3, 3, 192), **convp1s2),
Dropout(keep=0.5),
Conv((3, 3, 192), **convp1),
Conv((1, 1, 192), **conv),
Conv((1, 1, 16), **conv),
Pooling(8, op="avg"),
Activation(Softmax()),
]
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)
if args.model_file:
    import os
    assert os.path.exists(args.model_file), "%s not found" % args.model_file
    mlp.load_params(args.model_file)
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
if args.deconv:
    callbacks.add_deconv_callback(train_set, valid_set)
mlp.fit(train_set, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print("Misclassification error = %.1f%%" % (mlp.eval(valid_set, metric=Misclassification()) * 100))
Example 2: GeneralizedCost
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_deconv_callback [as alias]
layers.append(Conv((3, 3, 384), init=init_uni, activation=relu, strides=1, padding=1))
layers.append(Conv((1, 1, 384), init=init_uni, activation=relu, strides=1))
layers.append(Conv((3, 3, 384), init=init_uni, activation=relu, strides=2, padding=1)) # 12->6
layers.append(Dropout(keep=0.5))
layers.append(Conv((3, 3, 1024), init=init_uni, activation=relu, strides=1, padding=1))
layers.append(Conv((1, 1, 1024), init=init_uni, activation=relu, strides=1))
layers.append(Conv((1, 1, 1000), init=init_uni, activation=relu, strides=1))
layers.append(Pooling(6, op='avg'))
layers.append(Activation(Softmax()))
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)
if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)
# configure callbacks
callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
if args.deconv:
    callbacks.add_deconv_callback(train, test)
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
test.exit_batch_provider()
train.exit_batch_provider()
Example 3: NeonArgparser
# Required import: from neon.callbacks.callbacks import Callbacks [as alias]
# Or: from neon.callbacks.callbacks.Callbacks import add_deconv_callback [as alias]
parser = NeonArgparser(__doc__, default_config_files=config_files,
                       default_overrides=dict(batch_size=64))
parser.add_argument('--deconv', action='store_true',
                    help='save visualization data from deconvolution')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()
model, cost = create_network()
rseed = 0 if args.rng_seed is None else args.rng_seed
# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing validation manifest"
train = make_alexnet_train_loader(args.manifest['train'], args.manifest_root,
                                  model.be, args.subset_pct, rseed)
valid = make_validation_loader(args.manifest['val'], args.manifest_root,
                               model.be, args.subset_pct)
sched_weight = Schedule([10], change=0.1)
opt = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005, schedule=sched_weight)
# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=valid, metric=valmetric, **args.callback_args)
if args.deconv:
    callbacks.add_deconv_callback(train, valid)
model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)