This article collects typical usage examples of the Python neon.models.Model class. If you have been wondering what the Model class does and how to use it, the curated examples here should help.
Below are 15 code examples of the Model class, sorted by popularity by default.
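Before diving into the examples, here is a minimal, end-to-end sketch of the typical Model workflow: build a layer list, wrap it in a Model, then fit and evaluate. It assumes a recent neon (>= 1.4, where Callbacks no longer takes a train_set argument); the synthetic data and every hyperparameter below are illustrative assumptions, not taken from the examples that follow.

import numpy as np
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Misclassification

be = gen_backend(backend='cpu', batch_size=128)

# fake dataset: 1000 samples with 20 features, 2 classes
X = np.random.rand(1000, 20)
y = np.random.randint(2, size=1000)
train = ArrayIterator(X, y, nclass=2)

init = Gaussian(loc=0.0, scale=0.01)
layers = [Affine(nout=64, init=init, activation=Rectlin()),
          Affine(nout=2, init=init, activation=Softmax())]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
opt = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
model.fit(train, optimizer=opt, num_epochs=2, cost=cost,
          callbacks=Callbacks(model))
print(model.eval(train, metric=Misclassification()))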
Example 1: __init__
def __init__(self, num_actions, args):
    # remember parameters
    self.num_actions = num_actions
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.history_length = args.history_length
    self.screen_dim = (args.screen_height, args.screen_width)
    self.clip_error = args.clip_error
    self.min_reward = args.min_reward
    self.max_reward = args.max_reward
    self.batch_norm = args.batch_norm

    # create Neon backend
    self.be = gen_backend(backend = args.backend,
                          batch_size = args.batch_size,
                          rng_seed = args.random_seed,
                          device_id = args.device_id,
                          datatype = np.dtype(args.datatype).type,
                          stochastic_round = args.stochastic_round)

    # prepare tensors once and reuse them
    self.input_shape = (self.history_length,) + self.screen_dim + (self.batch_size,)
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    # create model
    layers = self._createLayers(num_actions)
    self.model = Model(layers = layers)
    self.cost = GeneralizedCost(costfunc = SumSquared())
    # Bug fix: disable parallelism in all layers (works around a neon issue)
    for l in self.model.layers.layers:
        l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], self.cost)
    if args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate = args.learning_rate,
                                 decay_rate = args.decay_rate,
                                 stochastic_round = args.stochastic_round)
    elif args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate = args.learning_rate,
                              stochastic_round = args.stochastic_round)
    elif args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay = args.decay_rate,
                                  stochastic_round = args.stochastic_round)
    else:
        assert False, "Unknown optimizer"

    # create target model
    self.train_iterations = 0
    if args.target_steps:
        self.target_model = Model(layers = self._createLayers(num_actions))
        # Bug fix: disable parallelism in all layers (works around a neon issue)
        for l in self.target_model.layers.layers:
            l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])
        self.save_weights_prefix = args.save_weights_prefix
    else:
        self.target_model = self.model

    self.callback = None
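Example 1 creates a separate target network when args.target_steps is set, but the snippet does not show how that network is kept in sync with the online model. A hedged sketch of the usual refresh step follows; get_description and deserialize are neon Model methods, while the helper name and the convention of calling it every target_steps training iterations are assumptions based on the DQN pattern.

def _update_target_network(self):
    # copy weights (and batch-norm statistics, via keep_states/load_states)
    # from the online model into the target model
    pdict = self.model.get_description(get_weights=True, keep_states=True)
    self.target_model.deserialize(pdict, load_states=True)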
Example 2: __init__
def __init__(self, depth=9):
    self.depth = depth
    depth = 9  # NOTE: hard-coded here, shadowing the constructor argument
    train = (3, 32, 32)
    nfms = [2**(stage + 4) for stage in sorted(list(range(3)) * depth)]
    strides = [1 if cur == prev else 2 for cur, prev in zip(nfms[1:], nfms[:-1])]

    # Now construct the network
    layers = [Conv(**self.conv_params(3, 16))]
    layers.append(self.module_s1(nfms[0], True))

    for nfm, stride in zip(nfms[1:], strides):
        res_module = self.module_s1(nfm) if stride == 1 else self.module_s2(nfm)
        layers.append(res_module)

    layers.append(BatchNorm())
    layers.append(Activation(Rectlin()))
    layers.append(Pooling('all', op='avg'))
    layers.append(Affine(10, init=Kaiming(local=False),
                         batch_norm=True, activation=Softmax()))
    self.layers = layers
    model = Model(layers=layers)
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    model.initialize(train, cost=cost)
    self.model = model
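Example 2 relies on three helpers that the snippet omits: self.conv_params, self.module_s1, and self.module_s2. Below is a hedged reconstruction, modeled on neon's CIFAR-10 MSRA example; the exact shapes, flags, and the post-activation residual layout are assumptions about the omitted code, not the original helpers. Conv, Activation, MergeSum, and SkipNode are real neon layers, and neon Models accept nested layer lists, which is why appending a whole module works above.

from neon.initializers import Kaiming
from neon.layers import Conv, Activation, MergeSum, SkipNode
from neon.transforms import Rectlin

def conv_params(fsize, nfm, stride=1, relu=True, batch_norm=True):
    # standard 3x3 (or 1x1) conv block with Kaiming init
    return dict(fshape=(fsize, fsize, nfm),
                strides=stride,
                padding=(1 if fsize > 1 else 0),
                activation=(Rectlin() if relu else None),
                init=Kaiming(local=True),
                batch_norm=batch_norm)

def module_s1(nfm, first=False):
    # stride-1 residual module; the first module projects the skip path
    # with a 1x1 conv, later ones use an identity skip (SkipNode)
    mainpath = [Conv(**conv_params(3, nfm)),
                Conv(**conv_params(3, nfm, relu=False))]
    sidepath = [Conv(**conv_params(1, nfm, relu=False))] if first else [SkipNode()]
    return [MergeSum([mainpath, sidepath]), Activation(Rectlin())]

def module_s2(nfm):
    # stride-2 residual module: downsamples and widens the feature maps,
    # so the skip path needs a strided 1x1 projection
    mainpath = [Conv(**conv_params(3, nfm, stride=2)),
                Conv(**conv_params(3, nfm, relu=False))]
    sidepath = [Conv(**conv_params(1, nfm, stride=2, relu=False))]
    return [MergeSum([mainpath, sidepath]), Activation(Rectlin())]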
Example 3: test_model_get_outputs_rnn
def test_model_get_outputs_rnn(backend_default, data):
    dataset = PTB(50, path=data)
    dataiter = dataset.train_iter

    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, activation=Logistic()),
        Affine(len(dataiter.vocab), init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    output = model.get_outputs(dataiter)
    assert output.shape == (dataiter.ndata, dataiter.seq_length, dataiter.nclass)

    # since the inits are all constant and the model is untrained:
    # along the feature dim, the values should all be the same
    assert allclose_with_out(output[0, 0], output[0, 0, 0], rtol=0, atol=1e-4)
    assert allclose_with_out(output[0, 1], output[0, 1, 0], rtol=0, atol=1e-4)

    # along the time dim, the values should be increasing
    assert np.alltrue(output[0, 2] > output[0, 1])
    assert np.alltrue(output[0, 1] > output[0, 0])
Example 4: train_eval
def train_eval(train_set,
               valid_set,
               args,
               hidden_size = 100,
               clip_gradients = True,
               gradient_limit = 5):
    # weight initialization
    init = Uniform(low=-0.08, high=0.08)

    # model initialization
    layers = [
        LSTM(hidden_size, init, Logistic(), Tanh()),
        LSTM(hidden_size, init, Logistic(), Tanh()),
        Affine(2, init, bias=init, activation=Softmax())
    ]

    cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
    model = Model(layers=layers)
    optimizer = RMSProp(clip_gradients=clip_gradients,
                        gradient_limit=gradient_limit,
                        stochastic_round=args.rounding)

    # configure callbacks
    callbacks = Callbacks(model, train_set, progress_bar=args.progress_bar)

    # train model
    model.fit(train_set,
              optimizer=optimizer,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)

    pred = model.get_outputs(valid_set)
    pred_neg_rate = model.eval(valid_set, metric=Misclassification())
    return (pred[:, 1], pred_neg_rate)
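A hypothetical call, assuming train_set and valid_set are neon data iterators over a two-class (e.g. sentiment) dataset and args comes from neon's NeonArgparser; the hidden size is illustrative.

# returns the positive-class probabilities and the misclassification rate
probs, err = train_eval(train_set, valid_set, args, hidden_size=128)
print('validation error rate: %.4f' % float(err))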
Example 5: __init__
def __init__(self, env, args, rng, name = "DQNNeon"):
    """ Initializes a network based on the Neon framework.

    Args:
        env (AtariEnv): The environment in which the agent actuates.
        args (argparse.Namespace): All settings either with a default value or set via command line arguments.
        rng (mtrand.RandomState): initialized Mersenne Twister pseudo-random number generator.
        name (str): The name of the network object.

    Note:
        This function should always call the base class first to initialize
        the common values for the networks.
    """
    _logger.info("Initializing new object of type " + str(type(self).__name__))
    super(DQNNeon, self).__init__(env, args, rng, name)

    self.input_shape = (self.sequence_length,) + self.frame_dims + (self.batch_size,)
    self.dummy_batch = np.zeros((self.batch_size, self.sequence_length) + self.frame_dims, dtype=np.uint8)
    self.batch_norm = args.batch_norm

    self.be = gen_backend(
        backend = args.backend,
        batch_size = args.batch_size,
        rng_seed = args.random_seed,
        device_id = args.device_id,
        datatype = np.dtype(args.datatype).type,
        stochastic_round = args.stochastic_round)

    # prepare tensors once and reuse them
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((self.output_shape, self.batch_size))

    # create model
    layers = self._create_layer()
    self.model = Model(layers = layers)
    self.cost_func = GeneralizedCost(costfunc = SumSquared())
    # Bug fix: disable parallelism in all layers (works around a neon issue)
    for l in self.model.layers.layers:
        l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], self.cost_func)

    self._set_optimizer()

    if self.args.load_weights is not None:
        self.load_weights(self.args.load_weights)

    # create target model
    if self.target_update_frequency:
        layers = self._create_layer()
        self.target_model = Model(layers)
        # Bug fix: disable parallelism in all layers (works around a neon issue)
        for l in self.target_model.layers.layers:
            l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])
    else:
        self.target_model = self.model

    self.callback = None
    _logger.debug("%s" % self)
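Example 5's _set_optimizer helper is not shown. Below is a hedged reconstruction following the optimizer-selection pattern of Examples 1 and 7 in this article; the exact argument names on args are assumptions.

def _set_optimizer(self):
    # pick the optimizer named on the command line (assumed attribute names)
    if self.args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate=self.args.learning_rate,
                                 decay_rate=self.args.decay_rate,
                                 stochastic_round=self.args.stochastic_round)
    elif self.args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate=self.args.learning_rate,
                              stochastic_round=self.args.stochastic_round)
    elif self.args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay=self.args.decay_rate,
                                  stochastic_round=self.args.stochastic_round)
    else:
        raise ValueError("Unknown optimizer: %s" % self.args.optimizer)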
Example 6: main
def main():
    parser = get_parser()
    args = parser.parse_args()
    print('Args:', args)

    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    # initializer = Gaussian(loc=0.0, scale=0.01)
    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None

    if args.convolution and args.convolution[0]:
        reshape = (1, loader.input_dim, 1)
        layer_list = list(range(0, len(args.convolution), 3))
        for l, i in enumerate(layer_list):
            nb_filter = args.convolution[i]
            filter_len = args.convolution[i+1]
            stride = args.convolution[i+2]
            # print(nb_filter, filter_len, stride)
            # fshape: (height, width, num_filters)
            layers.append(Conv((1, filter_len, nb_filter), strides={'str_h': 1, 'str_w': stride}, init=initializer, activation=activation))
            if args.pool:
                layers.append(Pooling((1, args.pool)))

    for layer in args.dense:
        if layer:
            layers.append(Affine(nout=layer, init=initializer, activation=activation))
        if args.drop:
            layers.append(Dropout(keep=(1 - args.drop)))
    layers.append(Affine(nout=1, init=initializer, activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples, lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples, lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Example 7: __init__
def __init__(self, state_size, num_steers, num_speeds, args):
    # remember parameters
    self.state_size = state_size
    self.num_steers = num_steers
    self.num_speeds = num_speeds
    self.num_actions = num_steers + num_speeds
    self.num_layers = args.hidden_layers
    self.hidden_nodes = args.hidden_nodes
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.clip_error = args.clip_error

    # create Neon backend
    self.be = gen_backend(backend = args.backend,
                          batch_size = args.batch_size,
                          rng_seed = args.random_seed,
                          device_id = args.device_id,
                          datatype = np.dtype(args.datatype).type,
                          stochastic_round = args.stochastic_round)

    # prepare tensors once and reuse them
    self.input_shape = (self.state_size, self.batch_size)
    self.input = self.be.empty(self.input_shape)
    self.targets = self.be.empty((self.num_actions, self.batch_size))

    # create model
    self.model = Model(layers = self._createLayers())
    self.cost = GeneralizedCost(costfunc = SumSquared())
    self.model.initialize(self.input_shape[:-1], self.cost)
    if args.optimizer == 'rmsprop':
        self.optimizer = RMSProp(learning_rate = args.learning_rate,
                                 decay_rate = args.decay_rate,
                                 stochastic_round = args.stochastic_round)
    elif args.optimizer == 'adam':
        self.optimizer = Adam(learning_rate = args.learning_rate,
                              stochastic_round = args.stochastic_round)
    elif args.optimizer == 'adadelta':
        self.optimizer = Adadelta(decay = args.decay_rate,
                                  stochastic_round = args.stochastic_round)
    else:
        assert False, "Unknown optimizer"

    # create target model
    self.target_steps = args.target_steps
    self.train_iterations = 0
    if self.target_steps:
        self.target_model = Model(layers = self._createLayers())
        self.target_model.initialize(self.input_shape[:-1])
        self.save_weights_prefix = args.save_weights_prefix
    else:
        self.target_model = self.model
Example 8: test_model_N_S_setter
def test_model_N_S_setter(backend_default):
    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, activation=Logistic()),
        Affine(100, init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    model.set_batch_size(20)
    model.set_seq_len(10)
Example 9: build
def build(self):
    # setup model layers
    layers = [Affine(nout=100, init=self.init, bias=self.init, activation=Rectlin()),
              Affine(nout=2, init=self.init, bias=self.init, activation=Softmax())]

    # initialize model object
    self.model = Model(layers=layers)
Example 10: load
def load(self, model_path):
    """
    Load a pre-trained model's .prm file into this NpSemanticSegClassifier object.

    Args:
        model_path (str): local path of the model file to load
    """
    self.model = Model(model_path)
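For context, the .prm file consumed above is typically written earlier with Model.save_params (a real neon Model method); the names and path in this sketch are illustrative.

# after training elsewhere (hypothetical trained_model):
trained_model.save_params('np_semantic_seg.prm')

# later, restore the weights for inference via the load() method above
# (clf is a hypothetical NpSemanticSegClassifier instance):
clf.load('np_semantic_seg.prm')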
Example 11: run
def run(args, train, test):
    init_uni = Uniform(low=-0.1, high=0.1)
    opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                      momentum_coef=0.9,
                                      stochastic_round=args.rounding)
    layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
              Affine(nout=10, init=init_uni, activation=Softmax())]
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    mlp = Model(layers=layers)
    callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
    mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    err = mlp.eval(test, metric=Misclassification()) * 100
    print('Misclassification error = %.2f%%' % err)
    return err
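A hypothetical driver for run(), wired up the way neon's own MNIST examples do it. NeonArgparser and neon.data.MNIST are real neon utilities (NeonArgparser.parse_args also sets up the backend, which run() relies on), but this particular wiring is an assumption.

from neon.util.argparser import NeonArgparser
from neon.data import MNIST

parser = NeonArgparser(__doc__)
args = parser.parse_args()  # provides args.rounding, args.epochs, args.callback_args

mnist = MNIST(path=args.data_dir)
train, test = mnist.train_iter, mnist.valid_iter
err = run(args, train, test)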
Example 12: test_model_get_outputs
def test_model_get_outputs(backend):
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator(X_train[:backend.bsz * 3])

    init_norm = Gaussian(loc=0.0, scale=0.1)
    layers = [Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
    mlp = Model(layers=layers)

    # reference output: fprop each batch by hand
    out_list = []
    for x, t in train_set:
        x = mlp.fprop(x)
        out_list.append(x.get().T.copy())
    ref_output = np.vstack(out_list)

    # get_outputs should reproduce the manual fprop results
    train_set.reset()
    output = mlp.get_outputs(train_set)
    assert np.allclose(output, ref_output)
Example 13: __init__
def __init__(self, args, max_action_no, batch_dimension):
    self.args = args
    self.train_batch_size = args.train_batch_size
    self.discount_factor = args.discount_factor
    self.use_gpu_replay_mem = args.use_gpu_replay_mem

    self.be = gen_backend(backend='gpu',
                          batch_size=self.train_batch_size)
    self.input_shape = (batch_dimension[1], batch_dimension[2], batch_dimension[3], batch_dimension[0])
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((max_action_no, self.train_batch_size))

    if self.use_gpu_replay_mem:
        self.history_buffer = self.be.zeros(batch_dimension, dtype=np.uint8)
        self.input_uint8 = self.be.empty(self.input_shape, dtype=np.uint8)
    else:
        self.history_buffer = np.zeros(batch_dimension, dtype=np.float32)

    self.train_net = Model(self.create_layers(max_action_no))
    self.cost = GeneralizedCost(costfunc=SumSquared())
    # Bug fix: disable parallelism in all layers (works around a neon issue)
    for l in self.train_net.layers.layers:
        l.parallelism = 'Disabled'
    self.train_net.initialize(self.input_shape[:-1], self.cost)

    self.target_net = Model(self.create_layers(max_action_no))
    # Bug fix: disable parallelism in all layers (works around a neon issue)
    for l in self.target_net.layers.layers:
        l.parallelism = 'Disabled'
    self.target_net.initialize(self.input_shape[:-1])

    if self.args.optimizer == 'Adam':  # Adam
        self.optimizer = Adam(beta_1=args.rms_decay,
                              beta_2=args.rms_decay,
                              learning_rate=args.learning_rate)
    else:  # Neon RMSProp
        self.optimizer = RMSProp(decay_rate=args.rms_decay,
                                 learning_rate=args.learning_rate)

    self.max_action_no = max_action_no
    self.running = True
Example 14: test_model_predict_rnn
def test_model_predict_rnn(backend):
    data_path = load_text('ptb-valid')
    data_set = Text(time_steps=50, path=data_path)

    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, Logistic()),
        Affine(len(data_set.vocab), init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    output = model.predict(data_set)
    assert output.shape == (data_set.ndata, data_set.nclass)
Example 15: TreeModel
class TreeModel(object):
    """
    Container for a Tree-style test model.
    """
    def __init__(self):
        self.in_shape = (1, 32, 32)

        init_norm = Gaussian(loc=0.0, scale=0.01)
        normrelu = dict(init=init_norm, activation=Rectlin())
        normsigm = dict(init=init_norm, activation=Logistic(shortcut=True))
        normsoft = dict(init=init_norm, activation=Softmax())

        # setup model layers
        b1 = BranchNode(name="b1")
        b2 = BranchNode(name="b2")
        p1 = [Affine(nout=100, name="main1", **normrelu),
              b1,
              Affine(nout=32, name="main2", **normrelu),
              Affine(nout=160, name="main3", **normrelu),
              b2,
              Affine(nout=32, name="main2", **normrelu),
              # make next layer big to check sizing
              Affine(nout=320, name="main2", **normrelu),
              Affine(nout=10, name="main4", **normsoft)]

        p2 = [b1,
              Affine(nout=16, name="branch1_1", **normrelu),
              Affine(nout=10, name="branch1_2", **normsigm)]

        p3 = [b2,
              Affine(nout=16, name="branch2_1", **normrelu),
              Affine(nout=10, name="branch2_2", **normsigm)]

        self.cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
                                     GeneralizedCost(costfunc=CrossEntropyBinary()),
                                     GeneralizedCost(costfunc=CrossEntropyBinary())],
                              weights=[1, 0., 0.])

        self.layers = SingleOutputTree([p1, p2, p3], alphas=[1, .2, .2])
        self.model = Model(layers=self.layers)
        self.model.initialize(self.in_shape, cost=self.cost)