本文整理汇总了Python中neon.models.Model.fprop方法的典型用法代码示例。如果您正苦于以下问题:Python Model.fprop方法的具体用法?Python Model.fprop怎么用?Python Model.fprop使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类neon.models.Model
的用法示例。
在下文中一共展示了Model.fprop方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_conv_rnn
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_conv_rnn(backend_default):
    """Exercise fprop and bprop for a conv front-end feeding every recurrent layer flavour."""
    train_shape = (1, 17, 142)

    be = NervanaObject.be
    inp = be.array(be.rng.randn(np.prod(train_shape), be.bsz))
    delta = be.array(be.rng.randn(10, be.bsz))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # One instance of each recurrent layer variant under test, constructed in
    # the same order as before so backend RNG consumption is unchanged.
    rlayers = [
        DeepBiLSTM(128, init_norm, activation=Rectlin(), gate_activation=Rectlin(),
                   depth=1, reset_cells=True),
        DeepBiRNN(128, init_norm, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=False),
        DeepBiRNN(128, init_norm, activation=Rectlin(),
                  depth=2, reset_cells=True, batch_norm=False),
        DeepBiRNN(128, init_norm, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=True),
        DeepBiRNN(128, init_norm, activation=Rectlin(),
                  depth=1, reset_cells=True, batch_norm=False, bi_sum=True),
        Recurrent(128, init=init_norm, activation=Rectlin(), reset_cells=True),
        LSTM(128, init_norm, activation=Rectlin(), gate_activation=Rectlin(),
             reset_cells=True),
        GRU(128, init_norm, activation=Rectlin(), gate_activation=Rectlin(),
            reset_cells=True),
    ]

    for recurrent_layer in rlayers:
        # Small conv/pool stack in front of the recurrent layer under test.
        network = Model(layers=[
            Conv((2, 2, 4), init=init_norm, activation=Rectlin(),
                 strides=dict(str_h=2, str_w=4)),
            Pooling(2, strides=2),
            Conv((3, 3, 4), init=init_norm, batch_norm=True, activation=Rectlin(),
                 strides=dict(str_h=1, str_w=2)),
            recurrent_layer,
            RecurrentMean(),
            Affine(nout=10, init=init_norm, activation=Rectlin()),
        ])
        network.initialize(train_shape, GeneralizedCost(costfunc=CrossEntropyBinary()))
        network.fprop(inp)
        network.bprop(delta)
示例2: test_model_get_outputs
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_model_get_outputs(backend):
    """Model.get_outputs must reproduce the per-batch fprop results, stacked."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator(X_train[:backend.bsz * 3])

    init_norm = Gaussian(loc=0.0, scale=0.1)
    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])

    # Reference: run fprop batch by batch and stack the transposed outputs.
    ref_output = np.vstack([mlp.fprop(x).get().T.copy() for x, t in train_set])
    train_set.reset()

    assert np.allclose(mlp.get_outputs(train_set), ref_output)
示例3: test_model_get_outputs
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_model_get_outputs(backend_default, data):
    """get_outputs must match stacked per-batch fprop; also smoke-test benchmarking."""
    dataset = MNIST(path=data)
    train_set = dataset.train_iter

    init_norm = Gaussian(loc=0.0, scale=0.1)
    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])
    mlp.initialize(train_set)

    # Reference: run fprop batch by batch and stack the transposed outputs.
    ref_output = np.vstack([mlp.fprop(x).get().T.copy() for x, t in train_set])
    train_set.reset()

    output = mlp.get_outputs(train_set)
    # get_outputs may return fewer rows than the reference (partial batches).
    assert allclose_with_out(output, ref_output[:output.shape[0], :])

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
示例4: test_model_get_outputs
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_model_get_outputs(backend_default, data):
    """get_outputs must reproduce stacked per-batch fprop; also smoke-test benchmarking."""
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)
    train_set = ArrayIterator(X_train[:backend_default.bsz * 3])

    init_norm = Gaussian(loc=0.0, scale=0.1)
    mlp = Model(layers=[
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)),
    ])
    mlp.initialize(train_set)

    # Reference: run fprop batch by batch and stack the transposed outputs.
    ref_output = np.vstack([mlp.fprop(x).get().T.copy() for x, t in train_set])
    train_set.reset()

    assert np.allclose(mlp.get_outputs(train_set), ref_output)

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
示例5: LSTM
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
# Rebuild the trained character-level network and sample text from it.
layers = [
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model_new = Model(layers=layers)
model_new.load_params(args.save_path)
model_new.initialize(dataset=(train_set.shape[0], time_steps))

# Generate text
text = []
seed_tokens = list('ROMEO:')

x = model_new.be.zeros((len(train_set.vocab), time_steps))

# Prime the recurrent state by feeding the seed one one-hot column at a time.
for s in seed_tokens:
    x.fill(0)
    x[train_set.token_to_index[s], 0] = 1
    y = model_new.fprop(x)

# Sample num_predict characters, feeding each prediction back as the next input.
for _ in range(num_predict):
    pred = sample(y.get()[:, -1])
    text.append(train_set.index_to_token[int(pred)])
    x.fill(0)
    x[int(pred), 0] = 1
    y = model_new.fprop(x)

neon_logger.display(''.join(seed_tokens + text))
示例6: __init__
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
#.........这里部分代码省略.........
layers.append(Conv((3, 3, 64), strides=1, init=init_norm, activation=Rectlin(), batch_norm=self.batch_norm))
# The final hidden layer is fully-connected and consists of 512 rectifier units.
layers.append(Affine(nout=512, init=init_norm, activation=Rectlin(), batch_norm=self.batch_norm))
# The output layer is a fully-connected linear layer with a single output for each valid action.
layers.append(Affine(nout=num_actions, init = init_norm))
return layers
def _setInput(self, states):
    """Load one minibatch of screen states into the network input tensor, scaled to [0, 1]."""
    # Neon wants the batch dimension last: move it from axis 0 to the end.
    reordered = np.transpose(states, axes=(1, 2, 3, 0))
    # copy() shouldn't be necessary here, but Neon doesn't work otherwise
    self.input.set(reordered.copy())
    # scale raw pixel bytes (0-255) into the [0, 1] range
    self.be.divide(self.input, 255, self.input)
def train(self, minibatch, epoch):
    """Perform one Q-learning update on a minibatch of transitions.

    Args:
        minibatch: tuple (prestates, actions, rewards, poststates, terminals).
        epoch: current training epoch (consumed by the optimizer step, not
            visible in this excerpt).
    """
    # expand components of minibatch
    prestates, actions, rewards, poststates, terminals = minibatch
    # sanity-check shapes: 4-D state stacks, 1-D per-transition vectors
    assert len(prestates.shape) == 4
    assert len(poststates.shape) == 4
    assert len(actions.shape) == 1
    assert len(rewards.shape) == 1
    assert len(terminals.shape) == 1
    assert prestates.shape == poststates.shape
    assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]
    # periodically refresh the frozen target network from the online model
    if self.target_steps and self.train_iterations % self.target_steps == 0:
        # have to serialize also states for batch normalization to work
        pdict = self.model.get_description(get_weights=True, keep_states=True)
        self.target_model.deserialize(pdict, load_states=True)
    # feed-forward pass for poststates to get Q-values
    self._setInput(poststates)
    postq = self.target_model.fprop(self.input, inference = True)
    assert postq.shape == (self.num_actions, self.batch_size)
    # calculate max Q-value for each poststate
    maxpostq = self.be.max(postq, axis=0).asnumpyarray()
    assert maxpostq.shape == (1, self.batch_size)
    # feed-forward pass for prestates (training mode, inference=False)
    self._setInput(prestates)
    preq = self.model.fprop(self.input, inference = False)
    assert preq.shape == (self.num_actions, self.batch_size)
    # make copy of prestate Q-values as targets
    # It seems necessary for cpu backend.
    targets = preq.asnumpyarray().copy()
    # clip rewards to [min_reward, max_reward] (typically [-1, 1])
    rewards = np.clip(rewards, self.min_reward, self.max_reward)
    # update Q-value targets only for the actions actually taken;
    # terminal states get the raw reward, others the bootstrapped estimate
    for i, action in enumerate(actions):
        if terminals[i]:
            targets[action, i] = float(rewards[i])
        else:
            targets[action, i] = float(rewards[i]) + self.discount_rate * maxpostq[0,i]
    # copy targets to GPU memory
    self.targets.set(targets)
    # calculate errors
    deltas = self.cost.get_errors(preq, self.targets)
    assert deltas.shape == (self.num_actions, self.batch_size)
    #assert np.count_nonzero(deltas.asnumpyarray()) == 32
    # NOTE(review): the example is truncated here by the aggregator; the
    # original presumably continues with error clipping, bprop and the
    # optimizer step.
示例7: open
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
train.reset()
# get 1 image
for im, l in train:
break
train.exit_batch_provider()
with open('im1.pkl', 'w') as fid:
pickle.dump((im.get(), l.get()), fid)
im_save = im.get().copy()
if args.resume:
with open('im1.pkl', 'r') as fid:
(im2, l2) = pickle.load(fid)
im.set(im2)
l.set(l2)
# run fprop and bprop on this minibatch save the results
out_fprop = model.fprop(im)
out_fprop_save = [x.get() for x in out_fprop]
im.set(im_save)
out_fprop = model.fprop(im)
out_fprop_save2 = [x.get() for x in out_fprop]
for x, y in zip(out_fprop_save, out_fprop_save2):
assert np.max(np.abs(x-y)) == 0.0, '2 fprop iterations do not match'
# run fit fot 1 minibatch
# have to do this by hand
delta = model.cost.get_errors(im, l)
model.bprop(delta)
if args.resume:
model.optimizer = opt
model.optimizer.optimize(model.layers_to_optimize, epoch=model.epoch_index)
示例8: __init__
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
class DeepQNetwork:
def __init__(self, num_actions, args):
    """Set up the Neon backend, online/target Q-networks, cost, optimizer and reusable tensors."""
    # Backend first: everything else allocates through it.
    self.be = gen_backend(backend=args.backend,
                          batch_size=args.batch_size,
                          rng_seed=args.random_seed,
                          device_id=args.device_id,
                          default_dtype=np.dtype(args.datatype).type,
                          stochastic_round=args.stochastic_round)

    # Online model, loss and optimizer.
    layers = self.createLayers(num_actions)
    self.model = Model(layers=layers)
    self.cost = GeneralizedCost(costfunc=SumSquared())
    self.optimizer = RMSProp(learning_rate=args.learning_rate,
                             decay_rate=args.rmsprop_decay_rate,
                             stochastic_round=args.stochastic_round)

    # Separate frozen target network only when target_steps is enabled;
    # otherwise the online model doubles as its own target.
    self.target_steps = args.target_steps
    self.train_iterations = 0
    if self.target_steps:
        self.target_model = Model(layers=self.createLayers(num_actions))
        self.save_weights_path = args.save_weights_path
    else:
        self.target_model = self.model

    # Remember hyper-parameters.
    self.num_actions = num_actions
    self.batch_size = args.batch_size
    self.discount_rate = args.discount_rate
    self.history_length = args.history_length
    self.screen_dim = (args.screen_height, args.screen_width)
    self.clip_error = args.clip_error

    # Pre-allocate the input and target tensors once and reuse them each step.
    self.input_shape = (self.history_length,) + self.screen_dim + (self.batch_size,)
    self.tensor = self.be.empty(self.input_shape)
    self.tensor.lshape = self.input_shape  # needed for convolutional networks
    self.targets = self.be.empty((self.num_actions, self.batch_size))
    self.callback = None
def createLayers(self, num_actions):
    """Return the DQN layer stack (three conv layers plus two affine layers)."""
    init_norm = Gaussian(loc=0.0, scale=0.01)
    return [
        # 32 filters of 8x8 with stride 4, rectifier nonlinearity
        Conv((8, 8, 32), strides=4, init=init_norm, activation=Rectlin()),
        # 64 filters of 4x4 with stride 2, rectifier nonlinearity
        Conv((4, 4, 64), strides=2, init=init_norm, activation=Rectlin()),
        # 64 filters of 3x3 with stride 1, rectifier nonlinearity
        Conv((3, 3, 64), strides=1, init=init_norm, activation=Rectlin()),
        # fully-connected hidden layer of 512 rectifier units
        Affine(nout=512, init=init_norm, activation=Rectlin()),
        # linear output layer, one unit per valid action
        Affine(nout=num_actions, init=init_norm),
    ]
def setTensor(self, states):
    """Load one minibatch of screen states into self.tensor, scaled to [0, 1]."""
    # Neon wants the batch dimension last: move it from axis 0 to the end.
    reordered = np.transpose(states, axes=(1, 2, 3, 0))
    # copy() shouldn't be necessary here, but Neon doesn't work otherwise
    self.tensor.set(reordered.copy())
    # scale raw pixel bytes (0-255) into the [0, 1] range
    self.be.divide(self.tensor, 255, self.tensor)
def train(self, minibatch, epoch):
    """Run one Q-learning training step on a minibatch of transitions.

    Args:
        minibatch: tuple (prestates, actions, rewards, poststates, terminals).
        epoch: current training epoch (not used in the visible portion).
    """
    # expand components of minibatch
    prestates, actions, rewards, poststates, terminals = minibatch
    # sanity-check shapes: 4-D state stacks, 1-D per-transition vectors
    assert len(prestates.shape) == 4
    assert len(poststates.shape) == 4
    assert len(actions.shape) == 1
    assert len(rewards.shape) == 1
    assert len(terminals.shape) == 1
    assert prestates.shape == poststates.shape
    assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]
    # periodically clone the online network into the frozen target network
    if self.target_steps and self.train_iterations % self.target_steps == 0:
        # HACK: push something through network, so that weights exist
        self.model.fprop(self.tensor)
        # HACK: serialize network to disk and read it back to clone
        filename = os.path.join(self.save_weights_path, "target_network.pkl")
        save_obj(self.model.serialize(keep_states = False), filename)
        self.target_model.load_weights(filename)
    # feed-forward pass for poststates to get Q-values
    self.setTensor(poststates)
    postq = self.target_model.fprop(self.tensor, inference = True)
    assert postq.shape == (self.num_actions, self.batch_size)
    # calculate max Q-value for each poststate
    maxpostq = self.be.max(postq, axis=0).asnumpyarray()
    assert maxpostq.shape == (1, self.batch_size)
    # feed-forward pass for prestates (training mode, inference=False)
    self.setTensor(prestates)
    preq = self.model.fprop(self.tensor, inference = False)
    assert preq.shape == (self.num_actions, self.batch_size)
    # NOTE(review): the example is truncated here by the aggregator; the
    # original continues with target construction, cost and bprop.
#.........这里部分代码省略.........
示例9: NeonArgparser
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
# Score every recorded Go move in an HDF5 file with a trained model and
# store the predictions, per-move scores and per-position maxima back
# into the same file.

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument("hdf5")
parser.add_argument("model_pkl")
args = parser.parse_args()

model = Model(args.model_pkl)

h5s = [h5py.File(args.hdf5)]
num_moves = sum(h['X'].shape[0] for h in h5s)
print("Found {} HDF5 files with {} moves".format(len(h5s), num_moves))

inputs = HDF5Iterator([h['X'] for h in h5s],
                      [h['y'] for h in h5s],
                      ndata=(1024 * 1024))

# output datasets: full 362-way prediction, score of the move actually
# played, and the best (max) prediction per position
out_predict = h5s[0].require_dataset("predictions", (num_moves, 362), dtype=np.float32)
out_score = h5s[0].require_dataset("scores", (num_moves,), dtype=np.float32)
out_max = h5s[0].require_dataset("best", (num_moves,), dtype=np.float32)

model.initialize(inputs)
for indata, actual, sl in inputs.predict():
    # BUGFIX: use inference=True when scoring — inference=False leaves
    # dropout/batch-norm layers in training mode and corrupts the stored
    # predictions.
    prediction = model.fprop(indata, inference=True).get().T
    actual = actual.astype(int)
    # board coordinates -> flat index; negative coords (passes) map to slot 361
    actual_idx = actual[:, 0] * 19 + actual[:, 1]
    actual_idx[actual_idx < 0] = 361
    out_predict[sl, :] = prediction
    out_score[sl] = prediction[range(prediction.shape[0]), actual_idx]
    out_max[sl] = prediction.max(axis=1)
    print(sl)
示例10: test_model_serialize
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_model_serialize(backend_default, data):
    """Round-trip a multistream model through save_params/Model(file) and
    verify outputs, optimizer states and parameters are all preserved."""
    dataset = MNIST(path=data)
    (X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
    train_set = ArrayIterator(
        [X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # Two-branch network merged by stacking, then two affine layers.
    path1 = Sequential([Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Pooling(2),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    path2 = Sequential([Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Dropout(keep=0.5),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
              Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

    tmp_save = 'test_model_serialize_tmp_save.pickle'

    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)

    n_test = 3
    num_epochs = 3

    # Train for a few batches so weights and optimizer states are non-trivial.
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            mlp.bprop(mlp.cost.get_errors(x, t))
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Record expected inference outputs and layer states before saving.
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize, then reload into a fresh Model built from the file.
    mlp.save_params(tmp_save, keep_states=True)
    mlp = Model(tmp_save)
    mlp.initialize(train_set)

    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Outputs must match batch for batch.
    for output, output_exp in zip(outputs, outputs_exp):
        assert allclose_with_out(output.get(), output_exp.get())

    # States and parameters must match; batch-norm layers store lists.
    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):
                for _s, _s_e in zip(s, s_e):
                    assert allclose_with_out(_s, _s_e)
            else:
                assert allclose_with_out(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            assert type(p) == type(p_e)
            if isinstance(p, list):
                for _p, _p_e in zip(p, p_e):
                    assert allclose_with_out(_p, _p_e)
            elif isinstance(p, np.ndarray):
                assert allclose_with_out(p, p_e)
            else:
                assert p == p_e

    os.remove(tmp_save)
示例11: ModelDescription
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
# Restore trained weights into the segnet model, layer by layer, from the
# saved model description.
model_desc = ModelDescription(load_obj(args.save_model_file))
for layer in segnet_model.layers_to_optimize:
    layer.load_weights(model_desc.getlayer(layer.name))

# Set up the figure; interactive mode only when display is requested.
fig = plt.figure()
if args.display:
    plt.ion()
im1 = None
im2 = None
cnt = 1
for x, t in test_set:
z = segnet_model.fprop(x).get()
z = np.argmax(z.reshape((c, h, w)), axis=0)
t = np.argmax(t.get().reshape((c, h, w)), axis=0)
# calculate the misclass rate
acc = (np.where(z == t)[0].size / float(z.size))*100.0
plt.subplot(2,1,1);
if im1 is None:
im1 = plt.imshow(t);plt.title('Truth')
else:
im1.set_data(t)
plt.subplot(2,1,2);
if im2 is None:
示例12: DQNNeon
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
#.........这里部分代码省略.........
def _prepare_network_input(self, states):
    """Transfer one minibatch of states into self.input, normalized to [0, 1].

    Args:
        states (): a set of states with the size of minibatch
    """
    _logger.debug("Normalizing and transforming input")
    # Neon expects the batch dimension last: move it from axis 0 to the end.
    transposed = np.transpose(states, axes=(1, 2, 3, 0))
    # copy() shouldn't be necessary here, but Neon doesn't work otherwise
    self.input.set(transposed.copy())
    # divide by the grayscale range to map pixel values into [0, 1]
    self.be.divide(self.input, self.grayscales, self.input)
def train(self, minibatch, epoch):
    """ Prepare, perform and document a complete train step for one minibatch.

    Args:
        minibatch (numpy.ndarray): Mini-batch of states, shape=(batch_size,sequence_length,frame_width,frame_height)
        epoch (int): Current train epoch
    """
    _logger.debug("Complete trainig step for one minibatch")
    prestates, actions, rewards, poststates, terminals = minibatch
    # sanity-check shapes: 4-D state stacks, 1-D per-transition vectors
    assert len(prestates.shape) == 4
    assert len(poststates.shape) == 4
    assert len(actions.shape) == 1
    assert len(rewards.shape) == 1
    assert len(terminals.shape) == 1
    assert prestates.shape == poststates.shape
    assert prestates.shape[0] == actions.shape[0] == rewards.shape[0] == poststates.shape[0] == terminals.shape[0]
    # feed-forward pass for poststates to get Q-values
    self._prepare_network_input(poststates)
    postq = self.target_model.fprop(self.input, inference = True)
    assert postq.shape == (self.output_shape, self.batch_size)
    # calculate max Q-value for each poststate
    maxpostq = self.be.max(postq, axis=0).asnumpyarray()
    assert maxpostq.shape == (1, self.batch_size)
    # average maxpostq for stats
    maxpostq_avg = maxpostq.mean()
    # feed-forward pass for prestates (training mode, inference=False)
    self._prepare_network_input(prestates)
    preq = self.model.fprop(self.input, inference = False)
    assert preq.shape == (self.output_shape, self.batch_size)
    # make copy of prestate Q-values as targets
    # NOTE(review): no .copy() here — `targets` may alias the buffer returned
    # by asnumpyarray(); a sibling implementation copies explicitly "for the
    # cpu backend". Confirm this is intentional.
    targets = preq.asnumpyarray()
    # clip rewards to [min_reward, max_reward]
    rewards = np.clip(rewards, self.min_reward, self.max_reward)
    # update Q-value targets for each state only at actions taken;
    # terminal states get the raw reward, others the bootstrapped estimate
    for i, action in enumerate(actions):
        if terminals[i]:
            targets[action, i] = float(rewards[i])
        else:
            targets[action, i] = float(rewards[i]) + self.discount_rate * maxpostq[0,i]
    # copy targets to GPU memory
    self.targets.set(targets)
    # calculate errors
    errors = self.cost_func.get_errors(preq, self.targets)
    assert errors.shape == (self.output_shape, self.batch_size)
    # average error where there is a error (should be 1 in every row)
    #TODO: errors_avg = np.sum(errors)/np.size(errors[errors>0.])
    # clip errors to stabilise gradient magnitudes
    if self.clip_error:
        self.be.clip(errors, -self.clip_error, self.clip_error, out = errors)
    # calculate cost, just in case
    cost = self.cost_func.get_cost(preq, self.targets)
    # NOTE(review): the example is truncated here by the aggregator; the
    # original presumably continues with bprop and the optimizer step.
示例13: LSTM
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
# Rebuild the trained character-level network and sample text from it.
num_predict = 1000

layers = [
    LSTM(hidden_size, init, Logistic(), Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)
model.load_weights(args.save_path)

# Generate text
text = []
seed_tokens = list('ROMEO:')

x = be.zeros((len(train_set.vocab), time_steps))
# Prime the recurrent state with the seed, one one-hot character at a time.
for s in seed_tokens:
    x.fill(0)
    x[train_set.token_to_index[s], 0] = 1
    y = model.fprop(x)

for i in range(num_predict):
    # Take last prediction and feed into next fprop
    pred = sample(y.get()[:, -1])
    text.append(train_set.index_to_token[int(pred)])
    x.fill(0)
    # BUGFIX: cast the sampled index to int before indexing — sample() may
    # return a non-integer scalar (the line above already casts, and the
    # sibling example uses int(pred) here too).
    x[int(pred), 0] = 1
    y = model.fprop(x)

# BUGFIX: use the print() function so the snippet also runs on Python 3
# (the rest of this file already uses print()).
print(''.join(seed_tokens + text))
示例14: test_model_serialize
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
def test_model_serialize(backend):
    """Round-trip a merge-concat model through serialize/load_weights and
    verify that outputs, optimizer states and parameters all survive.
    """
    (X_train, y_train), (X_test, y_test), nclass = load_mnist()
    train_set = DataIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
    init_norm = Gaussian(loc=0.0, scale=0.01)
    # initialize model: two merged branches, then affine + batch norm + output
    path1 = [Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
             Pooling(2),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    path2 = [Dropout(keep=0.5),
             Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
    layers = [MergeConcat([path1, path2]),
              Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
              BatchNorm(),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches so weights and optimizer
    # states are non-trivial before serializing
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break
    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Serialize model
    save_obj(mlp.serialize(keep_states=True), tmp_save)
    # Load model into a fresh Model built over the same layer objects
    mlp = Model(layers=layers)
    mlp.load_weights(tmp_save)
    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break
    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert np.allclose(output.get(), output_exp.get())
    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert np.allclose(_s, _s_e)
            else:
                assert np.allclose(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert np.allclose(_p, _p_e)
            else:
                assert np.allclose(p, p_e)
    # remove the temporary serialization file
    os.remove(tmp_save)
示例15: Accuracy
# 需要导入模块: from neon.models import Model [as 别名]
# 或者: from neon.models.Model import fprop [as 别名]
# Evaluate a pre-trained model and show one batch of raw predictions.
metric = Accuracy()
##########################################################################
model = Model(layers=layers)
optimizer = Adagrad(learning_rate=0.01, clip_gradients=clip_gradients)
callbacks = Callbacks(model, train_set, args, eval_set=valid_set)
model.load_weights(os.path.join(args.data_dir, '128128_49_model_e2.pkl'))

# BUGFIX: use the print() function instead of Python-2 print statements so
# the snippet also runs on Python 3 (output is unchanged: the py2 comma
# printed a separating space, exactly as print()'s default sep does).
print("Test Accuracy - ", 100 * model.eval(valid_set, metric=metric))
print("Train Accuracy - ", 100 * model.eval(train_set, metric=metric))

# output result directly for one validation batch
for x, y in valid_set:
    x = model.fprop(x, inference=True)
    print(x.get())
    print(y.get())
    break
#########################################################################
# continue training
# optimizer = Adagrad(learning_rate=0.01, clip_gradients=clip_gradients)
# callbacks = Callbacks(model, train_set, args, eval_set=valid_set)
# import ipdb; ipdb.set_trace()
# re-allocate output memories for each layer
# model.initialized = False
# model.initialize(train_set, cost=cost)