

Python Model.fit Method Code Examples

This article collects typical usage examples of the Python method neon.models.Model.fit. If you are unsure what Model.fit does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of neon.models.Model, the class this method belongs to.


Fifteen code examples of Model.fit are shown below, ordered by popularity by default.
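All of the examples follow the same basic pattern: build a list of layers, wrap it in a Model, then call Model.fit with a data iterator, a cost, an optimizer, and a Callbacks object. The following is a minimal, self-contained sketch of that shared pattern, not taken from any one example; the CPU backend, the random toy data, and the layer sizes are illustrative assumptions, and a recent (1.x-era) neon API is assumed.

import numpy as np
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import CrossEntropyMulti, Rectlin, Softmax

# generate a backend first -- every neon script needs one
be = gen_backend(backend='cpu', batch_size=32)

# toy data: 1024 samples, 20 features, 10 classes (placeholder values)
X = np.random.rand(1024, 20).astype('float32')
y = np.random.randint(0, 10, 1024)
train_set = ArrayIterator(X=X, y=y, nclass=10)

# a small MLP; the real examples below use LSTMs, Conv stacks, etc.
init = Gaussian(loc=0.0, scale=0.01)
layers = [Affine(nout=64, init=init, activation=Rectlin()),
          Affine(nout=10, init=init, activation=Softmax())]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
optimizer = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
callbacks = Callbacks(model)

# the call this article is about
model.fit(train_set, optimizer=optimizer, num_epochs=2,
          cost=cost, callbacks=callbacks)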

Example 1: train_eval

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def train_eval(
        train_set,
        valid_set,
        args,
        hidden_size = 100,
        clip_gradients = True,
        gradient_limit = 5):

    # weight initialization
    init = Uniform(low=-0.08, high=0.08)

    # model initialization
    layers = [
        LSTM(hidden_size, init, Logistic(), Tanh()),
        LSTM(hidden_size, init, Logistic(), Tanh()),
        Affine(2, init, bias=init, activation=Softmax())
    ]

    cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
    model = Model(layers=layers)
    optimizer = RMSProp(clip_gradients=clip_gradients, gradient_limit=gradient_limit, stochastic_round=args.rounding)

    # configure callbacks
    callbacks = Callbacks(model, train_set, progress_bar=args.progress_bar)

    # train model
    model.fit(train_set,
              optimizer=optimizer,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)

    pred = model.get_outputs(valid_set)
    pred_neg_rate = model.eval(valid_set, metric=Misclassification())
    return (pred[:,1], pred_neg_rate)
Author: wjiangcmu | Project: Driver_telematics | Lines of code: 37 | Source file: lstm.py

Example 2: main

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def main():
    parser = get_parser()
    args = parser.parse_args()
    print('Args:', args)

    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    # initializer = Gaussian(loc=0.0, scale=0.01)
    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None

    if args.convolution and args.convolution[0]:
        reshape = (1, loader.input_dim, 1)
        layer_list = list(range(0, len(args.convolution), 3))
        for l, i in enumerate(layer_list):
            nb_filter = args.convolution[i]
            filter_len = args.convolution[i+1]
            stride = args.convolution[i+2]
            # print(nb_filter, filter_len, stride)
            # fshape: (height, width, num_filters).
            layers.append(Conv((1, filter_len, nb_filter), strides={'str_h':1, 'str_w':stride}, init=initializer, activation=activation))
            if args.pool:
                layers.append(Pooling((1, args.pool)))

    for layer in args.dense:
        if layer:
            layers.append(Affine(nout=layer, init=initializer, activation=activation))
        if args.drop:
            layers.append(Dropout(keep=(1-args.drop)))
    layers.append(Affine(nout=1, init=initializer, activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples, lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples, lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Author: carrondt | Project: Benchmarks | Lines of code: 58 | Source file: p1b3_baseline_neon.py

Example 3: run

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def run(args, train, test):
    init_uni = Uniform(low=-0.1, high=0.1)
    opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                      momentum_coef=0.9,
                                      stochastic_round=args.rounding)
    layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
              Pooling((2, 2)),
              Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
              Affine(nout=10, init=init_uni, activation=Softmax())]
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    mlp = Model(layers=layers)
    callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
    mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    err = mlp.eval(test, metric=Misclassification())*100
    print('Misclassification error = %.2f%%' % err)
    return err
Author: ferenckulcsar | Project: neon | Lines of code: 20 | Source file: compare.py

Example 4: run

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def run(train, test):
    init = Gaussian(scale=0.01)
    layers = [Conv((3, 3, 128), init=init, activation=Rectlin(),
                   strides=dict(str_h=1, str_w=2)),
              Conv((3, 3, 256), init=init, batch_norm=True, activation=Rectlin()),
              Pooling(2, strides=2),
              Conv((2, 2, 512), init=init, batch_norm=True, activation=Rectlin()),
              DeepBiRNN(256, init=init, activation=Rectlin(), reset_cells=True, depth=3),
              RecurrentLast(),
              Affine(32, init=init, batch_norm=True, activation=Rectlin()),
              Affine(nout=common['nclasses'], init=init, activation=Softmax())]

    model = Model(layers=layers)
    opt = Adadelta()
    metric = Misclassification()
    callbacks = Callbacks(model, eval_set=test, metric=metric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())

    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    return model
Author: JediKoder | Project: neon | Lines of code: 22 | Source file: whale_calls.py

Example 5: __init__

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
class MostCommonWordSense:

    def __init__(self, rounding, callback_args, epochs):
        # setup weight initialization function
        self.init = Gaussian(loc=0.0, scale=0.01)
        # setup optimizer
        self.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9,
                                                 stochastic_round=rounding)
        # setup cost function as sum of squared errors
        self.cost = GeneralizedCost(costfunc=SumSquared())
        self.epochs = epochs
        self.model = None
        self.callback_args = callback_args

    def build(self):
        # setup model layers
        layers = [Affine(nout=100, init=self.init, bias=self.init, activation=Rectlin()),
                  Affine(nout=2, init=self.init, bias=self.init, activation=Softmax())]

        # initialize model object
        self.model = Model(layers=layers)

    def fit(self, valid_set, train_set):
        # configure callbacks
        callbacks = Callbacks(self.model, eval_set=valid_set, **self.callback_args)
        self.model.fit(train_set, optimizer=self.optimizer, num_epochs=self.epochs,
                       cost=self.cost, callbacks=callbacks)

    def save(self, save_path):
        self.model.save_params(save_path)

    def load(self, model_path):
        self.model = Model(model_path)

    def eval(self, valid_set):
        eval_rate = self.model.eval(valid_set, metric=Misclassification())
        return eval_rate

    def get_outputs(self, valid_set):
        return self.model.get_outputs(valid_set)
Author: cdj0311 | Project: nlp-architect | Lines of code: 42 | Source file: most_common_word_sense.py
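For reference, a hedged sketch of how the MostCommonWordSense class from Example 5 might be driven; the CPU backend, the random 300-dimensional features, and the binary labels are hypothetical stand-ins, not data from the original nlp-architect project.

import numpy as np
from neon.backends import gen_backend
from neon.data import ArrayIterator

gen_backend(backend='cpu', batch_size=64)

# hypothetical feature matrices standing in for the real word-sense vectors
X_train = np.random.rand(512, 300).astype('float32')
y_train = np.random.randint(0, 2, 512)
X_valid = np.random.rand(128, 300).astype('float32')
y_valid = np.random.randint(0, 2, 128)
train_set = ArrayIterator(X=X_train, y=y_train, nclass=2)
valid_set = ArrayIterator(X=X_valid, y=y_valid, nclass=2)

# constructor arguments are illustrative only
wsd = MostCommonWordSense(rounding=False, callback_args={}, epochs=5)
wsd.build()
wsd.fit(valid_set, train_set)
print(wsd.eval(valid_set))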

Example 6: train_regressor

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def train_regressor(orig_wordvecs, w2v_W, w2v_vocab):
    """
    Return regressor to map word2vec to RNN word space

    Function modified from:
    https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
    """
    # Gather all words from word2vec that appear in wordvecs
    d = defaultdict(lambda: 0)
    for w in w2v_vocab.keys():
        d[w] = 1
    shared = OrderedDict()
    count = 0

    for w in list(orig_wordvecs.keys())[:-2]:
        if d[w] > 0:
            shared[w] = count
            count += 1

    # Get the vectors for all words in 'shared'
    w2v = np.zeros((len(shared), 300), dtype='float32')
    sg = np.zeros((len(shared), 620), dtype='float32')
    for w in shared.keys():
        w2v[shared[w]] = w2v_W[w2v_vocab[w]]
        sg[shared[w]] = orig_wordvecs[w]

    train_set = ArrayIterator(X=w2v, y=sg, make_onehot=False)

    layers = [Linear(nout=620, init=Gaussian(loc=0.0, scale=0.1)),
              Bias(init=Constant(0.0))]
    clf = Model(layers=layers)

    # regression model is trained using default global batch size
    cost = GeneralizedCost(costfunc=SumSquared())
    opt = GradientDescentMomentum(0.1, 0.9, gradient_clip_value=5.0)
    callbacks = Callbacks(clf)

    clf.fit(train_set, num_epochs=20, optimizer=opt, cost=cost, callbacks=callbacks)
    return clf
Author: rlugojr | Project: neon | Lines of code: 41 | Source file: util.py

Example 7: DeepCascadeLearning

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def DeepCascadeLearning(modelLayers,X_train,Y_train,callbacks,init_uni=Uniform(low=-0.1, high=0.1),
                        testIterator=None,epochs=2,
                        cost=GeneralizedCost(costfunc=CrossEntropyMulti()),
                        opt_gdm=GradientDescentMomentum(learning_rate=0.01,momentum_coef=0.9)):
  importantLayersIndexes = list()
  i = 0
  outputLayer = Affine(nout=10, init=init_uni, activation=Softmax())
  modelToPredict = None
  for currentLayer in modelLayers:
    if(np.shape(currentLayer)):
      currentLayer = currentLayer[0]
    if((currentLayer.classnm == 'Convolution') or (currentLayer.classnm == 'Affine')):
      importantLayersIndexes.append(i)
    i += 1
  for idx, i in enumerate(importantLayersIndexes):
    modelToTrain = list()
    # slice up to (but not including) the next Convolution/Affine layer;
    # the last block runs to the end of the provided layers
    nextIdx = importantLayersIndexes[idx + 1] if idx + 1 < len(importantLayersIndexes) else len(modelLayers)
    for currentLayer in modelLayers[i:nextIdx]:
      modelToTrain.append(currentLayer)
    modelToTrain.append(outputLayer)
    modelToTrain = Model(modelToTrain)
    if modelToPredict is None:
      trainIterator = ArrayIterator(X_train, Y_train, nclass=10, lshape=(3,32,32)) 
      x = trainIterator.__iter__()
      callbacks = Callbacks(modelToTrain)
      modelToTrain.fit(trainIterator, optimizer=opt_gdm, num_epochs=epochs, cost=GeneralizedCost(costfunc=CrossEntropyMulti()), callbacks=callbacks)
    else:
      tmpIterator = ArrayIterator(X_train,lshape=(3,32,32))
      tmpTrain = modelToPredict.get_outputs(tmpIterator)
      tmpIterator = ArrayIterator(tmpTrain[0:20],Y_train[0:20],nclass=10,lshape=(32,30,30))
      modelToTrain.fit(tmpIterator, optimizer=opt_gdm, num_epochs=epochs, cost=cost)
    if modelToPredict is None:
        modelToPredict = list()
    else:
        modelToPredict = modelToPredict.layers.layers
    for currentLayer in modelToTrain.layers.layers[0:-2]:
      modelToPredict.append(currentLayer)
    modelToPredict = Model(modelToPredict)

  return modelToPredict
Author: EnriqueSMarquez | Project: CNNs_RelatedProjects | Lines of code: 41 | Source file: testingVsKerasCIFAR.py

Example 8: train_mlp

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
def train_mlp():
	"""
	Train data and save scaling and network weights and biases to file
	to be used by forward prop phase on test data
	"""
	parser = NeonArgparser(__doc__)
	
	args = parser.parse_args()
	
	logger = logging.getLogger()
	logger.setLevel(args.log_thresh)
	
	# hyperparameters
	num_epochs = args.epochs
	
	#preprocessor
	std_scale = preprocessing.StandardScaler(with_mean=True,with_std=True)
	#std_scale = feature_scaler(type='Standardizer',with_mean=True,with_std=True)
	
	#number of non one-hot encoded features, including ground truth
	num_feat = 4
	
	# load up the mnist data set
	# split into train and tests sets
	#load data from csv-files and rescale
	#training
	traindf = pd.DataFrame.from_csv('data/train.csv')
	ncols = traindf.shape[1]
	
	#tmpmat=std_scale.fit_transform(traindf.as_matrix())
	#print std_scale.scale_
	#print std_scale.mean_
	
	tmpmat = traindf.as_matrix()
	#print tmpmat[:,1:num_feat]
	
	tmpmat[:,:num_feat] = std_scale.fit_transform(tmpmat[:,:num_feat])
	X_train = tmpmat[:,1:]
	y_train = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	#validation
	validdf = pd.DataFrame.from_csv('data/validate.csv')
	ncols = validdf.shape[1]
	tmpmat = validdf.as_matrix()
	tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
	X_valid = tmpmat[:,1:]
	y_valid = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	#test
	testdf = pd.DataFrame.from_csv('data/test.csv')
	ncols = testdf.shape[1]
	tmpmat = testdf.as_matrix()
	tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
	X_test = tmpmat[:,1:]
	y_test = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	# setup a training set iterator
	train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
	# setup a validation data set iterator
	valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
	# setup a validation data set iterator
	test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)
	
	# setup weight initialization function
	init_norm = Xavier()
	
	# setup model layers
	layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
	          Dropout(keep=0.5),
	          Affine(nout=X_train.shape[1] // 2, init=init_norm, activation=Rectlin()),
			  Linear(nout=1, init=init_norm)]
	
	# setup cost function as smooth L1 loss
	cost = GeneralizedCost(costfunc=SmoothL1Loss())
	
	# setup optimizer
	#schedule
	#schedule = ExpSchedule(decay=0.3)
	#optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
	optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)
	
	# initialize model object
	mlp = Model(layers=layers)
	
	# configure callbacks
	if args.callback_args['eval_freq'] is None:
		args.callback_args['eval_freq'] = 1
	
	# configure callbacks
	callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
	
	callbacks.add_early_stop_callback(stop_func)
	callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))
	
	# run fit
	mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
	
	#evaluate model
	print('Evaluation Error = %.4f'%(mlp.eval(valid_set, metric=SmoothL1Metric())))
	print('Test set error = %.4f'%(mlp.eval(test_set, metric=SmoothL1Metric())))
#......... part of the code is omitted here .........
Author: ankitvb | Project: homeprice | Lines of code: 103 | Source file: train_mlp.py

Example 9: Rectlin

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
relu = Rectlin()
layers = []
layers.append(Dropout(keep=.8))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 96), init=init_uni, batch_norm=True, activation=relu, pad=1, strides=2))
layers.append(Dropout(keep=.5))

layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1))
layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu, pad=1, strides=2))
layers.append(Dropout(keep=.5))

layers.append(Conv((3, 3, 192), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((1, 1, 192), init=init_uni, batch_norm=True, activation=relu))
layers.append(Conv((1, 1, 16), init=init_uni, activation=relu))

layers.append(Pooling(6, op="avg"))
layers.append(Activation(Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())

mlp = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(mlp, train_set, output_file=args.output_file, valid_set=valid_set,
                      valid_freq=args.validation_freq, progress_bar=args.progress_bar)

mlp.fit(train_set, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification())*100))
Author: ZebTech | Project: neon | Lines of code: 32 | Source file: cifar10_allcnn.py

Example 10: create_index_files

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
args = parser.parse_args()
train_idx, val_idx = create_index_files(args.data_dir)

common_params = dict(sampling_freq=22050, clip_duration=31000, frame_duration=20)
train_params = AudioParams(random_scale_percent=5, **common_params)
val_params = AudioParams(**common_params)
common = dict(target_size=1, nclasses=10, repo_dir=args.data_dir)
train = DataLoader(set_name='genres-train', media_params=train_params,
                   index_file=train_idx, shuffle=True, **common)
val = DataLoader(set_name='genres-val', media_params=val_params,
                 index_file=val_idx, shuffle=False, **common)
init = Gaussian(scale=0.01)
layers = [Conv((5, 5, 64), init=init, activation=Rectlin(),
               strides=dict(str_h=2, str_w=4)),
          Pooling(2, strides=2),
          Conv((5, 5, 64), init=init, batch_norm=True, activation=Rectlin(),
               strides=dict(str_h=1, str_w=2)),
          BiRNN(256, init=init, activation=Rectlin(), reset_cells=True),
          RecurrentMean(),
          Affine(128, init=init, batch_norm=True, activation=Rectlin()),
          Affine(nout=common['nclasses'], init=init, activation=Softmax())]

model = Model(layers=layers)
opt = Adadelta()
metric = Misclassification()
callbacks = Callbacks(model, eval_set=val, metric=metric, **args.callback_args)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (model.eval(val, metric=metric)*100))
Author: JediKoder | Project: neon | Lines of code: 32 | Source file: music_genres.py

Example 11: Affine

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
p3 = [b2, Affine(nout=16, linear_name="b2_l1", **normrelu), Affine(nout=10, linear_name="b2_l2", **normsigm)]


# setup cost function as CrossEntropy
cost = Multicost(
    costs=[
        GeneralizedCost(costfunc=CrossEntropyMulti()),
        GeneralizedCost(costfunc=CrossEntropyBinary()),
        GeneralizedCost(costfunc=CrossEntropyBinary()),
    ],
    weights=[1, 0.0, 0.0],
)

# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)

# initialize model object
alphas = [1, 0.25, 0.25]
mlp = Model(layers=Tree([p1, p2, p3], alphas=alphas))

# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, eval_set=valid_set, **args.callback_args)

# run fit
mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

logging.getLogger("neon").info(
    "Misclassification error = %.1f%%", (mlp.eval(valid_set, metric=Misclassification()) * 100)
)
print("Misclassification error = %.1f%%" % (mlp.eval(valid_set, metric=Misclassification()) * 100))
Author: ferenckulcsar | Project: neon | Lines of code: 32 | Source file: mnist_branch.py

Example 12: GradientDescentMomentum

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
if args.datatype in [np.float32, np.float64]:
    opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                      momentum_coef=0.9,
                                      stochastic_round=args.rounding)
elif args.datatype in [np.float16]:
    opt_gdm = GradientDescentMomentum(learning_rate=0.01/cost_scale,
                                      momentum_coef=0.9,
                                      stochastic_round=args.rounding)

layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
          Pooling((2, 2)),
          Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
          Pooling((2, 2)),
          Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
          Affine(nout=10, init=init_uni, activation=Softmax())]

if args.datatype in [np.float32, np.float64]:
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
elif args.datatype in [np.float16]:
    cost = GeneralizedCost(costfunc=CrossEntropyMulti(scale=cost_scale))

model = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(model,  eval_set=test, **args.callback_args)

# callbacks = Callbacks.load_callbacks(callbacks.get_description(), model, data=[train, test])
model.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

print('Misclassification error = %.1f%%' % (model.eval(test, metric=Misclassification())*100))
Author: AdityoSanjaya | Project: neon | Lines of code: 32 | Source file: cifar10_conv.py

Example 13: Conv

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
layers = [Conv((3, 3, 32), init=init_uni, activation=Rectlin(), batch_norm=False),
          Conv((3,3,32),init=init_uni,activation=Rectlin(), batch_norm=False),
          Pooling((2, 2)),
          Dropout(keep=0.75),
          Conv((3, 3, 64), init=init_uni, activation=Rectlin(), batch_norm=False),
          Conv((3, 3, 64), init=init_uni, activation=Rectlin(), batch_norm=False),
          Pooling((2, 2)),
          Dropout(keep=0.75),
          Affine(nout=512, init=init_uni, activation=Rectlin(), batch_norm=False),
          Dropout(keep=0.5),
          Affine(nout=10, init=init_uni, activation=Softmax())]

cost = GeneralizedCost(costfunc=CrossEntropyMulti())


mlp = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(mlp, eval_set=test, **args.callback_args)
pretrainedModel = DeepCascadeLearning(layers,X_train,y_train,callbacks)
mlp.fit(train, optimizer=opt_gdm, num_epochs=5, cost=cost, callbacks=callbacks)
newLayers = list()
for i in mlp.layers.layers:
  newLayers.append(i)
newLayers = Model(newLayers)
callbacks = Callbacks(newLayers, eval_set=test, **args.callback_args)
newLayers.fit(train, optimizer=opt_gdm, num_epochs=5, cost=cost, callbacks=callbacks)

print('Misclassification error = %.1f%%' % (mlp.eval(test, metric=Misclassification())*100))
Author: EnriqueSMarquez | Project: CNNs_RelatedProjects | Lines of code: 31 | Source file: testingVsKerasCIFAR.py

Example 14: SequenceChunker

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
class SequenceChunker(object):
    """
    Sequence chunker model (Neon based)

    Args:
        sentence_length (int): max sentence length
        token_vocab_size (int): word vocabulary size
        pos_vocab_size (int, optional): POS vocabulary size
        char_vocab_size (int, optional): characters vocabulary size
        max_char_word_length (int, optional): max word length in characters
        token_embedding_size (int, optional): word embedding dims
        pos_embedding_size (int, optional): POS embedding dims
        char_embedding_size (int, optional): character embedding dims
        num_labels (int, optional): number of output labels possible per token
        lstm_hidden_size (int, optional): LSTM hidden size
        num_lstm_layers (int, optional): number of LSTM layers
        use_external_embedding (bool, optional): input is provided as external word embedding
        dropout (float, optional): dropout rate
    """

    def __init__(self, sentence_length,
                 token_vocab_size,
                 pos_vocab_size=None,
                 char_vocab_size=None,
                 max_char_word_length=20,
                 token_embedding_size=None,
                 pos_embedding_size=None,
                 char_embedding_size=None,
                 num_labels=None,
                 lstm_hidden_size=100,
                 num_lstm_layers=1,
                 use_external_embedding=None,
                 dropout=0.5
                 ):

        init = GlorotUniform()
        tokens = []
        if use_external_embedding is None:
            tokens.append(LookupTable(vocab_size=token_vocab_size,
                                      embedding_dim=token_embedding_size,
                                      init=init,
                                      pad_idx=0))
        else:
            tokens.append(DataInput())
        tokens.append(Reshape((-1, sentence_length)))
        f_layers = [tokens]

        # add POS tag input
        if pos_vocab_size is not None and pos_embedding_size is not None:
            f_layers.append([
                LookupTable(vocab_size=pos_vocab_size,
                            embedding_dim=pos_embedding_size,
                            init=init,
                            pad_idx=0),
                Reshape((-1, sentence_length))
            ])

        # add Character RNN input
        if char_vocab_size is not None and char_embedding_size is not None:
            char_lut_layer = LookupTable(vocab_size=char_vocab_size,
                                         embedding_dim=char_embedding_size,
                                         init=init,
                                         pad_idx=0)
            char_nn = [char_lut_layer,
                       TimeDistBiLSTM(char_embedding_size, init, activation=Logistic(),
                                      gate_activation=Tanh(),
                                      reset_cells=True, reset_freq=max_char_word_length),
                       TimeDistributedRecurrentLast(timesteps=max_char_word_length),
                       Reshape((-1, sentence_length))]

            f_layers.append(char_nn)

        layers = []
        if len(f_layers) == 1:
            layers.append(f_layers[0][0])
        else:
            layers.append(MergeMultistream(layers=f_layers, merge="stack"))
            layers.append(Reshape((-1, sentence_length)))
        layers += [DeepBiLSTM(lstm_hidden_size, init, activation=Logistic(),
                              gate_activation=Tanh(),
                              reset_cells=True,
                              depth=num_lstm_layers),
                   Dropout(keep=dropout),
                   Affine(num_labels, init, bias=init, activation=Softmax())]
        self._model = Model(layers=layers)

    def fit(self, dataset, optimizer, cost, callbacks, epochs=10):
        """
        fit a model

        Args:
            dataset: train/test set of CONLL2000 dataset
            optimizer: optimizer (Neon based)
            cost: cost function (Neon based)
            callbacks: callbacks (Neon based)
            epochs (int, optional): number of epochs to train
        """
        self._model.fit(dataset,
                        optimizer=optimizer,
                        num_epochs=epochs,
#......... part of the code is omitted here .........
Author: cdj0311 | Project: nlp-architect | Lines of code: 103 | Source file: chunker.py

Example 15: Pooling

# Required import: from neon.models import Model [as alias]
# Or: from neon.models.Model import fit [as alias]
# layers = [Conv(fshape=(5,5,16), init=init_uni, activation=Rectlin()),
#           Pooling(fshape=2, strides=2),
#           Conv(fshape=(5,5,32), init=init_uni, activation=Rectlin()),
#           Pooling(fshape=2, strides=2),
#           Affine(nout=500, init=init_uni, activation=Rectlin()),
#           Affine(nout=10, init=init_uni, activation=Softmax())]
# learning_rate = 0.005
# momentum = 0.9

cnn = Model(layers=layers)

# - cost function
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

# - learning rule
optimizer = GradientDescentMomentum(learning_rate, momentum_coef=momentum)

# Progress bar for each epoch - what's an epoch again? by default 10 Crazy magic - don't even go here!
callbacks = Callbacks(cnn, eval_set=test_set, **args.callback_args)

# put everything together!
cnn.fit(train_set, optimizer=optimizer, num_epochs=epochs, cost=cost, callbacks=callbacks)

# # Calculate test set results
# results = cnn.get_outputs(test_set)

# dump(cnn, "cnn_0_005.jbl")

# # work out the performance!
# error = cnn.eval(test_set, metric=Misclassification())
Author: oew1v07 | Project: Neon-practice | Lines of code: 32 | Source file: cifar.py


Note: The neon.models.Model.fit examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not repost without permission.