

Python Sequential.count_params Method Code Examples

This article collects typical usage examples of the Python method keras.models.Sequential.count_params. If you are unsure what Sequential.count_params does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, keras.models.Sequential.


Nine code examples of the Sequential.count_params method are shown below, sorted by popularity by default.
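As a quick orientation before the examples, here is a minimal sketch of what count_params() returns for a small fully connected model. The layer sizes and import paths are illustrative and assume a Keras version exposing the same Sequential/Dense API used in the examples below; they are not taken from any of the projects cited here.

# Minimal sketch: count the parameters of a small Sequential model.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(10, input_shape=(20,)))  # weights 20*10 + biases 10 = 210 parameters
model.add(Dense(2))                      # weights 10*2  + biases 2  =  22 parameters
# Depending on the Keras version, model.build() may be needed first (see Example 4).
print(model.count_params())              # 232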

Example 1: VGG_like_convnet

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def VGG_like_convnet(data_shape, opt):
    print('Training VGG net.')
    model = Sequential()
    # input: 100x100 images with 3 channels -> (3, 100, 100) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(data_shape[0], data_shape[1], data_shape[2])))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    # Note: Keras does automatic shape inference.
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('VGG_like_convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Developer: paolorota, Project: deep-splicing, Lines: 33, Source: method_cnn.py

Example 2: test_sequential_count_params

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def test_sequential_count_params():
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    n = input_dim * nb_units + nb_units      # first Dense: input_dim*nb_units weights + nb_units biases
    n += nb_units * nb_units + nb_units      # hidden Dense: nb_units*nb_units weights + nb_units biases
    n += nb_units * nb_classes + nb_classes  # output Dense: nb_units*nb_classes weights + nb_classes biases

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    assert(n == model.count_params())

    model.compile('sgd', 'binary_crossentropy')
    assert(n == model.count_params())
Developer: AI42, Project: keras, Lines: 20, Source: test_models.py

Example 3: test_count_params

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
    def test_count_params(self):
        print('test count params')
        nb_units = 100
        nb_classes = 2

        n = nb_units * nb_units + nb_units
        n += nb_units * nb_units + nb_units
        n += nb_units * nb_classes + nb_classes

        model = Sequential()
        model.add(Dense(nb_units, nb_units))
        model.add(Dense(nb_units, nb_units))
        model.add(Dense(nb_units, nb_classes))
        model.add(Activation('softmax'))

        self.assertEqual(n, model.count_params())

        model.compile('sgd', 'binary_crossentropy')

        self.assertEqual(n, model.count_params())
Developer: 3dconv, Project: keras, Lines: 22, Source: test_sequential_model.py

Example 4: test_sequential_count_params

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def test_sequential_count_params():
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    n = input_dim * nb_units + nb_units
    n += nb_units * nb_units + nb_units
    n += nb_units * nb_classes + nb_classes

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    model.build()

    assert n == model.count_params()

    model.compile("sgd", "binary_crossentropy")
    assert n == model.count_params()
Developer: CheRaissi, Project: keras, Lines: 22, Source: test_sequential_model.py

Example 5: make_model

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def make_model():
	''' define the model'''
	model = Sequential()
	# input: 32x32 images with 3 channels -> (3, 32, 32) tensors.
	# this applies 32 convolution filters of size 3x3 each.
	model.add(Convolution2D(maps_count_param, 3, 3, border_mode='same', input_shape=(3, input_size, input_size),init='he_normal',W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
	model.add(Convolution2D(maps_count_param, 3, 3, border_mode='same', init='he_normal',W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
#	model.add(Dropout(0.3))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Convolution2D(maps_count_param*2, 3, 3, border_mode='same', init='he_normal',W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
#	model.add(Dropout(0.3))
	model.add(Convolution2D(maps_count_param*2, 3, 3, border_mode='same', init='he_normal',W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
#	model.add(Dropout(0.3))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Convolution2D(maps_count_param*4, 3, 3, border_mode='same', init='he_normal',W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
#	model.add(Dropout(0.3))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Flatten())

	model.add(Dense(2048,W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))

	model.add(Dense(1024,W_regularizer=l2(lambda_reg)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))

	model.add(Dense(10,W_regularizer=l2(lambda_reg)))
	model.add(Activation('softmax'))
#	model.add(Dropout(0.5))

	sgd = SGD(lr=learn_rate, decay=decay_param, momentum=0.9, nesterov=True)
	model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])

	print('model parameters:',model.count_params())
	print('model characteristics:',model.summary())
	print('----------------------------------------------------------------------------------------')

	return model
Developer: ybenigot, Project: keras, Lines: 49, Source: convnet.py

Example 6: AlexNet_like_convnet

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def AlexNet_like_convnet(data_shape, opt):
    print('Training AlexNet net.')
    model = Sequential()
    model.add(Convolution2D(96, 10, 10, border_mode='valid', input_shape=(data_shape[0], data_shape[1], data_shape[2])))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(128, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(256, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(768, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(Dense(256, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('AlexNet_like_convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Developer: paolorota, Project: deep-splicing, Lines: 38, Source: method_cnn.py

Example 7: print

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
model.add(TimeDistributed(MaxPooling1D(2, 2)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Convolution1D(64, 3, activation='relu')))
model.add(TimeDistributed(Convolution1D(64, 3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2, 2)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Flatten()))
model.add(BatchNormalization())
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Dense(12, activation='sigmoid')))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('param count:', model.count_params())
print('input shape:', model.input_shape)
print('output shape:', model.output_shape)

def new_model_id():
    return 'model_%s' % arrow.get().format('YYYY-MM-DD-HH-mm-ss')

def save_model_arch(model_id, model):
    arch_file = '%s/%s_arch.json' % (model_dir, model_id)
    print('Saving model architecture:', arch_file)
    open(arch_file, 'w').write(model.to_json())

def weights_file(model_id, suffix=''):
    return '%s/%s_weights%s.h5' % (model_dir, model_id, suffix)

model_id = new_model_id()
Developer: bzamecnik, Project: ml-playground, Lines: 33, Source: lstm_chord_classification_training.py

Example 8: run

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]

#......... part of the code is omitted here .........
    print(trainX.shape, testX.shape, trainy.shape, testy.shape)

    early = keras.callbacks.EarlyStopping(monitor = 'val_loss',
            patience = 20)

    history = model.fit(trainX.values, trainy.values, nb_epoch = RP['epochs'],
            batch_size = RP['batch'], callbacks = [early],
            validation_data = (testX.values, testy.values))


    preprocessMeta = {
        'scaler': scalery
    }

    # compute metrics for the model based on the task for both testing and training data
    print('\nGetting metrics for training data:')
    if RP['classify']:
        trainMetrics = metrics.classify(model, trainX.values, trainy.values, preprocessMeta)
    else:
        trainMetrics = metrics.predict(model, trainX.values, trainy.values, preprocessMeta)

    print('\nGetting metrics for test data:')
    if RP['classify']:
        testMetrics = metrics.classify(model, testX.values, testy.values, preprocessMeta)
    else:
        testMetrics = metrics.predict(model, testX.values, testy.values, preprocessMeta)


    print('Plot:')
    values = np.zeros((len(history.history['loss']), 2))
    for i in range(len(history.history['loss'])):
        values[i][0] = history.history['loss'][i]
        values[i][1] = history.history['val_loss'][i]
    utility.plotLoss(values)

    print('Dump csv pred')
    pred = model.predict(testX.values, batch_size = RP['batch'])


    if RP['zscore_norm']:
        predScaled = pd.DataFrame(scalery.inverse_transform(pred), columns=['pred'])
        testScaled = pd.DataFrame(scalery.inverse_transform(testy), columns=['true'])
    else:
        predScaled = pd.DataFrame(pred,columns=['pred'])
        testScaled = pd.DataFrame(testy,columns=['true'])

    predByTruth = pd.concat([predScaled, testScaled],axis=1)

    # predByTruth.plot(x='pred',y='true', kind='scatter')
    # plt.show()
    # predByTruth.to_csv('local/pred.csv')


    # statistics to send to journal
    stats['runtime_second'] = time.time() - stats['runtime_second']
    stats['memory_pm_mb'], stats['memory_vm_mb'] = utility.getMemoryUsage()
    stats['git_commit'] = utility.getGitCommitHash()
    stats['comment'] = RP['comment']
    stats['hostname'] = socket.gethostname()
    stats['experiment_config'] = yaml.dump(cc.exp,default_flow_style=False)

    stats['model'] = utility.modelToString(model)
    stats['loaded_model'] = RP['load_model']
    stats['parameter_count'] = model.count_params()
    stats['task'] = 'classification' if RP['classify'] else 'regression'

    stats['dataset_name'] = cc.exp['fetch']['table']
    stats['split_name'] = RD['testing']
    stats['label_name'] = ','.join(RD['labels'])

    stats['epoch_max'] = RP['epochs']
    stats['learning_rate'] = RP['learning_rate']
    stats['optimization_method'] = OPTIMIZER.__class__.__name__
    stats['batch_size'] = RP['batch']
    stats['seed'] = RP['seed']
    stats['objective'] = RP['objective']
    stats['learning_curve'] = {'val':open('{}/{}'.format(cc.cfg['plots']['dir'], utility.PLOT_NAME),'rb').read(),'type':'bin'}

    # metric statistics to send
    metricStats = {}

    if RP['classify']:
        metricStats['relevance_training'] = trainMetrics['acc_avg']
        metricStats['relevance_training_std'] = trainMetrics['acc_std']
        metricStats['relevance_testing'] = testMetrics['acc_avg']
        metricStats['relevance_testing_std'] = testMetrics['acc_std']
        metricStats['log_loss'] = testMetrics['log_loss_avg']
        metricStats['log_loss_std'] = testMetrics['log_loss_std']
        metricStats['auc'] = testMetrics['auc_avg']
        metricStats['auc_std'] = testMetrics['auc_std']
    else:
        metricStats['relevance_training'] = trainMetrics['r2_avg']
        metricStats['relevance_training_std'] = trainMetrics['r2_std']
        metricStats['relevance_testing'] = testMetrics['r2_avg']
        metricStats['relevance_testing_std'] = testMetrics['r2_std']
        metricStats['mse'] = testMetrics['mse_avg']
        metricStats['mse_std'] = testMetrics['mse_std']

    stats.update(metricStats)
    db.sendStatistics(**stats)
Developer: PMitura, Project: smiles-neural-network, Lines: 104, Source: dnn.py

Example 9: constructDNNModel

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import count_params [as alias]
def constructDNNModel(modelIndex):
    model = []
    if modelIndex == 1:
        model = Sequential()
    #
        model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth)))  # 23 x 31
        model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 21 x 29
        model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 19 x 27
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1)))  # 18 x 26
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        #
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1)))  # 11 x 19

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1)))  # 2 x 6
        #
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #


        model.add(Flatten())
        # model.add(Reshape(1))
        # model.add(Dropout(0.25))
        model.add(Dense(1024, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(1024, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))

        printing("Built the model")
        print("Model parameters = " + str(model.count_params()))

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        if doWeightLoadSaveTest:
            # pdb.set_trace()
            model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
            model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
            printing("Weight load/save test passed...")

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=linear_correlation_loss, optimizer=sgd)
        printing("Compilation Finished")
    elif modelIndex == 2:
        model = Sequential()

        model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth)))  # 23 x 31
        model.add(Convolution2D(32, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 21 x 29
        model.add(Convolution2D(32, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 19 x 27 22, 29
        model.add(Convolution2D(32, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 17 x 25 20, 26
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1)))  # 16 x 24 19, 25
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        #
        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 19, 25
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 18, 23
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))  # 16, 20
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1)))  # 9 x 17 15, 19

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))  # 1 x 5 11, 13
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))  # 1 x 5 7, 7

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))  # 1 x 5 2, 2

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #

        model.add(Reshape((2 * 2 * 48,)))
        model.add(Dense(400, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(400, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
        printing("Built the model")

        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
#......... part of the code is omitted here .........
Developer: parag2489, Project: Image-Quality, Lines: 103, Source: train_imageQuality_Estmn_multiPatchSmallNetwork.py


Note: The keras.models.Sequential.count_params examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow the license of the corresponding project. Please do not reproduce without permission.