

Python core.input_data Function Code Examples

This article collects typical usage examples of the Python function tflearn.layers.core.input_data. If you have been wondering exactly how input_data is used, what its calls look like, or where to find real examples, the curated snippets below should help.


The following shows 15 code examples of the input_data function, sorted by popularity by default.
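
All of the examples below follow the same basic pattern: input_data declares the network input, the leading None in the shape stands for a variable batch size, and the returned tensor is passed through the other tflearn layer functions before being wrapped in a DNN. A minimal sketch of that pattern (the shapes and names here are placeholders, not taken from any particular example):

import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

net = input_data(shape=[None, 28, 28, 1], name='input')  # None = variable batch size
net = fully_connected(net, 10, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy', name='target')
model = tflearn.DNN(net)  # ready for model.fit(...)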

Example 1: _model1

def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides = 4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides = 2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
       snapshot_step=500, show_metric=True, batch_size=batchNum, shuffle=True, run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Developer: richardbored, Project: customData, Lines of code: 29, Source: train.py

Example 2: cnn

def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='cnn_demo')
Developer: Emersonxuelinux, Project: 2book, Lines of code: 26, Source: cnn.py

Example 3: neural_network_model

def neural_network_model(input_size):

    network = input_data(shape=[None, input_size, 1], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')

    return model
Developer: davidgjordan, Project: gym_tensorflow, Lines of code: 25, Source: tensor2.py
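
A hypothetical call for this model, assuming a feature size of 4 and the module-level LR constant the function relies on; the data below is random and only illustrates the [samples, input_size, 1] layout expected by the input_data layer and the 'input'/'targets' names used by fit:

import numpy as np

LR = 1e-3  # assumed learning-rate constant read by neural_network_model
X = np.random.rand(100, 4).reshape(-1, 4, 1)      # [samples, input_size, 1]
Y = np.eye(2)[np.random.randint(0, 2, 100)]       # one-hot labels, 2 classes

model = neural_network_model(input_size=4)
model.fit({'input': X}, {'targets': Y}, n_epoch=3, show_metric=True)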

Example 4: createModel

def createModel(nbClasses,imageSize):
	print("[+] Creating model...")
	convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

	convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = fully_connected(convnet, 1024, activation='elu')
	convnet = dropout(convnet, 0.5)

	convnet = fully_connected(convnet, nbClasses, activation='softmax')
	convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

	model = tflearn.DNN(convnet)
	print("    Model created! ✅")
	return model
Developer: withoutend, Project: DeepAudioClassification, Lines of code: 25, Source: model.py
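
A hypothetical usage sketch: the image size, class count, and random data below are placeholders, chosen only to show the [samples, imageSize, imageSize, 1] layout the input_data layer expects:

import numpy as np

imageSize, nbClasses = 64, 10
X = np.random.rand(200, imageSize, imageSize, 1)             # grayscale images
Y = np.eye(nbClasses)[np.random.randint(0, nbClasses, 200)]  # one-hot labels

model = createModel(nbClasses, imageSize)
model.fit(X, Y, n_epoch=5, validation_set=0.1, show_metric=True)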

Example 5: alexnet

def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Developer: gcm0621, Project: pygta5, Lines of code: 26, Source: models.py
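
A hypothetical call, assuming 80x60 single-channel frames and three classes (these values are placeholders, not taken from the original project); note that the dict keys passed to fit must match the 'input' and 'targets' names declared above:

import numpy as np

WIDTH, HEIGHT, LR = 80, 60, 1e-3
X = np.random.rand(100, WIDTH, HEIGHT, 1)
Y = np.eye(3)[np.random.randint(0, 3, 100)]

model = alexnet(WIDTH, HEIGHT, LR, output=3)
model.fit({'input': X}, {'targets': Y}, n_epoch=2, show_metric=True)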

Example 6: do_cnn

def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words+1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch = 20, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)
Developer: DemonZeros, Project: 1book, Lines of code: 26, Source: 17-2.py
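
A hypothetical call with toy data: do_cnn expects lists of integer word-id sequences plus 0/1 labels, and relies on the module-level n_words and MAX_DOCUMENT_LENGTH constants assumed here; pad_sequences and to_categorical inside the function handle the rest:

n_words = 10000            # assumed vocabulary size
MAX_DOCUMENT_LENGTH = 100  # assumed padded sequence length

trainX = [[4, 27, 1053, 9], [12, 7, 301]]   # word-id sequences
trainY = [1, 0]
testX = [[4, 9, 88]]
testY = [1]
do_cnn(trainX, trainY, testX, testY)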

Example 7: _model2

def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides = 2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Developer: richardbored, Project: customData, Lines of code: 60, Source: evaluate.py

Example 8: train_nmf_network

def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """

    :param mfcc_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Developer: ethman, Project: prediction, Lines of code: 34, Source: repet_nmf_choice.py
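
A hypothetical call, with random arrays standing in for real features: one 13x100 MFCC patch per example and a single SDR value as the regression target:

import numpy as np

mfcc_array = np.random.rand(500, 13, 100, 1)   # [samples, n_mfcc, frames, 1]
sdr_array = np.random.rand(500, 1)             # one SDR score per sample

model = train_nmf_network(mfcc_array, sdr_array, n_epochs=10, take=0)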

Example 9: train_repet_network

def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """

    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Developer: ethman, Project: prediction, Lines of code: 35, Source: repet_nmf_choice.py

Example 10: neural_network_model

def neural_network_model(input_size):
    """
    Function is to build NN based on the input size
    :param input_size: feature size of each observation
    :return: tensorflow model
    """
    network = input_data(shape=[None, input_size], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, learning_rate=LR,  name='targets')
    model = tflearn.DNN(network, tensorboard_dir='logs/ann/ann_0')

    return model
Developer: Eudie, Project: Online-Practice, Lines of code: 28, Source: ann_playing.py

Example 11: build_model_anything_happening

def build_model_anything_happening():
    ### IS ANY OF THIS NECESSARY FOR LIGHT/DARK? IN GENERAL W/ STATIONARY CAMERA?
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()

    # Specify shape of the data, image prep
    network = input_data(shape=[None, 52, 64],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)

    # Since the image framing remains consistent and the images are fairly similar,
    # the model can be spatially aware. Fully connected layers are used directly; no convolution needed.
    network = fully_connected(network, 2048, activation='relu')
    network = fully_connected(network, 2, activation='softmax')

    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.00003)

    model = tflearn.DNN(network, tensorboard_verbose=0)
    return model
Developer: LastZactionHero, Project: zookeeper_prime, Lines of code: 25, Source: network_anything_happening.py
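
A hypothetical usage sketch, assuming 52x64 grayscale frames and two classes; the random data only illustrates the [samples, 52, 64] layout declared by input_data (note there is no channel dimension here):

import numpy as np

X = np.random.rand(300, 52, 64)
Y = np.eye(2)[np.random.randint(0, 2, 300)]

model = build_model_anything_happening()
model.fit(X, Y, n_epoch=5, validation_set=0.1, show_metric=True)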

Example 12: alexnet

def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
Developer: Emersonxuelinux, Project: 2book, Lines of code: 31, Source: cnn.py

Example 13: build_network

def build_network(self):
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
Developer: somaticapi, Project: mood-recognition-neural-networks, Lines of code: 31, Source: mood_recognition.py

Example 14: do_cnn_doc2vec

def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print "CNN and doc2vec"

    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128, validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="review")
Developer: Emersonxuelinux, Project: 2book, Lines of code: 28, Source: review.py

Example 15: do_cnn_doc2vec_2d

def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print "CNN and doc2vec 2d"

    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])


    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='review')
Developer: Emersonxuelinux, Project: 2book, Lines of code: 28, Source: review.py


Note: The tflearn.layers.core.input_data examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using it, and do not reproduce this article without permission.