

Python tflearn.regression Method Code Examples

This article collects typical usage examples of the Python method tflearn.regression. If you are wondering what tflearn.regression does, how to call it, or what its usage looks like in practice, the curated code samples below should help. You can also explore further usage examples from the tflearn module that this method belongs to.


The following presents 15 code examples of tflearn.regression, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
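Before the collected examples, here is a minimal, self-contained sketch of the usual pattern: build the layers, call tflearn.regression on the output to attach the optimizer, loss and learning rate, then train through tflearn.DNN. The toy XOR-style data and every hyperparameter below are illustrative assumptions, not taken from any of the projects that follow.

import tflearn

# Toy XOR-style data (illustrative only).
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]

# Build the graph: input -> hidden layer -> softmax output.
net = tflearn.input_data(shape=[None, 2])
net = tflearn.fully_connected(net, 8, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')

# regression() attaches the training op (optimizer, loss, learning rate).
net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy')

# DNN wraps the graph with fit/predict/save helpers.
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=200, snapshot_epoch=False)
print(model.predict([[1., 0.]]))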

Example 1: resnext

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    # ResNeXt-style image classifier. `n` (residual blocks per stage) is
    # undefined in the original snippet; a value is assumed here so the code runs.
    n = 5
    net = tflearn.input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Developer: Sentdex | Project: pygta5 | Lines of code: 23 | Source: models.py
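The function above only defines and wraps the graph. A minimal, hypothetical way to drive it is sketched below; the image size, learning rate, number of samples and the random dummy data are all illustrative assumptions, not part of the pygta5 project.

import numpy as np

WIDTH, HEIGHT = 80, 60          # assumed capture resolution
model = resnext(WIDTH, HEIGHT, frame_count=1, lr=1e-3, output=9)

# Dummy RGB frames and one-hot labels over 9 classes.
X = np.random.rand(16, WIDTH, HEIGHT, 3)
Y = np.eye(9)[np.random.randint(0, 9, size=16)]

# 'input' matches the name given to input_data() inside resnext().
model.fit({'input': X}, Y, n_epoch=1, snapshot_epoch=False)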

Example 2: test_feed_dict_no_None

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
            self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit) 
Developer: limbo018 | Project: FRU | Lines of code: 21 | Source: test_layers.py
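The test above asserts that an unknown feed-dict key raises an error. For contrast, a sketch of the valid usage, keying the feed dict by the input layer's name, is shown below; it reuses the same toy X/Y with a deliberately smaller network, so everything here is illustrative rather than part of the FRU test suite.

import tensorflow as tf
import tflearn

X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

with tf.Graph().as_default():
    g = tflearn.input_data(shape=[None, 4], name="X_in")
    g = tflearn.fully_connected(g, 2, activation='softmax')
    g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)
    m = tflearn.DNN(g)
    # The feed-dict key must match the name of a known input layer.
    m.fit({"X_in": X}, Y, n_epoch=10, snapshot_epoch=False)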

Example 3: build_simple_model

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def build_simple_model(self):
        """Build a simple model for test
        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], Target data
        """
        inputPlaceholder1, inputPlaceholder2 = \
            tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
        input1 = tflearn.input_data(placeholder = inputPlaceholder1)
        input2 = tflearn.input_data(placeholder = inputPlaceholder2)
        network = tflearn.merge([ input1, input2 ], "sum")
        network = tflearn.reshape(network, (1, 1))
        network = tflearn.fully_connected(network, 1)
        network = tflearn.regression(network)
        return (
            tflearn.DNN(network),
            [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
            self.TARGET,
        ) 
Developer: limbo018 | Project: FRU | Lines of code: 20 | Source: test_inputs.py

Example 4: createDNNLayers

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def createDNNLayers(self, x, y):

        ###############################################################
        #
        # Sets up the DNN layers, configuration in required/confs.json
        #
        ###############################################################
        
        net = tflearn.input_data(shape=[None, len(x[0])])

        for i in range(self._confs["NLU"]['FcLayers']):
            net = tflearn.fully_connected(net, self._confs["NLU"]['FcUnits'])

        net = tflearn.fully_connected(net, len(y[0]), activation=str(self._confs["NLU"]['Activation']))

        if self._confs["NLU"]['Regression']:
            net = tflearn.regression(net)

        return net 
Developer: GeniSysAI | Project: NLU | Lines of code: 21 | Source: Model.py
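createDNNLayers reads its layer count, unit count, activation and regression switch from self._confs["NLU"]. A hypothetical configuration fragment matching those keys is sketched below; the values are assumptions, not the project's actual required/confs.json.

# Hypothetical "NLU" section of required/confs.json (values are illustrative).
confs = {
    "NLU": {
        "FcLayers": 2,            # number of hidden fully connected layers
        "FcUnits": 128,           # units per hidden layer
        "Activation": "softmax",  # activation of the output layer
        "Regression": True        # wrap the network in tflearn.regression
    }
}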

Example 5: test_case1

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
# Also required by this snippet: import numpy as np
def test_case1():
    x = [1,2,3]
    y = [0.01,0.99]
    # Repeat x to form multiple input samples
    X = np.array(np.repeat([x], 1, axis=0))
    # Repeat y to form the matching target values
    Y = np.array(np.repeat([y], 1, axis=0))

    #X = np.array([x1,x2], dtype=np.float32)
    #Y = np.array([y1,y2])

    # The second number here is the dimensionality of the input vector x
    net = tflearn.input_data(shape=[None, 3])
    #net = tflearn.fully_connected(net, 32)
    net = tflearn.fully_connected(net, 2)
    # The second argument here is the dimensionality of the output vector y
    #net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net)


    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=1000, batch_size=1, show_metric=True, snapshot_epoch=False)
    pred = model.predict([x])
    print(pred) 
Developer: warmheartli | Project: ChatBotCourse | Lines of code: 26 | Source: one_lstm_sequence_generate.py

Example 6: build_estimator

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def build_estimator(model_dir, model_type, embeddings,index_map, combination_method):
  """Build an estimator."""

  # Continuous base columns.
  node1 = tf.contrib.layers.real_valued_column("node1")

  deep_columns = [node1]
  
  if model_type == "regressor":

      tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
      if combination_method == 'concatenate':
          net = tflearn.input_data(shape=[None, embeddings.shape[1]*2])
      else:
          net = tflearn.input_data(shape=[None, embeddings.shape[1]])
      net = tflearn.fully_connected(net, 100, activation='relu')
      net = tflearn.fully_connected(net, 2, activation='softmax')
      net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
      m = tflearn.DNN(net)
  else:
    # NOTE: wide_columns is referenced below but is not defined in this snippet;
    # it is defined elsewhere in the original project.
    m = tf.contrib.learn.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=[100])
  return m 
Developer: cambridgeltl | Project: link-prediction_with_deep-learning | Lines of code: 28 | Source: link_prediction.py

Example 7: get_network

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net 
Developer: hthuwal | Project: sign-language-gesture-recognition | Lines of code: 11 | Source: rnn_utils.py
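A hypothetical way to train one of these RNN builders is sketched below; the frame count, feature size, class count and random dummy data are assumptions for illustration, not values from the project.

import numpy as np
import tflearn

FRAMES, INPUT_SIZE, NUM_CLASSES = 40, 2048, 5   # assumed dimensions

net = get_network(FRAMES, INPUT_SIZE, NUM_CLASSES)
model = tflearn.DNN(net, tensorboard_verbose=0)

# Dummy sequences of shape (samples, frames, features) and one-hot labels.
X = np.random.rand(8, FRAMES, INPUT_SIZE)
Y = np.eye(NUM_CLASSES)[np.random.randint(0, NUM_CLASSES, size=8)]

model.fit(X, Y, n_epoch=1, batch_size=4, snapshot_epoch=False)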

Example 8: get_network_deep

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net 
Developer: hthuwal | Project: sign-language-gesture-recognition | Lines of code: 12 | Source: rnn_utils.py

Example 9: get_network_wide

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net 
Developer: hthuwal | Project: sign-language-gesture-recognition | Lines of code: 10 | Source: rnn_utils.py

Example 10: get_network_wider

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net 
Developer: hthuwal | Project: sign-language-gesture-recognition | Lines of code: 10 | Source: rnn_utils.py

Example 11: vgg_net_19

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def vgg_net_19(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    
    opt = Momentum(learning_rate=0, momentum = 0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    
    model = DNN(network, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
    
    return model

# Model of VGG-19 for testing the activations.
# Rename the output you want to test, connect it to the next layer, and change the output layer at the bottom (model = DNN(...)).
# Make sure to use the correct test function (depending on whether your output is a tensor or a vector).
Developer: lFatality | Project: tensorflow2caffe | Lines of code: 41 | Source: model.py

Example 12: vgg_net_19_activations

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def vgg_net_19_activations(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network1 = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network2 = conv_2d(network1, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network2, 2, strides=2)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    
    opt = Momentum(learning_rate=0, momentum = 0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    
    model = DNN(network1, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
    
    return model 
Developer: lFatality | Project: tensorflow2caffe | Lines of code: 37 | Source: model.py
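Note that the DNN above wraps network1, the first convolutional layer, rather than the softmax output, so predict() returns that layer's activations. A hypothetical call is sketched below; the input size and the random image are assumptions, and in practice you would load converted weights with model.load(...) before predicting.

import numpy as np

WIDTH, HEIGHT = 224, 224   # assumed VGG-style input size
model = vgg_net_19_activations(WIDTH, HEIGHT)

# model.load('my_weights')  # hypothetical: load trained weights first

# One dummy RGB image; predict() returns the first conv layer's activations.
dummy = np.random.rand(1, HEIGHT, WIDTH, 3)
activations = model.predict(dummy)
print(np.array(activations).shape)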

Example 13: sentnet_color_2d

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Developer: Sentdex | Project: pygta5 | Lines of code: 39 | Source: models.py

Example 14: sentnet_LSTM_gray

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Developer: Sentdex | Project: pygta5 | Lines of code: 15 | Source: models.py

Example 15: sentnet_color

# Required module: import tflearn [as alias]
# Or: from tflearn import regression [as alias]
def sentnet_color(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    network = input_data(shape=[None, width, height,3, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model 
Developer: Sentdex | Project: pygta5 | Lines of code: 39 | Source: models.py


Note: The tflearn.regression method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not repost without permission.