本文整理汇总了Python中tflearn.regression函数的典型用法代码示例。如果您正苦于以下问题:Python regression函数的具体用法?Python regression怎么用?Python regression使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了regression函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: xor_operation
def xor_operation():
    """Learn XOR as the element-wise product of a NAND branch and an OR branch.

    Truth tables: XOR(a, b) == NAND(a, b) * OR(a, b), so two independently
    trained sub-networks merged with 'elemwise_mul' realize XOR.
    """
    inputs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    targets_nand = [[1.], [1.], [1.], [0.]]
    targets_or = [[0.], [1.], [1.], [1.]]
    with tf.Graph().as_default():
        shared_input = tflearn.input_data(shape=[None, 2])

        # NAND branch: two linear hidden layers into a sigmoid output.
        nand_branch = tflearn.fully_connected(shared_input, 32, activation='linear')
        nand_branch = tflearn.fully_connected(nand_branch, 32, activation='linear')
        nand_branch = tflearn.fully_connected(nand_branch, 1, activation='sigmoid')
        nand_branch = tflearn.regression(nand_branch, optimizer='sgd',
                                         learning_rate=2.,
                                         loss='binary_crossentropy')

        # OR branch: identical topology, trained against the OR targets.
        or_branch = tflearn.fully_connected(shared_input, 32, activation='linear')
        or_branch = tflearn.fully_connected(or_branch, 32, activation='linear')
        or_branch = tflearn.fully_connected(or_branch, 1, activation='sigmoid')
        or_branch = tflearn.regression(or_branch, optimizer='sgd',
                                       learning_rate=2.,
                                       loss='binary_crossentropy')

        # Element-wise product of the two branch outputs yields XOR.
        xor_graph = tflearn.merge([nand_branch, or_branch], mode='elemwise_mul')

        # Model training
        model = tflearn.DNN(xor_graph)
        model.fit(inputs, [targets_nand, targets_or], n_epoch=100, snapshot_epoch=False)
        prediction = model.predict([[0., 1.]])
        print("Prediction: ", prediction)
示例2: run_combo_XOR
def run_combo_XOR():
    """Train NAND and OR sub-graphs on a shared input and merge them into XOR."""
    xs = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    y_nand = [[1.], [1.], [1.], [0.]]
    y_or = [[0.], [1.], [1.], [1.]]
    net_in = tflearn.input_data(shape=[None, 2])

    # NAND branch
    nand_net = tflearn.fully_connected(net_in, 32, activation='linear')
    nand_net = tflearn.fully_connected(nand_net, 32, activation='linear')
    nand_net = tflearn.fully_connected(nand_net, 1, activation='sigmoid')
    nand_net = tflearn.regression(nand_net, optimizer='sgd',
                                  learning_rate=2.,
                                  loss='binary_crossentropy')

    # OR branch (the original comment mislabeled this branch as "Nand graph")
    or_net = tflearn.fully_connected(net_in, 32, activation='linear')
    or_net = tflearn.fully_connected(or_net, 32, activation='linear')
    or_net = tflearn.fully_connected(or_net, 1, activation='sigmoid')
    or_net = tflearn.regression(or_net, optimizer='sgd',
                                learning_rate=2.,
                                loss='binary_crossentropy')

    xor_net = tflearn.merge([nand_net, or_net], mode='elemwise_mul')
    trained = train_model(xor_net, xs, [y_nand, y_or])

    # Reuse the DNN's own session instead of opening a separate tf.Session().
    sess = trained.session
    print(
        sess.run(tflearn.merge([y_nand, y_or], mode='elemwise_mul')))
示例3: test_dnn
def test_dnn(self):
    """Fit a single-unit linear regression, then verify the save/load round-trip."""
    with tf.Graph().as_default():
        xs = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
              7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
        ys = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53,
              1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
        net_in = tflearn.input_data(shape=[None])
        unit = tflearn.single_unit(net_in)
        reg = tflearn.regression(unit, optimizer='sgd', loss='mean_square',
                                 metric='R2', learning_rate=0.01)
        model = tflearn.DNN(reg)
        # Testing fit and predict: the prediction at 3.2 should fall on the
        # fitted line, inside (1.3, 1.8).
        model.fit(xs, ys, n_epoch=1000, show_metric=True, snapshot_epoch=False)
        res = model.predict([3.2])[0]
        self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
        self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")
        # Testing save method
        model.save("test_dnn.tflearn")
        self.assertTrue(os.path.exists("test_dnn.tflearn"))

    with tf.Graph().as_default():
        # Rebuild an identical graph and confirm predictions survive a reload.
        net_in = tflearn.input_data(shape=[None])
        unit = tflearn.single_unit(net_in)
        reg = tflearn.regression(unit, optimizer='sgd', loss='mean_square',
                                 metric='R2', learning_rate=0.01)
        model = tflearn.DNN(reg)
        # Testing load method
        model.load("test_dnn.tflearn")
        res = model.predict([3.2])[0]
        self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
        self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")
示例4: test_regression_placeholder
def test_regression_placeholder(self):
    '''
    Check that regression does not duplicate placeholders
    '''
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None, 2])
        net = tflearn.fully_connected(net, 1, activation='linear')
        with tf.name_scope("Y"):
            y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
        # Attach two regression ops that share the same target placeholder;
        # the TARGETS collection must still hold exactly one entry.
        # for this test, just use the same default trainable_vars
        # in practice, this should be different for the two regressions
        for op_name, optimizer in (("regression1", 'sgd'), ("regression2", 'adam')):
            tflearn.regression(net, optimizer=optimizer,
                               placeholder=y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy',
                               op_name=op_name,
                               name="Y")
        self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
示例5: __init__
def __init__(self, s_date, n_frame):
    """Build a 3-layer batch-normalized regression net for period s_date,
    warm-starting from the previous period's saved model when it exists.
    """
    self.n_epoch = 20
    # Derive the previous period from s_date.
    # NOTE(review): assumes s_date is shaped like "YYYYMM..._YYYYMM..." so
    # that s_date[:6] and s_date[9:15] are YYYYMM fields -- confirm the
    # slice offsets against the caller's actual date format.
    prev_bd = int(s_date[:6]) - 1
    prev_ed = int(s_date[9:15]) - 1
    # Month 00 means the decrement crossed a year boundary; subtracting 98
    # rolls back to December of the previous year (e.g. 201700 -> 201612).
    if prev_bd % 100 == 0:
        prev_bd -= 98
    if prev_ed % 100 == 0:
        prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
    self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    # 23 features per frame, n_frame frames flattened into one input vector.
    net = tflearn.input_data(shape=[None, 23 * n_frame], name='input')
    net = tflearn.fully_connected(net, 400, name='dense1', activation='relu')
    net = tflearn.batch_normalization(net, name='BN1')
    net = tflearn.fully_connected(net, 100, name='dense2', activation='relu')
    net = tflearn.batch_normalization(net, name='BN2')
    net = tflearn.fully_connected(net, 1, name='dense3')
    out = tflearn.single_unit(net)
    reg = tflearn.regression(out, optimizer='adam', loss='mean_square',
                             metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(reg)

    # Warm start: when last period's weights exist, load them and train
    # for fewer epochs.
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)
        self.n_epoch = 10
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)
示例6: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an LSTM classifier on padded sequences and print a
    classification report and confusion matrix for the test set.

    Fix: the original mixed Python 2 print statements (`print i[0]`,
    `print metrics.confusion_matrix(...)`) with the print() calls used
    elsewhere in this file; all output now uses the print() function.
    """
    max_document_length = 64
    y_test = testY
    # Sequence padding to a fixed length.
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="dga", n_epoch=1)
    y_predict_list = model.predict(testX)
    # Threshold the class-0 probability to recover hard labels.
    y_predict = []
    for probs in y_predict_list:
        print(probs[0])
        if probs[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
示例7: build_network
def build_network():
    """Return a small sigmoid-output regression network over 2-D inputs."""
    net = tflearn.input_data(shape=[None, 2])
    # One L2-regularized hidden layer, then a single sigmoid output unit.
    net = tflearn.fully_connected(net, 64, activation='relu',
                                  regularizer='L2', weight_decay=0.001)
    net = tflearn.fully_connected(net, 1, activation='sigmoid')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.3,
                             loss='mean_square')
    return net
示例8: do_rnn
def do_rnn(x_train, x_test, y_train, y_test):
    """Train a stacked-LSTM classifier on pre-vectorized sequences.

    Fix: converts the Python 2 print statement to the print() function used
    elsewhere in this file.
    """
    global n_words
    # Data preprocessing
    print("GET n_words embedding %d" % n_words)
    # Sequence padding was left disabled by the original author:
    # x_train = pad_sequences(x_train, maxlen=100, value=0.)
    # x_test = pad_sequences(x_test, maxlen=100, value=0.)
    # Converting labels to binary vectors
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)
    # Network building: two stacked LSTMs (first returns full sequences).
    net = tflearn.input_data(shape=[None, 100, n_words])
    net = tflearn.lstm(net, 10, return_seq=True)
    net = tflearn.lstm(net, 10)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,
                             name="output", loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test),
              show_metric=True, batch_size=32, run_id="maidou")
示例9: model_for_type
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
    one_layer_relu_conv,
    two_layer_relu_conv.

    Raises:
        ValueError: if neural_net_type is not one of the names above.

    Fix: the original printed "ERROR: exiting, ..." on an unknown type but
    did not exit, silently attaching the classifier to the raw input layer;
    it now raises so the mistake cannot go unnoticed.
    """
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])
    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        raise ValueError("unknown layer type for neural net: %r" % neural_net_type)
    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")
    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")
    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")
    return tflearn.DNN(net, tensorboard_verbose=0)
示例10: use_tflearn
def use_tflearn():
    """MNIST demo: two tanh hidden layers with dropout, trained via SGD
    with learning-rate decay and a Top-3 accuracy metric."""
    import tflearn
    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network: two identical hidden stages.
    net = tflearn.input_data(shape=[None, 784])
    for _ in range(2):
        net = tflearn.fully_connected(net, 64, activation='tanh',
                                      regularizer='L2', weight_decay=0.001)
        net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    net = tflearn.regression(net, optimizer=sgd,
                             metric=tflearn.metrics.Top_k(3),
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
示例11: yn_net
def yn_net():
    """Four-stage conv/pool binary classifier for img_rows x img_cols
    single-channel images; returns a ready-to-train tflearn.DNN.

    Fix: the original reused name='dropout0' for all four dropout layers;
    each stage's dropout now carries a unique stage-numbered name, matching
    the convN/maxpoolN naming convention of the surrounding layers.
    """
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1])  # D = 256, 256
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool0')  # D = 128, 128
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool1')  # D = 64, 64
    net = tflearn.dropout(net, 0.75, name='dropout1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool2')  # D = 32 by 32
    net = tflearn.dropout(net, 0.75, name='dropout2')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool3')  # D = 16 by 16
    net = tflearn.dropout(net, 0.75, name='dropout3')
    # net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.1')
    # net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.2')
    # net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool4')  # D = 8 by 8
    # net = tflearn.dropout(net, 0.75, name='dropout4')
    net = tflearn.fully_connected(net, n_units=128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir='/tmp/tflearn_logs/')
    return model
示例12: _model2
def _model2():
    """Rebuild the ResNet-style evaluation graph, load weights from _path,
    and print predictions/metrics for the global test set (xTest/yTest)."""
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    n = 2   # residual blocks per stage
    j = 64  # base filter count, doubled at each downsampling stage
    # Alternative stem (3x3 conv, no max-pool) kept for reference:
    # net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    # net = tflearn.residual_block(net, n, j)
    # net = tflearn.residual_block(net, 1, j*2, downsample=True)
    # net = tflearn.residual_block(net, n-1, j*2)
    # net = tflearn.residual_block(net, 1, j*4, downsample=True)
    # net = tflearn.residual_block(net, n-1, j*4)
    # net = tflearn.residual_block(net, 1, j*8, downsample=True)
    # net = tflearn.residual_block(net, n-1, j*8)
    # net = tflearn.batch_normalization(net)
    # net = tflearn.activation(net, 'relu')
    # net = tflearn.global_avg_pool(net)
    net = tflearn.conv_2d(net, j, 7, strides=2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j * 2, downsample=True)
    net = tflearn.residual_block(net, n - 1, j * 2)
    net = tflearn.residual_block(net, 1, j * 4, downsample=True)
    net = tflearn.residual_block(net, n - 1, j * 4)
    net = tflearn.residual_block(net, 1, j * 8, downsample=True)
    net = tflearn.residual_block(net, n - 1, j * 8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                        max_checkpoints=10, tensorboard_verbose=3,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)
    # Dump raw class probabilities next to the model file.
    pd.DataFrame(pred).to_csv(_path + ".csv")
    # Post-process on a copy first so makeCSV sees the convert2 form.
    newList = convert2(pred.copy())
    if _CSV:
        makeCSV(newList)
    pred = convert3(convert2(pred))
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile:
        writeTest(pred)
示例13: train_nmf_network
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """Train a 2-D CNN regressor that maps MFCC patches to SDR values.

    :param mfcc_array: training inputs, one 13x100x1 MFCC patch per example
    :param sdr_array: regression targets (SDR values)
    :param n_epochs: number of training epochs
    :param take: index used only to label the training run
    :return: the trained tflearn.DNN model
    """
    with tf.Graph().as_default():
        net = input_data(shape=[None, 13, 100, 1])
        # Two conv/pool stages with widening filter counts.
        for filters in (32, 64):
            net = conv_2d(net, filters, [5, 5], activation="relu", regularizer="L2")
            net = max_pool_2d(net, 2)
        # Dense head ending in a single linear regression unit.
        net = fully_connected(net, 128, activation="relu")
        net = dropout(net, 0.8)
        net = fully_connected(net, 256, activation="relu")
        net = dropout(net, 0.8)
        net = fully_connected(net, 1, activation="linear")
        net = tflearn.regression(net, optimizer="rmsprop",
                                 loss="mean_square", learning_rate=0.001)
        # Training
        model = tflearn.DNN(net)  # , session=sess)
        model.fit(mfcc_array, sdr_array,
                  n_epoch=n_epochs,
                  snapshot_step=1000,
                  show_metric=True,
                  run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take))
        return model
示例14: train_repet_network
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """Train a 1-D CNN regressor that maps beat spectra to SDR values.

    :param beat_spectrum_array: training inputs, one length-432 beat spectrum each
    :param sdr_array: regression targets (SDR values)
    :param n_epochs: number of training epochs
    :param take: index used only to label the training run
    :return: the trained tflearn.DNN model
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        # Two 1-D conv/pool stages feeding a dense regression head.
        net = input_data(shape=[None, beat_spec_len, 1])
        net = conv_1d(net, 32, 4, activation="relu", regularizer="L2")
        net = max_pool_1d(net, 2)
        net = conv_1d(net, 64, 80, activation="relu", regularizer="L2")
        net = max_pool_1d(net, 2)
        net = fully_connected(net, 128, activation="relu")
        net = dropout(net, 0.8)
        net = fully_connected(net, 256, activation="relu")
        net = dropout(net, 0.8)
        net = fully_connected(net, 1, activation="linear")
        net = tflearn.regression(net, optimizer="rmsprop",
                                 loss="mean_square", learning_rate=0.001)
        # Training
        model = tflearn.DNN(net)  # , session=sess)
        model.fit(beat_spectrum_array, sdr_array,
                  n_epoch=n_epochs,
                  snapshot_step=1000,
                  show_metric=True,
                  run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take))
        return model
示例15: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding + LSTM classifier on padded token sequences.

    Fix: converts the Python 2 print statement to the print() function used
    elsewhere in this file.
    """
    global n_words
    # Data preprocessing
    print("GET n_words embedding %d" % n_words)
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")