This page collects typical usage examples of the Python method mlp.MLP.predict. If you have been wondering how MLP.predict is used in practice, the hand-picked code examples below should help; you can also look further into usage examples of its containing class, mlp.MLP.
Six code examples of MLP.predict are shown below, sorted by popularity by default.
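All six examples follow the same basic pattern: build an MLP, train it with a fit/train call, then call predict on held-out data and compare the result against the true labels. As a minimal, self-contained point of reference, here is that pattern sketched with scikit-learn's MLPClassifier standing in for the project-specific mlp.MLP (whose constructor arguments differ from example to example below).

# Minimal sketch of the shared fit/predict pattern, using scikit-learn's
# MLPClassifier as a stand-in for the custom mlp.MLP used in the examples below.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

clf = MLPClassifier(hidden_layer_sizes=(16,), max_iter=500, random_state=0)
clf.fit(X_train, y_train)                 # train the network
prediction = clf.predict(X_test)          # predict: class labels for unseen samples
accuracy = np.sum(prediction == y_test) / float(y_test.shape[0])
print('Test accuracy: %f' % accuracy)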
Example 1: __init__
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
# Standard imports used by this snippet; contextwin, shuffle, evaluate and
# time_format are project-specific helpers assumed to be importable alongside MLP.
import os
import sys
import time
import numpy as np

class CWS:
    def __init__(self, s):
        self.mlp = MLP(s['ne'], s['de'], s['win'], s['nh'], 4, s['L2_reg'], np.random.RandomState(s['seed']))
        self.s = s

    def fit(self, lex, label):
        s = self.s
        n_sentences = len(lex)
        n_train = int(n_sentences * (1. - s['valid_size']))
        s['clr'] = s['lr']
        best_f = 0
        for e in xrange(s['n_epochs']):
            # shuffle sentences and labels together, then split off a validation set
            shuffle([lex, label], s['seed'])
            train_lex, valid_lex = lex[:n_train], lex[n_train:]
            train_label, valid_label = label[:n_train], label[n_train:]
            tic = time.time()
            cost = 0
            for i in xrange(n_train):
                if len(train_lex[i]) == 2: continue
                words = np.asarray(contextwin(train_lex[i], s['win']), dtype='int32')
                labels = [0] + train_label[i] + [0]
                y_pred = self.mlp.predict(words)
                cost += self.mlp.fit(words, [0]+y_pred, [0]+labels, s['clr'])
                self.mlp.normalize()
                if s['verbose']:
                    print '[learning] epoch %i >> %2.2f%%' % (e+1, (i+1)*100./n_train), 'completed in %s << \r' % time_format(time.time() - tic),
                    sys.stdout.flush()
            print '[learning] epoch %i >> cost = %f' % (e+1, cost / n_train), ', %s used' % time_format(time.time() - tic)
            # evaluate on the held-out sentences after every epoch
            pred_y = self.predict(valid_lex)
            p, r, f = evaluate(pred_y, valid_label)
            print ' P: %2.2f%% R: %2.2f%% F: %2.2f%%' % (p*100., r*100., f*100.)
            '''
            if f > best_f:
                best_f = f
                self.save()
            '''

    def predict(self, lex):
        s = self.s
        y = [self.mlp.predict(np.asarray(contextwin(x, s['win'])).astype('int32'))[1:-1] for x in lex]
        return y

    def save(self):
        if not os.path.exists('params'): os.mkdir('params')
        self.mlp.save()

    def load(self):
        self.mlp.load()
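A hypothetical usage sketch for the wrapper above: the keys in s mirror the ones the class reads, but the concrete values are placeholders, and lex/label stand for whatever integer-encoded sentences and matching tag sequences this project uses.

# Hypothetical usage of CWS; every value below is a placeholder, and lex/label
# are assumed to be lists of word-index sequences and matching tag sequences.
s = {
    'ne': 5000, 'de': 100, 'nh': 300,   # passed straight through to MLP(...)
    'win': 5,                           # context window size used by contextwin
    'L2_reg': 1e-4,
    'seed': 345,
    'lr': 0.1,
    'valid_size': 0.1,                  # fraction of sentences held out for validation
    'n_epochs': 10,
    'verbose': True,
}
cws = CWS(s)
cws.fit(lex, label)           # lex: sentences as word indices, label: tag sequences
tags = cws.predict(lex[:10])  # per-sentence tag predictions
cws.save()                    # writes the MLP parameters under ./params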
Example 2: testMLP
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
def testMLP(self):
    '''
    Using an MLP with one hidden layer and one softmax layer.
    '''
    conf_filename = './snippet_mlp.conf'
    start_time = time.time()
    configer = MLPConfiger(conf_filename)
    mlpnet = MLP(configer, verbose=True)
    end_time = time.time()
    pprint('Time used to build the architecture of MLP: %f seconds' % (end_time-start_time))
    # Training
    start_time = time.time()
    for i in xrange(configer.nepoch):
        cost, accuracy = mlpnet.train(self.snippet_train_set, self.snippet_train_label)
        pprint('epoch %d, cost = %f, accuracy = %f' % (i, cost, accuracy))
    end_time = time.time()
    pprint('Time used for training MLP network on Snippet task: %f minutes' % ((end_time-start_time)/60))
    # Test
    test_size = self.snippet_test_label.shape[0]
    prediction = mlpnet.predict(self.snippet_test_set)
    accuracy = np.sum(prediction == self.snippet_test_label) / float(test_size)
    pprint('Test accuracy: %f' % accuracy)
Example 3: testMLP
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
def testMLP(self):
    '''
    Sentiment analysis task for sentence representation using MLP,
    with one hidden layer and one softmax layer.
    '''
    conf_filename = './sentiment_mlp.conf'
    start_time = time.time()
    configer = MLPConfiger(conf_filename)
    mlpnet = MLP(configer, verbose=True)
    end_time = time.time()
    pprint('Time used to build the architecture of MLP: %f seconds.' % (end_time-start_time))
    # Training
    start_time = time.time()
    for i in xrange(configer.nepoch):
        rate = 2.0 / ((1.0 + i/500) ** 2)
        cost, accuracy = mlpnet.train(self.senti_train_set, self.senti_train_label, rate)
        pprint('epoch %d, cost = %f, accuracy = %f' % (i, cost, accuracy))
    end_time = time.time()
    pprint('Time used for training MLP network on Sentiment analysis task: %f minutes.' % ((end_time-start_time)/60))
    # Test
    prediction = mlpnet.predict(self.senti_test_set)
    accuracy = np.sum(prediction == self.senti_test_label) / float(self.test_size)
    pprint('Test accuracy: %f' % accuracy)
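Examples 2 and 3 compute accuracy by hand from the predict output; for reference, the same check can be done with sklearn.metrics, which also gives a per-class breakdown. The arrays below are random stand-ins, since the Snippet and Sentiment datasets are not part of this page.

# Same accuracy computation as above, cross-checked against sklearn.metrics on
# random stand-in arrays (the real test sets are not reproduced here).
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

rng = np.random.RandomState(0)
y_true = rng.randint(0, 3, size=100)    # stand-in for e.g. self.senti_test_label
y_pred = rng.randint(0, 3, size=100)    # stand-in for mlpnet.predict(test_set)

manual = np.sum(y_pred == y_true) / float(y_true.shape[0])
assert np.isclose(manual, accuracy_score(y_true, y_pred))
print(confusion_matrix(y_true, y_pred)) # rows: true class, columns: predicted class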
Example 4:
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
            model.learning_rate,
            model.momentum,
            model.dropout_p_input,
            model.dropout_p_hidden))
    # check if this configuration has already been tried
    if os.path.exists(fname):
        continue
    # if not, continue with training
    print 'Trying the following configuration:', try_params
    t0 = time.time()
    train_cost_history = model.fit(train_x, train_y, epochs=25)
    print 'Training completed in {:.2f} sec'.format(time.time() - t0)
    # and validation
    valid_y_pred = model.predict(valid_x)
    valid_accuracy = np.sum(valid_y_pred == valid_y, dtype=np.float32) / valid_y.shape[0]
    print 'Validation accuracy: {:.2f}'.format(valid_accuracy * 100)
    if best_valid is None or best_valid < valid_accuracy:
        best_valid = valid_accuracy
        best_conf = try_params
        test_y_pred = model.predict(test_x)
        best_test = np.sum(test_y_pred == test_y, dtype=np.float32) / test_y.shape[0]
    # finally save the training costs and the validation accuracy to disk
    np.savez_compressed(fname, train_cost=train_cost_history, valid_accuracy=valid_accuracy)
    n_tried += 1
print 'Best configuration:', best_conf
print 'Best validation accuracy: {:.2f}'.format(best_valid * 100)
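The excerpt above begins part-way through the search loop (the start of the fname format call is cut off), so the exact file naming is not recoverable from this page. Assuming one .npz file per configuration, as written by the np.savez_compressed call above, the best run could be recovered afterwards along these lines.

# Hypothetical follow-up: scan the per-configuration .npz files written above and
# report the best one. The glob pattern is an assumption; the saved keys
# (train_cost, valid_accuracy) are taken from the np.savez_compressed call itself.
import glob
import numpy as np

best_fname, best_valid = None, -1.0
for fname in glob.glob('*.npz'):
    with np.load(fname) as results:
        valid_accuracy = float(results['valid_accuracy'])
    if valid_accuracy > best_valid:
        best_fname, best_valid = fname, valid_accuracy
if best_fname is not None:
    print('Best configuration file: %s (validation accuracy %.2f%%)' % (best_fname, best_valid * 100))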
Example 5: splitMomentDataByFeatureAndLabel
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
from loaddata import splitMomentDataByFeatureAndLabel
from mlp import MLP
import numpy
from sklearn.cross_validation import train_test_split   # sklearn.model_selection in scikit-learn >= 0.20
from sklearn.cross_validation import cross_val_score

userid = 1
device = 1
featureCondition = 16
classificationCondition = 1
offsetFeatureOn = False
my_test_size = 0.3
my_random_state = 42

# load the moment data for one user/device combination, then split train/test
data, label = splitMomentDataByFeatureAndLabel(userid, device, featureCondition, classificationCondition, offsetFeatureOn=offsetFeatureOn)
data = data.astype(float)
label = label.astype(int)
trainingData, testData, trainingLabel, testLabel = train_test_split(data, label, test_size=my_test_size, random_state=my_random_state)

clf = MLP(n_hidden=10, n_deep=3, l1_norm=0, drop=0.1, verbose=0).fit(trainingData, trainingLabel)
print testLabel
print clf.predict(testData)
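cross_val_score is imported above but never used. Assuming the MLP wrapper implements the full scikit-learn estimator interface (it already exposes fit and predict), a cross-validated accuracy could be added along these lines; the cv value is arbitrary.

# Hypothetical use of the unused cross_val_score import; this only works if the
# MLP wrapper is a proper scikit-learn estimator (get_params/set_params included).
scores = cross_val_score(MLP(n_hidden=10, n_deep=3, l1_norm=0, drop=0.1, verbose=0),
                         data, label, cv=5)   # cv=5 is an arbitrary choice
print('Cross-validated accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))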
Example 6: main
# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import predict [as alias]
# Imports used by this snippet; MLP comes from mlp as noted in the header comment.
import time
import numpy as np
from sklearn import datasets

def main(argv):
    # Handle command line arguments:
    # 0. just use the default percent of the
    #    data set for training (70%)
    #
    # 1. should be a number from 0 to 1
    #    (0 <= x < 1) which determines what
    #    percent of the data set should be
    #    used for training
    #
    # 2. first argument is the filename of
    #    the file containing the data set
    tpercent = 0.7
    filename = ""
    if len(argv) == 2:
        tpercent = float(argv[1])
        # make sure it's in the legal range
        if tpercent < 0.0:
            tpercent *= -1
        while tpercent >= 1.0:
            tpercent -= 1.0
    elif len(argv) == 3:
        filename = argv[1]
        tpercent = float(argv[2])
        # make sure it's in the legal range
        if tpercent < 0.0:
            tpercent *= -1
        while tpercent >= 1.0:
            tpercent -= 1.0
    # get the time for use as a random seed
    # this can be replaced by something else,
    # but using the time will allow for a
    # different shuffle each time
    seed = int(time.time())
    if filename == "":
        iris = datasets.load_iris()
        data = iris.data
        targets = iris.target
    # load from a file instead if that's the correct approach
    else:
        csv = np.genfromtxt(filename, delimiter=",")
        numcols = len(csv[0])
        data = csv[:, : numcols - 1]      # the first columns are the data
        targets = csv[:, numcols - 1]     # the last column is the targets
    # shuffle based on the time
    # this uses the same seed (the time)
    # in both the data and the targets so
    # they match up after the shuffle
    np.random.seed(seed)
    np.random.shuffle(data)
    # reset the seed
    np.random.seed(seed)
    np.random.shuffle(targets)
    # determine the correct sizes of the sets
    tsize = int(tpercent * targets.size)
    psize = targets.size - tsize
    tdata = data[:tsize]
    pdata = data[tsize : tsize + psize]
    ttargets = targets[:tsize]
    ptargets = targets[tsize : tsize + psize]
    # train the classifier
    # classifier = KnnClassifier(3)
    # classifier = KNeighborsClassifier(n_neighbors=3)
    # classifier = DTreeClassifier()
    # classifier = tree.DecisionTreeClassifier()
    classifier = MLP((3, 3), 0.2, 30)
    classifier.fit(tdata, ttargets)
    # see how it did
    numcorrect = 0
    predictions = classifier.predict(pdata)
    for i in range(psize):
        if predictions[i] == ptargets[i]:
            numcorrect += 1
    percentcorrect = (numcorrect / psize) * 100.0
    print("Completed. Predicted", str(percentcorrect), "% correctly.")
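The function above expects the full argv list, including the program name, so a conventional entry point (not shown in the original snippet) would be:

# Conventional entry point (an assumption; not part of the original snippet).
if __name__ == "__main__":
    import sys
    main(sys.argv)

It could then be run as, for example, python iris_mlp.py mydata.csv 0.8, where both file names are placeholders.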