This page collects typical usage examples of the Python method neuralnet.NeuralNet.test. If you are wondering how to use NeuralNet.test in Python, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also browse further usage examples of the containing class, neuralnet.NeuralNet.
The following shows 9 code examples of the NeuralNet.test method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
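Note that the examples come from different projects, so test is called with several different signatures; there is no single canonical API here. As a rough orientation only, the following is a hypothetical stub inferred from the call sites below (it is not the actual neuralnet module):

# Hypothetical stub summarizing the call shapes seen in the examples below.
# It is inferred from the call sites, not taken from any real neuralnet module.
class NeuralNet(object):
    def test(self, data, labels=None, threshold=None):
        # Examples 1, 4 and 8 call test(data, None, False) and read back either
        # an accuracy count or a list of per-row (input, label, output) values.
        # Examples 2, 3, 6 and 7 call test(X_te, y_te[, threshold]) and unpack
        # (best_threshold, (f1, recall, accuracy, precision)).
        # Example 5 uses the return value as a mean squared error, and
        # example 9 calls test(x) on a single input and unpacks (pred, aux).
        raise NotImplementedError("placeholder; see the concrete examples below")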
Example 1: cross_validation_iterative
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (this example also uses the standard-library time module: import time)
def cross_validation_iterative(folds, epochs, learn_rate, n, num_points):
    averages = []
    test_vals = []
    fold_results = {}
    timings = [0]*epochs
    for x in xrange(len(folds.keys())):
        fold_results[x] = {"train": [], "test": []}
        test_index = x % n
        test_set = folds[test_index]
        train_set = []
        for k, v in folds.items():
            if k != test_index: train_set += v
        nn = NeuralNet(9, [13, 14], 1, learn_rate)
        start_t = time.time()
        for j in xrange(epochs):
            nn.train(train_set, None, 1)
            # get train and test accuracy
            train_val = nn.test(train_set, None, False)
            test_val = nn.test(test_set, None, False)
            # store the accuracy results
            fold_results[x]["train"].append(train_val)
            fold_results[x]["test"].append(test_val)
            timings[j] += time.time() - start_t
        print "fold complete"
    # compute the average for each epoch
    train_a, test_a = [], []
    for e in xrange(epochs):
        num_train, num_test = 0, 0
        for i in xrange(len(folds.keys())):
            num_train += fold_results[i]["train"][e]
            num_test += fold_results[i]["test"][e]
        train_a.append((float(num_train)/(num_points*(n-1)))*100)
        test_a.append((float(num_test)/num_points)*100)
    for e in xrange(epochs):
        timings[e] = float(timings[e])/len(folds.keys())
    print train_a, test_a, timings
    return train_a, test_a, timings
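For context, cross_validation_iterative expects folds to map a fold index to a list of examples, with n folds of num_points examples each. A minimal, hypothetical driver might look like the following (load_fold, the fold contents and the parameter values are placeholders; only the call shape comes from the example above):

# Hypothetical driver -- load_fold and the parameter values are placeholders.
n, num_points = 10, 70
folds = {i: load_fold(i) for i in xrange(n)}  # {fold_index: [example, ...]}
train_acc, test_acc, timings = cross_validation_iterative(
    folds, epochs=50, learn_rate=0.1, n=n, num_points=num_points)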
Example 2: cvWithThreshold
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (here NeuralNet is imported as NN; the example also uses numpy as np and sklearn's cross_validation module)
def cvWithThreshold(conf, X, y_current_tr, y_current_te, threshold):
    scores = []
    fold = 1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold += 1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]
        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]
        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        _, score = nn.test(X_te, y_te)
        scores.append(score)
    print("\n--")
    f1 = np.mean([s[0] for s in scores])
    r = np.mean([s[1] for s in scores])
    acc = np.mean([s[2] for s in scores])
    p = np.mean([s[3] for s in scores])
    return f1, r, acc, p
Example 3: getBestThresholds
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (here NeuralNet is imported as NN; the example also uses numpy as np and sklearn's cross_validation module)
def getBestThresholds(X, y_current_tr, y_current_te, conf):
    assert len(X) == len(y_current_tr) == len(y_current_te), 'Number of features ({}), annotator1 labels ({}) and annotator2 labels ({}) is not equal!'.format(len(X), len(y_current_tr), len(y_current_te))
    #scores = {"F1":[], "Recall":[], "Accuracy":[], "Precision":[]}
    scores = []
    thresholds = []
    print('Finding best thresholds...')
    fold = 1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold += 1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]
        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]
        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        # get prediction
        best_t, score = nn.test(X_te, y_te)
        thresholds.append(best_t)
        scores.append(score)
    #scores = cross_validation.cross_val_score(maxent, features, labels, cv=10)
    print("\n--")
    return np.array(thresholds), np.array(scores)
Example 4: cross_validation_2
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (this example also uses the standard-library time module: import time)
def cross_validation_2(folds, epochs, learn_rate, n):
    averages = []
    timings = []
    for i in xrange(10):
        averages.append([])
        timings.append([])
        start_t = time.time()
        for j in xrange(10):
            test_vals = []
            for x in xrange(len(folds.keys())):
                test_index = x % n
                test_set = folds[test_index]
                train_set = []
                for k, v in folds.items():
                    if k != test_index: train_set += v
                nn = NeuralNet(9, [j+1, i+1], 1, learn_rate)
                nn.train(train_set, None, epochs)
                test_vals.append(nn.test(test_set, None, False))
            print "average: ", sum(test_vals) / len(test_vals)
            print ""
            timings[i].append(time.time() - start_t)
            averages[i].append(sum(test_vals) / len(test_vals))
        print timings[i]
        print averages[i]
    return averages, timings
Example 5: NeuralNet
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
settings = {
    # ... (earlier entries of the settings dict are not shown in this excerpt)
    # [ (number_of_neurons, activation_function) ]
    # The last pair in your list describes the number of output signals
    # Optional settings
    "weights_low"          : -0.1,   # Lower bound on initial weight range
    "weights_high"         : 0.1,    # Upper bound on initial weight range
    "save_trained_network" : False,  # Whether to write the trained weights to disk
    "input_layer_dropout"  : 0.2,    # Dropout fraction of the input layer
    "hidden_layer_dropout" : 0.5,    # Dropout fraction in all hidden layers
}
# initialize the neural network
network = NeuralNet( settings )
# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )
# start training on test set one
network.backpropagation(
    training_wine,           # specify the training set
    ERROR_LIMIT = 1e-3,      # define an acceptable error limit
    learning_rate = 0.03,    # learning rate
    momentum_factor = 0.45,  # momentum
    #max_iterations = 100,   # continues until the error limit is reached if this argument is omitted
)
print "Final MSE:", network.test( training_wine )
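The omitted head of the settings dict is where the layer list described by the comment above would be declared. As a hedged sketch only (the key names n_inputs and layers and the sigmoid_function import are assumptions inferred from the (number_of_neurons, activation_function) comment, not confirmed by this excerpt), it might look like:

# Assumed sketch of the omitted head of the settings dict -- names are not from the excerpt.
# from activation_functions import sigmoid_function   # assumed import
settings = {
    "n_inputs" : 13,                                   # assumed: number of input signals
    "layers"   : [ (10, sigmoid_function), (3, sigmoid_function) ],
    #            [ (number_of_neurons, activation_function) ]; the last pair sets the output size
    # ... optional settings as in the example above ...
}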
Example 6: main
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (here NeuralNet is imported as NN; the example also uses os, sys, argparse, numpy as np, and project helpers such as feats_and_classify, combine_data and NeuralNetConfig)
def main():
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    data = scriptdir+'/../data/cwi_training/cwi_training.txt.lbl.conll'
    testdata = scriptdir+'/../data/cwi_testing/cwi_testing.gold.txt.lbl.conll'
    pickled_data = scriptdir+'/../data.pickle'
    parser = argparse.ArgumentParser()
    parser.add_argument('--threshold', '-t', type=float, help='Threshold for predicting 0/1. If not specified, the optimal threshold will first be computed as the median of all CV splits. May take a while.')
    parser.add_argument('--iterations', '-i', type=int, default=50, help='Training iterations.')
    parser.add_argument('--hidden-layers', '-l', dest='layers', required=True, type=int, nargs='+', help='List of layer sizes')
    parser.add_argument('--cv-splits', '-c', dest='splits', type=int, help='No. of crossvalidation splits. If not specified, no CV will be performed.')
    parser.add_argument('--data', '-d', default=data, help='Features and labels')
    parser.add_argument('--testdata', '-y', default=testdata, help='Test data (not needed for crossval).')
    parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='Print average loss at every training iteration.')
    parser.add_argument('--output', '-o', help="Output file")
    parser.add_argument('--features', '-f', dest='features', default=[], type=str, nargs='+', help='List of feature types')
    args = parser.parse_args()

    # X, y = load_pickled(args.data)
    combined_data = 'X_y_all.txt'
    cutoff = combine_data(args.data, args.testdata, combined_data)
    X, y, _ = feats_and_classify.collect_features(combined_data, True, args.features)
    X_tr = X[:cutoff]
    y_tr = y[:cutoff]
    X_te = X[cutoff:]
    y_te = y[cutoff:]

    conf = NeuralNetConfig(X=X, y=y, layers=args.layers, iterations=args.iterations, verbose=args.verbose)

    if args.splits:
        if args.threshold:
            crossval(X_tr, y_tr, args.splits, conf, t=args.threshold)
        else:
            # compute optimal threshold for each CV split
            print '### Computing optimal threshold... '
            ts = crossval(X_tr, y_tr, args.splits, conf)
            avg = np.average(ts)
            med = np.median(ts)
            print '\nThresholds for crossval splits:', ts
            print 'Mean threshold', avg
            print 'Median threshold', med
            print 'Threshold st.dev.', np.std(ts)
            # Run CV with fixed avg/median threshold
            print '\n\n### Running with avg. threshold... '
            crossval(X_tr, y_tr, args.splits, conf, t=avg)
            print '\n\n### Running with med. threshold... '
            crossval(X_tr, y_tr, args.splits, conf, t=med)
    else:
        nn = NN(conf)
        nn.train(X_tr, y_tr, args.iterations)
        if args.testdata:
            # X_test, y_test = load_pickled(args.testdata)
            pred = nn.get_output(X_te)
            if args.output:
                with open(args.output, 'w') as of:
                    for p in pred:
                        of.write('%f\n' % p)
            t, res = nn.test(X_te, y_te, args.threshold)
            resout = "G: %f, R: %f, A: %f, P: %f\n" % res
            sys.stderr.write('%s %f\n' % (' '.join(args.features), t))
            sys.stderr.write(resout)
Example 7: crossval
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (here NeuralNet is imported as NN; the example also uses numpy as np)
def crossval(X, y, splits, conf, t=None):
    results = []
    ts = []
    m = len(X)
    cs = [(i*m/splits, (i+1)*m/splits) for i in range(splits)]
    for s, e in cs:
        X_tr = [X[i] for i in range(m) if i < s or i >= e]
        X_te = [X[i] for i in range(m) if i >= s and i < e]
        y_tr = [y[i] for i in range(m) if i < s or i >= e]
        y_te = [y[i] for i in range(m) if i >= s and i < e]
        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        best_t, res = nn.test(X_te, y_te, t)
        ts.append(best_t)
        results.append(res)
    f1s = [res[0] for res in results]
    rec = [res[1] for res in results]
    acc = [res[2] for res in results]
    pre = [res[3] for res in results]
    print '\nF1  | {:.3f} (std {:.3f})'.format(np.average(f1s), np.std(f1s))
    print 'Rec | {:.3f} (std {:.3f})'.format(np.average(rec), np.std(rec))
    print 'Acc | {:.3f} (std {:.3f})'.format(np.average(acc), np.std(acc))
    print 'Pre | {:.3f} (std {:.3f})'.format(np.average(pre), np.std(pre))
    return ts
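For context, a small usage sketch tying crossval back to example 6's main (X, y and the NeuralNetConfig arguments here are placeholders; only the crossval signature comes from the example above):

# Hypothetical usage -- X, y and the config arguments are placeholders.
conf = NeuralNetConfig(X=X, y=y, layers=[20, 10], iterations=50, verbose=False)
ts = crossval(X, y, splits=10, conf=conf)   # per-split best thresholds
print 'Median threshold:', np.median(ts)
# re-run with the threshold fixed, as main() does above
crossval(X, y, splits=10, conf=conf, t=np.median(ts))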
Example 8: create_roc_data
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
def create_roc_data(data):
    epochs = 60
    nn = NeuralNet(9, [13, 14], 1, .1)
    nn.train(data, None, epochs)
    ret = nn.test(data, None, False)
    results = []
    for row in ret:
        results.append((row[0][0][0], row[1][0][0], row[2][0][0]))
    print results[0]
    num_pos = len(filter(lambda x: x[1] == 1, results))
    num_neg = len(results) - num_pos
    results.sort(key=lambda x: x[-1])
    results.reverse()
    tp = 0
    fp = 0
    last_tp = 0
    roc_set = [[x[-2], x[-1]] for x in results]
    fpr_set = []
    tpr_set = []
    for i in range(1, len(roc_set)):
        if roc_set[i][1] != roc_set[i-1][1] and roc_set[i][0] != 1 and tp > last_tp:
            fpr = fp / float(num_neg)
            tpr = tp / float(num_pos)
            fpr_set.append(fpr)
            tpr_set.append(tpr)
            last_tp = tp
        if roc_set[i][0] == 1:
            tp += 1
        else:
            fp += 1
    fpr = fp / float(num_neg)
    tpr = tp / float(num_pos)
    fpr_set.append(fpr)
    tpr_set.append(tpr)
    return fpr_set, tpr_set
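The returned fpr_set/tpr_set lists can be plotted directly as an ROC curve; a minimal matplotlib sketch (the plotting code is an addition for illustration, not part of the original example):

import matplotlib.pyplot as plt

# Plot the ROC points returned by create_roc_data.
fpr_set, tpr_set = create_roc_data(data)
plt.plot(fpr_set, tpr_set, marker='.')
plt.plot([0, 1], [0, 1], linestyle='--')  # chance diagonal for reference
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()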
Example 9: NeuralNet
# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import test [as alias]
# (this example also uses numpy as np, sys, and a project helper diagnostix)
network = NeuralNet(num_dims, num_classes, hiddenLyr, hiddenLyrArgs, in_log_scale)
print("Training ▪▪▪")
for epoch in range(num_epochs):
    print('Epoch : ', epoch)
    for example in range(num_examples):
        x = data_x[example]
        y = data_y[example]
        if example < num_training_examples:
            if in_log_scale and len(y) < 2:
                continue
            cst, pred, aux = network.train(x, y)
            if (epoch % 12 == 0 and example < 3) or np.isinf(cst):
                print('\n▪▪▪▪▪▪▪▪▪▪▪▪▪▪ COST = {} ▪▪▪▪▪▪▪▪▪▪▪▪▪▪ '.format(np.round(cst, 3)))
                diagnostix(y, x, pred, aux > 1e-20, 'Forward probabilities:')
                if np.isinf(cst):
                    print('Cost Blew Up! Exiting ...')
                    sys.exit()
        elif ((epoch > 1 and epoch % 12 == 0) and example - num_training_examples < 3) \
                or epoch == num_epochs - 1:
            # Sample some images for testing
            pred, aux = network.test(x)
            aux = (aux + 1) / 2.0
            print('\n▪▪▪▪▪▪▪▪▪▪▪▪▪▪ TESTING ▪▪▪▪▪▪▪▪▪▪▪▪▪▪')
            diagnostix(y, x, pred, aux)