

Python NeuralNet.train Method Code Examples

This article collects typical usage examples of the Python method neuralnet.NeuralNet.train. If you are wondering what NeuralNet.train does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, neuralnet.NeuralNet.


Twelve code examples of NeuralNet.train are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
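
Before diving into the examples, here is a minimal sketch of the calling pattern that most of the snippets below share (the cwi2016-style API of Examples 1, 2, 5, 6 and 9). The names conf, X_tr, y_tr, X_te and y_te are placeholders, and the exact NeuralNet constructor signature varies between the projects shown here, so treat this as an assumption rather than a canonical API:

# Minimal usage sketch (assumption; placeholder variables, project-specific module)
from neuralnet import NeuralNet as NN

# conf is assumed to be a NeuralNetConfig-style object holding layer sizes and iteration count,
# e.g. conf = NeuralNetConfig(X=X, y=y, layers=[20, 10], iterations=50, verbose=False)
nn = NN(conf)
nn.train(X_tr, y_tr, conf.iterations)    # fit on training features and labels
best_t, score = nn.test(X_te, y_te)      # best threshold plus (F1, Recall, Accuracy, Precision)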

Example 1: getBestThresholds

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def getBestThresholds(X, y_current_tr, y_current_te, conf):
    assert len(X) == len(y_current_tr) == len(y_current_te), 'Number of features ({}), annotator1 labels ({}) and annotator2 labels ({}) is not equal!'.format(len(X), len(y_current_tr), len(y_current_te))
    #scores = {"F1":[], "Recall":[], "Accuracy":[], "Precision":[]}
    scores = []
    thresholds=[]


    print('Finding best thresholds...')
    fold=1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold+=1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        #get prediction
        best_t, score = nn.test(X_te, y_te)
        thresholds.append(best_t)

        scores.append(score)
    
    #scores = cross_validation.cross_val_score(maxent, features, labels, cv=10)
    print("\n--")
    
    return np.array(thresholds), np.array(scores)
Developer: jbingel, Project: cwi2016, Lines: 32, Source: nn-predict.py
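
A hypothetical call site for this helper, for orientation only (X, y_annotator1, y_annotator2 and conf are placeholder names, not taken from the source):

# Hypothetical invocation; assumes a feature matrix X, one label array per annotator,
# and a NeuralNetConfig-style conf as built in Example 5 below.
thresholds, scores = getBestThresholds(X, y_annotator1, y_annotator2, conf)
print('Median threshold: {}'.format(np.median(thresholds)))
print('Mean F1 across folds: {}'.format(np.mean([s[0] for s in scores])))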

Example 2: cvWithThreshold

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def cvWithThreshold(conf, X, y_current_tr, y_current_te, threshold):
    scores = []
    fold=1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold+=1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        _, score = nn.test(X_te, y_te)

        scores.append(score)
    
    print("\n--")
    f1  = np.mean([s[0] for s in scores])
    r   = np.mean([s[1] for s in scores])
    acc = np.mean([s[2] for s in scores])
    p   = np.mean([s[3] for s in scores])

    return f1, r, acc, p
Developer: jbingel, Project: cwi2016, Lines: 27, Source: nn-predict.py

Example 3: cross_validation_2

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def cross_validation_2(folds, epochs, learn_rate, n):
    averages = []
    timings = []

    
    for i in xrange(10):
        averages.append([])
        timings.append([])
        start_t = time.time()
        for j in xrange(10):
            test_vals = []
            for x in xrange(len(folds.keys())):
                test_index = x%n
                test_set = folds[test_index]

                train_set = []
                for k,v in folds.items():
                    if k != test_index: train_set += v
        
                nn = NeuralNet(9, [j+1,i+1], 1, learn_rate)
                nn.train(train_set, None, epochs)
                test_vals.append(nn.test(test_set, None, False))

            print "average: ", sum(test_vals) / len(test_vals)
            print ""


            timings[i].append(time.time()-start_t)
            averages[i].append(sum(test_vals)/len(test_vals))        

            print timings[i]
            print averages[i]
    
    return averages, timings
Developer: ACAHNN, Project: ann_project, Lines: 36, Source: ann_data.py
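
In these ann_project examples, folds appears to be a dict mapping a fold index to a list of samples (consistent with test_index = x % n above). A hypothetical helper to build such a structure, not taken from the source, might look like this:

# Hypothetical helper (assumption, not from ann_project): split a flat dataset into n folds,
# keyed by fold index, matching the dict-of-lists shape that cross_validation_2 expects.
def make_folds(dataset, n):
    folds = dict((i, []) for i in range(n))
    for idx, sample in enumerate(dataset):
        folds[idx % n].append(sample)
    return folds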

Example 4: main

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def main():
    """Testing file to show neural network can learn linearly separable
    data."""
    data = np.genfromtxt("training.csv", delimiter=',').tolist()

    shuffle(data)

    # NOTE: We have to wrap every target value into a tuple, for the
    # purpose of being able to classify n-tuples later
    targets = list((sample[-1] if sample[-1] == 1 else 0,) for sample in data)
    features = list(sample[:-1] for sample in data)
    print "Starting to train..."
    start = time()

    num_features = len(features[0])  # target value was already stripped from each sample above
    nn = NeuralNet(num_features, max_epochs=2, default_bias="random",
                   learn_rate=.85, scale=0.1, verbose=True)
    nn.train(features, targets)
    print "Done with training. Took {0} seconds to train." \
            .format(round(time() - start, 2))

    print "Beginning with scoring..."
    start = time()
    scored_data = np.genfromtxt("data_features.csv", delimiter=",")
    correct = np.genfromtxt("data_targets.csv", delimiter=",")
    prediction = nn.score_data(scored_data)
    print "Done with scoring. Took {0} seconds to score the dataset" \
            .format(round(time() - start, 2))
    num_incorrect = sum(1 for i in xrange(len(correct)) \
                        if correct[i] != prediction[i])
    print "Total number incorrect: {0}".format(num_incorrect)
Developer: hlin117, Project: FF-Neural-Net, Lines: 33, Source: main.py

Example 5: main

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def main():
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    data = scriptdir+'/../data/cwi_training/cwi_training.txt.lbl.conll'
    testdata = scriptdir+'/../data/cwi_testing/cwi_testing.gold.txt.lbl.conll'
    pickled_data = scriptdir+'/../data.pickle'
    parser = argparse.ArgumentParser()
    parser.add_argument('--threshold', '-t', type=float, help='Threshold for predicting 0/1. If not specified, the optimal threshold will first be computed as the median of all CV splits. May take a while.')
    parser.add_argument('--iterations', '-i', type=int, default=50, help='Training iterations.')
    parser.add_argument('--hidden-layers', '-l', dest='layers', required=True, type=int, nargs='+', help='List of layer sizes')
    parser.add_argument('--cv-splits', '-c', dest='splits', type=int, help='No. of crossvalidation splits. If not specified, no CV will be performed.')
    parser.add_argument('--data', '-d', default=data, help='Features and labels')
    parser.add_argument('--testdata', '-y', default=testdata,  help='Test data (not needed for crossval).')
    parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='Print average loss at every training iteration.')
    parser.add_argument('--output', '-o', help="Output file")
    parser.add_argument('--features', '-f', dest='features', default=[], type=str, nargs='+', help='List of feature types')

    args = parser.parse_args()
    # X, y = load_pickled(args.data)
    combined_data = 'X_y_all.txt'
    cutoff = combine_data(args.data, args.testdata, combined_data)
    X, y, _ = feats_and_classify.collect_features(combined_data, True, args.features)
    X_tr = X[:cutoff]
    y_tr = y[:cutoff]
    X_te = X[cutoff:]
    y_te = y[cutoff:]
    conf = NeuralNetConfig(X=X, y=y, layers=args.layers, iterations=args.iterations, verbose=args.verbose)

    if args.splits:
        if args.threshold:
            crossval(X_tr,y_tr,args.splits, conf, t=args.threshold)
        else:
            # compute optimal threshold for each CV split
            print '### Computing optimal threshold... '
            ts = crossval(X_tr,y_tr,args.splits, conf)
            avg = np.average(ts)
            med = np.median(ts)
            print '\nThresholds for crossval splits:', ts
            print 'Mean threshold', avg
            print 'Median threshold', med
            print 'Threshold st.dev.', np.std(ts)
            # Run CV with fixed avg/median threshold
            print '\n\n### Running with avg. threshold... '
            crossval(X_tr,y_tr,args.splits, conf, t=avg)
            print '\n\n### Running with med. threshold... '
            crossval(X_tr,y_tr,args.splits, conf, t=med)
    else:
        
        nn = NN(conf)
        nn.train(X_tr,y_tr,args.iterations)
        if args.testdata:
            # X_test, y_test = load_pickled(args.testdata)
            pred = nn.get_output(X_te)
            if args.output:
                with open(args.output, 'w') as of:
                    for p in pred:
                        of.write('%f\n'%p)
            t, res = nn.test(X_te,y_te,args.threshold)
            resout = "G: %f, R: %f, A: %f, P: %f\n"%res
            sys.stderr.write('%s %f\n'%(' '.join(args.features), t))
            sys.stderr.write(resout)
Developer: jbingel, Project: cwi2016, Lines: 62, Source: nn-classify.py

Example 6: crossval

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def crossval(X,y,splits, conf, t=None):
    results = []
    ts = []
    m = len(X)
    cs = [(i*m/splits, (i+1)*len(X)/splits) for i in range(splits)]
    for s,e in cs:
        X_tr = [X[i] for i in range(m) if i < s or i >= e]
        X_te = [X[i] for i in range(m) if i >= s and i < e]
        y_tr = [y[i] for i in range(m) if i < s or i >= e]
        y_te = [y[i] for i in range(m) if i >= s and i < e]

        # train and evaluate on every split
        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        best_t, res = nn.test(X_te, y_te, t)
        ts.append(best_t)
        results.append(res)

    f1s = [res[0] for res in results]
    rec = [res[1] for res in results]
    acc = [res[2] for res in results]
    pre = [res[3] for res in results]

    print '\nF1  | {:.3f}   (std {:.3f})'.format(np.average(f1s), np.std(f1s))
    print 'Rec | {:.3f}   (std {:.3f})'.format(np.average(rec), np.std(rec))
    print 'Acc | {:.3f}   (std {:.3f})'.format(np.average(acc), np.std(acc))
    print 'Pre | {:.3f}   (std {:.3f})'.format(np.average(pre), np.std(pre))

    return ts 
Developer: jbingel, Project: cwi2016, Lines: 30, Source: nn-classify.py

Example 7: cross_validation_iterative

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def cross_validation_iterative(folds, epochs, learn_rate, n, num_points):
    
    averages = []
    test_vals = []
    fold_results = {}
    timings = [0]*epochs

    for x in xrange(len(folds.keys())):
        fold_results[x] = {"train": [], "test": []}
        
        test_index = x%n
        test_set = folds[test_index]

        train_set = []
        for k,v in folds.items():
            if k != test_index: train_set += v
        
        nn = NeuralNet(9, [13,14], 1, learn_rate)
        
        start_t = time.time()
        for j in xrange(epochs):
            nn.train(train_set, None, 1)
        
            # get train and test accuracy
            train_val = nn.test(train_set, None, False)
            test_val = nn.test(test_set, None, False)
            
            # store the accuracy results
            fold_results[x]["train"].append(train_val)
            fold_results[x]["test"].append(test_val)
            timings[j] += time.time()-start_t
        print "fold complete"

    
    # compute the average for each epoch
    train_a, test_a = [], []
    for e in xrange(epochs):
        num_train, num_test = 0, 0
        for i in xrange(len(folds.keys())):
            num_train += fold_results[i]["train"][e]
            num_test += fold_results[i]["test"][e]
        train_a.append((float(num_train)/(num_points*(n-1)))*100)
        test_a.append((float(num_test)/num_points)*100)
    
    for e in xrange(epochs):
        timings[e] = float(timings[e])/len(folds.keys())
    
    print train_a, test_a, timings
    return train_a, test_a, timings
Developer: ACAHNN, Project: ann_project, Lines: 51, Source: ann_data.py

Example 8: create_roc_data

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def create_roc_data(data):
    
    epochs = 60
    nn = NeuralNet(9, [13,14], 1, .1)
    nn.train(data, None, epochs)
    ret = nn.test(data, None, False)

    results = []
    for row in ret:
        results.append((row[0][0][0],row[1][0][0],row[2][0][0]))

    print results[0]

    num_pos = len(filter(lambda x: x[1] == 1, results))
    num_neg = len(results)-num_pos

    results.sort(key=lambda x: x[-1])
    results.reverse()

    tp = 0
    fp = 0
    last_tp = 0

    roc_set = [[x[-2],x[-1]] for x in results]
    fpr_set = []
    tpr_set = []

    for i in range(1,len(roc_set)):
        if roc_set[i][1] != roc_set[i-1][1] and roc_set[i][0] != 1 and tp > last_tp:
            fpr = fp / float(num_neg)
            tpr = tp / float(num_pos)
            
            fpr_set.append(fpr)
            tpr_set.append(tpr)

            last_tp = tp
        if roc_set[i][0] == 1:
            tp += 1
        else:
            fp += 1

    fpr = fp / float(num_neg)
    tpr = tp / float(num_pos)

    fpr_set.append(fpr)
    tpr_set.append(tpr)

    return fpr_set, tpr_set
Developer: ACAHNN, Project: ann_project, Lines: 50, Source: ann_data.py

Example 9: main

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
def main():
    args = get_args()
    # f1_matrix holds for every training annotator: the list of tuples of 
    # avg/med f1_row based on avg/med threshold
    f1_matrix = []
    # holds for every training annotator: the list of tuples of avg/med threshold
    t_matrix = []
    current_label_list = []
    
    f1_final = [] # holds 4-tuples of avgs over (f1_avg_avg, f1_avg_med, f1_med_avg, f1_med_med) f.e. tr 
    t_final  = [] # holds 4-tuples of (t_avg_avg, t_avg_med, t_med_avg, t_med_med) f.e. tr

    #X_tr, _, v = feats_and_classify_py2.collect_features(args.parsed_file)
    with open('X_train.pickle', 'rb') as pf:
        X_tr = pickle.load(pf)
    with open('X_test.pickle', 'rb') as pf:
        X_te = pickle.load(pf)
    y_tr = feats_and_classify_py2.collect_labels_positive_threshold(args.all_annotations_file, 1)

    #X_out, _, _ = feats_and_classify_py2.collect_features(args.predictfile)
    # filter for targets
    #X_out = [x for x in X_out if not x.label == '?']

    conf = NeuralNetConfig(X=X_tr, y=y_tr, layers=args.layers, iterations=args.iterations, verbose=args.verbose)
    
    nn = NN(conf)
    nn.train(X_tr, y_tr)
    if args.threshold:
        preds = nn.predict_for_threshold(X_te, args.threshold)
    else:
        preds = nn.get_output(X_te) 
    with open(args.output, 'w') as outfile:
        for p in preds:
            #print(p)
            outfile.write(str(p))
            outfile.write('\n')
    sys.exit(0)
Developer: jbingel, Project: cwi2016, Lines: 39, Source: nn-predict.py

Example 10: NeuralNet

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]

network = NeuralNet(num_dims, num_classes, hiddenLyr, hiddenLyrArgs, in_log_scale)

print("Training ▪▪▪")
for epoch in range(num_epochs):
    print('Epoch : ', epoch)
    for example in range(num_examples):
        x = data_x[example]
        y = data_y[example]

        if example < num_training_examples:
            if in_log_scale and len(y) < 2:
                continue

            cst, pred, aux = network.train(x, y)
            if (epoch % 12 == 0 and example < 3) or np.isinf(cst):
                print('\n▪▪▪▪▪▪▪▪▪▪▪▪▪▪ COST = {}  ▪▪▪▪▪▪▪▪▪▪▪▪▪▪ '.format(np.round(cst, 3)))
                diagnostix(y, x, pred, aux > 1e-20, 'Forward probabilities:')
            if np.isinf(cst):
                print('Cost Blew Up! Exiting ...')
                sys.exit()

        elif ((epoch >1 and epoch % 12 == 0) and example - num_training_examples < 3) \
                or epoch == num_epochs - 1:
            # Sample some images for testing
            pred, aux = network.test(x)
            aux = (aux + 1) / 2.0
            print('\n▪▪▪▪▪▪▪▪▪▪▪▪▪▪ TESTING ▪▪▪▪▪▪▪▪▪▪▪▪▪▪')
            diagnostix(y, x, pred, aux)
Developer: Neuroschemata, Project: Toy-RNN, Lines: 31, Source: train.py

Example 11: extract_hist_features

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
from neuralnet import NeuralNet
from extractfeatures import *
import cv2

if __name__ == "__main__":
    red = cv2.imread('/home/hoshiro/Pictures/test-img/red-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)
    yellow = cv2.imread('/home/hoshiro/Pictures/test-img/yellow-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)
    green = cv2.imread('/home/hoshiro/Pictures/test-img/green-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)

    features_red = extract_hist_features(red)
    features_yellow = extract_hist_features(yellow)
    features_green = extract_hist_features(green)

    neural_net = NeuralNet()
    neural_net.build(len(features_red), len(features_red) / 2, 1)
    neural_net.create_data_set()
    neural_net.add_list_of_data([features_red], 1)
    neural_net.add_list_of_data([features_yellow], 2)
    neural_net.add_list_of_data([features_green], 3)
    neural_net.train()

    print neural_net.apply_over_data(features_yellow)
Developer: dtbinh, Project: mo416-final-project, Lines: 24, Source: detect_example.py

Example 12: import

# Required import: from neuralnet import NeuralNet [as alias]
# Or: from neuralnet.NeuralNet import train [as alias]
from data_reader import DataReader
from neuralnet import (NeuralNet, RELU_FUN, SOFTMAX_FUN, SIGMOID_FUN)
import numpy as np

training = DataReader.GetImages('training-9k.txt', -1)
test = DataReader.GetImages('test-1k.txt', -1)

net = NeuralNet([196, 40, 10], [0], [2],
                [(None, None), ([0], RELU_FUN), ([1], SOFTMAX_FUN)], learning_rate=0.00001)
def mk_arr(n):
    ls = [0.0] * 10
    ls[n] = 1.0
    return ls

net.train([np.array(mk_arr(img.label)) for img in training],
          [[np.array(sum(img.pixels, []))] for img in training])
Developer: fding, Project: evilpoker, Lines: 18, Source: test_neuralnet.py


Note: The neuralnet.NeuralNet.train examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not republish without permission.