

Python Network.train Method Code Examples

This article collects typical usage examples of the Network.Network.train method in Python. If you have been wondering what Network.train does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the Network.Network class it belongs to.


Nine code examples of the Network.train method are shown below, sorted by popularity by default.

Example 1: mnistFullyConnectedClassif

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mnistFullyConnectedClassif(train=False):
    model = {
        'inputLayer' : FlatInputLayer(inputsize=784),
        'hiddenLayers' : [
            FullyConnectedLayer(inputsize=784,outputsize=1000,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=1000,outputsize=200,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh)
            ],
        'outputLayer' : FullyConnectedLayer(inputsize=200,outputsize=10,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.softmax)
    }

    batch_size = 50
    net = Network(model, objective='classification', batch_size=batch_size)
    net.setupTraining("cross-entropy", "Adam")

    init = tf.initialize_all_variables()
    mnist = MNISTData(train_dir='MNIST_data', one_hot=True)

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/mnistFullyConnectedClassif.ckpt")
            print "Test accuracy %g"%net.evaluate(mnist.test['images'], mnist.test['labels'])
        else:
            for i in range(20000):
                batch_xs, batch_ys = mnist.next_batch(batch_size, set=mnist.train)
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    print "step %d, training cost %g"%(i,cost)
            
                net.train(batch_xs, batch_ys)

            print "Test accuracy %g"%net.evaluate(mnist.test['images'], mnist.test['labels'])
            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mnistFullyConnectedClassif.ckpt")

        plt.figure(1)
        plt.gray()
        plt.imshow(mnist.test['images'][0].reshape((28,28)), interpolation='nearest')
        plt.title(str(np.argmax(net.predict([mnist.test['images'][0]]))))
        plt.show()
Author: adfoucart, Project: deep-net-histology, Lines: 46, Source: mnistFullyConnectedClassif.py
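A note on this pattern: tf.initialize_all_variables() was deprecated in later TensorFlow releases in favor of tf.global_variables_initializer(). Below is a minimal sketch, not from the original project, of the same train-or-restore skeleton on TensorFlow 1.x; net, data, and ckpt_path stand in for the objects used in the example above.

# Minimal sketch of the train-or-restore skeleton on TensorFlow 1.x;
# net, data and ckpt_path are placeholders for the example's objects.
import tensorflow as tf

def train_or_restore(net, data, ckpt_path, train=False, steps=20000, batch_size=50):
    saver = tf.train.Saver()
    with tf.Session() as sess:   # installs itself as the default session
        sess.run(tf.global_variables_initializer())
        if not train:
            saver.restore(sess, ckpt_path)
        else:
            for i in range(steps):
                batch_xs, batch_ys = data.next_batch(batch_size)
                net.train(batch_xs, batch_ys)   # one optimization step
            saver.save(sess, ckpt_path)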

Example 2: distribute

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def distribute(rate, sigmoid, hidden, examples, variables, layers, rule, dropout, table):
    example = 0
    not_learned = ""
    tables = monotone_generator(variables)
    print "Learning", tables[table-1],
    learned = False
    tries = 0
    while not learned and tries < 200000:
        tries += 1
        model = Network(rate, sigmoid, hidden, examples, variables, layers, rule, dropout)
        learned = model.train(tables[table-1])
    if learned:
        print "Learned with {0} models".format(tries)
        model.save("hebb{0}.txt".format(table-1))
    else:
        print "Not Learned"
    return
Author: nceglia, Project: Hebbian, Lines: 19, Source: HebbianNetwork.py
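A hypothetical invocation of distribute(); the argument values below are illustrative only, mirroring the parameters documented for run() in Example 3. The table argument picks one truth table (1-indexed) from monotone_generator(variables).

# Hypothetical call -- argument values are illustrative only.
distribute(rate=0.1, sigmoid=1, hidden=4, examples=100,
           variables=3, layers=1, rule="hebbian", dropout=1.0, table=5)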

Example 3: run

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def run(rate, sigmoid, hidden, examples, variables, layers, rule, dropout):
    """
    Creates network and trains a model for each boolean function

    Keyword arguments:
    rate -- learning rate (float)
    sigmoid -- sigmoid function for weights if rule is basic hebbian (int)
    hidden -- number of hidden units, 0 removes hidden layer (int)
    examples -- number of random boolean examples to present (int)
    variables -- number of boolean variables (int)
    layers -- number of hidden layers, 1 to N (int)
    rule -- learning rule, "hebbian" or "oja" (str)
    dropout -- percentage of edge weights to update (float)

    Prints each function, whether it was learned, and a summary.
    """
    functions = []
    example = 0
    monotone_fxns = 0
    #tables = truth_tables(variables)
    tables = monotone_generator(variables)
    not_learned = ""
    for i in range(len(tables)):
        print "Learning", tables[i],
        example += 1
        learned = False
        tries = 0
        while not learned and tries < 200000:
            tries += 1
            model = Network(rate, sigmoid, hidden, examples, variables, layers, rule, dropout)
            learned = model.train(tables[i])
        if learned:
            print "Learned with {0} models".format(tries)
            functions.append(bit_repr(tables[i]))
            model.save("models/hebb{0}.txt".format(example))
            #model.test("models/hebb{0}.txt".format(example))
        else:
            not_learned = not_learned+str(i)+","
            print "Not Learned"
    return "Learned:", len(functions),"Not Learned", not_learned
Author: nceglia, Project: Hebbian, Lines: 40, Source: HebbianNetwork.py

Example 4: Network

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
from Network import Network, InputLayer, FullyConnectedLayer, SoftMaxLayer, sigmoid, log_likelihood
import data

import math
import numpy


mnist = data.load("mnist", flatvecs = True)
test_data = mnist["test"].values()
train_data = mnist["train"]

net = Network(
    layers = [InputLayer(784),
              FullyConnectedLayer(20, activation = sigmoid),
              SoftMaxLayer(10)],
    cost=log_likelihood)

for i in range(100):
    for m in range(100):
        net.train(*train_data.get_batch(30), learning_rate = 0.01 )
    print i, "Test Accuracy:", net.accuracy(*test_data)

Author: vyraun, Project: NN-Group-1-Code, Lines: 23, Source: ex04-NeuralNet.py
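The starred call net.train(*train_data.get_batch(30), learning_rate=0.01) unpacks the returned batch into positional arguments. Assuming get_batch returns an (inputs, targets) pair, as the usage suggests, it is equivalent to the explicit form:

# Equivalent explicit form of the unpacked call above (assuming
# get_batch returns an (inputs, targets) pair).
batch_xs, batch_ys = train_data.get_batch(30)
net.train(batch_xs, batch_ys, learning_rate=0.01)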

Example 5: mitos12ClassifierFromConvAutoEncoder3

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mitos12ClassifierFromConvAutoEncoder3(train=False, resumeTraining=False, iterations=20000):
    autoEncoderModel = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    classifierModel = {
        'inputLayer': ImageInputLayer(width=8,height=8,channels=192),
        'hiddenLayers': [
            ImageToVectorLayer(imagesize=(8,8,192)),
            FullyConnectedLayer(inputsize=8*8*192,outputsize=200,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=200,outputsize=100,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh)
            ],
        'outputLayer' : FullyConnectedLayer(inputsize=100,outputsize=2,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.softmax)
    }

    batch_size = 50
    autoEncoder = Network(autoEncoderModel, objective='reconstruction', batch_size=batch_size)

    clf = Network(classifierModel, objective='classification', batch_size=batch_size)
    clf.setupTraining("cross-entropy", "Adam", a=0.995)

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]])

    aesaver = tf.train.Saver(autoEncoder.getVariables())
    clfsaver = tf.train.Saver(clf.getVariables())
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        aesaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        if train==False or resumeTraining==True:
            clfsaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
        if train==True:
            for i in range(iterations):
                batch = mitos12.next_supervised_batch(batch_size)
                input_images = [b[0] for b in batch]
                
                batch_xs = autoEncoder.encode(input_images)
                batch_ys = [b[1] for b in batch]

                if i%1000==0:
                    cost = clf.cost.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    loss = clf.loss.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    l2loss = clf.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    # cost = clf.cost.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    # print "step %d, training cost %g"%(i,cost)
                    save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
            
                clf.train(batch_xs, batch_ys)

            save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")

        # Eval :
        Cmat = np.zeros((2,2))
        for i in range(50):
            batch = mitos12.next_supervised_batch(batch_size)
            input_images = [b[0] for b in batch]
            
            batch_xs = autoEncoder.encode(input_images)
            batch_ys = [b[1] for b in batch]

            pred = clf.predict(batch_xs)
            Cmat += C(pred,batch_ys)

        print Cmat

        im_, path, basename = mitos12.images[18]
        im = np.array(im_)
        stride = 15
        rangex = np.arange(0,im.shape[0]-64,stride)
        rangey = np.arange(0,im.shape[1]-64,stride)
        ts = [(t/len(rangey), t%len(rangey)) for t in range(len(rangex)*len(rangey))]
        chunks = [im[tx*stride:tx*stride+64,ty*stride:ty*stride+64,:] for tx,ty in ts]
        chunksPos = [(tx*stride,ty*stride) for tx,ty in ts]
        pMitosis = np.zeros((im.shape[0], im.shape[1], 3))

        print len(chunks)        
        for t in range(len(chunks)/50):
            batch = chunks[t*50:t*50+50]
            batch_xs = autoEncoder.encode(batch)
            is_mitosis = clf.predict(batch_xs)
            for i,p in enumerate(is_mitosis):
                cp = chunksPos[t*50+i]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 0] += p[0]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 1] += p[1]
                pMitosis[cp[0]:cp[0]+64, cp[1]:cp[1]+64, 2] += 1

        plt.figure()
        plt.gray()
        plt.imshow(pMitosis[:,:,0], interpolation=None)
#......... part of the code omitted here .........
Author: adfoucart, Project: deep-net-histology, Lines: 103, Source: mitos12ConvAutoEncoder3.py
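The helper C(pred, batch_ys) used in the evaluation loop above is not part of the excerpt. A plausible implementation, offered here as an assumption rather than the author's code, accumulates a 2x2 confusion matrix from softmax outputs and one-hot labels:

import numpy as np

# Assumed implementation of the C() helper from the excerpt:
# rows index the true class, columns the predicted class.
def C(pred, target):
    cmat = np.zeros((2, 2))
    for p, t in zip(pred, target):
        cmat[np.argmax(t), np.argmax(p)] += 1
    return cmat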

Example 6: mitos12ConvAutoEncoder3

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mitos12ConvAutoEncoder3(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam")

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]])

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    loss = net.loss.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    l2loss = net.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        xs = mitos12.next_batch(batch_size)

        #es = net.encode(xs)
        
        # for idx in range(len(es)):
        #     max_act = 0
        #     arg_max_act = 0
        #     es0 = es[idx]
        #     for t in np.arange(0,192):
        #         if( es0[:,:,t].sum() > max_act ):
        #             max_act = es0[:,:,t].sum()
        #             arg_max_act = t
            
        #     print arg_max_act
        # return
        # featt = np.zeros(es0.shape)
        # featt[:,:,arg_max_act] = es0[:,:,arg_max_act]
        # rs = net.decode([featt])
        # print es0[:,:,arg_max_act]
        # plt.figure()
        # plt.gray()
        # plt.imshow(es0[:,:,arg_max_act], interpolation='nearest')
        # plt.figure()
        # plt.imshow(rs[0]/rs[0].max(), interpolation='nearest')
        # plt.show()
        # return

        # rs = net.predict(xs)
        # rs[0][rs[0]>1.] = 1.
        
        # plt.figure(1)
        # plt.subplot(2,3,1)
        # plt.title('Original')
        # plt.imshow(xs[0], interpolation='nearest')
        # plt.axis('off')
        # plt.subplot(2,3,2)
        # plt.title('Reconstruction')
        # plt.imshow(rs[0], interpolation='nearest')
        # plt.axis('off')
        # plt.gray()
        # plt.subplot(2,3,4)
        # plt.title('Diff - R')
        # plt.imshow(np.abs(rs[0]-xs[0])[:,:,0], interpolation='nearest')
        # plt.axis('off')
        # plt.subplot(2,3,5)
        # plt.title('Diff - G')
        # plt.imshow(np.abs(rs[0]-xs[0])[:,:,1], interpolation='nearest')
        # plt.axis('off')
        # plt.subplot(2,3,6)
        # plt.title('Diff - B')
        # plt.imshow(np.abs(rs[0]-xs[0])[:,:,2], interpolation='nearest')
        # plt.axis('off')
        # plt.show()

        W_1 = net.layers[1].W.eval()
        W_1n = (W_1-W_1.min())/(W_1.max()-W_1.min())
        plt.figure()
        for i in range(W_1.shape[3]):
#......... part of the code omitted here .........
Author: adfoucart, Project: deep-net-histology, Lines: 103, Source: mitos12ConvAutoEncoder3.py
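The truncated loop above appears to tile the normalized first-layer filters W_1n for inspection, much as Example 8 below does for its 12 filters. A minimal sketch of such a filter grid, not the omitted original code:

import matplotlib.pyplot as plt

# Sketch of a filter-grid plot over normalized weights such as W_1n;
# the grid shape is arbitrary and the omitted original may differ.
def show_filters(W, rows=4, cols=3):
    plt.figure()
    for i in range(min(rows * cols, W.shape[3])):
        plt.subplot(rows, cols, i + 1)
        plt.axis('off')
        plt.imshow(W[:, :, :, i], interpolation='nearest')
    plt.show()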

Example 7: mitos12ClassifierFromConvAutoEncoder3

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mitos12ClassifierFromConvAutoEncoder3(train=False, resumeTraining=False, iterations=20000):
    autoEncoderModel = {
        'inputLayer' : ImageInputLayer(width=64,height=64,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=5, channels=3, features=12, stride=2, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[64,64,3]),
            ConvolutionLayer(kernelsize=5, channels=12, features=48, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=48, features=192, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,48])
        ]
    }

    classifierModel = {
        'inputLayer': ImageInputLayer(width=8,height=8,channels=192),
        'hiddenLayers': [
            ImageToVectorLayer(imagesize=(8,8,192)),
            FullyConnectedLayer(inputsize=8*8*192,outputsize=200,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=200,outputsize=100,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh)
            ],
        'outputLayer' : FullyConnectedLayer(inputsize=100,outputsize=2,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.softmax)
    }

    batch_size = 50
    autoEncoder = Network(autoEncoderModel, objective='reconstruction', batch_size=batch_size)

    clf = Network(classifierModel, objective='classification', batch_size=batch_size)
    clf.setupTraining("cross-entropy", "Adam")

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]])

    aesaver = tf.train.Saver(autoEncoder.getVariables())
    clfsaver = tf.train.Saver(clf.getVariables())
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        aesaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ConvAutoEncoder3.ckpt")

        if train==False or resumeTraining==True:
            clfsaver.restore(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
        if train==True:
            for i in range(iterations):
                batch = mitos12.next_supervised_batch(batch_size)
                input_images = [b[0] for b in batch]
                
                batch_xs = autoEncoder.encode(input_images)
                batch_ys = [b[1] for b in batch]

                if i%1000==0:
                    cost = clf.cost.eval(feed_dict={clf.x: batch_xs, clf.target: batch_ys})
                    print "step %d, training cost %g"%(i,cost)
                    save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")
            
                clf.train(batch_xs, batch_ys)

            save_path = clfsaver.save(sess, "/home/adrien/workspace/DeepNet/mitos12ClfFromConvAutoEncoder3.ckpt")

        # Eval :
        Cmat = np.zeros((2,2))
        for i in range(50):
            batch = mitos12.next_supervised_batch(batch_size)
            input_images = [b[0] for b in batch]
            
            batch_xs = autoEncoder.encode(input_images)
            batch_ys = [b[1] for b in batch]

            pred = clf.predict(batch_xs)
            Cmat += C(pred,batch_ys)

        print Cmat
Author: niutyut, Project: deep-net-histology, Lines: 73, Source: mitos12ConvAutoEncoder3.py
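The accumulated 2x2 matrix Cmat can be reduced to summary metrics; a small sketch, assuming rows are true classes and columns predictions as in the hypothetical C() shown after Example 5:

import numpy as np

# Accuracy and per-class recall from a 2x2 confusion matrix
# (rows = true class, columns = predicted class -- an assumption).
def summarize(cmat):
    accuracy = np.trace(cmat) / cmat.sum()
    recall = np.diag(cmat) / cmat.sum(axis=1)
    return accuracy, recall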

Example 8: mitos12ConvAutoEncoder4

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mitos12ConvAutoEncoder4(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : ImageInputLayer(width=128,height=128,channels=3),
        'hiddenLayers' : [
            ConvolutionLayer(kernelsize=15, channels=3, features=12, stride=4, weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[128,128,3]),
            ConvolutionLayer(kernelsize=7, channels=12, features=40, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[32,32,12]),
            ConvolutionLayer(kernelsize=5, channels=40, features=80, stride=2,weightInitFunc=WeightInit.truncatedNormal, biasInitFunc=WeightInit.positive, activationFunc=tf.nn.relu, inputshape=[16,16,40])
        ]
    }

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam", a=0.998)
    autoEncoderName = "mitos12ConvAutoEncoder4WithDistortion"

    init = tf.initialize_all_variables()
    basedir = "/media/sf_E_DRIVE/Dropbox/ULB/Doctorat/ImageSet/MITOS12/"
    mitos12 = MITOS12Data(train_dirs=[os.path.join(basedir,d) for d in ["A00_v2", "A01_v2", "A02_v2", "A03_v2", "A04_v2"]],chunksize=(128,128))

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            saver.restore(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size, noise=True, nc=0.02)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    loss = net.loss.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    l2loss = net.l2loss.eval()
                    print "step %d, training cost %g, loss %g, l2loss %g"%(i,cost,loss,l2loss)
                    save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)
                    with open("/home/adrien/workspace/DeepNet/%s_results.txt"%autoEncoderName, "a") as resFile:
                        resFile.write("step %d, training cost %g, loss %g, l2loss %g\n"%(i,cost,loss,l2loss))
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess, "/home/adrien/workspace/DeepNet/%s.ckpt"%autoEncoderName)

        xs = mitos12.next_batch(batch_size)
        cost = net.cost.eval(feed_dict={net.x: xs, net.target: xs})
        loss = net.loss.eval(feed_dict={net.x: xs, net.target: xs})
        l2loss = net.l2loss.eval()
        print "test cost %g, loss %g, l2loss %g"%(cost,loss,l2loss)

        es = net.encode(xs)

        # for idx in range(40):
        W = net.layers[1].W.eval()
        W = (W-W.min())/(W.max()-W.min())
        plt.figure()
        for idx in range(12):
            plt.subplot(4,3,idx+1)
            plt.axis('off')
            plt.imshow(W[:,:,:,idx], interpolation='nearest', vmin=-1, vmax=1)
        # plt.show()
        # return
        
        # for idx in range(len(es)):
        #     max_act = 0
        #     arg_max_act = 0
        #     es0 = es[idx]
        #     for t in np.arange(0,160):
        #         if( es0[:,:,t].sum() > max_act ):
        #             max_act = es0[:,:,t].sum()
        #             arg_max_act = t
            
        #     print arg_max_act
        # return
        # featt = np.zeros(es0.shape)
        # featt[:,:,arg_max_act] = es0[:,:,arg_max_act]
        # rs = net.decode([featt])
        # print es0[:,:,arg_max_act]
        # plt.figure()
        # plt.gray()
        # plt.imshow(es0[:,:,arg_max_act], interpolation='nearest')
        # plt.figure()
        # plt.imshow(rs[0]/rs[0].max(), interpolation='nearest')
        # plt.show()
        # return

        rs = net.predict(xs)
        rs[0][rs[0]>1.] = 1.
        
        plt.figure()
        plt.subplot(2,3,1)
        plt.title('Original')
        plt.imshow(xs[0], interpolation='nearest')
        plt.axis('off')
        plt.subplot(2,3,2)
        plt.title('Reconstruction')
        plt.imshow(rs[0], interpolation='nearest')
        plt.axis('off')
        plt.gray()
#......... part of the code omitted here .........
Author: adfoucart, Project: deep-net-histology, Lines: 103, Source: mitos12ConvAutoEncoder4.py
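Note that the training loop above sets batch_ys = batch_xs, so the network reconstructs the noisy batch itself. A classical denoising autoencoder would instead corrupt the input and target the clean patches; a sketch of that variant, assuming next_batch can also return clean patches as in Example 6:

import numpy as np

# Denoising variant (an assumption, not the original training setup):
# corrupt clean patches with Gaussian noise, reconstruct the clean ones.
clean = np.asarray(mitos12.next_batch(batch_size))
noisy = clean + np.random.normal(0.0, 0.02, clean.shape)
net.train(noisy, clean)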

Example 9: mitos12FullyConnectedAE

# Required import: from Network import Network [as alias]
# Alternatively: from Network.Network import train [as alias]
def mitos12FullyConnectedAE(train=False, resumeTraining=False, iterations=20000):
    model = {
        'inputLayer' : FlatInputLayer(inputsize=64*64*3),
        'hiddenLayers' : [
            FullyConnectedLayer(inputsize=64*64*3,outputsize=64*64,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=64*64,outputsize=32*32,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
            FullyConnectedLayer(inputsize=32*32,outputsize=16*16,weightInitFunc=WeightInit.truncatedNormal,biasInitFunc=WeightInit.positive,activationFunc=tf.nn.tanh),
        ]
    }
    savedModelPath = "/home/adrien/workspace/DeepNet/mitos12FullyConnectedAE.ckpt"

    batch_size = 50
    net = Network(model, objective='reconstruction', batch_size=batch_size)
    net.setupTraining("squared-diff", "Adam", True)

    init = tf.initialize_all_variables()
    mitos12 = MITOS12Data(train_dirs=["/media/sf_VirtualDropbox"])

    saver = tf.train.Saver()
    sess = tf.Session()

    with sess.as_default():
        assert tf.get_default_session() is sess
        sess.run(init)
        if train==False or resumeTraining==True:
            print "Restoring from "+savedModelPath
            saver.restore(sess, savedModelPath)
        if train==True:
            for i in range(iterations):
                batch_xs = mitos12.next_batch(batch_size, flat=True)
                batch_ys = batch_xs
                
                if i%1000 == 0:
                    cost = net.cost.eval(feed_dict={net.x: batch_xs, net.target: batch_ys})
                    print "step %d, training cost %g"%(i,cost)
                    save_path = saver.save(sess,savedModelPath)
            
                net.train(batch_xs, batch_ys)

            save_path = saver.save(sess,savedModelPath)


        im = np.array(mitos12.getRandomImage())
        print im.shape
        cols = im.shape[0]/64
        rows = im.shape[1]/64
        xs_1 = [im[i*64:i*64+64,j*64:j*64+64,:].flatten() for i in xrange(cols) for j in xrange(rows)]
        xs_2 = [im[32+i*64:32+i*64+64,32+j*64:32+j*64+64,:].flatten() for i in xrange(cols) for j in xrange(rows)]
        rs_1 = net.predict(xs_1)
        rs_2 = net.predict(xs_2)
        rim = np.zeros(im.shape)
        im_k = np.zeros((im.shape[0],im.shape[1]))
        for k,r in enumerate(rs_1):
            i = (k/rows)*64   # k iterates i-major over cols x rows tiles
            j = (k%rows)*64
            rim[i:i+64,j:j+64,:] += r.reshape((64,64,3))
            im_k[i:i+64,j:j+64] += 1
        for k,r in enumerate(rs_2):
            i = (k/rows)*64
            j = (k%rows)*64
            rim[32+i:32+i+64,32+j:32+j+64,:] += r.reshape((64,64,3))
            im_k[32+i:32+i+64,32+j:32+j+64] += 1

        rim[im_k>0,0] /= im_k[im_k>0]
        rim[im_k>0,1] /= im_k[im_k>0]
        rim[im_k>0,2] /= im_k[im_k>0]
        rim[rim>1.] = 1.
        rim[rim<0.] = 0.
        plt.figure(0)
        plt.subplot(1,2,1)
        plt.imshow(im, interpolation='nearest')
        plt.axis('off')
        plt.subplot(1,2,2)
        plt.imshow(rim, interpolation='nearest')
        plt.axis('off')
        plt.show()
Author: adfoucart, Project: deep-net-histology, Lines: 78, Source: mitos12FullyConnectedAE.py
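The reconstruction above blends two half-offset tilings by accumulating per-pixel sums and hit counts, then normalizing. The same idea in a compact sketch, not the original code:

import numpy as np

# Overlap-averaging sketch: accumulate tile reconstructions and a
# per-pixel hit count, then divide and clip to [0, 1].
def blend(tiles, positions, shape, size=64):
    acc = np.zeros(shape)
    cnt = np.zeros(shape[:2])
    for tile, (i, j) in zip(tiles, positions):
        acc[i:i+size, j:j+size, :] += tile.reshape((size, size, 3))
        cnt[i:i+size, j:j+size] += 1
    mask = cnt > 0
    acc[mask] /= cnt[mask][:, None]
    return np.clip(acc, 0.0, 1.0)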


Note: The Network.Network.train method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult each project's license before using or distributing the code; do not reproduce without permission.