

Python chainer.FunctionSet Class Code Examples

This article collects typical usage examples of chainer.FunctionSet in Python. If you are wondering what the FunctionSet class does, how to use it, or what real-world FunctionSet code looks like, the selected class examples below should help.


Fifteen code examples of the FunctionSet class are shown below, sorted by popularity by default.
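
To get a feel for the class before diving into the examples, here is a minimal sketch of the typical FunctionSet workflow, assuming the legacy Chainer v1 API used throughout the examples below; the layer names (l1, l2) and sizes are illustrative only:

import numpy as np
import chainer.functions as F
from chainer import FunctionSet, Variable, optimizers

# A FunctionSet is a named collection of parameterized layers.
model = FunctionSet(
    l1=F.Linear(784, 100),   # 784 inputs -> 100 hidden units
    l2=F.Linear(100, 10),    # 100 hidden units -> 10 classes
)

# Bind an optimizer to every parameter collected from the FunctionSet.
optimizer = optimizers.SGD()
optimizer.setup(model.collect_parameters())

# One training step on a dummy batch.
x = Variable(np.zeros((1, 784), dtype=np.float32))
t = Variable(np.zeros((1,), dtype=np.int32))
optimizer.zero_grads()
y = model.l2(F.relu(model.l1(x)))
loss = F.softmax_cross_entropy(y, t)
loss.backward()
optimizer.update()

model.to_gpu() / model.to_cpu() move all parameters between devices, and the whole FunctionSet can be pickled, as several of the examples below demonstrate.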

Example 1: main

def main():
    if P.use_mean_var:
        conv6_output = 126
    else:
        conv6_output = 128

    if P.model_name is None:
        model = FunctionSet(
            conv1 = F.Convolution2D( 1, 128, 3, stride=1),
            conv2 = F.Convolution2D(128, 128, 3, stride=1),
            conv3 = F.Convolution2D(128, 128, 3, stride=1),
            conv4 = F.Convolution2D(128, 128, 3, stride=1),
            conv5 = F.Convolution2D(128, 128, 3, stride=1),
            conv6 = F.Convolution2D(128, conv6_output, 3, stride=1),
            conv7 = F.Convolution2D(128, 128, 1, stride=1),
            conv8 = F.Convolution2D(128, 1, 1, stride=1)
            )
        if P.gpu >= 0:
            cuda.init(P.gpu)
            model.to_gpu()
    else:
        if P.gpu >= 0:
            cuda.init(P.gpu)
        model = pickle.load(open(os.path.join(P.model_dir, P.model_name), 'rb'))

    optimizer = optimizers.MomentumSGD(lr=P.lr, momentum=P.momentum)
    optimizer.setup(model.collect_parameters())

    train(model, optimizer)
    return
Author: ymgcmstk, Project: denoising_dirty_documents, Lines: 30, Source: train_model.py

Example 2: TestFunctionSet

class TestFunctionSet(TestCase):
    def setUp(self):
        self.fs = FunctionSet(
            a = Linear(3, 2),
            b = Linear(3, 2)
        )

    def test_get_sorted_funcs(self):
        self.assertItemsEqual([k for (k, v) in self.fs._get_sorted_funcs()], ('a', 'b'))

    def check_equal_fs(self, fs1, fs2):
        self.assertTrue((fs1.a.W == fs2.a.W).all())
        self.assertTrue((fs1.a.b == fs2.a.b).all())
        self.assertTrue((fs1.b.W == fs2.b.W).all())
        self.assertTrue((fs1.b.b == fs2.b.b).all())

    def test_pickle_cpu(self):
        s   = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)
        self.check_equal_fs(self.fs, fs2)

    @attr.gpu
    def test_pickle_gpu(self):
        self.fs.to_gpu()
        s   = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)

        self.fs.to_cpu()
        fs2.to_cpu()
        self.check_equal_fs(self.fs, fs2)
Author: Hiroshi123, Project: chainer, Lines: 30, Source: test_function_set.py

Example 3: CNN3_Model

class CNN3_Model(ModelBase):
    u"""see: http://aidiary.hatenablog.com/entry/20151007/1444223445"""
    def __init__(self, input_size=32):
        super(CNN3_Model, self).__init__()
        # F.Convolution2D(in_channel, out_channel, filter_size)
        self.model = FunctionSet(  # 1*32*32 -(conv)-> 20*28*28 -(pool)-> 20*14*14
                                 conv1=F.Convolution2D(1, 20, 5),
                                   # 20*14*14 -(conv)-> 50*10*10 -(pool)-> 50*5*5=1250
                                 conv2=F.Convolution2D(20, 50, 5),
                                 l1=F.Linear(1250, 300),
                                 l2=F.Linear(300, 2))

    def forward(self, x_data, y_data, train=True):
        u"""return loss, accuracy"""
        x, t = Variable(x_data), Variable(y_data)
        h1 = F.max_pooling_2d(F.relu(self.model.conv1(x)), 2)
        h2 = F.max_pooling_2d(F.relu(self.model.conv2(h1)), 2)
        h3 = F.dropout(F.relu(self.model.l1(h2)), train=train)
        y  = self.model.l2(h3)
        # Multi-class classification, so use softmax cross-entropy as the
        # loss function to compute the error. At minimum, "loss" is required.
        return {
                "loss": F.softmax_cross_entropy(y, t),
                "accuracy": F.accuracy(y, t)
                }
Author: ykicisk, Project: DeepLearningTrial, Lines: 25, Source: ChainerPredictor.py

Example 4: main

def main(log_file, h_sizes, improve_loss_min=0.001):
    x_train, y_train, x_test, y_test = generate_cases(log_file)

    in_size = LINE_MAX_CHAR
    out_size = 2
    layers = [in_size] + h_sizes + [out_size]
    model = FunctionSet()
    for li in range(1, len(layers)):
        setattr(model, "l%d" % li, F.Linear(layers[li-1], layers[li]))

    optimizer = optimizers.SGD()
    optimizer.setup(model.collect_parameters())
    last_loss = None
    for epoch in range(3000000):
        optimizer.zero_grads()
        loss, accuracy = forward(model, x_train, y_train)
        loss.backward()

        if epoch % 100 == 0:
            print "epoch: %s, loss: %s, accuracy: %s" % (epoch, loss.data, accuracy.data)
            if last_loss is not None and last_loss - improve_loss_min < loss.data:
                print "Finish Training"
                break
            last_loss = loss.data

        optimizer.update()
        if epoch % 1000 == 0:
            loss, accuracy = forward(model, x_test, y_test)
            print "epoch: %s, Try Test Result: loss: %s, accuracy: %s" % (epoch, loss.data, accuracy.data)

    # result
    loss, accuracy = forward(model, x_test, y_test)
    print "epoch: %s, Test Result: loss: %s, accuracy: %s" % (epoch, loss.data, accuracy.data)
    return epoch, accuracy.data
Author: hitoyozake, Project: PlayingChainer, Lines: 34, Source: find_error_from_log.py

Example 5: __init__

class AutoEncoder:
    """
    Construct an AutoEncoder from the number of input (xn) and hidden (hn) units
    """
    def __init__(self, xn, hn):
        self.model = FunctionSet(encode=F.Linear(xn, hn), decode=F.Linear(hn, xn))

    def encode(self, x, train=True):
        h = F.dropout(F.relu(self.model.encode(x)), train=train)
        return h

    def decode(self, h, train=True):
        y = F.dropout(F.relu(self.model.decode(h)), train=train)
        return y

    def train_once(self, x_data):
        x = Variable(x_data)
        h = self.encode(x)
        y = self.decode(h)
        return F.mean_squared_error(x, y)#, F.accuracy(x, y)

    def reconstract(self, x_data):
        x = Variable(x_data)
        h = self.encode(x, train=False)
        y = self.decode(h, train=False)
        return y.data
Author: kubyaddi, Project: chainer_practice, Lines: 26, Source: auto_encoder.py

Example 6: TestFunctionSet

class TestFunctionSet(TestCase):
    def setUp(self):
        self.fs = FunctionSet(
            a = Linear(3, 2),
            b = Linear(3, 2)
        )

    def check_equal_fs(self, fs1, fs2):
        self.assertTrue((fs1.a.W == fs2.a.W).all())
        self.assertTrue((fs1.a.b == fs2.a.b).all())
        self.assertTrue((fs1.b.W == fs2.b.W).all())
        self.assertTrue((fs1.b.b == fs2.b.b).all())

    def test_pickle_cpu(self):
        s   = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)
        self.check_equal_fs(self.fs, fs2)

    def test_pickle_gpu(self):
        self.fs.to_gpu()
        s   = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)

        self.fs.to_cpu()
        fs2.to_cpu()
        self.check_equal_fs(self.fs, fs2)
Author: ALEXGUOQ, Project: chainer, Lines: 26, Source: test_function_set.py

Example 7: main

def main(args):
    def forward(x_data, y_data):
        x = Variable(x_data)
        t = Variable(y_data)
        h1 = F.relu(model.l1(x))  # activation function
        h2 = F.relu(model.l2(h1)) # ReLU does not have parameters to optimize
        y = model.l3(h2)
        # the loss function of softmax regression
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)  # current accuracy

    def evaluate():
        sum_loss, sum_accuracy = 0, 0
        for i in xrange(0, 10000, batchsize):
            x_batch = x_test[i:i+batchsize]
            y_batch = y_test[i:i+batchsize]
            loss, accuracy = forward(x_batch, y_batch)
            sum_loss += loss.data * batchsize
            sum_accuracy += accuracy.data * batchsize
        mean_loss = sum_loss / 10000
        mean_accuracy = sum_accuracy / 10000
        print(mean_loss[0], mean_accuracy)
        return

    global debug, verbose
    debug = args.debug
    if debug == True:
        verbose = True
    else:
        verbose = args.verbose

    mnist = fetch_mldata('MNIST original')
    x_all = mnist.data.astype(np.float32) / 255  # Scaling features to [0, 1]
    y_all = mnist.target.astype(np.int32)
    x_train, x_test = np.split(x_all, [60000])   # 60000 for training, 10000 for test
    y_train, y_test = np.split(y_all, [60000])

    # Simple three-layer rectifier network
    model = FunctionSet(
        l1 = F.Linear(784, 100),  # 784 pixels -> 100 units
        l2 = F.Linear(100, 100),  # 100 units -> 100 units
        l3 = F.Linear(100, 10),   # 100 units -> 10 digits
    )
    optimizer = optimizers.SGD()
    optimizer.setup(model.collect_parameters())

    batchsize = 100
    for epoch in xrange(20):
        if verbose: logger.info('epoch: {}'.format(epoch))
        indexes = np.random.permutation(60000)
        for i in xrange(0, 60000, batchsize):
            x_batch = x_train[indexes[i:i+batchsize]]
            y_batch = y_train[indexes[i:i+batchsize]]

            optimizer.zero_grads()  # Initialize gradient arrays
            loss, accuracy = forward(x_batch, y_batch)  # loss function
            loss.backward()  # Backpropagation
            optimizer.update()
        evaluate()

    return 0
Author: notani, Project: practice, Lines: 60, Source: t01.py

Example 8: __init__

class Model1:

    def __init__(self, model):
        if isinstance(model, tuple):
            input_dims, n_units, output_dims = model
            self.model = FunctionSet(l1=F.Linear(input_dims, n_units),
                                     l2=F.Linear(n_units, n_units),
                                     l3=F.Linear(n_units, output_dims))
        else:
            self.model = model

    def __call__(self):
        return self.model

    # Neural net architecture
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data)
        if y_data is not None: t = Variable(y_data)
        h1 = F.dropout(F.relu(self.model.l1(x)),  train=train)
        h2 = F.dropout(F.relu(self.model.l2(h1)), train=train)
        y  = self.model.l3(h2)
        if y_data is not None:
            # Multi-class classification, so use softmax cross-entropy
            # as the loss function to compute the error.
            return F.softmax_cross_entropy(y, t), F.accuracy(y, t), y
        else:
            return y

    def evaluate(self, x_data):
        return self.forward(x_data, None, train=False)
Author: kotarot, Project: nl-solver, Lines: 31, Source: nnmodel.py

Example 9: __init__

	def __init__(self, in_channels=1, n_hidden=100, n_outputs=10):
		FunctionSet.__init__(
			self,
			conv1=F.Convolution2D(in_channels, 32, 5),
			conv2=F.Convolution2D(32, 32, 5),
			l3=F.Linear(288, n_hidden),
			l4=F.Linear(n_hidden, n_outputs)
		)
Author: medinfo2, Project: deeplearning, Lines: 8, Source: cnn.py

Example 10: setup_model

def setup_model(n_dimention, n_units):

    model = FunctionSet(l1=F.Linear(n_dimention, n_units),
                        l2=F.Linear(n_units, n_dimention))
    # Setup optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model.collect_parameters())

    return model, optimizer
Author: nalvdao, Project: let_s_analyze_miss_collection_girls, Lines: 9, Source: sigmoid_2layer.py

Example 11: __init__

class FTCS_Y:
    def __init__(self):
        self.model0 = FunctionSet(l=F.Convolution2D(1, 1, 3, pad=1, nobias=True))
        self.model0.l.W[0,0,:,:] = np.array([[0,-1,0],[0,0,0],[0,1,0]]).astype(np.float32)/2
        self.model0.to_gpu()

    def forward(self, x_data):
        y0 = self.model0.l(x_data)
        return y0
Author: chenaoki, Project: chainer-fluid, Lines: 9, Source: fluid.py

Example 12: ConvolutionalDenoisingAutoencoder

class ConvolutionalDenoisingAutoencoder():
    def __init__(self, imgsize, n_in_channels, n_out_channels, ksize, stride=1, pad=0, use_cuda=False):
        self.model = FunctionSet(
            encode=F.Convolution2D(n_in_channels, n_out_channels, ksize, stride, pad),
            decode=F.Linear(n_out_channels*(math.floor((imgsize+2*pad-ksize)/stride)+1)**2, n_in_channels*imgsize**2)
        )
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())

    def encode(self, x_var):
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        return self.model.decode(x_var)

    def predict(self, x_data):
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        if self.use_cuda:
            return cuda.to_cpu(p.data)
        else:
            return p.data

    def cost(self, x_data):
        x = Variable(x_data)
        t = Variable(x_data.reshape(x_data.shape[0], x_data.shape[1]*x_data.shape[2]*x_data.shape[3]))
        h = F.dropout(x)
        h = self.encode(h)
        y = self.decode(h)
        return F.mean_squared_error(y, t)

    def train(self, x_data):
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        self.optimizer.zero_grads()
        loss = self.cost(x_data)
        loss.backward()
        self.optimizer.update()
        if self.use_cuda:
            return float(cuda.to_cpu(loss.data))
        else:
            return loss.data

    def test(self, x_data):
        if self.use_cuda:
            x_data = cuda.to_gpu(x_data)
        loss = self.cost(x_data)
        return float(cuda.to_cpu(loss.data))
Author: TakuTsuzuki, Project: Hackathon2015, Lines: 55, Source: CdA.py

Example 13: DenoisingAutoencoder

class DenoisingAutoencoder(SuperClass):
    def __init__(self, n_in, n_hidden, n_epoch=20, batchsize=100, use_cuda=False):
        super().__init__(n_epoch, batchsize, use_cuda)
        
        self.model = FunctionSet(
            encode=F.Linear(n_in, n_hidden),
            decode=F.Linear(n_hidden, n_in)
        )
        self.registModel()

    def encode(self, x_var):
        return F.sigmoid(self.model.encode(x_var))

    def decode(self, x_var):
        return self.model.decode(x_var)

    def predict(self, x_data):
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        p = self.encode(x)
        return self.procOutput(p.data)

    def cost(self, x_data):
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        t = Variable(x_data)
        h = self.encode(F.dropout(t))
        y = self.decode(h)
        return self.procOutput(F.mean_squared_error(y, x))

    def test(self, x_data):
        x_data = self.procInput(x_data)
        x = Variable(x_data)
        t = Variable(x_data)
        h = self.encode(t)
        y = self.decode(h)
        return self.procOutput(F.mean_squared_error(y, x))

    def save(self, filedir, n_hidden, n_epoch, batchsize):
        name = "SdA_"+ "layer"+str(n_hidden) + "_epoch"+str(n_epoch)
        param = {}
        param['W'] = self.model.encode.parameters[0]
        param['b'] = self.model.encode.parameters[1]
        pickle.dump(param, open(filedir+'/'+name+'.pkl', 'wb'), pickle.HIGHEST_PROTOCOL)
        return
    
    def load(self, filename):
        if filename.find('.pkl')==-1:
            filename = filename + '.pkl'
        param = pickle.load(open(filename, 'rb'))
        self.model.encode.parameters = (param['W'], param['b'])
        return
Author: TakuTsuzuki, Project: Hackathon2015, Lines: 52, Source: dA.py

Example 14: getResult

    def getResult(self, data, batch_size=100):
        """
        入力データをネットワークに与え、結果を取得する。
        batch_size は一度にネットワークに投げるデータ数。マシン性能により調整。
        """
        self.logger.info("Get result start.")

        ### Model setup
        model = FunctionSet()
        for num, f_layer in enumerate(self.f_layers, 1):
            name = "l_f{0}".format(num)
            model.__setattr__(name, f_layer)

        if self.use_gpu:
            model = model.to_gpu()
        self.optimizer.setup(model)

        ### Define the forward pass
        def forward(x_data):
            x = Variable(x_data)
            t = Variable(x_data)

            h = x
            for num in xrange(1, len(self.f_layers)):
                h = self.activation(model.__getitem__("l_f{0}".format(num))(h))
            y = model.__getitem__("l_f{0}".format(num + 1))(h)

            return y.data

        ### Collect results
        test_data = data
        test_size = len(test_data)
        batch_max = int(math.ceil(test_size / float(batch_size)))

        y_data = np.zeros((test_size, self.layer_sizes[len(self.layer_sizes) - 1]), dtype=test_data.dtype)
        for i in xrange(batch_max):
            start = i * batch_size
            end = (i + 1) * batch_size
            x_batch = test_data[start:end]
            self.logger.debug("Index {0} => {1}, data count = {2}".format(start, end, len(x_batch)))

            if self.use_gpu:
                x_batch = cuda.to_gpu(x_batch)
            y_batch = forward(x_batch)
            if self.use_gpu:
                y_batch = cuda.to_cpu(y_batch)

            y_data[start:end] = y_batch

        self.logger.info("Complete get result.")
        return y_data
Author: ISP-Kazuki-Nagasawa, Project: chainer_sda_mnist_sample, Lines: 51, Source: stacked_auto_encoder.py

Example 15: __init__

class DenoisingAutoencoder:
    def __init__(
        self,
        n_input,
        n_hidden,
        tied=True,
        noise=None,
        ratio=None,
        optimizer=optimizers.Adam(),
        loss_function=F.sigmoid_cross_entropy,
        activation_function=F.sigmoid,
    ):
        self.model = FunctionSet(encoder=F.Linear(n_input, n_hidden), decoder=F.Linear(n_hidden, n_input))
        if tied:
            self.model.decoder.W = self.model.encoder.W.T
        self.noise = noise
        self.ratio = ratio
        self.optimizer = optimizer
        self.optimizer.setup(self.model.collect_parameters())
        self.loss_function = loss_function
        self.activation_function = activation_function

    def train(self, x_data):
        self.optimizer.zero_grads()
        loss = self.autoencode(x_data, train=True)
        loss.backward()
        self.optimizer.update()
        return loss

    def test(self, x_data):
        return self.autoencode(x_data, train=False)

    def autoencode(self, x_data, train=True):
        x = Variable(x_data)
        if self.noise and train:
            nx = Variable(self.noise.noise(x_data))
        else:
            nx = Variable(x_data)
        if self.ratio:
            h = F.dropout(self.encode(nx), ratio=self.ratio, train=train)
        else:
            h = self.encode(nx)
        y = self.decode(h)
        return self.loss_function(y, x)

    def encode(self, x):
        return self.activation_function(self.model.encoder(x))

    def decode(self, x):
        return self.activation_function(self.model.decoder(x))
Author: junion-org, Project: chainer_example, Lines: 50, Source: denoising_autoencoder.py


Note: The chainer.FunctionSet class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the license of the corresponding project before distributing or using the code, and do not reproduce without permission.