

Python chainer.Variable Class Code Examples

This article collects typical usage examples of chainer.Variable in Python. If you are wondering what the Variable class does in practice, how to use it, or what real-world code built around it looks like, the curated class examples below may help.


The following presents 15 code examples of the Variable class, sorted by popularity by default.
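
Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of the basic chainer.Variable workflow: wrap a NumPy array in a Variable, run a forward computation with chainer.functions to build the computational graph, then call backward() to populate gradients.

import numpy as np
from chainer import Variable
import chainer.functions as F

# Wrap a NumPy array; float32 is the dtype Chainer expects.
x = Variable(np.array([[1.0, 2.0, 3.0]], dtype=np.float32))

# The forward computation records the graph on the Variable.
y = F.sum(x * x)

# The backward pass fills x.grad with dy/dx = 2 * x.
y.backward()
print(y.data)   # 14.0
print(x.grad)   # [[2. 4. 6.]]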

Example 1: transform

    def transform(self, data, test=False):
        #make sure that data has the right shape.
        if not type(data) == Variable:
            if len(data.shape) < 4:
                data = data[np.newaxis]
            if len(data.shape) != 4:
                raise TypeError("Invalid dimensions for image data. Dim = %s.                     Must be 4d array." % str(data.shape))
            if data.shape[1] != self.color_channels:
                if data.shape[-1] == self.color_channels:
                    data = data.transpose(0, 3, 1, 2)
                else:
                    raise TypeError("Invalid dimensions for image data. Dim = %s"
                                    % str(data.shape))
            data = Variable(data)
        else:
            if len(data.data.shape) < 4:
                data.data = data.data[np.newaxis]
            if len(data.data.shape) != 4:
                raise TypeError("Invalid dimensions for image data. Dim = %s.                     Must be 4d array." % str(data.data.shape))
            if data.data.shape[1] != self.color_channels:
                if data.data.shape[-1] == self.color_channels:
                    data.data = data.data.transpose(0, 3, 1, 2)
                else:
                    raise TypeError("Invalid dimensions for image data. Dim = %s"
                                    % str(data.data.shape))

        # Actual transformation.
        if self.flag_gpu:
            data.to_gpu()
        z = self._encode(data, test=test)[0]

        z.to_cpu()

        return z.data
Author: tok41, Project: chainer-samples, Lines: 34, Source: test_vaegan-Copy1.py
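
The shape handling in transform() accepts images in channel-last (NHWC) layout and moves the channel axis to position 1, since Chainer's convolution links expect NCHW input. A standalone NumPy illustration of that reshaping (the array shapes are arbitrary, not from the original project):

import numpy as np

batch = np.zeros((8, 64, 64, 3), dtype=np.float32)   # N, H, W, C
nchw = batch.transpose(0, 3, 1, 2)                    # -> (8, 3, 64, 64)

single = np.zeros((64, 64, 3), dtype=np.float32)      # a lone image
batched = single[np.newaxis]                          # add a batch axis -> (1, 64, 64, 3)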

Example 2: sample_z_from_n_2d_gaussian_mixture

def sample_z_from_n_2d_gaussian_mixture(batchsize, z_dim, label_indices, n_labels, gpu=False):
	if z_dim % 2 != 0:
		raise Exception("z_dim must be a multiple of 2.")

	def sample(x, y, label, n_labels):
		shift = 1.4
		r = 2.0 * np.pi / float(n_labels) * float(label)
		new_x = x * cos(r) - y * sin(r)
		new_y = x * sin(r) + y * cos(r)
		new_x += shift * cos(r)
		new_y += shift * sin(r)
		return np.array([new_x, new_y]).reshape((2,))

	x_var = 0.5
	y_var = 0.05
	x = np.random.normal(0, x_var, (batchsize, z_dim // 2))
	y = np.random.normal(0, y_var, (batchsize, z_dim // 2))
	z = np.empty((batchsize, z_dim), dtype=np.float32)
	for batch in range(batchsize):
		for zi in range(z_dim // 2):
			z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], label_indices[batch], n_labels)

	z = Variable(z)
	if gpu:
		z.to_gpu()
	return z
Author: smajida, Project: adversarial-autoencoder, Lines: 26, Source: util.py
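
A possible way to call the sampler above, assuming ten mixture components and randomly drawn label indices; the argument values are illustrative only, and numpy plus math.sin/math.cos must be in scope:

import numpy as np

labels = np.random.randint(0, 10, size=16)
z = sample_z_from_n_2d_gaussian_mixture(batchsize=16, z_dim=2,
                                         label_indices=labels, n_labels=10, gpu=False)
print(z.data.shape)   # (16, 2)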

Example 3: forward_eye_states

    def forward_eye_states(self, x_batch_curr, y_batch_curr, volatile):

        current_sample = Variable(x_batch_curr, volatile=volatile)

        y_batch_curr = np.asarray(y_batch_curr).reshape(32, -1)
        current_output = Variable(y_batch_curr, volatile=volatile)

        h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))

        h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))

        h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))

        h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))

        h4 = h4_current
        y = self.model_to_use.h4_y(h4)

        y.data = y.data.reshape(32, -1)
        loss = F.sigmoid_cross_entropy(y, current_output)
        current_output.data = np.squeeze(current_output.data)

        accuracy = F.accuracy(y, current_output)

        return accuracy, loss, y
Author: sweetrabh, Project: testeeg, Lines: 25, Source: main.py

Example 4: optimizeCRNN

def optimizeCRNN(iterNum,maxIndex,indicies):
    batchSize = 1000
    model = EvalCRNN(maxIndex,500)
    print(len(indicies),computeEntropy(maxIndex,indicies))
    learningRate = 0.001
    epoch = 3 
    for j in range(epoch):
        
        my_optimizer = optimizers.RMSpropGraves(lr = learningRate)
        my_optimizer.setup(model) 
        my_optimizer.add_hook(optimizer.GradientClipping(1))
        
        model.cRNN.reset()
        
        loss = Variable(np.array([[0]]))
        for i in range(iterNum):
            t1 = time.clock()
            model.zerograds()
            loss.unchain_backward()
            loss = model(indicies[batchSize*i:batchSize*(i+1)],iterNum*batchSize)
            loss.backward()
            t2 = time.clock()
            
            msg = "iter: " + str(i + iterNum * j + 1) + "/" + str(iterNum * epoch) 
            msgLoss = "loss: " + str(loss.data/batchSize)
            msgNorm = "grad: " + str(my_optimizer.compute_grads_norm())
            msgTime = "time: " + str(t2 - t1) + " seconds"
            print(msgLoss,msgNorm,msg,msgTime)
            my_optimizer.update()

        learningRate *= 0.50

    print(model(indicies[batchSize*(iterNum):batchSize*(iterNum+10)]).data/(batchSize*10))
    return model.cRNN
Author: CurtisHuebner, Project: GPNN, Lines: 34, Source: mobyDick.py

Example 5: update

    def update(self, trajs):
        obs = np.concatenate([traj['observations'] for traj in trajs], axis=0)
        if self.concat_time:
            ts = np.concatenate([np.arange(len(traj['observations'])) / self.env_spec.timestep_limit for traj in trajs],
                                axis=0)
            obs = np.concatenate([obs, ts[:, None]], axis=-1)
        returns = np.concatenate([traj['returns'] for traj in trajs], axis=0)
        baselines = np.concatenate([traj['baselines']
                                    for traj in trajs], axis=0)

        # regress to a mixture of current and past predictions
        targets = returns * (1. - self.mixture_fraction) + \
            baselines * self.mixture_fraction

        # use lbfgs to perform the update
        cur_params = get_flat_params(self)

        obs = Variable(obs)
        targets = Variable(targets.astype(np.float32))

        def f_loss_grad(x):
            set_flat_params(self, x)
            self.cleargrads()
            values = self.compute_baselines(obs)
            loss = F.mean(F.square(values - targets))
            loss.backward()
            flat_grad = get_flat_grad(self)
            return loss.data.astype(np.float64), flat_grad.astype(np.float64)

        new_params = scipy.optimize.fmin_l_bfgs_b(
            f_loss_grad, cur_params, maxiter=10)[0]

        set_flat_params(self, new_params)
Author: stjordanis, Project: Deep-RL-Bootcamp-Labs, Lines: 33, Source: models.py
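
get_flat_params, set_flat_params and get_flat_grad are not shown in the snippet. Under the assumption that they flatten a Link's parameters (or gradients) into a single vector for scipy's L-BFGS, they might look roughly like this sketch built on Chainer's Link.params() iterator:

import numpy as np

def get_flat_params(link):
    # Concatenate every parameter array of a chainer.Link into one 1-D vector.
    return np.concatenate([p.data.ravel() for p in link.params()])

def set_flat_params(link, flat):
    # Write a flat vector back into the link's parameter arrays, in the same order.
    offset = 0
    for p in link.params():
        size = p.data.size
        p.data[...] = flat[offset:offset + size].reshape(p.data.shape)
        offset += size

def get_flat_grad(link):
    # Same flattening, but for the gradients populated by loss.backward().
    return np.concatenate([p.grad.ravel() for p in link.params()])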

Example 6: visualize_walkthrough

def visualize_walkthrough():
	x_batch = sample_x_from_data_distribution(20)
	z_batch = gen(x_batch, test=True)
	if use_gpu:
		z_batch.to_cpu()

	fig = pylab.gcf()
	fig.set_size_inches(16.0, 16.0)
	pylab.clf()
	if config.img_channel == 1:
		pylab.gray()
	
	z_a = z_batch.data[:10,:]
	z_b = z_batch.data[10:,:]
	for col in range(10):
		_z_batch = z_a * (1 - col / 9.0) + z_b * col / 9.0
		_z_batch = Variable(_z_batch)
		if use_gpu:
			_z_batch.to_gpu()
		_x_batch = dec(_z_batch, test=True)
		if use_gpu:
			_x_batch.to_cpu()
		for row in range(10):
			pylab.subplot(10, 10, row * 10 + col + 1)
			if config.img_channel == 1:
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_width, config.img_width)), interpolation="none")
			elif config.img_channel == 3:
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_channel, config.img_width, config.img_width)), interpolation="none")
			pylab.axis("off")
				
	pylab.savefig("%s/walk_through.png" % args.visualization_dir)
Author: smajida, Project: adversarial-autoencoder, Lines: 31, Source: visualize.py
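
The walkthrough interpolates linearly between two latent codes, z = (1 - t) * z_a + t * z_b for t in [0, 1], and decodes each step. A minimal NumPy illustration of those interpolation weights (latent size chosen arbitrarily):

import numpy as np

z_a = np.random.randn(1, 16).astype(np.float32)
z_b = np.random.randn(1, 16).astype(np.float32)
steps = [(1 - col / 9.0) * z_a + (col / 9.0) * z_b for col in range(10)]
# steps[0] equals z_a, steps[9] equals z_b, the entries in between blend the two.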

Example 7: AdamLearner

class AdamLearner(Link):
    def __init__(self, dim):
        super(AdamLearner, self).__init__(
            beta1=(dim, ),
            beta2=(dim, )
        )
        self.beta1.data.fill(-1e12)
        self.beta2.data.fill(-1e12)

        self.m = Variable(np.zeros_like(self.beta1.data))
        self.v = Variable(np.zeros_like(self.beta2.data))

    def to_gpu(self, device=None):
        super(AdamLearner, self).to_gpu()

        self.m.to_gpu(device)
        self.v.to_gpu(device)

    def __call__(self, x):
        f1 = F.sigmoid(self.beta1)
        f2 = F.sigmoid(self.beta2)
        #self.m = f1 * self.m + (1 - f1) * x
        #self.v = f2 * self.v + (1 - f2) * x**2
        self.m = self.beta1 * self.m + (1 - self.beta1) * x
        self.v = self.beta2 * self.v + (1 - self.beta2) * x**2
        g = 1e-3 * self.m / F.sqrt(self.v + 1e-8)
        return g
Author: kzky, Project: works, Lines: 27, Source: experiments002.py

Example 8: inverse_transform

    def inverse_transform(self, data, test=False):
        if not type(data) == Variable:
            if len(data.shape) < 2:
                data = data[np.newaxis]
            if len(data.shape) != 2:
                raise TypeError("Invalid dimensions for latent data. Dim = %s.                     Must be a 2d array." % str(data.shape))
            data = Variable(data)

        else:
            if len(data.data.shape) < 2:
                data.data = data.data[np.newaxis]
            if len(data.data.shape) != 2:
                raise TypeError("Invalid dimensions for latent data. Dim = %s.                     Must be a 2d array." % str(data.data.shape))
        assert data.data.shape[-1] == self.latent_width,            "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width)

        if self.flag_gpu:
            data.to_gpu()
        out = self._decode(data, test=test)

        out.to_cpu()

        if self.mode == 'linear':
            final = out.data
        else:
            final = out.data.transpose(0, 2, 3, 1)

        return final
Author: tok41, Project: chainer-samples, Lines: 27, Source: test_vaegan-Copy1.py

Example 9: setUp

    def setUp(self):
        chainer.set_debug(True)
        np.random.seed(0)
        dataset = VOC('train')
        img, im_info, bbox = dataset[1]
        self.x = Variable(img[None, ...])
        self.im_info = Variable(im_info[None, ...])
        self.gt_boxes = Variable(bbox[None, ...])
Author: quan821223, Project: pulmonary-nodules-MaskRCNN, Lines: 8, Source: test_faster_rcnn.py

Example 10: update_core

    def update_core(self):        
        enc_optimizer = self.get_optimizer('enc')
        dec_optimizer = self.get_optimizer('dec')
        dis_optimizer = self.get_optimizer('dis')
        
        enc, dec, dis = self.enc, self.dec, self.dis
        xp = enc.xp

        batch = self.get_iterator('main').next()
        batchsize = len(batch)
        in_ch = batch[0][0].shape[0]
        """ Edit g """
        #print("Batch size", len(batch))
        #print("Batch all", batch)
        #print("Batch -1[0]", batch[-1][0])
        #print("Batch -1[1]", batch[-1][1])
        #print("Batch -1[0][0]", batch[-1][0][0])
        """ 最後のインデックスにアクセスして、情報を取り出す """
        """ これは、バッチサイズが1のときのみ有効であるからして、気をつけること """
        #path_through1 = []
        #for in_contain in batch[-1][0][-1]:
            #print("IN_CONTAIN", in_contain)
        #    for c in in_contain:
        #        path_through1.append(c)
        #print("path-through len", len(path_through1))
        """ ここまで """

        out_ch = batch[0][1].shape[0]
        w_in = 256
        w_out = 256
        
        x_in = xp.zeros((batchsize, in_ch, w_in, w_in)).astype("f")
        t_out = xp.zeros((batchsize, out_ch, w_out, w_out)).astype("f")
        
        for i in range(batchsize):
            x_in[i,:] = xp.asarray(batch[i][0])
            t_out[i,:] = xp.asarray(batch[i][1])
        x_in = Variable(x_in)
        
        z = enc(x_in, test=False)
        """ このzベクトルを変化させれば、任意の方向性に持っていくことができる """
        #print("z", z)
        """ Zを直接編集するのは危険なので、decの引数を増やして対処したほうが良さそう """
        #x_out = dec(z, path_through1, test=False)
        x_out = dec(z, test=False)

        y_fake = dis(x_in, x_out, test=False)
        y_real = dis(x_in, t_out, test=False)


        enc_optimizer.update(self.loss_enc, enc, x_out, t_out, y_fake)
        for z_ in z:
            z_.unchain_backward()
        dec_optimizer.update(self.loss_dec, dec, x_out, t_out, y_fake)
        x_in.unchain_backward()
        x_out.unchain_backward()
        dis_optimizer.update(self.loss_dis, dis, y_real, y_fake)
Author: GINK03, Project: KindleReferencedIndexScore, Lines: 57, Source: updater.py

Example 11: sample_z_from_n_2d_gaussian_mixture

def sample_z_from_n_2d_gaussian_mixture(batchsize, z_dim, label_indices, n_labels, gpu=False):
	z = np.zeros((batchsize, z_dim), dtype=np.float32)
	for i in range(batchsize):
		z1 = np.random.normal(0.5, 0.2, 1) 
		z2 = np.random.normal(0.5, 0.2, 1) 
		z[i] = np.array([z1, z2]).reshape((2,))
	z = Variable(z)
	if gpu:
		z.to_gpu()
	return z
Author: risyarisya, Project: parser, Lines: 10, Source: util.py

Example 12: __init__

    def __init__(self, dim):
        super(AdamLearner, self).__init__(
            beta1=(dim, ),
            beta2=(dim, )
        )
        self.beta1.data.fill(-1e12)
        self.beta2.data.fill(-1e12)

        self.m = Variable(np.zeros_like(self.beta1.data))
        self.v = Variable(np.zeros_like(self.beta2.data))
Author: kzky, Project: works, Lines: 10, Source: experiments002.py

Example 13: encode

    def encode(self, data, test=False):
        x = self.enc(data, test=test)
        mean, ln_var = F.split_axis(x, 2, 1)
        samp = np.random.standard_normal(mean.data.shape).astype('float32')
        samp = Variable(samp)
        if self.flag_gpu:
            samp.to_gpu()
        z = samp * F.exp(0.5*ln_var) + mean

        return z, mean, ln_var
Author: 4Quant, Project: fauxtograph, Lines: 10, Source: vaegan.py
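
encode() implements the VAE reparameterization trick: z = mean + exp(0.5 * ln_var) * eps with eps drawn from a standard normal, so gradients can flow through mean and ln_var while the randomness stays in eps. A standalone sketch with illustrative shapes; Chainer also provides F.gaussian(mean, ln_var), which performs the same sampling internally:

import numpy as np
from chainer import Variable
import chainer.functions as F

mean = Variable(np.zeros((4, 8), dtype=np.float32))
ln_var = Variable(np.zeros((4, 8), dtype=np.float32))

# Draw eps ~ N(0, I) as plain data, then transform it differentiably.
eps = Variable(np.random.standard_normal(mean.data.shape).astype(np.float32))
z = eps * F.exp(0.5 * ln_var) + mean

# Equivalent built-in:
z2 = F.gaussian(mean, ln_var)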

Example 14: multi_box_intersection

def multi_box_intersection(a, b):
    w = multi_overlap(a.x, a.w, b.x, b.w)
    h = multi_overlap(a.y, a.h, b.y, b.h)
    zeros = Variable(np.zeros(w.shape, dtype=w.data.dtype))
    zeros.to_gpu()

    w = F.maximum(w, zeros)
    h = F.maximum(h, zeros)

    area = w * h
    return area
Author: Merlin2013, Project: YOLOv2, Lines: 11, Source: utils.py
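
multi_overlap is not included in the snippet; it presumably computes the 1-D overlap of two center/width intervals, and the clamped product above then gives the intersection area used for IoU. A plain NumPy sketch of that geometry (an illustration of the idea, not the project's code):

import numpy as np

def overlap_1d(x1, w1, x2, w2):
    # Overlap length of two intervals given by center x and width w.
    left = np.maximum(x1 - w1 / 2.0, x2 - w2 / 2.0)
    right = np.minimum(x1 + w1 / 2.0, x2 + w2 / 2.0)
    return right - left

def box_iou(ax, ay, aw, ah, bx, by, bw, bh):
    # Intersection over union of two boxes given as center/size values.
    w = np.maximum(overlap_1d(ax, aw, bx, bw), 0.0)
    h = np.maximum(overlap_1d(ay, ah, by, bh), 0.0)
    inter = w * h
    union = aw * ah + bw * bh - inter
    return inter / union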

Example 15: sample_x_from_data_distribution

def sample_x_from_data_distribution(batchsize):
	shape = config.img_channel * config.img_width * config.img_width
	x_batch = np.zeros((batchsize, shape), dtype=np.float32)
	for j in range(batchsize):
		data_index = np.random.randint(len(dataset))
		img = dataset[data_index]
		x_batch[j] = img.reshape((shape,))
	x_batch = Variable(x_batch)
	if config.use_gpu:
		x_batch.to_gpu()
	return x_batch
Author: smajida, Project: adversarial-autoencoder, Lines: 11, Source: train.py


Note: The chainer.Variable class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow each project's license when distributing or reusing the code; do not reproduce without permission.