

Python tensor.tensor4 Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.tensor4. If you have been wondering what exactly tensor4 does and how to use it in practice, the curated examples below should help.


A total of 15 code examples of the tensor4 function are shown below, ordered by popularity.
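Before diving into the examples, here is a minimal sketch of what T.tensor4 gives you: a symbolic 4-D variable, conventionally holding a batch of images laid out as (batch, channels, rows, cols). The variable name and the toy graph below are illustrative only:

import numpy as np
import theano
import theano.tensor as T

# tensor4 creates a symbolic 4-D variable; by convention the axes are
# (batch, channels, rows, cols).
x = T.tensor4('x')

# Build a tiny graph (per-image mean) and compile it into a callable.
per_image_mean = x.flatten(2).mean(axis=1)
f = theano.function([x], per_image_mean)

batch = np.random.rand(2, 3, 4, 4).astype(theano.config.floatX)
print(f(batch))  # one mean value per image in the batch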

Example 1: theano_kernel_derivative

# Assumed imports for this excerpt; conv2d here is the legacy
# theano.tensor.nnet.conv interface, which accepts the
# imshp_logical/kshp_logical keyword arguments used below.
import theano
import theano.tensor as T
from theano import function
from theano.tensor.nnet.conv import conv2d

def theano_kernel_derivative(imshp,kshp,featshp,stride=1):

    features = T.tensor4(dtype=theano.config.floatX)
    kernel = T.tensor4(dtype=theano.config.floatX)
    image = T.tensor4(dtype=theano.config.floatX)

    # Need to transpose first two dimensions of kernel, and reverse index kernel image dims (for correlation)
    kernel_rotated = T.transpose(kernel[:,:,::-1,::-1],axes=[1,0,2,3])

    featshp_logical = (featshp[0],featshp[1],featshp[2]*stride,featshp[3]*stride)
    kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
    image_estimate = conv2d(features,kernel_rotated,border_mode='full',
                            image_shape=featshp,filter_shape=kshp_rotated,
                            imshp_logical=featshp_logical[1:],kshp_logical=kshp[2:])

    image_error = image - image_estimate

    image_error_rot = T.transpose(image_error,[1,0,2,3])[:,:,::-1,::-1]
    imshp_rot = (imshp[1],imshp[0],imshp[2],imshp[3])
    featshp_rot = (featshp[1],featshp[0],featshp[2],featshp[3])
    features_rot = T.transpose(features,[1,0,2,3])

    featshp_rot_logical = (featshp_rot[0],featshp_rot[1],featshp_rot[2]*stride,featshp_rot[3]*stride)
    kernel_grad_rot = -conv2d(image_error_rot,features_rot,
                              image_shape=imshp_rot,filter_shape=featshp_rot,
                              imshp_logical=imshp_rot[1:],kshp_logical=featshp_rot_logical[2:])
    kernel_grad = T.transpose(kernel_grad_rot,[1,0,2,3])

    return function(inputs=[image,features,kernel],outputs=kernel_grad)
Developer: baylabs, Project: hdl, Lines: 29, Source: conv_models.py

Example 2: test_graph

    def test_graph(self):
        # define common values first
        groups = 3
        bottom = np.random.rand(3, 6, 5, 5).astype(theano.config.floatX)
        kern = np.random.rand(9, 2, 3, 3).astype(theano.config.floatX)
        bottom_sym = T.tensor4('bottom')
        kern_sym = T.tensor4('kern')

        # grouped convolution graph
        conv_group = self.conv(num_groups=groups)(bottom_sym, kern_sym)
        gconv_func = theano.function([bottom_sym, kern_sym], conv_group, mode=self.mode)

        # Graph for the normal hard way
        kern_offset = kern_sym.shape[0] // groups
        bottom_offset = bottom_sym.shape[1] // groups
        split_conv_output = [self.conv()(bottom_sym[:, i * bottom_offset:(i + 1) * bottom_offset, :, :],
                             kern_sym[i * kern_offset:(i + 1) * kern_offset, :, :, :])
                             for i in range(groups)]
        concatenated_output = T.concatenate(split_conv_output, axis=1)
        conv_func = theano.function([bottom_sym, kern_sym], concatenated_output, mode=self.mode)

        # calculate outputs for each graph
        gconv_output = gconv_func(bottom, kern)
        conv_output = conv_func(bottom, kern)

        # compare values
        utt.assert_allclose(gconv_output, conv_output)
Developer: DEVESHTARASIA, Project: Theano, Lines: 27, Source: test_corr.py

Example 3: fix_gpu_transfer

# Assumed imports for this excerpt; helper_T_l2_cost_conv is defined
# elsewhere in the source project (hdl).
import numpy as np
from theano import function

def fix_gpu_transfer():

    kshp=(10,2,10,10)
    featshp=(3,10,11,11)
    stride=8
    mask = False
    imshp = (featshp[0],kshp[1],featshp[2]*stride + kshp[2] - 1,featshp[3]*stride + kshp[3] - 1) # num images, channels, szy, szx

    from theano import tensor as T
    x = T.tensor4()
    a = T.tensor4()
    A = T.tensor4()

    image_error = helper_T_l2_cost_conv(x,a,A,imshp,kshp,featshp,stride=(stride,stride),mask=mask)
    cost = .5*T.sum(image_error **2)

    func = function([x,a,A],cost)

    import theano
    theano.printing.debugprint(func)

    x_in = np.random.randn(*imshp).astype(np.float32)
    a_in = np.random.randn(*featshp).astype(np.float32)
    A_in = np.random.randn(*kshp).astype(np.float32)

    from time import time as now
    repeats = 10
    t0 = now()
    for i in range(repeats):
        output = func(x_in,a_in,A_in)
    t = now() - t0
    print('time / iter = %f' % (t / repeats))
Developer: baylabs, Project: hdl, Lines: 32, Source: conv_models.py

Example 4: __init__

    def __init__(self, test_data_x, test_data_y):
        self.test_data_x = test_data_x
        self.test_data_y = test_data_y
        test = T.tensor4('test')
        pred = T.tensor4('pred')
        dc = dice_coef(test, pred)
        self.dc = theano.function([test, pred], dc)
Developer: ericsolo, Project: python, Lines: 7, Source: LUNA_unet.py

Example 5: create_iter_funcs_valid

def create_iter_funcs_valid(l_out, bs=None, N=50, mc_dropout=False):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    if not mc_dropout:
        y_hat = layers.get_output(l_out, X, deterministic=True)
    else:
        if bs is None:
            raise ValueError('a fixed batch size is required for mc dropout')
        X_repeat = T.extra_ops.repeat(X, N, axis=0)
        y_sample = layers.get_output(
            l_out, X_repeat, deterministic=False)

        sizes = [X_repeat.shape[0] / X.shape[0]] * bs
        y_sample_split = T.as_tensor_variable(
            T.split(y_sample, sizes, bs, axis=0))
        y_hat = T.mean(y_sample_split, axis=1)

    valid_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))
    valid_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    valid_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[valid_loss, valid_acc],
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return valid_iter
Developer: hjweide, Project: cifar-10-uncertainty, Lines: 35, Source: iter_funcs.py

Example 6: __init__

    def __init__(self):
        X1 = T.tensor4()
        X2 = T.tensor4()
        X = [X1, X2]
        Y = [T.ivector()]
        
        model = Model()
        #conv1
        model.add(Conv(filter_shape = (32, 3, 3, 3), regularizers = {'W': l1(0.0001)},  w_shared = True, n_inputs = 2))
        model.add(Conv(filter_shape = (32, 32, 2, 2), regularizers = {'W': l1(0.0001)}, w_shared = True, n_inputs = 2))
        model.add(Pooling(pool_size = (2,2)))
        model.add(Activation(mode = 'tanh'))
        #conv2
        model.add(Conv(filter_shape = (32, 32, 3, 3), regularizers = {'W': l1(0.0001)}, w_shared = True, n_inputs = 2))
        model.add(Pooling(pool_size = (2,2)))
        model.add(Activation(mode = 'tanh'))
        #abs_diff
        model.add(Abs_diff())
        #conv3
        model.add(Conv(filter_shape = (32, 32, 3, 3), regularizers = {'W': l1(0.0001)}, w_shared = True))
        model.add(Pooling(pool_size = (2,2)))
        model.add(Activation(mode = 'tanh'))
        model.add(Flatten())

        self.f = theano.function(X, model.f(X, is_train = True))

        model.add(Fully((2880, 512)))
        model.add(Activation(mode = 'tanh'))
        model.add(Dropout(0.5))
        model.add(Fully((512, 2)))
        model.add(Activation(mode = 'softmax'))
        model.build(CostFunc.nll, RMSprop(), X, Y)
        self.model = model
Developer: yangli625, Project: ReId_theano, Lines: 33, Source: model_new.py

Example 7: test_mask_loss_sobel

def test_mask_loss_sobel():
    th_mask, th_img = T.tensor4(), T.tensor4()
    ml = mask_loss_sobel(th_mask, th_img)
    mask_loss = theano.function([th_mask, th_img],
                                [ml.loss] + list(ml.sobel_mask) +
                                list(ml.sobel_img))

    mask_idx = next(masks(1))
    image_ok = 0.5 * np.ones_like(mask_idx)
    image_ok[mask_idx > MASK["IGNORE"]] = 1
    image_ok[mask_idx < MASK["BACKGROUND_RING"]] = 0

    print()
    loss, sobel_mask_x, sobel_mask_y, sobel_img_x, sobel_img_y = \
        mask_loss(mask_idx, image_ok)
    plt.set_cmap('gray')
    plt.subplot(221)
    plt.imshow(sobel_mask_x[0, 0])
    plt.subplot(222)
    plt.imshow(sobel_mask_y[0, 0])
    plt.colorbar()
    plt.subplot(223)
    plt.imshow(sobel_img_x[0, 0])
    plt.subplot(224)
    plt.imshow(sobel_img_y[0, 0])
    plt.colorbar()
    plt.savefig("mask_loss_sobel.png")
    print()
    print("mask_loss: {}".format(mask_loss(mask_idx, image_ok)))
    assert loss == 0
Developer: GALI472, Project: deepdecoder, Lines: 30, Source: test_gpu_only_mask_loss.py

Example 8: compile_dream

    def compile_dream(self, X_train, shapes, indices, initializer):
        self.dream_compiled = True
        self.X_dream = []
        index = 0
        for i in range(len(X_train)):
            if i in indices:
                self.X_dream.append(theano.shared(initializer(shapes[index]).astype('float32')))
                index += 1
            else:
                X_train[i] = atleast_4d(X_train[i][[0]])
                self.X_dream.append(theano.shared(X_train[i].astype('float32')))

        y_hat_test, layer_updates = self.tree.get_output(self.params_shared, self.X_dream[:], True)
        preds = y_hat_test.flatten(self.num_output_dims).mean(axis=None)

        self.dream_optimizer.build([self.X_dream[index] for index in indices])
        updates = list(self.dream_optimizer.get_updates([self.X_dream[index] for index in indices], -preds))
        for i, update in enumerate(updates):
            updates[i] = (update[0], update[1].astype('float32'))
        updates += layer_updates

        y_pred = T.tensor4(dtype='float32')
        y = T.tensor4(dtype='float32')
        accuracy = self.accuracy.get_accuracy(y_pred, y)

        self.dream_accuracy_theano = theano.function([y_pred, y], accuracy)
        self.dream_update = theano.function(
            inputs=[],
            outputs=preds,
            updates=updates
        )
Developer: agajews, Project: Neural-Network-Dev, Lines: 31, Source: Core.py

Example 9: compile

    def compile(self):
        # Helper function for rendering test images during training, or standalone inference mode.
        input_tensor, seed_tensor = T.tensor4(), T.tensor4()
        input_layers = {self.network['img']: input_tensor, self.network['seed']: seed_tensor}
        output = lasagne.layers.get_output([self.network[k] for k in ['seed','out']], input_layers, deterministic=True)
        self.predict = theano.function([seed_tensor], output)

        if not args.train: return

        output_layers = [self.network['out'], self.network[args.perceptual_layer], self.network['disc']]
        gen_out, percept_out, disc_out = lasagne.layers.get_output(output_layers, input_layers, deterministic=False)

        # Generator loss function, parameters and updates.
        self.gen_lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
        self.adversary_weight = theano.shared(np.array(0.0, dtype=theano.config.floatX))
        gen_losses = [self.loss_perceptual(percept_out) * args.perceptual_weight,
                      self.loss_total_variation(gen_out) * args.smoothness_weight,
                      self.loss_adversarial(disc_out) * self.adversary_weight]
        gen_params = lasagne.layers.get_all_params(self.network['out'], trainable=True)
        print('  - {} tensors learned for generator.'.format(len(gen_params)))
        gen_updates = lasagne.updates.adam(sum(gen_losses, 0.0), gen_params, learning_rate=self.gen_lr)

        # Discriminator loss function, parameters and updates.
        self.disc_lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
        disc_losses = [self.loss_discriminator(disc_out)]
        disc_params = list(itertools.chain(*[l.get_params() for k, l in self.network.items() if 'disc' in k]))
        print('  - {} tensors learned for discriminator.'.format(len(disc_params)))
        grads = [g.clip(-5.0, +5.0) for g in T.grad(sum(disc_losses, 0.0), disc_params)]
        disc_updates = lasagne.updates.adam(grads, disc_params, learning_rate=self.disc_lr)

        # Combined Theano function for updating both generator and discriminator at the same time.
        updates = collections.OrderedDict(list(gen_updates.items()) + list(disc_updates.items()))
        self.fit = theano.function([input_tensor, seed_tensor], gen_losses + [disc_out.mean(axis=(1,2,3))], updates=updates)
Developer: JulienHeiduk, Project: neural-enhance, Lines: 33, Source: enhance.py

Example 10: create_iter_funcs_test

def create_iter_funcs_test(l_out, bs, N=50):
    X = T.tensor4('X')
    X_batch = T.tensor4('X_batch')

    X_repeat = T.extra_ops.repeat(X, N, axis=0)
    y_sample = layers.get_output(
        l_out, X_repeat, deterministic=False)

    # the number of splits needs to be pre-defined
    sizes = [X_repeat.shape[0] / X.shape[0]] * bs
    y_sample_split = T.as_tensor_variable(
        T.split(y_sample, sizes, bs, axis=0))
    y_hat = T.mean(y_sample_split, axis=1)
    #y_var = T.var(y_sample_split, axis=1)

    test_iter = theano.function(
        inputs=[theano.Param(X_batch)],
        outputs=y_hat,
        #outputs=[y_hat, y_var],
        givens={
            X: X_batch,
        },
    )

    return test_iter
Developer: hjweide, Project: cifar-10-uncertainty, Lines: 25, Source: iter_funcs.py

Example 11: burn

def burn():
    sz = 128
    img_shp = [sz, sz, sz, sz]
    kern_shp = [sz // 2, sz, 3, 3]
    out_shp = get_conv_output_shape(img_shp, kern_shp, 'valid', (1, 1))
    # Note: these symbolic tensor4 declarations are immediately shadowed by
    # the shared variables created below (kept as in the original source).
    img = T.tensor4('img')
    kern = T.tensor4('kern')
    out = T.tensor4('out')

    def rand(shp):
        return np.random.rand(*shp).astype(theano.config.floatX)

    img = theano.shared(rand(img_shp))
    kern = theano.shared(rand(kern_shp))
    out = theano.shared(rand(out_shp))
    # beta 1 is needed to force the reuse of out, otherwise, it is
    # replaced by a GpuAllocEmpty
    o1 = dnn._dnn_conv(img, kern, conv_mode='conv', out=out, beta=1.)
    mode = theano.compile.get_default_mode().including(
        "local_remove_all_assert")
    f = theano.function([], [o1], mode=mode)
    theano.printing.debugprint(f)
    print("Start computation")
    for i in range(10000):
        f.fn()
    print("Computation stopped")
Developer: Theano, Project: Theano, Lines: 26, Source: burn_gpu.py

Example 12: test_batch_normalization_train_without_running_averages

def test_batch_normalization_train_without_running_averages():
    # compile and run batch_normalization_train without running averages
    utt.seed_rng()

    x, scale, bias, dy = T.tensor4('x'), T.tensor4('scale'), T.tensor4('bias'), T.tensor4('dy')
    data_shape = (5, 10, 30, 25)
    param_shape = (1, 10, 30, 25)

    # forward pass
    out, x_mean, x_invstd = bn.batch_normalization_train(x, scale, bias, 'per-activation')
    # backward pass
    grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
    # compile
    f = theano.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)
    # check if the abstract Ops have been replaced
    assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,
                                      bn.AbstractBatchNormInference,
                                      bn.AbstractBatchNormTrainGrad))
                    for n in f.maker.fgraph.toposort()])
    # run
    X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    f(X, Scale, Bias, Dy)
Developer: Faruk-Ahmed, Project: Theano, Lines: 25, Source: test_bn.py

Example 13: create_iter_funcs_train

def create_iter_funcs_train(l_out, lr, mntm, wd):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    y_hat = layers.get_output(l_out, X, deterministic=False)

    # softmax loss
    train_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))

    # L2 regularization
    train_loss += wd * regularize_network_params(l_out, l2)

    train_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    all_params = layers.get_all_params(l_out, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        train_loss, all_params, lr, mntm)

    train_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[train_loss, train_acc],
        updates=updates,
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return train_iter
Developer: hjweide, Project: cifar-10-uncertainty, Lines: 33, Source: iter_funcs.py

Example 14: test_theano_transposed_convolution

    def test_theano_transposed_convolution(self):
        # how to use t_mk_conv_transpose
        from deconv.tdeconv_utils import t_mk_conv_transpose
        in4 = T.tensor4(name='conv_in', dtype=theano.config.floatX)
        f4 = T.tensor4(name='filters', dtype=theano.config.floatX)
        f_t_conv = theano.function(
            [in4],
            t_mk_conv_transpose(in4, f4),
            givens=[(f4, self.filters)]
        )

        test_input = np.array(
            [[[[0, 1, 0],
               [0, 1, 0],
               [0, 1, 0]],
              [[0, 0, 0],
               [1, 1, 1],
               [0, 0, 0]]]],
            dtype=theano.config.floatX
        )
        ground_truth = np.array(
            [[[[ 0,  0,  0,  0,  0],
               [-1, -1, -1,  0,  0],
               [ 0,  0,  0,  1,  0],
               [ 0,  0,  0,  1,  0],
               [ 0,  0,  0,  1,  0]]]],
            dtype=theano.config.floatX
        )
        assert_true(np.all(f_t_conv(test_input) == ground_truth))
Developer: bonext, Project: deconvn, Lines: 29, Source: tdeconv_tests.py

Example 15: make_apply_gabor_function

# Assumed imports for this excerpt:
import theano.tensor as tnsr
from theano import function

def make_apply_gabor_function(filter_stack_shape,complex_cell=True):
    stim_tnsr = tnsr.tensor4('stim_tnsr')  ## T x n_color_channels x stim_size x stim_size
    real_filter_stack_tnsr = tnsr.tensor4('real_feature_map_tnsr')  ## D x n_color_channels x stim_size x stim_size (real part of complex filters)
    imag_filter_stack_tnsr = tnsr.tensor4('imag_feature_map_tnsr')  ## D x n_color_channels x stim_size x stim_size (imaginary part of complex filters)
    real_feature_map_tnsr = tnsr.nnet.conv2d(stim_tnsr,
                                     real_filter_stack_tnsr,                                     
                                     filter_shape = filter_stack_shape,
                                     border_mode = 'full')  ##produces T x D x stim_size x stim_size maps
    imag_feature_map_tnsr = tnsr.nnet.conv2d(stim_tnsr,
                                     imag_filter_stack_tnsr,
                                     filter_shape = filter_stack_shape,
                                     border_mode = 'full')  ##produces T x D x stim_size x stim_size maps

    
    if complex_cell:
        ##for filtering with complex gabors, we need an operation for squaring/summing real/imag parts
        abs_value = tnsr.sqrt(tnsr.sqr(real_feature_map_tnsr) + tnsr.sqr(imag_feature_map_tnsr))
        ##functionize feature mapping
        make_feature_maps = function(inputs = [stim_tnsr,real_filter_stack_tnsr,imag_filter_stack_tnsr],
                                     outputs = abs_value)
    else:
        make_feature_maps = function(inputs = [stim_tnsr,real_filter_stack_tnsr],
                                     outputs = real_feature_map_tnsr)

    return make_feature_maps
Developer: tnaselar, Project: hrf_fitting, Lines: 25, Source: gabor_feature_dictionaries.py


Note: The theano.tensor.tensor4 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not repost without permission.