

Python tensor.patternbroadcast Function Code Examples

This article collects typical usage examples of the theano.tensor.patternbroadcast function in Python. If you are wondering what patternbroadcast does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.


The following presents 15 code examples of the patternbroadcast function, sorted by popularity by default.
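Before the GitHub examples, here is a minimal, self-contained sketch of what patternbroadcast does (the variable names and shapes are illustrative only, not taken from any of the projects below): it re-declares the broadcastable pattern of a symbolic tensor so that elementwise operations can broadcast along dimensions Theano could not prove to be of size 1, such as a shared variable whose leading axis happens to be 1.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')  # shape (batch, features)

# A shared variable built from a (1, 3) array is typed as
# broadcastable=(False, False): Theano does not know the first axis
# is size 1, so "x + b" would fail at runtime with a dimension mismatch.
b = theano.shared(np.zeros((1, 3), dtype=theano.config.floatX), name='b')

# Re-declare the first axis as broadcastable so it broadcasts over the batch.
b_row = T.patternbroadcast(b, (True, False))

f = theano.function([x], x + b_row)
print(f(np.ones((4, 3), dtype=theano.config.floatX)))  # -> (4, 3) array

T.addbroadcast(b, 0) would achieve the same thing here; patternbroadcast is the general form that takes the full pattern for every axis, which is why the examples below use it to mirror the broadcastable pattern of another variable.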

Example 1: grad

    def grad(self, inp, grads):
        bottom, weights = inp
        top, = grads
        d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,
                                             self.border_mode,
                                             self.subsample,
                                             self.filter_flip)(
            weights, top, bottom.shape[-2:])
        d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,
                                               self.border_mode,
                                               self.subsample,
                                               self.filter_flip)(
            bottom, top, weights.shape[-2:])

        # Make sure that the broadcastable pattern of the inputs is used
        # for the gradients, even if the grad opts are not able to infer
        # that the dimensions are broadcastable.
        # Also make sure that the gradient lives on the same device as
        # the corresponding input.
        d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)
        d_bottom = bottom.type.filter_variable(d_bottom)
        d_weights = patternbroadcast(d_weights, weights.broadcastable)
        d_weights = weights.type.filter_variable(d_weights)
        return d_bottom, d_weights
Author: Azrael1, Project: Theano, Lines: 25, Source: abstract_conv.py

Example 2: apply

    def apply(self, input_):
        aggregate_axes = [0] + [1 + i for i, b in enumerate(self.broadcastable) if b]
        # NOTE: don't put batch_stats on self because apply may be
        # called multiple times
        batch_stats = dict(
            (stat, getattr(input_, stat)(axis=aggregate_axes,
                                         keepdims=True))
            for stat in self.stats)

        for stat, role in self.roles.items():
            graph.add_transform([batch_stats[stat]],
                                graph.ConstantTransform(
                                    # adding zero to ensure it's a TensorType(float32, row)
                                    # just like the corresponding batch_stat, rather than a
                                    # CudaNdarray(float32, row).  -__-
                                    0 + T.patternbroadcast(
                                        self.population_stats[stat],
                                        [True] + self.broadcastable)),
                                reason="population_normalization")

            # make the batch statistics identifiable to get_updates() below
            add_role(batch_stats[stat], self.roles[stat])
            batch_stats[stat] = self.annotated_statistic(batch_stats[stat])

        gamma = T.patternbroadcast(self.gamma, [True] + self.broadcastable)
        beta = T.patternbroadcast(self.beta, [True] + self.broadcastable)
        return theano.tensor.nnet.bn.batch_normalization(
            inputs=input_, gamma=gamma, beta=beta,
            mean=batch_stats["mean"],
            std=T.sqrt(batch_stats["var"] + self.epsilon))
Author: cooijmanstim, Project: tsa-rnn, Lines: 30, Source: bricks.py

Example 3: squeeze

def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    broadcastable = x.broadcastable[:axis] + x.broadcastable[axis+1:]
    x = T.patternbroadcast(x, [i == axis for i in range(x.type.ndim)])
    x = T.squeeze(x)
    x = T.patternbroadcast(x, broadcastable)
    return x
Author: fvisin, Project: keras, Lines: 8, Source: theano_backend.py
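A hypothetical call to the squeeze() helper above (the tensor below is assumed, not part of the Keras source): the first patternbroadcast marks only the requested axis as broadcastable so that T.squeeze drops exactly that dimension, and the second restores the original pattern of the surviving axes.

# Assumes `import theano.tensor as T`, as in the backend module above.
x = T.tensor3('x')       # broadcastable: (False, False, False)
y = squeeze(x, 1)        # the middle axis is assumed to have size 1 at runtime
print(y.ndim)            # 2
print(y.broadcastable)   # (False, False)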

Example 4: get_output_for

    def get_output_for(self,input, **kwargs):

        if input.ndim > 2:
            input = input.flatten(2)

        inputData = input * 10
        inputData.name = 'inputData'
        
        inputData_reshape = inputData.dimshuffle(0, 'x', 'x', 1)
        inputData_reshape.name = 'inputData_reshape'
        inputData_reshape = T.patternbroadcast(inputData_reshape, (False, True, True, False))
        #mean_reshape has dimension: (1, NumofClass, NumofComponent, p)
        mean_reshape = self._means.dimshuffle('x', 0, 1, 2)
        mean_reshape = T.patternbroadcast(mean_reshape, (True, False, False,False))
        mean_reshape.name = 'mean_reshape'

        #self.sigma = nonlinearities.rectify(self.sigma) + T.ones_like(self.sigma)
        sigma = T.exp(self.sigma)
        sigma_reshape = sigma.dimshuffle('x', 0, 1, 2)
        sigma_reshape = T.patternbroadcast(sigma_reshape, (True, False, False, False))
        sigma_reshape.name = 'sigma_reshape'

        #self.weights = nonlinearities.rectify(self.weights) + 1e-16
        weights = T.exp(self.weights)
        weights_sum = T.sum(weights, axis = 1)
        weights_sum = T.patternbroadcast(weights_sum.dimshuffle(0,'x'), (False, True))
        weights = weights / weights_sum
        
        weights_reshape = weights.dimshuffle('x', 0, 1)
        weights_reshape = T.patternbroadcast(weights_reshape, (True, False, False))
        weights_reshape.name = 'weights_reshape' 
        sigma_inverse_sqrt = T.sqrt(1.0/sigma_reshape)
        sigma_inverse_sqrt.name = 'sigma_inverse_sqrt'

        # positive: 
        sqrtTemp = T.sqr((inputData_reshape - mean_reshape) * sigma_inverse_sqrt).sum(axis = 3) 
        
        # negative: 784 * log(sigma) ? sigma = 0.1 -> -1805, else positive.
        sigmaTemp = T.log(sigma_reshape).sum(axis = 3)
        

        # positive:28x28 dimension, then we have 784 * log(2\pi) = 1440
        dimTemp = T.ones((self.num_models, self.num_components), 'float32') * self.dim * T.log(2.0 * np.pi)
        
        logComponentOutput = - 1.0 / 2 * (sqrtTemp + sigmaTemp + dimTemp)
        #logComponentOutput = -1.0/2 * sqrtTemp
        logComponentOutput.name = 'logComponentOutput'
        logComponentSum = logComponentOutput + T.log(weights_reshape) 
        logComponentSum.name = 'logComponentSum'
        logComponentSum_max = logComponentSum.max(axis = 2)
        logComponentSum_max_reshape = logComponentSum_max.dimshuffle(0, 1, 'x')
        componentSum_before = T.exp(logComponentSum - logComponentSum_max_reshape)
        componentSum_before_sum = componentSum_before.sum(axis = 2)
        addLog =  T.log(componentSum_before_sum + T.ones_like(componentSum_before_sum)) + logComponentSum_max
        #addLog = (componentSum_before + T.ones_like().sum(axis = 2)
        #return logComponentOutput, sqrtTemp, sigmaTemp, dimTemp, logComponentSum, logComponentSum_mean_reshape, componentSum_before, addLog, classSum
        return addLog
Author: jiajunshen, Project: Lasagne, Lines: 57, Source: gaussianMixture.py

Example 5: _train_fprop

    def _train_fprop(self, state_below):
        if self.layer_type == "fc":
            miu = state_below.mean(axis=0)
            var = T.mean((state_below - miu) ** 2, axis=0)
        elif self.layer_type == "conv":
            miu = state_below.mean(axis=(0, 2, 3), keepdims=True)
            var = T.mean((state_below - miu) ** 2, axis=(0, 2, 3), keepdims=True)
        self.moving_mean = self.mem * miu + (1 - self.mem) * self.moving_mean
        self.moving_var = self.mem * var + (1 - self.mem) * self.moving_var

        Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)
        gamma = T.patternbroadcast(self.gamma, self.broadcastable)
        beta = T.patternbroadcast(self.beta, self.broadcastable)
        return gamma * Z + beta
Author: ColaWithIce, Project: Mozi, Lines: 14, Source: normalization.py

Example 6: f

 def f(W_0, W_1):
     index = 0
     d = {
         b_layers[0].W: T.patternbroadcast(
             W_0,
             (False, False, False, False)
         ),
         b_layers[1].W: T.patternbroadcast(
             W_1,
             (False, False, False, False)
         ),
         b_x: train_set_x_b[index*batch_size:(index+1)*batch_size],
         y: train_set_y[index*batch_size:(index+1)*batch_size]
     }
     return theano.clone(b_cost, d)
Author: daemonmaker, Project: biglittle, Lines: 15, Source: utils.py

Example 7: Recurrent

def Recurrent(name, hidden_dims, step_fn, inputs, non_sequences=[], h0s=None):
    if not isinstance(inputs, list):
        inputs = [inputs]

    if not isinstance(hidden_dims, list):
        hidden_dims = [hidden_dims]

    if h0s is None:
        h0s = [None]*len(hidden_dims)

    for i in xrange(len(hidden_dims)):
        if h0s[i] is None:
            h0_unbatched = lib.param(
                name + '.h0_' + str(i),
                numpy.zeros((hidden_dims[i],), dtype=theano.config.floatX)
            )
            num_batches = inputs[0].shape[1]
            h0s[i] = T.alloc(h0_unbatched, num_batches, hidden_dims[i])

        h0s[i] = T.patternbroadcast(h0s[i], [False] * h0s[i].ndim)

    outputs, _ = theano.scan(
        step_fn,
        sequences=inputs,
        outputs_info=h0s,
        non_sequences=non_sequences
    )

    return outputs
Author: kylemcdonald, Project: speech, Lines: 29, Source: ops.py

Example 8: dropout

def dropout(x, level, noise_shape=None, seed=None):
    '''Sets entries in `x` to zero at random,
    while scaling the entire tensor.

    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    '''
    if level < 0. or level >= 1:
        raise Exception('Dropout level must be in interval [0, 1[.')
    if seed is None:
        seed = np.random.randint(1, 10e6)

    rng = RandomStreams(seed=seed)
    retain_prob = 1. - level

    if noise_shape is None:
        random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
    else:
        random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
        random_tensor = T.patternbroadcast(random_tensor, [dim == 1 for dim in noise_shape])

    x *= random_tensor
    x /= retain_prob
    return x
Author: leomauro, Project: keras, Lines: 29, Source: theano_backend.py
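A hypothetical use of the dropout() helper above (batch size, time steps, and feature count are assumed): passing a noise_shape with 1 on the time axis samples a single keep/drop mask per feature and reuses it at every timestep; the patternbroadcast call is what lets the (32, 1, 128) mask broadcast against the (32, time, 128) input.

# Assumes `import theano` and `import theano.tensor as T`, as in the backend module above.
x = T.tensor3('x')                           # (batch, time, features)
dropped = dropout(x, level=0.5,
                  noise_shape=(32, 1, 128),  # batch=32, features=128 assumed; time axis collapsed to 1
                  seed=42)
f = theano.function([x], dropped)            # mask is shared across the time axis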

Example 9: get_output_for

    def get_output_for(self, input, deterministic=False, **kwargs):
        if deterministic or self.p == 0:
            return input
        else:
            # Using theano constant to prevent upcasting
            one = T.constant(1)

            retain_prob = one - self.p
            if self.rescale:
                input /= retain_prob

            # use nonsymbolic shape for dropout mask if possible
            mask_shape = self.input_shape
            if any(s is None for s in mask_shape):
                mask_shape = input.shape

            # apply dropout, respecting shared axes
            if self.shared_axes:
                shared_axes = tuple(a if a >= 0 else a + input.ndim
                                    for a in self.shared_axes)
                mask_shape = tuple(1 if a in shared_axes else s
                                   for a, s in enumerate(mask_shape))
            mask = self._srng.binomial(mask_shape, p=retain_prob,
                                       dtype=input.dtype)
            if self.shared_axes:
                bcast = tuple(bool(s == 1) for s in mask_shape)
                mask = T.patternbroadcast(mask, bcast)
            return input * mask
Author: HapeMask, Project: Lasagne, Lines: 28, Source: noise.py

Example 10: local_gpualloc

def local_gpualloc(node):
    replace = False
    if node.op == tensor.alloc:
        if node.inputs[0].owner and node.inputs[0].owner.op == host_from_gpu:
            replace = True
        elif all([c != 'output' and c.op == gpu_from_host
                  for c, idx in node.outputs[0].clients]):
            replace = True
        elif all([c != 'output' and c.op == tensor.join and
                  all([i.owner and i.owner.op in [host_from_gpu, tensor.alloc]
                       for i in c.inputs[1:]])
                  for c, idx in node.outputs[0].clients]):
            replace = True
    if replace:
        val = node.inputs[0]
        shp = node.inputs[1:]
        old_out = node.outputs[0]
        val2 = tensor.shape_padleft(val, len(shp) - val.ndim)
        new_out = host_from_gpu(gpu_alloc(val, *shp))
        if new_out.type != old_out.type:
            assert new_out.type.ndim == old_out.type.ndim
            assert new_out.type.dtype == old_out.type.dtype
            for b_old, b_new in zip(old_out.type.broadcastable,
                                    new_out.type.broadcastable):
                assert b_new or (not b_old)
            new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)

        return [new_out]
Author: DeepLearningIndia, Project: Theano, Lines: 28, Source: opt.py

Example 11: local_conv_dnn_alternative

 def local_conv_dnn_alternative(node):
     if not dnn_available():
         return
     if isinstance(node.op, GpuConv):
         border_mode = node.op.border_mode
         subsample = node.op.subsample
         if border_mode not in ['full', 'valid'] or subsample != (1, 1):
             return
         img, kern = node.inputs
         direction_hint = node.op.direction_hint
         if border_mode == 'full':
             # for a full convolution, try using the forward pass instead
             # of the backward pass wrt. inputs
             direction_hint = 'forward!'
         elif border_mode == 'valid':
             # for a valid convolution, try using the backward pass wrt.
             # weights instead of the forward pass and vice versa
             if direction_hint == 'bprop weights':
                 direction_hint = 'forward'
             else:
                 direction_hint = 'bprop weights'
         rval = dnn_conv(img, kern,
                         border_mode=border_mode, subsample=subsample,
                         direction_hint=direction_hint)
         if node.outputs[0].broadcastable != rval.broadcastable:
             rval = tensor.patternbroadcast(
                 rval, node.outputs[0].type.broadcastable)
         return [rval]
Author: c0g, Project: Theano, Lines: 28, Source: dnn.py

Example 12: createGradientFunctions

    def createGradientFunctions(self):
        #create
        X = T.dmatrices("X")
        mu, logSigma, u, v, f, R = T.dcols("mu", "logSigma", "u", "v", "f", "R")
        mu = sharedX( np.random.normal(10, 10, (self.dimTheta, 1)), name='mu') 
        logSigma = sharedX(np.random.uniform(0, 4, (self.dimTheta, 1)), name='logSigma')
        logLambd = sharedX(np.matrix(np.random.uniform(0, 10)),name='logLambd')
        logLambd = T.patternbroadcast(T.dmatrix("logLambd"),[1,1])
        negKL = 0.5 * T.sum(1 + 2*logSigma - mu ** 2 - T.exp(logSigma) ** 2)
        theta = mu+T.exp(logSigma)*v
        W=theta
        y=X[:,0]
        X_sim=X[:,1:]
        f = (T.dot(X_sim,W)+u).flatten()
        
        gradvariables = [mu, logSigma, logLambd]
        
        
        logLike = T.sum(-(0.5 * np.log(2 * np.pi) + logLambd) - 0.5 * ((y-f)/(T.exp(logLambd)))**2)

        logp = (negKL + logLike)/self.m

        optimizer = -logp
        
        self.negKL = th.function([mu, logSigma], negKL, on_unused_input='ignore')
        self.f = th.function(gradvariables + [X,u,v], f, on_unused_input='ignore')
        self.logLike = th.function(gradvariables + [X, u, v], logLike,on_unused_input='ignore')
        derivatives = T.grad(logp,gradvariables)
        derivatives.append(logp)

        self.gradientfunction = th.function(gradvariables + [X, u, v], derivatives, on_unused_input='ignore')
        self.lowerboundfunction = th.function(gradvariables + [X, u, v], logp, on_unused_input='ignore')

        self.optimizer = BatchGradientDescent(objective=optimizer, params=gradvariables,inputs = [X,u,v],conjugate=True,max_iter=1)
Author: onenoc, Project: lfvbae, Lines: 34, Source: lfvbaeold.py

Example 13: _step

 def _step(tensor):
     tensor._keras_shape = (batch_size, 1, input_dim)
     # tensor._uses_learning_phase = x._uses_learning_phase
     tensor._uses_learning_phase = False  # TODO: should this be hard-coded?
     output = self.model(tensor)
     for layer in self.layers:
         layer.initial_state = layer.final_states
     output = T.patternbroadcast(output, tensor.broadcastable)
     return output, self.feedback_function(output)
Author: lobachevzky, Project: keras, Lines: 9, Source: recurrent.py

Example 14: Recurrent

def Recurrent(
    name, 
    hidden_dims, 
    step_fn, 
    inputs, 
    non_sequences=[], 
    h0s=None,
    reset=None
    ):

    if not isinstance(inputs, list):
        inputs = [inputs]

    if not isinstance(hidden_dims, list):
        hidden_dims = [hidden_dims]

    if h0s is None:
        h0s = [None]*len(hidden_dims)

    for i in xrange(len(hidden_dims)):
        if h0s[i] is None:
            h0_unbatched = lib.param(
                name + '.h0_' + str(i),
                np.zeros((hidden_dims[i],), dtype=theano.config.floatX)
            )
            num_batches = inputs[0].shape[1]
            h0s[i] = T.alloc(h0_unbatched, num_batches, hidden_dims[i])

        h0s[i] = T.patternbroadcast(h0s[i], [False] * h0s[i].ndim)

    if reset is not None:
        last_hiddens = []
        for i in xrange(len(h0s)):
            # The shape of last_hidden doesn't matter right now; we assume
            # it won't be used until we put something proper in it.
            last_hidden = theano.shared(
                np.zeros([1]*h0s[i].ndim, dtype=h0s[i].dtype),
                name=name+'.last_hidden_'+str(i)
            )
            last_hiddens.append(last_hidden)
            h0s[i] = theano.ifelse.ifelse(reset, h0s[i], last_hidden)

    outputs, _ = theano.scan(
        step_fn,
        sequences=inputs,
        outputs_info=h0s,
        non_sequences=non_sequences
    )

    if reset is not None:
        if len(last_hiddens) == 1:
            last_hiddens[0].default_update = outputs[-1]
        else:
            for i in xrange(len(last_hiddens)):
                last_hiddens[i].default_update = outputs[i][-1]

    return outputs
Author: Faruk-Ahmed, Project: nn, Lines: 57, Source: gru.py

Example 15: get_theano_variables

    def get_theano_variables(self, inputs=None, outputs=None):
        """
        Returns a dict containing inputs, outputs and graph corresponding to
        the Theano version of the pyfn.

        This version of the function returns a single vector input.
        """
        inputs = utils.as_seq(inputs, tuple)
        outputs = utils.as_seq(outputs, tuple)

        if inputs:
            sym_inputs = [self.get_symbolic(x) for x in inputs]
        else:
            sym_inputs = self.s_inputs.values()

        if outputs:
            sym_outputs = [self.get_symbolic(x) for x in outputs]
        else:
            sym_outputs = self.s_outputs.values()

        if len(sym_outputs) > 1:
            raise ValueError(
                'VectorArg functions should return a single output.')

        # get symbolic inputs corresponding to shared inputs in s_inputs
        s_memo = OrderedDict()
        sym_args = utils.flat_from_doc(sym_inputs)
        real_args = utils.flat_from_doc(self.all_init_args)

        # create a symbolic vector, then split it up into symbolic input
        # args
        inputs_dtype = self.vector_from_args(self.all_init_args).dtype
        theano_input = tt.vector(name='theta', dtype=inputs_dtype)
        i = 0
        for sa, ra in zip(sym_args, real_args):
            if sa.ndim > 0:
                vector_arg = theano_input[i: i + ra.size].reshape(ra.shape)
            else:
                vector_arg = theano_input[i]
            s_memo[sa] = tt.patternbroadcast(
                vector_arg.astype(str(sa.dtype)),
                broadcastable=sa.broadcastable)
            i += ra.size

        # get new graph, replacing shared inputs with symbolic ones
        graph = theano.gof.graph.clone_get_equiv(
            theano.gof.graph.inputs(sym_outputs),
            sym_outputs,
            memo=s_memo.copy())

        # get symbolic outputs
        theano_outputs = graph[sym_outputs[0]]

        f_in, f_out = self.finalize(theano_input, theano_outputs, graph)

        return f_in, f_out, graph
Author: Bihaqo, Project: pyautodiff, Lines: 56, Source: symbolic.py


Note: The theano.tensor.patternbroadcast examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please consult the corresponding project's License before distributing or reusing the code. Do not reproduce without permission.