当前位置: 首页>>代码示例>>Python>>正文


Python MatrixMul.lmul方法代码示例

本文整理汇总了Python中pylearn2.linear.matrixmul.MatrixMul.lmul方法的典型用法代码示例。如果您正苦于以下问题:Python MatrixMul.lmul方法的具体用法?Python MatrixMul.lmul怎么用?Python MatrixMul.lmul使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pylearn2.linear.matrixmul.MatrixMul的用法示例。


在下文中一共展示了MatrixMul.lmul方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]
class Adam:
    """Small CIFAR-10 pipeline: fixed random filters + pooling + softmax.

    Image patches are contrast-normalized, mapped through a fixed random
    linear transform W (a MatrixMul), split into positive/negative
    channels, spatially max-pooled, thresholded at ``alpha``, and finally
    classified by a Softmax layer.
    """

    def __init__(self, batch_size, alpha, irange):
        # batch_size: examples per minibatch.
        # alpha: threshold subtracted before rectification in mf().
        # irange: half-width of the uniform init for the random filters W.
        self.alpha = alpha
        self.visible_layer = GaussianConvolutionalVisLayer(rows = 32,cols = 32, channels = 3, init_beta =1., init_mu = 0.)
        self.hidden_layers = [ Softmax(n_classes = 10,
                                            irange = .01) ]
        # Fixed seed so the random filter bank is reproducible.
        rng = np.random.RandomState([2012,8,20])
        # 108 = 6*6 patch * 3 channels inputs, 1600 filters.
        self.W = MatrixMul( sharedX( rng.uniform(-irange, irange, (108,1600))))
        # Earlier alternative initialization kept for reference:
        #make_random_conv2D(irange = .05, input_space = self.visible_layer.get_input_space(),
        #                output_space = Conv2DSpace([27,27],1600),
        #                kernel_shape = (6,6),
        #                batch_size = batch_size)
        self.batch_size = batch_size
        self.hidden_layers[0].dbm = self
        self.hidden_layers[0].set_input_space(Conv2DSpace([2,2],3200))

    def get_params(self):
        # Only the softmax parameters and W are exposed; note W is fixed
        # random at init, but still reported as a parameter here.
        return set(self.hidden_layers[0].get_params()).union(self.W.get_params())

    def mf(self, X):
        # Extract 6x6 patches and contrast-normalize each one.
        patches = cifar10neighbs(X,(6,6))
        patches -= patches.mean(axis=1).dimshuffle(0,'x')
        # +10.0 regularizes the norm so near-constant patches don't blow up.
        patches /= T.sqrt(T.sqr(patches).sum(axis=1)+10.0).dimshuffle(0,'x')

        Z = self.W.lmul(patches)

        #Z = Print('Z',attrs=['min','mean','max'])(Z)

        # Split responses into positive and negative halves (3200 channels).
        Z = T.concatenate((Z,-Z),axis=1)
        Z = multichannel_neibs2imgs(Z, self.batch_size, 27, 27, 3200, 1, 1)
        Z = Z.dimshuffle(0,3,1,2)
        p = max_pool_2d(Z,(14,14),False)
        # NOTE(review): dimshuffle(0,1,2,3) is an identity permutation and
        # has no effect — confirm whether a real reordering was intended.
        p = p.dimshuffle(0,1,2,3)
        # Soft threshold: shift by alpha and rectify.
        p = T.maximum(p - self.alpha, 0.)
        #p = Print('p',attrs=['min','mean','max'])(p)
        y = self.hidden_layers[0].mf_update(state_below = p, state_above = None)
        return [ Z, y ]

    def get_weights_topo(self):
        # Reorder filters from (out, in, rows, cols) to (out, rows, cols, in).
        # NOTE(review): MatrixMul built from a 2D shared matrix may not have
        # a `_filters` attribute in all pylearn2 versions — confirm.
        outp, inp, rows, cols = range(4)
        raw = self.W._filters.get_value()
        return np.transpose(raw,(outp,rows,cols,inp))
开发者ID:cc13ny,项目名称:galatea,代码行数:44,代码来源:adam.py

示例2: Maxout

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
            for key, val in [
                ("max_x.max_u", v_max.max()),
                ("max_x.mean_u", v_max.mean()),
                ("max_x.min_u", v_max.min()),
                ("min_x.max_u", v_min.max()),
                ("min_x.mean_u", v_min.mean()),
                ("min_x.min_u", v_min.min()),
                ("range_x.max_u", v_range.max()),
                ("range_x.mean_u", v_range.mean()),
                ("range_x.min_u", v_range.min()),
                ("mean_x.max_u", v_mean.max()),
                ("mean_x.mean_u", v_mean.mean()),
                ("mean_x.min_u", v_mean.min()),
            ]:
                rval[prefix + key] = val

        return rval

    def fprop(self, state_below):
        """Forward-propagate through this maxout layer.

        Computes the affine preactivation z = W x + b (reformatting the
        input into design space first when required), optionally permutes
        detector units, then takes the max over each pool of
        ``pool_size`` strided detector units.

        NOTE(review): the ``hasattr`` guards re-create attributes that
        older pickled models may lack — presumably backward-compat shims;
        confirm before removing.
        """

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                # Sanity-check any attached debug values against the
                # expected batch size and flattened input dimension.
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError(
                            "self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0])
                        )
                    assert reduce(lambda x, y: x * y, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        # Affine preactivation over all detector units.
        z = self.transformer.lmul(state_below) + self.b

        if not hasattr(self, "randomize_pools"):
            self.randomize_pools = False

        if not hasattr(self, "pool_stride"):
            self.pool_stride = self.pool_size

        if self.randomize_pools:
            # Fixed random permutation so pools draw from random detector
            # units instead of consecutive ones.
            z = T.dot(z, self.permute)

        if not hasattr(self, "min_zero"):
            self.min_zero = False

        if self.min_zero:
            # Seeding the running max with zeros clamps outputs at >= 0.
            p = T.zeros_like(z)
        else:
            p = None

        # Strided max over pools: slice i selects the i-th member of every
        # pool; the running elementwise maximum across the pool_size
        # slices yields the pooled activations.
        last_start = self.detector_layer_dim - self.pool_size
        for i in xrange(self.pool_size):
            cur = z[:, i : last_start + i + 1 : self.pool_stride]
            if p is None:
                p = cur
            else:
                p = T.maximum(cur, p)

        p.name = self.layer_name + "_p_"

        return p

    def foo(self, state_below):
开发者ID:bbudescu,项目名称:pylearn2,代码行数:69,代码来源:maxout.py

示例3: RBM

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
            List of the necessary parameters to sample :math:`p(v|h)`. In the
            case of a binary-binary RBM this is a single-element list
            containing the symbolic representing :math:`p(v|h)`, as returned
            by `mean_v_given_h`.

        Returns
        -------
        vprime : tensor_like
            Theano symbolic representing stochastic samples from :math:`p(v|h)`
        """
        v_mean = params[0]
        return as_floatX(rng.uniform(size=shape) < v_mean)

    def input_to_h_from_v(self, v):
        """
        Affine input to the hidden layer: ``bias_hid + W v``.

        Parameters
        ----------
        v : tensor_like or list of tensor_likes
            One minibatch (or a list of minibatches) on the visible units;
            the first dimension indexes training examples, the second
            indexes data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            The input reaching each hidden unit for every example (one
            result per minibatch when a list is supplied).
        """
        if not isinstance(v, tensor.Variable):
            # A list of minibatches: transform each one independently.
            return [self.input_to_h_from_v(vis) for vis in v]
        return self.bias_hid + self.transformer.lmul(v)

    def input_to_v_from_h(self, h):
        """
        Affine input to the visible layer: ``bias_vis + W^T h``.

        Parameters
        ----------
        h : tensor_like or list of tensor_likes
            One minibatch (or a list of minibatches) on the hidden units;
            the first dimension indexes training examples, the second
            indexes data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            The input reaching each visible unit for every row of h (one
            result per minibatch when a list is supplied).
        """
        if not isinstance(h, tensor.Variable):
            # A list of minibatches: transform each one independently.
            return [self.input_to_v_from_h(hid) for hid in h]
        return self.bias_vis + self.transformer.lmul_T(h)

    def upward_pass(self, v):
        """Alias for :meth:`mean_h_given_v`; invoked when this RBM is
        driven as a layer by ``mlp.HiddenLayer``."""
        return self.mean_h_given_v(v)
开发者ID:niharsarangi,项目名称:pylearn2,代码行数:70,代码来源:rbm.py

示例4: Discomax

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........

        sq_W = T.sqr(W)

        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))

        row_norms_min = row_norms.min()
        row_norms_min.__doc__ = ("The smallest norm of any row of the "
                                 "weight matrix W. This is a measure of the "
                                 "least influence any visible unit has.")

        rval = OrderedDict([('row_norms_min',  row_norms_min),
                            ('row_norms_mean', row_norms.mean()),
                            ('row_norms_max',  row_norms.max()),
                            ('col_norms_min',  col_norms.min()),
                            ('col_norms_mean', col_norms.mean()),
                            ('col_norms_max',  col_norms.max()), ])

        if (state is not None) or (state_below is not None):
            if state is None:
                state = self.fprop(state_below)

            P = state
            if self.pool_size == 1:
                vars_and_prefixes = [(P, '')]
            else:
                vars_and_prefixes = [(P, 'p_')]

            for var, prefix in vars_and_prefixes:
                v_max = var.max(axis=0)
                v_min = var.min(axis=0)
                v_mean = var.mean(axis=0)
                v_range = v_max - v_min

                # max_x.mean_u is "the mean over *u*nits of the max over
                # e*x*amples" The x and u are included in the name because
                # otherwise its hard to remember which axis is which when
                # reading the monitor I use inner.outer
                # rather than outer_of_inner or
                # something like that because I want mean_x.* to appear next to
                # each other in the alphabetical list, as these are commonly
                # plotted together
                for key, val in [('max_x.max_u', v_max.max()),
                                 ('max_x.mean_u', v_max.mean()),
                                 ('max_x.min_u', v_max.min()),
                                 ('min_x.max_u', v_min.max()),
                                 ('min_x.mean_u', v_min.mean()),
                                 ('min_x.min_u', v_min.min()),
                                 ('range_x.max_u', v_range.max()),
                                 ('range_x.mean_u', v_range.mean()),
                                 ('range_x.min_u', v_range.min()),
                                 ('mean_x.max_u', v_mean.max()),
                                 ('mean_x.mean_u', v_mean.mean()),
                                 ('mean_x.min_u', v_mean.min())]:
                    rval[prefix+key] = val

        return rval

    @functools.wraps(Layer.fprop)
    def fprop(self, state_below):
        """Maxout-style forward prop with a positive offset on active units.

        z = W x + b, then every strictly positive preactivation is shifted
        up by ``self.ofs`` before the strided max pooling.

        NOTE(review): the ``hasattr`` guards restore attributes missing
        from older pickled models — presumably backward-compat; confirm.
        """

        self.input_space.validate(state_below)

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below,
                                                     self.desired_space)

        z = self.transformer.lmul(state_below) + self.b

        # The "disco" offset: reward units that clear zero by ofs.
        z = T.switch(z > 0., z + self.ofs, z)


        if not hasattr(self, 'randomize_pools'):
            self.randomize_pools = False

        if not hasattr(self, 'pool_stride'):
            self.pool_stride = self.pool_size

        if self.randomize_pools:
            # Fixed random permutation of detector units before pooling.
            z = T.dot(z, self.permute)

        if not hasattr(self, 'min_zero'):
            self.min_zero = False

        if self.min_zero:
            # Starting the running max at 0. clamps the output at >= 0
            # (the scalar broadcasts against the first slice below).
            p = 0.
        else:
            p = None

        # Strided max over each pool of pool_size detector units.
        last_start = self.detector_layer_dim - self.pool_size
        for i in xrange(self.pool_size):
            cur = z[:, i:last_start+i+1:self.pool_stride]
            if p is None:
                p = cur
            else:
                p = T.maximum(cur, p)

        p.name = self.layer_name + '_p_'

        return p
开发者ID:cc13ny,项目名称:galatea,代码行数:104,代码来源:discomax.py

示例5: RectifiedLinear

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
    def get_params(self):
        """Return this layer's trainable parameters as a list ``[W, b]``.

        The transformer must expose exactly one named parameter (the
        weight matrix); the bias is appended last."""
        assert self.b.name is not None
        weight, = self.transformer.get_params()
        assert weight.name is not None
        params = self.transformer.get_params()
        assert not isinstance(params, set)
        params = list(params)
        assert self.b not in params
        return params + [self.b]

    def get_weight_decay(self, coeff):
        """Return ``coeff * sum(W ** 2)``, the L2 penalty on the weights.

        ``coeff`` may be a float, a float-valued string, or anything with
        a ``dtype`` (e.g. a theano variable)."""
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        weight, = self.transformer.get_params()
        return coeff * T.sqr(weight).sum()

    def get_weights(self):
        """Return the weight matrix as a concrete array.

        Raises
        ------
        NotImplementedError
            When the layer reformats its input: the weights then live in
            topo space and, without access to the dataset, cannot be
            expressed in design space."""
        if self.requires_reformat:
            raise NotImplementedError()
        weight, = self.transformer.get_params()
        return weight.get_value()

    def set_weights(self, weights):
        """Overwrite the transformer's single weight parameter in place."""
        weight, = self.transformer.get_params()
        weight.set_value(weights)

    def set_biases(self, biases):
        """Overwrite the bias vector's stored values in place."""
        self.b.set_value(biases)

    def get_biases(self):
        """Return a concrete copy of the bias vector's values."""
        return self.b.get_value()

    def get_weights_format(self):
        """The weight matrix is laid out (visible, hidden)."""
        return ('v', 'h')

    def get_weights_topo(self):
        """Return the weights as a numpy array in ('b', 0, 1, 'c') order.

        Only defined when the input space is a Conv2DSpace; each of the
        ``self.dim`` hidden units yields one 2D filter image."""

        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()

        W ,= self.transformer.get_params()

        W = W.T

        # One row per hidden unit, reshaped onto the input's 2D topology.
        # NOTE(review): assumes W's columns are stored in (rows, cols,
        # channels) raster order — confirm against the input space.
        W = W.reshape((self.dim, self.input_space.shape[0],
                       self.input_space.shape[1], self.input_space.nchannels))

        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))

        # Compile and immediately run a theano function to get values.
        return function([], W)()

    def get_monitoring_channels(self):
        """Monitor the min/mean/max of the L2 norms of W's rows and
        columns (per-visible-unit and per-hidden-unit influence)."""
        W, = self.transformer.get_params()
        assert W.ndim == 2

        squared = T.sqr(W)
        row_norms = T.sqrt(squared.sum(axis=1))
        col_norms = T.sqrt(squared.sum(axis=0))

        channels = OrderedDict()
        for name, norms in (('row_norms', row_norms),
                            ('col_norms', col_norms)):
            channels[name + '_min'] = norms.min()
            channels[name + '_mean'] = norms.mean()
            channels[name + '_max'] = norms.max()
        return channels

    def fprop(self, state_below):
        """Leaky-rectified affine transform of ``state_below``.

        z = W x + b; output is z for z > 0 and ``left_slope * z`` for
        z < 0 (exactly 0 at z == 0). With ``copy_input`` set, the input
        is concatenated onto the output."""

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                # Sanity-check debug values against batch size and the
                # flattened input dimension.
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(lambda x,y: x * y, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        if self.layer_name is not None:
            z.name = self.layer_name + '_z'

        # Leaky ReLU built from indicator masks.
        p = z * (z > 0.) + self.left_slope * z * (z < 0.)

        if self.copy_input:
            # Append the (possibly reformatted) input to the activations.
            p = T.concatenate((p, state_below), axis=1)

        return p
开发者ID:renjupaul,项目名称:pylearn,代码行数:104,代码来源:mlp.py

示例6: SoftmaxPool

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........

    def get_weights_topo(self):
        """Return the weights as a numpy array in ('b', 0, 1, 'c') order.

        Only defined when the input space is a Conv2DSpace; each of the
        ``detector_layer_dim`` detector units yields one filter image."""

        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()

        W ,= self.transformer.get_params()

        W = W.T

        # One row per detector unit, reshaped onto the input's topology.
        # NOTE(review): assumes W's columns are stored in (rows, cols,
        # channels) raster order — confirm against the input space.
        W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                       self.input_space.shape[1], self.input_space.nchannels))

        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))

        # Compile and immediately run a theano function to get values.
        return function([], W)()

    def get_monitoring_channels(self):
        """Monitor min/mean/max of the L2 norms of W's rows and columns."""
        W, = self.transformer.get_params()
        assert W.ndim == 2

        squared = T.sqr(W)
        norms = {'row_norms': T.sqrt(squared.sum(axis=1)),
                 'col_norms': T.sqrt(squared.sum(axis=0))}

        entries = []
        for name in ('row_norms', 'col_norms'):
            vec = norms[name]
            entries.extend([(name + '_min', vec.min()),
                            (name + '_mean', vec.mean()),
                            (name + '_max', vec.max())])
        return OrderedDict(entries)


    def get_monitoring_channels_from_state(self, state):
        """Summary statistics of the layer's output state.

        For each per-unit statistic over examples (max, min, range, mean)
        report its max, mean and min over units. Keys carry a 'p_' prefix
        when this layer actually pools (pool_size > 1).

        The inner.outer key naming (e.g. "max_x.mean_u" = mean over units
        of the max over examples) keeps the mean_x.* channels adjacent in
        an alphabetical listing, since they are commonly plotted together.
        """
        prefix = '' if self.pool_size == 1 else 'p_'

        over_x_max = state.max(axis=0)
        over_x_min = state.min(axis=0)
        over_x_mean = state.mean(axis=0)
        over_x_range = over_x_max - over_x_min

        channels = OrderedDict()
        for stat_name, vec in [('max_x', over_x_max),
                               ('min_x', over_x_min),
                               ('range_x', over_x_range),
                               ('mean_x', over_x_mean)]:
            channels[prefix + stat_name + '.max_u'] = vec.max()
            channels[prefix + stat_name + '.mean_u'] = vec.mean()
            channels[prefix + stat_name + '.min_u'] = vec.min()

        return channels

    def fprop(self, state_below):
        """Affine transform followed by channel-wise max pooling.

        z = W x + b is pooled with ``max_pool_channels``; only the pooled
        units p are propagated (the detector state h is discarded)."""

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                # Sanity-check debug values against batch size and the
                # flattened input dimension.
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(lambda x,y: x * y, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        if self.layer_name is not None:
            z.name = self.layer_name + '_z'
        # Detector-level state h is unused in fprop.
        p,h = max_pool_channels(z, self.pool_size)

        p.name = self.layer_name + '_p_'

        return p
开发者ID:renjupaul,项目名称:pylearn,代码行数:104,代码来源:mlp.py

示例7: CpuConvMaxout

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
    @wraps(Layer.set_weights)
    def set_weights(self, weights):

        # The transformer exposes exactly one parameter: the weight tensor.
        W, = self.transformer.get_params()
        W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):

        # Overwrite the bias values in place (self.b supports set_value).
        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):

        # Return a concrete copy of the bias values.
        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):

        # Weight matrix is laid out (visible, hidden).
        return ('v', 'h')

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):

        # Reorder the raw filter tensor from (out, in, rows, cols) to
        # (out, rows, cols, in) for visualization.
        # NOTE(review): axis roles inferred from the transpose pattern —
        # confirm against the transformer's filter layout.
        outp, inp, rows, cols = range(4)
        raw = self.transformer._filters.get_value()

        return np.transpose(raw, (outp, rows, cols, inp))

    @wraps(Layer.get_monitoring_channels)
    def get_monitoring_channels(self):
        # Track the spread of per-kernel L2 norms of the weight tensor.
        weights, = self.transformer.get_params()
        kernel_norms = T.sqrt(T.sqr(weights).sum(axis=(1)))
        return OrderedDict([('kernel_norms_min', kernel_norms.min()),
                            ('kernel_norms_mean', kernel_norms.mean()),
                            ('kernel_norms_max', kernel_norms.max())])

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        """Forward prop: transposed dot product, ReLU, then a max over the
        last axis as a custom pooling step.

        Fixes over the previous version: removed the unused local ``axes``,
        removed dead commented-out maxout piece-pooling, and normalized the
        mixed tab/space indentation and operator spacing.

        Parameters
        ----------
        state_below : tensor in ``self.input_space``

        Returns
        -------
        p : tensor in ``self.output_space``
        """
        self.input_space.validate(state_below)

        # Move the batch axis before applying the MatrixMul transformer,
        # then restore it afterwards.
        # NOTE(review): assumes the batch lives on axis 3 of the input —
        # confirm against self.input_space.axes.
        state_below = state_below.dimshuffle(3, 1, 2, 0)
        z = self.transformer.lmul(state_below) + self.b
        z = z.dimshuffle(0, 3, 1, 2)

        if self.layer_name is not None:
            z.name = self.layer_name + '_z'

        # ReLU activation (replaces the maxout piece-pooling this layer
        # was derived from).
        d = T.maximum(z, 0)

        self.detector_space.validate(d)

        # hasattr guards keep models pickled before these attributes
        # existed loadable.
        if not hasattr(self, 'detector_normalization'):
            self.detector_normalization = None

        if self.detector_normalization:
            d = self.detector_normalization(d)

        # Custom pooling: max over the last axis, reinserting a singleton
        # axis so the result stays 4D.
        p = d.max(3)[:, :, None, :]

        self.output_space.validate(p)

        if not hasattr(self, 'output_normalization'):
            self.output_normalization = None

        if self.output_normalization:
            p = self.output_normalization(p)

        return p
    def upward_pass(self, inputs):
        """
        Alias for :meth:`fprop`, required by the PretrainedLayer class.

        Parameters
        ----------
        inputs : WRITEME

        Returns
        -------
        WRITEME
        """
        return self.fprop(inputs)
开发者ID:AtousaTorabi,项目名称:HumanActivityRecognition,代码行数:104,代码来源:customCpuDotMaxout.py

示例8: IsingHidden

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
            for key, val in [
                    ('max_x.max_u', v_max.max()),
                    ('max_x.mean_u', v_max.mean()),
                    ('max_x.min_u', v_max.min()),
                    ('min_x.max_u', v_min.max()),
                    ('min_x.mean_u', v_min.mean()),
                    ('min_x.min_u', v_min.min()),
                    ('range_x.max_u', v_range.max()),
                    ('range_x.mean_u', v_range.mean()),
                    ('range_x.min_u', v_range.min()),
                    ('mean_x.max_u', v_mean.max()),
                    ('mean_x.mean_u', v_mean.mean()),
                    ('mean_x.min_u', v_mean.min())
                    ]:
                rval[prefix+key] = val

        return rval

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """Sample this layer's +/-1 spin units given neighboring layers.

        The bottom-up field is z = W x + b, plus the downward message from
        ``layer_above`` when ``state_above`` is given. Each unit is +1
        with probability sigmoid(2z).

        Fix: the previous ``if msg != None:`` is replaced by an identity
        check; ``is not None`` states the intent directly instead of
        relying on ``!=`` against a symbolic variable.

        Parameters
        ----------
        theano_rng : MRG_RandomStreams
            Required; it only defaults to None so that it may appear
            after layer_above / state_above in the argument list.

        Returns
        -------
        samples : tensor with values in {-1, +1}
        """
        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b

        if msg is not None:
            z = z + msg

        # P(s = +1) = sigmoid(2z) for an Ising unit with total field z.
        on_prob = T.nnet.sigmoid(2. * z)

        # Map {0, 1} Bernoulli draws onto {-1, +1} spins.
        samples = theano_rng.binomial(p = on_prob, n=1, size=on_prob.shape, dtype=on_prob.dtype) * 2. - 1.

        return samples

    def downward_message(self, downward_state):
        """Project the given hidden state back through W^T toward the
        layer below, reformatting into its input space if needed."""
        msg = self.transformer.lmul_T(downward_state)
        if not self.requires_reformat:
            return msg
        return self.desired_space.format_as(msg, self.input_space)

    def init_mf_state(self):
        """Initial mean-field state for this layer.

        Not implemented. The previous body contained code below the
        ``raise`` that was copy-pasted from the BVMP layer and could
        never execute; that unreachable code has been removed.
        """
        raise NotImplementedError("This is just a copy-paste of BVMP")

    def make_state(self, num_examples, numpy_rng):
        """ Returns a shared variable containing an actual state
           (not a mean field state) for this variable.
        """
        driver = numpy_rng.uniform(0.,1., (num_examples, self.dim))
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:70,代码来源:ising.py

示例9: BinaryVectorMaxPool

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
                eps = [eps]
        else:
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            if eps is None:
                eps = [0., 0.]
            if target[1] < target[0]:
                warnings.warn("Do you really want to regularize the detector units to be sparser than the pooling units?")

        for s, t, c, e in safe_zip(state, target, coeff, eps):
            assert all([isinstance(elem, float) for elem in [t, c, e]])
            if c == 0.:
                continue
            m = s.mean(axis=0)
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c

        return rval

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """Sample pooled and detector units given the neighboring layers.

        Computes z = W x + b and delegates the stochastic max pooling to
        ``max_pool_channels``, folding in the top-down message from
        ``layer_above`` when present. Returns (p_sample, h_sample); the
        mean-field values p and h are discarded here.

        ``theano_rng`` is required; it only defaults to None so that it
        may appear after layer_above / state_above in the argument list.
        """

        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        p, h, p_sample, h_sample = max_pool_channels(z,
                self.pool_size, msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """Message sent to the layer below: the pooled state mapped back
        through W^T, reformatted into the input space when required."""
        projected = self.transformer.lmul_T(downward_state)
        if self.requires_reformat:
            projected = self.desired_space.format_as(projected,
                                                     self.input_space)
        return projected

    def make_state(self, num_examples, numpy_rng):
        """ Returns a shared variable containing an actual state
           (not a mean field state) for this variable.
        """

        t1 = time.time()

        empty_input = self.h_space.get_origin_batch(num_examples)
        h_state = sharedX(empty_input)

        default_z = T.zeros_like(h_state) + self.b

        theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 16))

        p_exp, h_exp, p_sample, h_sample = max_pool_channels(
                z = default_z,
                pool_size = self.pool_size,
                theano_rng = theano_rng)
开发者ID:deigen,项目名称:pylearn,代码行数:69,代码来源:dbm.py

示例10: Powerup

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........
                rval[prefix+key] = val
        return rval

    def get_power_activ(self, power_in):
        """Map the raw power parameter onto a pooling exponent.

        Every variant adds 1 so the exponent stays >= 1; the choice of
        nonlinearity is selected by ``self.power_activ``, defaulting to
        ``abs(power_in) + 1`` for unrecognized values."""
        if self.power_activ == "exp":
            return T.exp(power_in) + 1
        if self.power_activ == "rect":
            return T.maximum(power_in, 1)
        if self.power_activ == "softplus":
            return T.nnet.softplus(power_in) + 1
        if self.power_activ == "softhalf":
            return T.log(T.exp(power_in) + 0.5) + 1.0
        if self.power_activ == "sqr":
            return T.sqr(power_in) + 1
        return abs(power_in) + 1

    def fprop(self, state_below):
        #Implements (\sum_i^T 1/T |W_i x|^{p_j} )^(1/p_j)
        self.input_space.validate(state_below)
        epsilon = 1e-10

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.powerup.batch_size is %d but got shape of %d" % (self.mlp.batch_size, sb.shape[0]))
                    assert reduce(lambda x,y: x * y, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        if not self.post_bias:
            z = self.transformer.lmul(state_below) + self.b
        else:
            z = self.transformer.lmul(state_below)

        if not hasattr(self, 'randomize_pools'):
            self.randomize_pools = False

        if not hasattr(self, 'pool_stride'):
            self.pool_stride = self.pool_size

        if self.randomize_pools:
            z = T.dot(z, self.permute)

        if not hasattr(self, 'min_zero'):
            self.min_zero = False

        #Reshape the presynaptic activation to a 3D tensor. Such that the first
        #dimension is the batch size, second dimension corresponds to number of
        #hidden units and the third dimension is for the size of the pool.
        z_pools = z.reshape((z.shape[0], self.num_units, self.pool_size))

        #Center the pools
        if self.centered_bias:
            c = self.c.reshape((self.num_units, self.pool_size))
            c = c.dimshuffle('x', 0, 1)
            z_pools = z_pools - c

        #Dimshuffle the p_j for |W_i x|^{p_j}
        power_in = self.p.dimshuffle('x', 0, 'x')
        p_j = self.get_power_activ(power_in)

        if self.relu:
            z_pools = T.maximum(z_pools, 0)
开发者ID:kyunghyuncho,项目名称:powerup,代码行数:70,代码来源:powerup.py

示例11: WeightedLogNormalLogLikelihood

# 需要导入模块: from pylearn2.linear.matrixmul import MatrixMul [as 别名]
# 或者: from pylearn2.linear.matrixmul.MatrixMul import lmul [as 别名]

#.........这里部分代码省略.........

        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))

        return function([], W)()

    @wraps(Layer.get_layer_monitoring_channels)
    def get_layer_monitoring_channels(self, state_below=None,
                                      state=None, targets=None):
        """Monitoring channels: weight-norm statistics, output-state
        statistics, and — when targets are provided — the weighted
        probability and perplexity under the log-normal model."""
        W, = self.transformer.get_params()

        assert W.ndim == 2

        sq_W = T.sqr(W)

        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))

        rval = OrderedDict([('row_norms_min',  row_norms.min()),
                            ('row_norms_mean', row_norms.mean()),
                            ('row_norms_max',  row_norms.max()),
                            ('col_norms_min',  col_norms.min()),
                            ('col_norms_mean', col_norms.mean()),
                            ('col_norms_max',  col_norms.max()), ])

        if (state is not None) or (state_below is not None):
            if state is None:
                # Derive the output state on demand from the layer input.
                state = self.fprop(state_below)

            # Per-unit statistics over examples, then aggregated over units.
            mx = state.max(axis=0)
            mean = state.mean(axis=0)
            mn = state.min(axis=0)
            rg = mx - mn

            rval['range_x_max_u'] = rg.max()
            rval['range_x_mean_u'] = rg.mean()
            rval['range_x_min_u'] = rg.min()

            rval['max_x_max_u'] = mx.max()
            rval['max_x_mean_u'] = mx.mean()
            rval['max_x_min_u'] = mx.min()

            rval['mean_x_max_u'] = mean.max()
            rval['mean_x_mean_u'] = mean.mean()
            rval['mean_x_min_u'] = mean.min()

            rval['min_x_max_u'] = mn.max()
            rval['min_x_mean_u'] = mn.mean()
            rval['min_x_min_u'] = mn.min()

        # NOTE(review): `if targets:` tests the truthiness of what may be
        # a symbolic variable; `if targets is not None:` would state the
        # intent more safely — confirm what callers pass here.
        if targets:
            # Column 0 holds the regression target, column 1 a per-example
            # cost multiplier (weight).
            y_target = targets[:, 0]
            cost_multiplier = targets[:, 1]
            mean = state[:, 0]
            sigma = T.exp(state[:, 1])
            nll = self.logprob(y_target, mean, sigma)
            prob_vector = T.exp(-nll)
            rval['prob'] = (prob_vector * cost_multiplier).sum() / (1.0 * cost_multiplier.sum())
            rval['ppl'] = T.exp((nll* cost_multiplier).sum() / (1.0 * cost_multiplier.sum()))
        return rval

    def _linear_part(self, state_below):
        """
        Parameters
        ----------
        state_below : member of input_space
        Returns
        -------
        output : theano matrix
            Affine transformation of state_below
        """
        self.input_space.validate(state_below)

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below,
                                                     self.desired_space)

        z = self.transformer.lmul(state_below)
        z += self.b

        if self.layer_name is not None:
            z.name = self.layer_name + '_z'

        return z

    @wraps(Layer.fprop)
    def fprop(self, state_below):
        # Purely affine output layer; the log-normal parameters are read
        # off the output columns by cost() downstream.
        return self._linear_part(state_below)


    def logprob(self, y_target, mean, sigma):
        """Negative log-likelihood of ``y_target`` under a log-normal
        distribution with parameters (mean, sigma) in log space."""
        quad_term = (T.log(y_target) - mean) ** 2 / (2 * sigma ** 2)
        norm_term = T.log(y_target * sigma * T.sqrt(2 * np.pi))
        return quad_term + norm_term

    @wraps(Layer.cost)
    def cost(self, Y, Y_hat):
        """Weighted mean negative log-likelihood.

        Y_hat column 0 is the predicted log-space mean, column 1 the
        predicted log sigma; Y column 0 is the target, column 1 a
        per-example cost multiplier. (Previously considered constant
        offsets +1.6091597151048114 / +0.26165911509618789 on mean and
        sigma remain disabled.)"""
        mean = Y_hat[:, 0]
        sigma = T.exp(Y_hat[:, 1])
        y_target = Y[:, 0]
        cost_multiplier = Y[:, 1]
        nll = self.logprob(y_target, mean, sigma)
        return (nll * cost_multiplier).sum() / (1.0 * cost_multiplier.sum())
开发者ID:alumae,项目名称:kaldi-nnet-dur-model,代码行数:104,代码来源:durmodel_elements.py


注:本文中的pylearn2.linear.matrixmul.MatrixMul.lmul方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。