

Python MatrixMul.get_params Method: Code Examples

This article collects typical usage examples of the Python method pylearn2.linear.matrixmul.MatrixMul.get_params. If you are unsure what MatrixMul.get_params does or how to call it, the curated examples below should help; you can also explore other uses of the enclosing class, pylearn2.linear.matrixmul.MatrixMul.


The 13 code examples of MatrixMul.get_params shown below are drawn from open-source projects and ordered by popularity by default.
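Before the examples themselves, here is a minimal sketch of the method in isolation, assuming a Theano-era pylearn2 installation (shapes and variable names are illustrative): MatrixMul wraps a single shared weight matrix, and get_params returns that shared variable so callers can fold it into a model's parameter list.

# Minimal usage sketch (assumes pylearn2/Theano are installed; shapes are illustrative)
import numpy as np
from pylearn2.utils import sharedX
from pylearn2.linear.matrixmul import MatrixMul

rng = np.random.RandomState(0)
W = sharedX(rng.uniform(-0.05, 0.05, (784, 500)), name='W')  # (input_dim, output_dim)
transformer = MatrixMul(W)

params = transformer.get_params()  # the shared weight variable(s) held by the transformer
W_shared, = params                 # the single-element unpacking used throughout the examples
print(W_shared.name, W_shared.get_value().shape)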

Example 1: __init__

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class Adam:
    def __init__(self, batch_size, alpha, irange):
        self.alpha = alpha
        self.visible_layer = GaussianConvolutionalVisLayer(rows = 32,cols = 32, channels = 3, init_beta =1., init_mu = 0.)
        self.hidden_layers = [ Softmax(n_classes = 10,
                                            irange = .01) ]
        rng = np.random.RandomState([2012,8,20])
        self.W = MatrixMul( sharedX( rng.uniform(-irange, irange, (108,1600))))
        #make_random_conv2D(irange = .05, input_space = self.visible_layer.get_input_space(),
        #                output_space = Conv2DSpace([27,27],1600),
        #                kernel_shape = (6,6),
        #                batch_size = batch_size)
        self.batch_size = batch_size
        self.hidden_layers[0].dbm = self
        self.hidden_layers[0].set_input_space(Conv2DSpace([2,2],3200))

    def get_params(self):
        return set(self.hidden_layers[0].get_params()).union(self.W.get_params())

    def mf(self, X):
        patches = cifar10neighbs(X,(6,6))
        patches -= patches.mean(axis=1).dimshuffle(0,'x')
        patches /= T.sqrt(T.sqr(patches).sum(axis=1)+10.0).dimshuffle(0,'x')

        Z = self.W.lmul(patches)

        #Z = Print('Z',attrs=['min','mean','max'])(Z)

        Z = T.concatenate((Z,-Z),axis=1)
        Z = multichannel_neibs2imgs(Z, self.batch_size, 27, 27, 3200, 1, 1)
        Z = Z.dimshuffle(0,3,1,2)
        p = max_pool_2d(Z,(14,14),False)
        p = p.dimshuffle(0,1,2,3)
        p = T.maximum(p - self.alpha, 0.)
        #p = Print('p',attrs=['min','mean','max'])(p)
        y = self.hidden_layers[0].mf_update(state_below = p, state_above = None)
        return [ Z, y ]

    def get_weights_topo(self):
        outp, inp, rows, cols = range(4)
        raw = self.W._filters.get_value()
        return np.transpose(raw,(outp,rows,cols,inp))
Author: cc13ny | Project: galatea | Lines: 44 | Source: adam.py
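The get_params method in this example merges the Softmax layer's parameters with the MatrixMul weights via a set union, so duplicates collapse automatically. A hypothetical standalone sketch of the same aggregation pattern (function name and signature are illustrative, not from the project):

# Illustrative: de-duplicating parameter aggregation across layers and a transformer
def collect_params(layers, transformer):
    params = []
    for obj in list(layers) + [transformer]:
        for p in obj.get_params():
            if p not in params:   # keep first occurrence, preserve order
                params.append(p)
    return params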

Example 2: Maxout

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class Maxout(Layer):
    """
    A hidden layer that does max pooling over groups of linear
    units. If you use this code in a research project, please
    cite

    "Maxout Networks" Ian J. Goodfellow, David Warde-Farley,
    Mehdi Mirza, Aaron Courville, and Yoshua Bengio. ICML 2013
    """

    def __str__(self):
        return "Maxout"

    def __init__(
        self,
        layer_name,
        num_units,
        num_pieces,
        pool_stride=None,
        randomize_pools=False,
        irange=None,
        sparse_init=None,
        sparse_stdev=1.0,
        include_prob=1.0,
        init_bias=0.0,
        W_lr_scale=None,
        b_lr_scale=None,
        max_col_norm=None,
        max_row_norm=None,
        mask_weights=None,
        min_zero=False,
    ):
        """
            layer_name: A name for this layer that will be prepended to
                        monitoring channels related to this layer.
            num_units: The number of maxout units to use in this layer.
            num_pieces: The number of linear pieces to use in each maxout
                        unit.
            pool_stride: The distance between the start of each max pooling
                        region. Defaults to num_pieces, which makes the
                        pooling regions disjoint. If set to a smaller number,
                        can do overlapping pools.
            randomize_pools: Does max pooling over randomized subsets of
                        the linear responses, rather than over sequential
                        subsets.
            irange: if specified, initializes each weight randomly in
                U(-irange, irange)
            sparse_init: if specified, irange must not be specified.
                        This is an integer specifying how many weights to make
                        non-zero. All non-zero weights will be initialized
                        randomly in N(0, sparse_stdev^2)
            include_prob: probability of including a weight element in the set
               of weights initialized to U(-irange, irange). If not included
               a weight is initialized to 0. This defaults to 1.
            init_bias: All biases are initialized to this number
            W_lr_scale: The learning rate on the weights for this layer is
                multiplied by this scaling factor
            b_lr_scale: The learning rate on the biases for this layer is
                multiplied by this scaling factor
            max_col_norm: The norm of each column of the weight matrix is
                constrained to have at most this norm. If unspecified, no
                constraint. Constraint is enforced by re-projection (if
                necessary) at the end of each update.
            max_row_norm: Like max_col_norm, but applied to the rows.
            mask_weights: A binary matrix multiplied by the weights after each
                         update, allowing you to restrict their connectivity.
            min_zero: If true, includes a zero in the set we take a max over
                    for each maxout unit. This is equivalent to pooling over
                    rectified linear units.
        """

        detector_layer_dim = num_units * num_pieces
        pool_size = num_pieces

        if pool_stride is None:
            pool_stride = pool_size

        self.__dict__.update(locals())
        del self.self

        self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias, name=layer_name + "_b")

        if max_row_norm is not None:
            raise NotImplementedError()

    def get_lr_scalers(self):

        if not hasattr(self, "W_lr_scale"):
            self.W_lr_scale = None

        if not hasattr(self, "b_lr_scale"):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
#......... (rest of the code omitted) .........
Author: bbudescu | Project: pylearn2 | Lines: 103 | Source: maxout.py
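The get_lr_scalers pattern this excerpt ends on maps each shared parameter to a learning-rate multiplier, which a trainer then applies on top of its base rate. A minimal sketch of how such a dictionary is consumed, assuming plain SGD (the update rule is illustrative, not pylearn2's actual training loop):

# Illustrative: applying per-parameter learning-rate scales in a plain SGD step
def sgd_updates(params, grads, base_lr, lr_scalers=None):
    lr_scalers = lr_scalers or {}
    updates = {}
    for p, g in zip(params, grads):
        lr = base_lr * lr_scalers.get(p, 1.0)  # default scale is 1.0
        updates[p] = p - lr * g
    return updates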

Example 3: RBM

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params

#......... (beginning of the code omitted) .........

            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None

            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer


        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])


        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(), [self.bias_vis, self.bias_hid])

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps

    def get_input_dim(self):
        if not isinstance(self.vis_space, VectorSpace):
            raise TypeError("Can't describe "+str(type(self.vis_space))+" as a dimensionality number.")
        return self.vis_space.dim

    def get_output_dim(self):
        if not isinstance(self.hid_space, VectorSpace):
            raise TypeError("Can't describe "+str(type(self.hid_space))+" as a dimensionality number.")
        return self.hid_space.dim

    def get_input_space(self):
        return self.vis_space

    def get_output_space(self):
        return self.hid_space

    def get_params(self):
        return [param for param in self._params]

    def get_weights(self, borrow=False):

        weights ,= self.transformer.get_params()

        return weights.get_value(borrow=borrow)

    def get_weights_topo(self):
Author: niharsarangi | Project: pylearn2 | Lines: 70 | Source: rbm.py
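get_weights above is the canonical one-liner for pulling a NumPy copy of the weights out of the transformer. A hedged usage sketch, assuming an rbm object built with nvis/nhid as in this example:

# Usage sketch (assumes `rbm` was constructed as above)
weights, = rbm.transformer.get_params()    # the single shared weight matrix
W_value = weights.get_value(borrow=False)  # NumPy array of shape (nvis, nhid)
print(W_value.shape, W_value.dtype)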

Example 4: Discomax

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params

#......... (beginning of the code omitted) .........
                 min_zero=False):

        super(Discomax, self).__init__()

        detector_layer_dim = num_units * num_pieces
        pool_size = num_pieces

        if pool_stride is None:
            pool_stride = pool_size

        self.__dict__.update(locals())
        del self.self

        self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias,
                         name=(layer_name + '_b'))
        self.ofs = sharedX(np.zeros((self.detector_layer_dim,)),
                         name=(layer_name + '_ofs'))

        if max_row_norm is not None:
            raise NotImplementedError()

    @functools.wraps(Model.get_lr_scalers)
    def get_lr_scalers(self):

        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    def set_input_space(self, space):
        """
        Tells the layer to use the specified input space.

        This resets parameters! The weight matrix is initialized with the
        size needed to receive input from this space.

        Parameters
        ----------
        space : Space
            The Space that the input will lie in.
        """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        if not (0 == ((self.detector_layer_dim - self.pool_size) %
                      self.pool_stride)):
Author: cc13ny | Project: galatea | Lines: 70 | Source: discomax.py
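The divisibility check this excerpt ends on guards the pooling geometry: with pools of pool_size units starting every pool_stride units, the detector layer must tile exactly. A small sketch of the arithmetic (the helper is illustrative, not part of the class):

# Illustrative: how many pooling regions a detector layer of this shape yields
def n_pools(detector_layer_dim, pool_size, pool_stride):
    if (detector_layer_dim - pool_size) % pool_stride != 0:
        raise ValueError("detector layer does not tile evenly")
    return (detector_layer_dim - pool_size) // pool_stride + 1

print(n_pools(detector_layer_dim=240, pool_size=5, pool_stride=5))   # 48 disjoint pools
print(n_pools(detector_layer_dim=240, pool_size=10, pool_stride=5))  # 47 overlapping pools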

Example 5: RectifiedLinear

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class RectifiedLinear(Layer):
    """
        WRITEME
    """

    def __init__(self,
                 dim,
                 layer_name,
                 irange = None,
                 istdev = None,
                 sparse_init = None,
                 sparse_stdev = 1.,
                 include_prob = 1.0,
                 init_bias = 0.,
                 W_lr_scale = None,
                 b_lr_scale = None,
                 mask_weights = None,
                 left_slope = 0.0,
                 copy_input = 0,
                 max_row_norm = None):
        """

            include_prob: probability of including a weight element in the set
            of weights initialized to U(-irange, irange). If not included
            it is initialized to 0.

            """
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX( np.zeros((self.dim,)) + init_bias, name = layer_name + '_b')

    def get_lr_scalers(self):

        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim + self.copy_input * self.input_dim)

        rng = self.mlp.rng
        if self.irange is not None:
            assert self.istdev is None
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.dim)) * \
                (rng.uniform(0.,1., (self.input_dim, self.dim))
                 < self.include_prob)
        elif self.istdev is not None:
            assert self.sparse_init is None
            W = rng.randn(self.input_dim, self.dim) * self.istdev
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.dim))
            def mask_rejects(idx, i):
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.
            for i in xrange(self.dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W ,= self.transformer.get_params()
#......... (rest of the code omitted) .........
Author: renjupaul | Project: pylearn | Lines: 103 | Source: mlp.py
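The sparse_init branch above places exactly sparse_init nonzero Gaussian entries in each column of W, resampling row indices until an empty slot is found. A standalone NumPy version of that initializer (extracted from the loop above, with the mask_weights logic dropped for brevity):

import numpy as np

def sparse_initialize(input_dim, dim, sparse_init, sparse_stdev=1.0, rng=None):
    # Put `sparse_init` Gaussian entries at distinct random rows of each column
    assert sparse_init <= input_dim
    rng = rng or np.random.RandomState(0)
    W = np.zeros((input_dim, dim))
    for i in range(dim):
        for _ in range(sparse_init):
            idx = rng.randint(0, input_dim)
            while W[idx, i] != 0:
                idx = rng.randint(0, input_dim)
            W[idx, i] = rng.randn()
    return W * sparse_stdev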

Example 6: SoftmaxPool

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class SoftmaxPool(Layer):
    """
        A hidden layer that uses the softmax function to do
        max pooling over groups of units.
        When the pooling size is 1, this reduces to a standard
        sigmoidal MLP layer.
        """

    def __init__(self,
                 detector_layer_dim,
                 pool_size,
                 layer_name,
                 irange = None,
                 sparse_init = None,
                 sparse_stdev = 1.,
                 include_prob = 1.0,
                 init_bias = 0.,
                 W_lr_scale = None,
                 b_lr_scale = None,
                 mask_weights = None,
        ):
        """

            include_prob: probability of including a weight element in the set
            of weights initialized to U(-irange, irange). If not included
            it is initialized to 0.

            """
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX( np.zeros((self.detector_layer_dim,)) + init_bias, name = layer_name + '_b')

    def get_lr_scalers(self):

        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)


        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                             (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))

        self.h_space = VectorSpace(self.detector_layer_dim)
        self.pool_layer_dim = self.detector_layer_dim / self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)

        rng = self.mlp.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.detector_layer_dim)) * \
                (rng.uniform(0.,1., (self.input_dim, self.detector_layer_dim))
                 < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))
            def mask_rejects(idx, i):
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.
            for i in xrange(self.detector_layer_dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'
#......... (rest of the code omitted) .........
Author: renjupaul | Project: pylearn | Lines: 103 | Source: mlp.py

Example 7: CpuConvMaxout

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class CpuConvMaxout(Layer):
    """
    .. todo::

        WRITEME
    """
    def __init__(self,
                 output_channels,
                 num_pieces,
                 kernel_shape,
                 pool_shape,
                 pool_stride,
                 layer_name,
                 irange=None,
                 border_mode='valid',
                 sparse_init=None,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 left_slope=0.0,
                 max_kernel_norm=None,
                 pool_type='max',
                 detector_normalization=None,
                 output_normalization=None,
                 kernel_stride=(1, 1)):
        """
        .. todo::

            WRITEME properly

         output_channels: The number of output channels the layer should have.
         kernel_shape: The shape of the convolution kernel.
         pool_shape: The shape of the spatial max pooling. A two-tuple of ints.
         pool_stride: The stride of the spatial max pooling. Also must be
                      square.
         layer_name: A name for this layer that will be prepended to
                     monitoring channels related to this layer.
         irange: if specified, initializes each weight randomly in
                 U(-irange, irange)
         border_mode: A string indicating the size of the output:
            full - The output is the full discrete linear convolution of the
                   inputs.
            valid - The output consists only of those elements that do not rely
                    on the zero-padding.(Default)
         include_prob: probability of including a weight element in the set
                       of weights initialized to U(-irange, irange). If not
                       included it is initialized to 0.
         init_bias: All biases are initialized to this number
         W_lr_scale: The learning rate on the weights for this layer is
                     multiplied by this scaling factor
         b_lr_scale: The learning rate on the biases for this layer is
                     multiplied by this scaling factor
         left_slope: **TODO**
         max_kernel_norm: If specified, each kernel is constrained to have at
                          most this norm.
         pool_type: The type of the pooling operation performed after the
                    convolution. Default pooling type is max-pooling.
         detector_normalization, output_normalization:
              if specified, should be a callable object. The state of the
              network is optionally replaced with normalization(state) at
              these points in processing:
                  detector: the maxout units can be normalized prior to the
                            spatial pooling
                  output: the output of the layer, after spatial pooling, can
                          be normalized as well
         kernel_stride: The stride of the convolution kernel. A two-tuple of
                        ints.
        """

        #super(ConvRectifiedLinear, self).__init__()

        if (irange is None) and (sparse_init is None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvRectifiedLinear.")
        elif (irange is not None) and (sparse_init is not None):
            raise AssertionError("You should specify either irange or "
                                 "sparse_init when calling the constructor of "
                                 "ConvRectifiedLinear and not both.")

        self.__dict__.update(locals())
        del self.self

    @wraps(Layer.get_lr_scalers)
    def get_lr_scalers(self):

        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
#......... (rest of the code omitted) .........
Author: AtousaTorabi | Project: HumanActivityRecognition | Lines: 103 | Source: customCpuDotMaxout.py
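The border_mode options described in the docstring map to the standard convolution output-size arithmetic: 'valid' keeps only fully-overlapping positions, while 'full' zero-pads so every overlap position counts. A quick sketch (the helper is illustrative, not part of the class):

# Illustrative: output size of a 1-D convolution under each border mode
def conv_output_size(in_size, kernel_size, stride=1, border_mode='valid'):
    if border_mode == 'valid':
        return (in_size - kernel_size) // stride + 1
    elif border_mode == 'full':
        return (in_size + kernel_size - 2) // stride + 1
    raise ValueError(border_mode)

print(conv_output_size(32, 6))                      # 27, as in Example 1's 32x32 -> 27x27 patches
print(conv_output_size(32, 6, border_mode='full'))  # 37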

Example 8: IsingHidden

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class IsingHidden(HiddenLayer):
    """

    A hidden layer with h being a vector in {-1, 1}^dim,
    implementing the energy function term

    -v^T Wh -b^T h

    where W and b are parameters of this layer, and v is
    the upward state of the layer below

    """

    def __init__(self,
            dim,
            layer_name,
            irange = None,
            sparse_init = None,
            sparse_stdev = 1.,
            include_prob = 1.0,
            init_bias = 0.,
            W_lr_scale = None,
            b_lr_scale = None,
            max_col_norm = None):
        """

            include_prob: probability of including a weight element in the set
                    of weights initialized to U(-irange, irange). If not included
                    it is initialized to 0.

        """
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX( np.zeros((self.dim,)) + init_bias, name = layer_name + '_b')

    def get_lr_scalers(self):

        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                                 self.irange,
                                 (self.input_dim, self.dim)) * \
                    (rng.uniform(0.,1., (self.input_dim, self.dim))
                     < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.dim))
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W ,= self.transformer.get_params()
        assert W.name is not None

    def censor_updates(self, updates):

        if self.max_col_norm is not None:
            W, = self.transformer.get_params()
            if W in updates:
                updated_W = updates[W]
                col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
                desired_norms = T.clip(col_norms, 0, self.max_col_norm)
                updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))
#......... (rest of the code omitted) .........
Author: Alienfeel | Project: pylearn2 | Lines: 103 | Source: ising.py
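censor_updates above re-projects any weight column whose norm exceeds max_col_norm back onto the constraint after an update. The same projection in plain NumPy (illustrative):

import numpy as np

def project_col_norms(W, max_col_norm, eps=1e-7):
    # Mirror of the Theano code: rescale columns so no column norm exceeds the cap
    col_norms = np.sqrt(np.square(W).sum(axis=0))
    desired = np.clip(col_norms, 0, max_col_norm)
    return W * (desired / (eps + col_norms))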

Example 9: BinaryVectorMaxPool

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class BinaryVectorMaxPool(HiddenLayer):
    """
        A hidden layer that does max-pooling on binary vectors.
        It has two sublayers, the detector layer and the pooling
        layer. The detector layer is its downward state and the pooling
        layer is its upward state.

        TODO: this layer uses (pooled, detector) as its total state,
              which can be confusing when listing all the states in
              the network left to right. Change this and
              pylearn2.expr.probabilistic_max_pooling to use
              (detector, pooled)
    """

    def __init__(self,
             detector_layer_dim,
            pool_size,
            layer_name,
            irange = None,
            sparse_init = None,
            include_prob = 1.0,
            init_bias = 0.):
        """

            include_prob: probability of including a weight element in the set
                    of weights initialized to U(-irange, irange). If not included
                    it is initialized to 0.

        """
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX( np.zeros((self.detector_layer_dim,)) + init_bias, name = layer_name + '_b')

    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)


        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                    (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))

        self.h_space = VectorSpace(self.detector_layer_dim)
        self.pool_layer_dim = self.detector_layer_dim / self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                                 self.irange,
                                 (self.input_dim, self.detector_layer_dim)) * \
                    (rng.uniform(0.,1., (self.input_dim, self.detector_layer_dim))
                     < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))
            for i in xrange(self.detector_layer_dim):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W ,= self.transformer.get_params()
        assert W.name is not None

    def get_total_state_space(self):
        return CompositeSpace((self.output_space, self.h_space))

    def get_params(self):
        assert self.b.name is not None
        W ,= self.transformer.get_params()
        assert W.name is not None
        return self.transformer.get_params().union([self.b])  # relies on the old set-returning get_params; newer pylearn2 returns a list (see Example 13)

    def get_weight_decay(self, coeff):
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float)
        W ,= self.transformer.get_params()
        return coeff * T.sqr(W).sum()

    def get_weights(self):
        if self.requires_reformat:
#......... (rest of the code omitted) .........
Author: deigen | Project: pylearn | Lines: 103 | Source: dbm.py
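get_weight_decay above turns the transformer's weights into an L2 penalty: coeff times the sum of squared weights. The NumPy equivalent is a one-liner (illustrative):

import numpy as np

def weight_decay(W_value, coeff):
    # NumPy counterpart of coeff * T.sqr(W).sum()
    return coeff * np.square(W_value).sum()

print(weight_decay(np.ones((3, 4)), 0.01))  # 0.12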

Example 10: Powerup

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params

#......... (beginning of the code omitted) .........
        """
            Sample the values uniformly such that the initial value of
            softplus(.) + 1 is between min and max.
        """
        rng = np.random.RandomState(12435)
        p_vals = np.log(np.exp(rng.uniform(low=min, high=max, size=(self.num_units,))-1)-1)
        return p_vals

    def get_log_p(self, mean=None, std=None):
        rng = np.random.RandomState(12435)
        assert mean >= 1.0, "Mean should be at least 1."
        if self.power_activ == "softplus":
            p_vals = np.log(rng.normal(loc=np.exp(mean-1), scale=std, size=(self.num_units,)) - 1)
        elif self.power_activ == "exp":
            p_vals = rng.normal(loc=np.log(mean-1), scale=std, size=(self.num_units,))
        else:
            p_vals = np.sqrt(rng.normal(loc=mean, scale=std, size=(self.num_units,)) - 1)
        #p_vals = np.log(np.exp(rng.normal(loc=mean, scale=std, size=(self.num_units,))-1) - 1)
        return p_vals

    def get_lr_scalers(self):
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        if not hasattr(self, 'p_lr_scale'):
            self.p_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        if self.p_lr_scale is not None:
            rval[self.p] = self.p_lr_scale

        return rval

    def set_input_space(self, space):
        """
        Note: this resets parameters!
        """
        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.p.name = self.layer_name + "_p"

        if not ((self.detector_layer_dim - self.pool_size) % self.pool_stride == 0):
            if self.pool_stride == self.pool_size:
                raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                             (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))
            raise ValueError()
Author: kyunghyuncho | Project: powerup | Lines: 69 | Source: powerup.py
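get_p_vals above draws uniformly in softplus space: since softplus(p) = log(1 + exp(p)), solving softplus(p) + 1 = u for p gives p = log(exp(u - 1) - 1), which is exactly the expression in the code. A NumPy round-trip check (illustrative):

import numpy as np

def inverse_softplus_plus_one(u):
    # Solve softplus(p) + 1 == u for p; valid for u > 1
    return np.log(np.exp(u - 1.0) - 1.0)

u = np.array([1.5, 2.0, 3.0])
p = inverse_softplus_plus_one(u)
print(np.log1p(np.exp(p)) + 1.0)  # recovers [1.5, 2.0, 3.0]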

Example 11: SparseCodingLayer

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class SparseCodingLayer(Linear):
    
    def __init__(self, batch_size, fprop_code=True, lr=.01, n_steps=10, lbda=0, top_most=False, 
            nonlinearity=RectifierConvNonlinearity(),*args, **kwargs):
        '''
        Compiled version: the sparse code is calculated using 'top' and is not just symbolic.
        Parameters for the optimization/feedforward operation:
        lr      : learning rate
        n_steps : number of steps or updates of the hidden code
        '''
        super(SparseCodingLayer, self).__init__(*args, **kwargs)
        self.batch_size = batch_size
        self.fprop_code = fprop_code
        self.n_steps = n_steps
        self.lr = lr
        self.lbda = lbda
        self.top_most = top_most
        self.nonlin = nonlinearity

    @wraps(Linear.set_input_space)
    def set_input_space(self, space):
        
        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        if self.fprop_code==True:
            self.output_space = VectorSpace(self.dim)
        else:
            self.output_space = VectorSpace(self.input_dim)

        rng = self.mlp.rng
        W = rng.randn(self.input_dim, self.dim)
        self.W = sharedX(W.T, self.layer_name + '_W')
        self.transformer = MatrixMul(self.W)
        self.W, = self.transformer.get_params()
        b = np.zeros((self.input_dim,))
        self.b = sharedX(b, self.layer_name + '_b') # We need both to pass input_dim valid
        X = .001 * rng.randn(self.batch_size, self.dim)
        self.X = sharedX(X, self.layer_name + '_X')
        S = rng.normal(0, .001, size=(self.batch_size, self.input_dim))
        self.S = sharedX(S, self.layer_name + '_S')
        self._params = [self.W, self.b]
        #self.state_below = T.zeros((self.batch_size, self.input_dim))
        
        cost = self.get_local_cost()
        self.opt = top.Optimizer(self.X, cost,  
                                 method='rmsprop', 
                                 learning_rate=self.lr, momentum=.9)

        self._reconstruction = theano.function([], T.dot(self.X, self.W))
    
    def get_local_cost(self):
        er = T.sqr(self.S - T.dot(self.X, self.W)).sum()
        l1 = T.sqrt(T.sqr(self.X) + 1e-6).sum()
        top_down = self.get_top_down_flow()
        return er + .1 * l1 + top_down
    
    def update_top_state(self, state_above=None):
        if self.lbda != 0:
            assert state_above is not None
            self.top_flow.set_value(state_above)     
    
    def get_nonlin_output(self):
        return self.nonlin(self.X)

    def get_top_down_flow(self):
        if self.lbda == 0:
            rval = 0.
        elif self.top_most:
            rval = (self.lbda * (self.top_flow - self.X)**2).sum()
        else:
            out = self.get_nonlin_output()
            rval = (self.lbda * (self.top_flow - out)**2).sum()

        return rval

    def _renormW(self):
        A = self.W.get_value(borrow=True)
        A = np.dot(A.T, np.diag(1./np.sqrt(np.sum(A**2, axis=1)))).T
        self.W.set_value( A )
  
    def get_reconstruction(self):
        return self._reconstruction()

    def get_sparse_code(self, state_below):

        # Renorm W
        self._renormW()

        if hasattr(state_below, 'get_value'):
            #print '!!!! state_below does have get_value'
            self.S.set_value(state_below.get_value(borrow=True))
#......... (rest of the code omitted) .........
Author: EderSantana | Project: mdpcn | Lines: 103 | Source: cdpcn.py
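_renormW above rescales W so that every row has unit L2 norm (W is stored transposed here, shape (dim, input_dim), so rows are the dictionary atoms). A clearer NumPy equivalent of the diag trick (illustrative):

import numpy as np

def renorm_rows(W):
    # Same effect as np.dot(W.T, np.diag(1.0 / row_norms)).T, without the dense diag
    row_norms = np.sqrt(np.square(W).sum(axis=1, keepdims=True))
    return W / row_norms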

Example 12: SparseCodingLayer

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class SparseCodingLayer(Linear):
    
    def __init__(self, batch_size, fprop_code=True, lr=.01, n_steps=10, truncate=-1, *args, **kwargs):
        '''
        Parameters for the optimization/feedforward operation:
        lr      : learning rate
        n_steps : number of steps or updates of the hidden code
        truncate: truncate the gradient after this number (default -1 which means do not truncate)
        '''
        super(SparseCodingLayer, self).__init__(*args, **kwargs)
        self.batch_size = batch_size
        self.fprop_code = fprop_code
        self.n_steps = n_steps
        self.truncate = truncate
        self.lr = lr
        self._scan_updates = OrderedDict()

    @wraps(Linear.set_input_space)
    def set_input_space(self, space):
        
        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        if self.fprop_code==True:
            self.output_space = VectorSpace(self.dim)
        else:
            self.output_space = VectorSpace(self.input_dim)

        rng = self.mlp.rng
        W = rng.randn(self.input_dim, self.dim)
        self.W = sharedX(W.T, self.layer_name + '_W')
        self.transformer = MatrixMul(self.W)
        self.W, = self.transformer.get_params()
        b = np.zeros((self.input_dim,))
        self.b = sharedX(b, self.layer_name + '_b') # We need both to pass input_dim valid
        X = .001 * rng.randn(self.batch_size, self.dim)
        self.X = sharedX(X, self.layer_name + '_X')
        self._params = [self.W, self.b, self.X]
        self.state_below = T.zeros((self.batch_size, self.input_dim))

    def _renormW(self):
        A = self.W.get_value(borrow=True)
        A = np.dot(A.T, np.diag(1./np.sqrt(np.sum(A**2, axis=1)))).T
        self.W.set_value( A )
    
    def get_local_cost(self,state_below):
        er = T.sqr(state_below - T.dot(self.X, self.W)).sum()
        l1 = T.sqrt(T.sqr(self.X) + 1e-6).sum()
        return er + .1 * l1
        
    def get_sparse_code(self, state_below):

        def _optimization_step(Xt, accum, vt, S):
                
            '''
            Note that this is the RMSprop update.
            Thus, we are running gradient updates inside scan (the dream).

            TODO: put this in a better place.
            I tried to make it a method of self, but I'm not sure how to tell
            theano.scan that the first argument of the function is a non_sequence
            '''
            
            rho = .9
            momentum = .9
            lr = self.lr
            Y = T.dot(Xt, self.W) #+ self.b
            err = (S - Y) ** 2
            l1 = T.sqrt(Xt**2 + 1e-6)
            cost = err.sum() + .1 * l1.sum()
            gX = T.grad(cost, Xt)
            new_accum = rho * accum + (1-rho) * gX**2
            v = momentum * vt  - lr * gX / T.sqrt(new_accum + 1e-8)
            X = Xt + momentum * v - lr * gX / T.sqrt(new_accum + 1e-8)
            return [X, new_accum, v]

        # Renorm W
        self._renormW()
        
        rng = self.mlp.rng
        #X = rng.randn(self.batch_size, self.dim)
        #self.X = sharedX(X, 'SparseCodingLinear_X')
        '''
        accum = T.zeros_like(self.X)
        vt = T.zeros_like(self.X)
        [Xfinal,_,_], updates = theano.scan(fn=_optimization_step,
                     outputs_info=[self.X, accum, vt], 
                     non_sequences=[state_below], 
                     n_steps=self.n_steps, truncate_gradient=self.truncate)
        
        self._scan_updates.update(updates)

        self.Xout = Xfinal[-1]
#......... (rest of the code omitted) .........
Author: EderSantana | Project: mdpcn | Lines: 103 | Source: dpcn.py
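_optimization_step above is an RMSprop-with-momentum update on the code X, run inside theano.scan. The same iteration in plain NumPy, assuming a fixed dictionary W and input batch S (illustrative; mirrors the cost er + 0.1 * l1 from get_local_cost):

import numpy as np

def sparse_code(S, W, n_steps=10, lr=0.01, rho=0.9, momentum=0.9):
    # Infer codes X minimizing ||S - X.W||^2 + 0.1 * sum(sqrt(X^2 + 1e-6))
    X = 0.001 * np.random.RandomState(0).randn(S.shape[0], W.shape[0])
    accum = np.zeros_like(X)
    v = np.zeros_like(X)
    for _ in range(n_steps):
        residual = S - X.dot(W)
        gX = -2.0 * residual.dot(W.T) + 0.1 * X / np.sqrt(X**2 + 1e-6)
        accum = rho * accum + (1.0 - rho) * gX**2
        v = momentum * v - lr * gX / np.sqrt(accum + 1e-8)
        X = X + momentum * v - lr * gX / np.sqrt(accum + 1e-8)
    return X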

Example 13: WeightedLogNormalLogLikelihood

# Required import: from pylearn2.linear.matrixmul import MatrixMul
# Or: from pylearn2.linear.matrixmul.MatrixMul import get_params
class WeightedLogNormalLogLikelihood(Layer):

    __metaclass__ = RNNWrapper

    def __init__(self, layer_name, irange=0.0, init_bias=0.):
        super(WeightedLogNormalLogLikelihood, self).__init__()
        self.__dict__.update(locals())
        del self.self
        self.dim = 2

        self.b = sharedX(np.zeros((self.dim,)) + init_bias,
                             name=(layer_name + '_b'))

    @wraps(Layer.set_input_space)
    def set_input_space(self, space):

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        rng = self.mlp.rng

        W = rng.uniform(-self.irange,
                        self.irange,
                        (self.input_dim, self.dim))

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W, = self.transformer.get_params()
        assert W.name is not None


    @wraps(Layer.get_params)
    def get_params(self):

        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        assert self.b.name is not None
        assert self.b not in rval
        rval.append(self.b)
        return rval


    @wraps(Layer.get_weights)
    def get_weights(self):

        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W, = self.transformer.get_params()

        W = W.get_value()

        return W

    @wraps(Layer.set_weights)
    def set_weights(self, weights):

        W, = self.transformer.get_params()
        W.set_value(weights)

    @wraps(Layer.set_biases)
    def set_biases(self, biases):

        self.b.set_value(biases)

    @wraps(Layer.get_biases)
    def get_biases(self):
        """
        .. todo::
            WRITEME
        """
        return self.b.get_value()

    @wraps(Layer.get_weights_format)
    def get_weights_format(self):

        return ('v', 'h')

    @wraps(Layer.get_weights_topo)
    def get_weights_topo(self):

        if not isinstance(self.input_space, Conv2DSpace):
#......... (rest of the code omitted) .........
Author: alumae | Project: kaldi-nnet-dur-model | Lines: 103 | Source: durmodel_elements.py
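Example 13's get_params makes the modern convention explicit: transformer.get_params() returns a list, never a set (note the assert), and the bias is appended only after checking it is not already present. A compact sketch of that convention (illustrative):

# Illustrative: the list-based get_params convention enforced in Example 13
def collect_layer_params(transformer, b):
    params = list(transformer.get_params())  # [W]
    assert b not in params
    params.append(b)                         # [W, b]
    return params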


Note: The pylearn2.linear.matrixmul.MatrixMul.get_params examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before reusing the code, and do not repost without permission.