

Python rng.make_np_rng Function Code Examples

This article collects typical usage examples of the Python function pylearn2.utils.rng.make_np_rng. If you are wondering what make_np_rng does, how to call it, or what real uses of it look like, the curated examples below should help.


The following presents 15 code examples of make_np_rng, listed roughly in order of popularity.
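Across these examples, make_np_rng is called with either an existing numpy RandomState, a seed (an int or a list/tuple of ints), or None together with a default seed; the which_method argument names the sampling methods the caller expects the returned RandomState to provide. The sketch below summarizes these call patterns. It is a minimal illustration inferred from the snippets that follow, not a substitute for the pylearn2 documentation.

# Minimal usage sketch (assumptions: pylearn2 is installed, and
# make_np_rng takes an rng-or-seed, an optional default seed, and a
# which_method argument, as suggested by the examples below).
import numpy as np
from pylearn2.utils.rng import make_np_rng

# Seed with a tuple of ints for reproducibility; which_method declares the
# sampling methods the returned numpy RandomState must provide.
rng = make_np_rng((2013, 2, 20), which_method="random_integers")

# Pass None plus a default seed: the default seed is used when no rng is given.
rng = make_np_rng(None, [2012, 10, 17], which_method="uniform")

# An existing RandomState is accepted as-is.
existing = np.random.RandomState(42)
rng = make_np_rng(existing, which_method=["randn", "randint"])

values = rng.randn(3)  # behaves like an ordinary numpy RandomState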

Example 1: __init__

    def __init__(self,
                 window_shape,
                 randomize=None,
                 randomize_once=None,
                 center=None,
                 rng=(2013, 2, 20),
                 pad_randomized=0,
                 flip=True):
        self._window_shape = tuple(window_shape)

        # Defined in setup(). A dict that maps Datasets in self._randomize and
        # self._randomize_once to zero-padded versions of their topological
        # views.
        self._original = None

        self._randomize = randomize if randomize else []
        self._randomize_once = randomize_once if randomize_once else []
        self._center = center if center else []
        self._pad_randomized = pad_randomized
        self._flip = flip

        if randomize is None and randomize_once is None and center is None:
            warnings.warn(self.__class__.__name__ + " instantiated without "
                          "any dataset arguments, and therefore does nothing",
                          stacklevel=2)

        self._rng = make_np_rng(rng, which_method="random_integers")
Developer: DevSinghSachan, Project: pylearn2, Lines of code: 27, Source file: window_flip.py

Example 2: make_sparse_random_conv2D

def make_sparse_random_conv2D(num_nonzero, input_space, output_space,
                              kernel_shape, pad=0, kernel_stride=(1, 1),
                              border_mode='valid', message="", rng=None,
                              partial_sum=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse
    """

    rng = make_np_rng(rng, default_sparse_seed,
                      which_method=['randn', 'randint'])

    W = np.zeros((input_space.num_channels, kernel_shape[0],
                  kernel_shape[1], output_space.num_channels))

    def random_coord():
        return [rng.randint(dim) for dim in W.shape[0:3]]

    for o in xrange(output_space.num_channels):
        for i in xrange(num_nonzero):
            ch, r, c = random_coord()
            while W[ch, r, c, o] != 0:
                ch, r, c = random_coord()
            W[ch, r, c, o] = rng.randn()

    W = sharedX(W)

    return Conv2D(filters=W, input_axes=input_space.axes,
                  output_axes=output_space.axes, kernel_stride=kernel_stride,
                  pad=pad, message=message, partial_sum=partial_sum)
Developer: CURG, Project: pylearn2, Lines of code: 34, Source file: conv2d_c01b.py

Example 3: make_weights

    def make_weights(input_space, output_space, kernel_shape, **kwargs):
        rs = make_np_rng(rng, default_seed, which_method='uniform')

        shape = (output_space.num_channels, input_space.num_channels,
                 kernel_shape[0], kernel_shape[1])

        return sharedX(rs.uniform(-irange, irange, shape))
Developer: Refefer, Project: pylearn2, Lines of code: 7, Source file: conv2d.py

Example 4: __init__

    def __init__(self, which_set='debug', start=None, end=None, shuffle=True,
                 lazy_load=False, rng=_default_seed):

        assert which_set in ['debug', 'train', 'test']
        if which_set == 'debug':
            maxlen, n_samples, n_annotations, n_features = 10, 12, 13, 14
            X = N.zeros(shape=(n_samples, maxlen))
            X_mask = X  # placeholder mask (same array as X)
            Z = N.zeros(shape=(n_annotations, n_samples, n_features))
        elif which_set == 'train':
            pass
        else:
            pass

        self.X, self.X_mask, self.Z = (X, X_mask, Z)
        self.sources = ('features', 'target')

        self.spaces = CompositeSpace([
            SequenceSpace(space=VectorSpace(dim=self.X.shape[1])),
            SequenceDataSpace(space=VectorSpace(dim=self.Z.shape[-1]))
        ])
        self.data_spces = (self.spaces, self.sources)
        # self.X_space, self.X_mask_space, self.Z_space
        # Default iterator
        self._iter_mode = resolve_iterator_class('sequential')
        self._iter_topo = False
        self._iter_target = False
        self._iter_data_specs = self.data_spces
        self.rng = make_np_rng(rng, which_method='random_integers')
Developer: EugenePY, Project: tensor-work, Lines of code: 29, Source file: im2latex.py

Example 5: __init__

    def __init__(self, X, y):
        if (self.dataset_name in dataset_info.aod_datasets
            and self.which_set == "full"):
            self.targets, self.novels = self.load_aod_gts()
            assert self.targets.shape == self.novels.shape
            if X.shape[0] % self.targets.shape[0] != 0:
                raise ValueError("AOD data and targets seems to have "
                                 "incompatible shapes: %r vs %r"
                                 % (X.shape, self.targets.shape))

        X = self.preprocess(X)

        if self.shuffle:
            logger.info("Shuffling data")
            self.shuffle_rng = make_np_rng(None, [1, 2, 3],
                                           which_method="shuffle")
            m = X.shape[0]  # number of examples to shuffle
            for i in xrange(m):
                j = self.shuffle_rng.randint(m)
                tmp = X[i].copy()
                X[i] = X[j]
                X[j] = tmp
                tmp = y[i:i+1].copy()
                y[i] = y[j]
                y[j] = tmp

        max_labels = np.amax(y) + 1
        logger.info("%d labels found." % max_labels)

        super(MRI, self).__init__(X=X,
                                  y=y,
                                  view_converter=self.view_converter,
                                  y_labels=max_labels)

        assert not np.any(np.isnan(self.X))
Developer: ecastrow, Project: pl2mind, Lines of code: 34, Source file: MRI.py

Example 6: __init__

    def __init__(self, data=None, data_specs=None, rng=_default_seed,
                 preprocessor=None, fit_preprocessor=False):
        # data_specs should be flat, and there should be no
        # duplicates in source, as we keep only one version
        assert is_flat_specs(data_specs)
        if isinstance(data_specs[1], tuple):
            assert sorted(set(data_specs[1])) == sorted(data_specs[1])
        space, source = data_specs
        space.np_validate(data)
        # TODO: assume that data[0] is num example => error if channel in c01b
        # assert len(set(elem.shape[0] for elem in list(data))) <= 1
        self.data = data
        self.data_specs = data_specs
        # TODO: assume that data[0] is num example => error if channel in c01b
        self.num_examples = list(data)[-1].shape[0] # TODO: list(data)[0].shape[0]

        self.compress = False
        self.design_loc = None
        self.rng = make_np_rng(rng, which_method='random_integers')
        # Defaults for iterators
        self._iter_mode = resolve_iterator_class('sequential')

        if preprocessor:
            preprocessor.apply(self, can_fit=fit_preprocessor)
        self.preprocessor = preprocessor
Developer: Dining-Engineers, Project: Multi-Column-Deep-Neural-Network, Lines of code: 25, Source file: vector_spaces_dataset_c01b.py

Example 7: __init__

    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)

        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
Developer: dwf, Project: pylearn2, Lines of code: 29, Source file: iteration.py

Example 8: setup_rng

    def setup_rng(self):
        """
        .. todo::

            WRITEME
        """
        self.rng = make_np_rng(None, [2012, 10, 17], which_method="uniform")
Developer: HALLAB-Halifax, Project: pylearn2, Lines of code: 7, Source file: dbm.py

Example 9: make_random_conv2D

def make_random_conv2D(irange, input_space, output_space,
                       kernel_shape, batch_size=None, \
                       subsample = (1,1), border_mode = 'valid',
                       message = "", rng = None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX(rng.uniform(
        -irange, irange,
        (output_space.num_channels, input_space.num_channels,
         kernel_shape[0], kernel_shape[1])
    ))

    return Conv2D(
        filters=W,
        batch_size=batch_size,
        input_space=input_space,
        output_axes=output_space.axes,
        subsample=subsample, border_mode=border_mode,
        filters_shape=W.get_value(borrow=True).shape, message=message
    )
Developer: AlexArgus, Project: pylearn2, Lines of code: 28, Source file: conv2d.py

Example 10: _create_subset_iterator

    def _create_subset_iterator(self, mode, batch_size=None, num_batches=None,
                                rng=None):
        subset_iterator = resolve_iterator_class(mode)
        if rng is None and subset_iterator.stochastic:
            rng = make_np_rng()
        return subset_iterator(self.get_num_examples(), batch_size,
                               num_batches, rng)
Developer: 123fengye741, Project: pylearn2, Lines of code: 7, Source file: penntree.py

Example 11: make_random_conv2D

def make_random_conv2D(irange, input_channels, input_axes, output_axes,
        output_channels,
        kernel_shape,
        kernel_stride = (1,1), pad=0, message = "", rng = None,
        partial_sum = None, sparse_init = None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels.
    Should be functionally equivalent to
    pylearn2.linear.conv2d.make_random_conv2D
    """

    rng = make_np_rng(rng, default_seed, which_method='uniform')

    W = sharedX( rng.uniform(-irange,irange,(input_channels, \
            kernel_shape[0], kernel_shape[1], output_channels)))

    return Conv2D(filters = W,
        input_axes = input_axes,
        output_axes = output_axes,
        kernel_stride = kernel_stride, pad=pad,
        message = message, partial_sum=partial_sum)
Developer: SinaHonari, Project: pylearn2, Lines of code: 25, Source file: conv2d_c01b.py

Example 12: __init__

    def __init__(self, nvis, nhid,
            init_lambda,
            init_p, init_alpha, learning_rate):
        """
        .. todo::

            WRITEME
        """
        self.nvis = int(nvis)
        self.nhid = int(nhid)
        self.init_lambda = float(init_lambda)
        self.init_p = float(init_p)
        self.init_alpha = N.cast[config.floatX](init_alpha)
        self.tol = 1e-6
        self.time_constant = 1e-2
        self.learning_rate = N.cast[config.floatX](learning_rate)

        self.predictor_learning_rate = self.learning_rate

        self.rng = make_np_rng(None, [1,2,3], which_method="randn")

        self.error_record = []
        self.ERROR_RECORD_MODE_MONITORING = 0
        self.error_record_mode = self.ERROR_RECORD_MODE_MONITORING

        self.instrumented = False

        self.redo_everything()
Developer: bobchennan, Project: pylearn2, Lines of code: 28, Source file: differentiable_sparse_coding.py

Example 13: make_sparse_random_local

def make_sparse_random_local(num_nonzero, input_space, output_space,
        kernel_shape, batch_size, \
        kernel_stride = (1,1), border_mode = 'valid', message = "", rng=None):
    """
    .. todo::

        WRITEME
    """
    raise NotImplementedError("Not yet modified after copy-paste from "
            "pylearn2.linear.conv2d_c01b")
    """ Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse"""

    rng = make_np_rng(rng, default_sparse_seed, which_method=['randn','randint'])

    W = np.zeros(( output_space.num_channels, input_space.num_channels, \
            kernel_shape[0], kernel_shape[1]))

    def random_coord():
        return [ rng.randint(dim) for dim in W.shape ]

    for i in xrange(num_nonzero):
        o, ch, r, c = random_coord()
        while W[o, ch, r, c] != 0:
            o, ch, r, c = random_coord()
        W[o, ch, r, c] = rng.randn()


    W = sharedX( W)
Developer: EderSantana, Project: pylearn2, Lines of code: 29, Source file: local_c01b.py

Example 14: split_patients

def split_patients(patients, valid_percent, test_percent, rng=(2014, 10, 22)):
    if isinstance(rng, (list, tuple)):
        rng = make_np_rng(None, rng, which_method='uniform')

    vals = np.asarray(patients.values())
    keys = np.asarray(patients.keys())
    sss = StratifiedShuffleSplit(
        vals, n_iter=1, test_size=test_percent, random_state=rng)
    remaining_idx, test_idx = sss.__iter__().next()

    if valid_percent > 0:
        # Rate of samples required to build validation set
        valid_rate = valid_percent / (1 - test_percent)

        sss = StratifiedShuffleSplit(
            vals[remaining_idx], n_iter=1, test_size=valid_rate, random_state=rng)
        tr_idx, val_idx = sss.__iter__().next()
        valid_idx = remaining_idx[val_idx]
        train_idx = remaining_idx[tr_idx]
    else:
        train_idx = remaining_idx
        valid_idx = []

    train_patients = dict(zip(keys[train_idx], vals[train_idx]))
    valid_patients = dict(zip(keys[valid_idx], vals[valid_idx]))
    test_patients = dict(zip(keys[test_idx], vals[test_idx]))
    return train_patients, valid_patients, test_patients
Developer: johnarevalo, Project: cnn-bcdr, Lines of code: 27, Source file: utils.py

Example 15: make_random_conv3D

def make_random_conv3D(irange, input_axes, output_axes,
        signal_shape,
        filter_shape,
        kernel_stride = (1,1), pad=0, message = "", rng = None,
        partial_sum = None):

    # make_np_rng falls back to default_seed when rng is None and also
    # accepts an existing RandomState or a seed, so no None guard is needed.
    rng = make_np_rng(rng, default_seed, which_method='uniform')

    _filter_5d_shape = (filter_shape[0], filter_shape[1], filter_shape[2],
                        filter_shape[3], filter_shape[4])

    # initialize weights
    W = sharedX(rng.uniform(-irange, irange, _filter_5d_shape))

    return Conv3DBCT01(filters = W,
                       input_axes = input_axes,
                       output_axes = output_axes,
                       signal_shape = signal_shape,
                       filter_shape = filter_shape,
                       kernel_stride = kernel_stride, pad=pad,
                       message = message, partial_sum=partial_sum)
Developer: AtousaTorabi, Project: HumanActivityRecognition, Lines of code: 26, Source file: conv3d_bct01.py


Note: The pylearn2.utils.rng.make_np_rng examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before using or redistributing the code. Do not reproduce this article without permission.