

Python asserts.assert_all_integer Function Code Examples

This article collects typical usage examples of the simplelearn.asserts.assert_all_integer function in Python. If you have been wondering what assert_all_integer does, how to call it, or what it looks like in real code, the curated examples below should help.


The following shows 14 code examples of the assert_all_integer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
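
Before the examples, here is a minimal sketch of the contract they all rely on. The real implementation lives in simplelearn.asserts; the reimplementation below is only an illustrative assumption about that contract, not the library code:

import numpy

def assert_all_integer(arg):
    # Illustrative sketch, not the simplelearn.asserts implementation:
    # raise AssertionError unless every element of arg (a scalar,
    # sequence, or ndarray) has an integer type.
    arg = numpy.asarray(arg)
    assert numpy.issubdtype(arg.dtype, numpy.integer), \
        "Expected integer elements, got dtype {}".format(arg.dtype)

assert_all_integer([3, 1, 2])          # passes
assert_all_integer(numpy.arange(5))    # passes
# assert_all_integer([0.5, 1.0])       # would raise AssertionError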

Example 1: _get_batches

        def _get_batches(self, tensors, formats, batch_indices):
            '''
            Extracts batches from self.dataset.tensors.

            Overrides the superclass's _get_batches, because h5py.Dataset
            can't handle duplicate or out-of-order elements in batch_indices.
            '''
            if isinstance(batch_indices, slice):
                return super(H5Iterator, self)._get_batches(tensors,
                                                            formats,
                                                            batch_indices)

            assert_is_instance(batch_indices, numpy.ndarray)
            assert_all_integer(batch_indices)

            # pylint: disable=unbalanced-tuple-unpacking
            (unique_batch_indices,
             unique_to_batch_indices) = numpy.unique(batch_indices,
                                                     return_inverse=True)

            super_self = super(H5Iterator, self)

            unique_batches = super_self._get_batches(tensors,
                                                     formats,
                                                     unique_batch_indices)

            return super_self._get_batches(unique_batches,
                                           formats,
                                           unique_to_batch_indices)
Developer: imclab, Project: simplelearn, Lines: 29, Source: h5_dataset.py
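
The key trick here is numpy.unique with return_inverse=True: it produces sorted, duplicate-free indices that h5py.Dataset accepts, plus an inverse map that the second _get_batches call uses to restore the original batch order. A standalone illustration (plain numpy, no h5py required):

import numpy

batch_indices = numpy.array([3, 0, 3, 1])
unique_vals, inverse = numpy.unique(batch_indices, return_inverse=True)
print(unique_vals)  # [0 1 3] -- sorted, duplicates removed
print(inverse)      # [2 0 2 1] -- positions of the originals in unique_vals
assert (unique_vals[inverse] == batch_indices).all()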

Example 2: _next

    def _next(self):

        batch_indices = self._next_batch_indices()
        # pdb.set_trace()

        # sanity-check output of _next_batch_indices()
        if not isinstance(batch_indices, slice):
            assert_all_integer(batch_indices)

            if isinstance(batch_indices, numpy.ndarray):
                # Workaround to a bug in h5py.Dataset where indexing by a
                # length-1 ndarray is treated like indexing with the integer it
                # contains.
                if len(batch_indices) == 1:
                    batch_indices = tuple(batch_indices)
            else:
                assert_is_instance(batch_indices, collections.Sequence)

        result = tuple(self._get_batches(self.dataset.tensors,
                                         self.dataset.formats,
                                         batch_indices))

        # sanity-check size of batches
        for batch, fmt in safe_izip(result, self.dataset.formats):
            assert_equal(batch.shape[fmt.axes.index('b')], self.batch_size)

        return result
Developer: imclab, Project: simplelearn, Lines: 27, Source: dataset.py
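
The length-1 workaround above targets the h5py quirk described in the code comment, where indexing with a one-element ndarray behaved like indexing with the integer it contains. A minimal numpy illustration of why that distinction matters for batch shapes:

import numpy

data = numpy.arange(20).reshape(10, 2)
idx = numpy.array([7])
print(data[idx].shape)     # (1, 2) -- array indexing keeps the batch axis
print(data[idx[0]].shape)  # (2,)   -- integer indexing drops it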

Example 3: __init__

    def __init__(self, all_norb_labels):
        assert_true(numpy.issubdtype(all_norb_labels.dtype, numpy.integer))
        assert_equal(len(all_norb_labels.shape), 2)
        assert_in(all_norb_labels.shape[1], (5, 11))

        classes = all_norb_labels[:, 0]
        instances = all_norb_labels[:, 1]
        assert_all_integer(classes)
        assert_all_integer(instances)
        assert_greater_equal(classes.min(), 0)
        assert_greater_equal(instances.min(), 0)

        max_instance = int(instances.max())

        sparse_ids = classes * (max_instance + 1) + instances
        assert_true(numpy.all(sparse_ids >= instances), "integer overflow")

        sparse_id_to_dense_id = numpy.empty(sparse_ids.max() + 1,
                                            dtype='int32')
        sparse_id_to_dense_id[:] = -1

        unique_sparse_ids = numpy.asarray(list(frozenset(sparse_ids)))
        unique_sparse_ids.sort()
        sparse_id_to_dense_id[unique_sparse_ids] = \
            numpy.arange(len(unique_sparse_ids))

        self.__max_instance = max_instance
        self.sparse_id_to_dense_id = sparse_id_to_dense_id
        self.num_unique_ids = len(unique_sparse_ids)
Developer: SuperElectric, Project: poselearn, Lines: 29, Source: __init__.py
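
The constructor above packs each (class, instance) pair into a unique "sparse" integer, then builds a lookup table mapping those sparse ids to consecutive dense ids. A small self-contained sketch of the same construction:

import numpy

classes = numpy.array([0, 0, 1, 1])
instances = numpy.array([4, 7, 4, 9])
max_instance = int(instances.max())
sparse_ids = classes * (max_instance + 1) + instances   # [ 4  7 14 19]
lookup = numpy.empty(sparse_ids.max() + 1, dtype='int32')
lookup[:] = -1                  # -1 marks pairs that never occur
unique_ids = numpy.unique(sparse_ids)
lookup[unique_ids] = numpy.arange(len(unique_ids))
print(lookup[sparse_ids])       # dense ids: [0 1 2 3]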

Example 4: limit_param_norms

def limit_param_norms(parameter_updater, param, max_norm, input_axes):
    '''
    Modifies the update of an SgdParameterUpdater to limit param L2 norms.

    Parameter norms are computed by summing over the provided input_axes.
    These are so named because you typically want to sum over the axes
    that get dotted with the input to the node (e.g. input_axes=[0] for Linear,
    input_axes=[1, 2, 3] for Conv2D).

    Parameters
    ----------

    parameter_updater: simplelearn.training.ParameterUpdater
      The parameter updater whose updates this will modify.

    param: theano shared variable

      The parameter being updated by parameter_updater.

      (No way to get this from SgdParameterUpdater at present; it updates the
      parameter and its velocity, and there's no way to safely distinguish them
      in parameter_updater.update_pairs.)

    max_norm: floating-point scalar
      The maximum L2 norm to be permitted for the parameters.

    input_axes: Sequence
      A Sequence of ints. The indices to sum over when computing the
      L2 norm of the updated params.
    '''

    assert_is_instance(parameter_updater, ParameterUpdater)
    assert_in(param, parameter_updater.update_pairs)

    assert_floating(max_norm)
    assert_greater(max_norm, 0.0)

    assert_greater(len(input_axes), 0)
    assert_all_integer(input_axes)
    assert_all_greater_equal(input_axes, 0)
    assert_all_less(input_axes, param.ndim)

    input_axes = numpy.asarray(input_axes)
    updated_param = parameter_updater.update_pairs[param]

    norms = T.sqrt(T.sum(T.sqr(updated_param),
                         axis=input_axes,
                         keepdims=True))
    desired_norms = T.clip(norms, 0, max_norm)

    broadcast_mask = numpy.zeros(param.ndim, dtype=bool)
    broadcast_mask[input_axes] = True
    scales = T.patternbroadcast(desired_norms / (1e-7 + norms),
                                broadcast_mask)

    parameter_updater.update_pairs[param] = updated_param * scales
Developer: paulfun92, Project: simplelearn, Lines: 56, Source: training.py
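
To see what the Theano graph computes, here is a plain-numpy analogue of the same max-norm projection. This is only a sketch of the arithmetic; the real function rewrites parameter_updater.update_pairs symbolically:

import numpy

def clip_param_norms(param, max_norm, input_axes):
    # Rescale param so its L2 norm over input_axes never exceeds max_norm.
    norms = numpy.sqrt((param ** 2).sum(axis=tuple(input_axes), keepdims=True))
    desired_norms = numpy.clip(norms, 0.0, max_norm)
    return param * (desired_norms / (1e-7 + norms))

weights = numpy.random.RandomState(0).randn(3, 4) * 10.0
clipped = clip_param_norms(weights, max_norm=2.0, input_axes=[0])
print(numpy.sqrt((clipped ** 2).sum(axis=0)))  # each column norm is now <= 2.0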

Example 5: elevation_label_to_radians

    def elevation_label_to_radians(labels):
        '''
        Converts NORB elevation labels to radians.
        '''
        assert_equal(labels.ndim, 1)
        assert_all_integer(labels)

        result = (labels * 5.0 + 30.) / 180. * numpy.pi
        assert_true(numpy.all(result >= 0.0))
        assert_true(numpy.all(result <= (numpy.pi / 2.0)))

        return result
Developer: SuperElectric, Project: poselearn, Lines: 12, Source: __init__.py
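
For reference, NORB uses nine elevation labels in 5-degree steps starting at 30 degrees, which is exactly what the formula encodes:

import numpy

labels = numpy.arange(9)                         # NORB elevation labels 0..8
radians = (labels * 5.0 + 30.) / 180. * numpy.pi
print(numpy.degrees(radians))                    # [30. 35. 40. ... 70.]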

Example 6: azimuth_label_to_radians

    def azimuth_label_to_radians(labels):
        '''
        Converts NORB azimuth labels to radians.

        Parameters
        ----------

        labels
        '''
        assert_equal(labels.ndim, 1)
        assert_all_integer(labels)

        result = (labels * 10.) / 180. * numpy.pi
        return result
Developer: SuperElectric, Project: poselearn, Lines: 14, Source: __init__.py
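
NORB azimuth labels are the even integers 0 through 34, each worth 10 degrees, so the formula spans 0 to 340 degrees:

import numpy

labels = numpy.arange(0, 36, 2)         # NORB azimuth labels 0, 2, ..., 34
radians = (labels * 10.) / 180. * numpy.pi
print(numpy.degrees(radians))           # [0. 20. 40. ... 340.]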

Example 7: _norb_label_to_camera_direction

def _norb_label_to_camera_direction(labels):
    '''
    Computes camera direction from NORB labels.

    This operates on numeric arrays, unlike the pylearn2 version which operates
    on Theano symbols.
    '''
    assert_false(isinstance(labels, theano.gof.Variable))
    assert_all_integer(labels)
    assert_equal(labels.ndim, 2)
    assert_in(labels.shape[1], (5, 11))

    def elevation_label_to_radians(labels):
        '''
        Converts NORB elevation labels to radians.
        '''
        assert_equal(labels.ndim, 1)
        assert_all_integer(labels)

        result = (labels * 5.0 + 30.) / 180. * numpy.pi
        assert_true(numpy.all(result >= 0.0))
        assert_true(numpy.all(result <= (numpy.pi / 2.0)))

        return result

    def azimuth_label_to_radians(labels):
        '''
        Converts NORB azimuth labels to radians.

        Parameters
        ----------

        labels
        '''
        assert_equal(labels.ndim, 1)
        assert_all_integer(labels)

        result = (labels * 10.) / 180. * numpy.pi
        return result

    elevations = elevation_label_to_radians(labels[:, 2])
    azimuths = azimuth_label_to_radians(labels[:, 3])
    rotated_vectors = rotate_unit_x_vector(elevations, azimuths)

    return numpy.cast[floatX](rotated_vectors)
Developer: SuperElectric, Project: poselearn, Lines: 45, Source: __init__.py
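
rotate_unit_x_vector is a library helper not shown in this listing. A hypothetical stand-in (the sign and axis conventions below are assumptions, not the library's definition) makes the pipeline runnable end to end:

import numpy

def rotate_unit_x_vector(elevations, azimuths):
    # Hypothetical stand-in: tilt the unit x vector up by elevation,
    # then swing it around the vertical axis by azimuth.
    return numpy.stack([numpy.cos(elevations) * numpy.cos(azimuths),
                        numpy.cos(elevations) * numpy.sin(azimuths),
                        numpy.sin(elevations)], axis=1)

labels = numpy.array([[0, 0, 4, 18, 0]])            # one NORB label row
elevations = (labels[:, 2] * 5.0 + 30.) / 180. * numpy.pi
azimuths = (labels[:, 3] * 10.) / 180. * numpy.pi
print(rotate_unit_x_vector(elevations, azimuths))   # a unit direction vector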

Example 8: build_fc_classifier

def build_fc_classifier(input_node, sizes, sparse_init_counts, dropout_include_probabilities, rng, theano_rng):
    """
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0, except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N-1 floats, where N := len(sizes)
      The dropout include probabilities for the outputs of each of the layers,
      except for the final one.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    """
    assert_is_instance(input_node, Node)

    # pylint: disable=no-member
    assert_equal(input_node.output_format.dtype, numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=("b", "f"), shape=(-1, layer_output_size), dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)

    # Not used in this demo, but kept in case we want to start using it again.
    def init_sparse_bias(shared_variable, num_nonzeros, rng):
        """
        Mimics the sparse initialization in
        pylearn2.models.mlp.Linear.set_input_space()
        """

        params = shared_variable.get_value()
        assert_equal(params.shape[0], 1)

        assert_greater_equal(num_nonzeros, 0)
#......... remainder of the code omitted .........
Developer: paulfun92, Project: project_code, Lines: 101, Source: SGD_nesterov.py
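
The sparse initialization the docstring describes (all zeros except k normally-distributed entries) can be sketched in plain numpy. This illustrates the scheme only; the per-column placement of the nonzeros is an assumption about the Pylearn2 recipe, not its code:

import numpy

def sparse_init(rng, weights, num_nonzeros):
    # Zero everything, then give each column (output unit) num_nonzeros
    # randomly placed weights drawn from N(0, 1).
    weights[...] = 0.0
    for col in range(weights.shape[1]):
        rows = rng.choice(weights.shape[0], size=num_nonzeros, replace=False)
        weights[rows, col] = rng.standard_normal(num_nonzeros)

rng = numpy.random.RandomState(1234)
weights = numpy.empty((784, 500))
sparse_init(rng, weights, num_nonzeros=15)
print((weights != 0).sum(axis=0))       # 15 nonzeros in every column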

Example 9: build_conv_classifier

def build_conv_classifier(input_node,
                          filter_shapes,
                          filter_counts,
                          filter_init_uniform_ranges,
                          pool_shapes,
                          pool_strides,
                          affine_output_sizes,
                          affine_init_stddevs,
                          dropout_include_rates,
                          conv_pads,
                          rng,
                          theano_rng):
    '''
    Builds a classification convnet on top of input_node.

    Returns
    -------
    rval: tuple
      (conv_nodes, affine_nodes, output_node), where:
         conv_nodes is a list of the Conv2d nodes.
         affine_nodes is a list of the AffineNodes.
         output_node is the final node, a Softmax.
    '''

    assert_is_instance(input_node, Lcn)

    conv_shape_args = (filter_shapes,
                       pool_shapes,
                       pool_strides)

    for conv_shapes in conv_shape_args:
        for conv_shape in conv_shapes:
            assert_all_integer(conv_shape)
            assert_all_greater(conv_shape, 0)

    conv_args = conv_shape_args + (filter_counts, filter_init_uniform_ranges)
    assert_all_equal([len(c) for c in conv_args])

    assert_equal(len(affine_output_sizes), len(affine_init_stddevs))

    assert_equal(len(dropout_include_rates),
                 len(filter_shapes) + len(affine_output_sizes))

    assert_equal(affine_output_sizes[-1], 10)  # for MNIST

    #assert_equal(input_node.output_format.axes, ('b', '0', '1'))

    #
    # Done sanity-checking args.
    #

    input_shape = input_node.output_format.shape

    # Converts from MNIST's ('b', '0', '1') to ('b', 'c', '0', '1')
    last_node = input_node

    conv_dropout_include_rates = \
        dropout_include_rates[:len(filter_shapes)]

    # Adds a dropout-conv-bias-relu-maxpool stack for each element in
    # filter_XXXX

    conv_layers = []

    def uniform_init(rng, params, init_range):
        '''
        Fills params with values uniformly sampled from
        [-init_range, init_range]
        '''

        assert_floating(init_range)
        assert_greater_equal(init_range, 0)

        values = params.get_value()
        values[...] = rng.uniform(low=-init_range,
                                  high=init_range,
                                  size=values.shape)
        params.set_value(values)

    for (filter_shape,
         filter_count,
         filter_init_range,
         pool_shape,
         pool_stride,
         conv_dropout_include_rate,
         conv_pad)                  in safe_izip(filter_shapes,
                                                 filter_counts,
                                                 filter_init_uniform_ranges,
                                                 pool_shapes,
                                                 pool_strides,
                                                 conv_dropout_include_rates,
                                                 conv_pads):
        if conv_dropout_include_rate != 1.0:
            last_node = Dropout(last_node,
                                conv_dropout_include_rate,
                                theano_rng)

        last_node = Conv2dLayer(last_node,
                                filter_shape,
                                filter_count,
#......... remainder of the code omitted .........
Developer: paulfun92, Project: project_code, Lines: 101, Source: cifar10_conv3.py
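
uniform_init above works on a Theano shared variable through get_value/set_value. A minimal stand-in shows its effect without Theano; the FakeShared class is purely for this demo, and the helper is re-inlined here without its assert guards:

import numpy

class FakeShared(object):
    # Minimal stand-in for a Theano shared variable, for this demo only.
    def __init__(self, value):
        self._value = numpy.asarray(value)
    def get_value(self):
        return self._value.copy()
    def set_value(self, value):
        self._value = numpy.asarray(value)

def uniform_init(rng, params, init_range):
    # Same arithmetic as the helper in the listing, minus the asserts.
    values = params.get_value()
    values[...] = rng.uniform(low=-init_range, high=init_range,
                              size=values.shape)
    params.set_value(values)

rng = numpy.random.RandomState(4321)
filters = FakeShared(numpy.zeros((32, 3, 5, 5)))  # (filter_count, 'c', '0', '1')
uniform_init(rng, filters, init_range=0.05)
print(abs(filters.get_value()).max() <= 0.05)     # True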

Example 10: build_conv_classifier

def build_conv_classifier(input_node,
                          filter_shapes,
                          filter_counts,
                          filter_init_uniform_ranges,
                          pool_shapes,
                          pool_strides,
                          affine_output_sizes,
                          affine_init_stddevs,
                          dropout_include_rates,
                          conv_pads,
                          rng,
                          theano_rng):
    '''
    Builds a classification convnet on top of input_node.

    Returns
    -------
    rval: tuple
      (conv_nodes, affine_nodes, output_node), where:
         conv_nodes is a list of the Conv2d nodes.
         affine_nodes is a list of the AffineNodes.
         output_node is the final node, a Softmax.
    '''

    assert_is_instance(input_node, Lcn)

    conv_shape_args = (filter_shapes,
                       pool_shapes,
                       pool_strides)

    for conv_shapes in conv_shape_args:
        for conv_shape in conv_shapes:
            assert_all_integer(conv_shape)
            assert_all_greater(conv_shape, 0)

    conv_args = conv_shape_args + (filter_counts, filter_init_uniform_ranges)
    assert_all_equal([len(c) for c in conv_args])

    assert_equal(len(affine_output_sizes), len(affine_init_stddevs))

    assert_equal(len(dropout_include_rates),
                 len(filter_shapes) + len(affine_output_sizes))

    assert_equal(affine_output_sizes[-1], 10)  # for MNIST

    #assert_equal(input_node.output_format.axes, ('b', '0', '1'))

    #
    # Done sanity-checking args.
    #

    input_shape = input_node.output_format.shape

    # Converts from MNIST's ('b', '0', '1') to ('b', 'c', '0', '1')
    last_node = input_node

    conv_dropout_include_rates = \
        dropout_include_rates[:len(filter_shapes)]

    # Adds a dropout-conv-bias-relu-maxpool stack for each element in
    # filter_XXXX

    conv_layers = []

    def uniform_init(rng, params, init_range):
        '''
        Fills params with values uniformly sampled from
        [-init_range, init_range]
        '''

        assert_floating(init_range)
        assert_greater_equal(init_range, 0)

        values = params.get_value()
        values[...] = rng.uniform(low=-init_range,
                                  high=init_range,
                                  size=values.shape)
        params.set_value(values)

    for (filter_shape,
         filter_count,
         filter_init_range,
         pool_shape,
         pool_stride,
         conv_dropout_include_rate,
         conv_pad)                 in safe_izip(filter_shapes,
                                                 filter_counts,
                                                 filter_init_uniform_ranges,
                                                 pool_shapes,
                                                 pool_strides,
                                                 conv_dropout_include_rates,
                                                 conv_pads):
        if conv_dropout_include_rate != 1.0:
            last_node = Dropout(last_node,
                                conv_dropout_include_rate,
                                theano_rng)

        last_node = Conv2dLayer(last_node,
                                filter_shape,
                                filter_count,
#......... remainder of the code omitted .........
Developer: paulfun92, Project: project_code, Lines: 101, Source: LBFGS_mini_batch.py

Example 11: build_fc_classifier

def build_fc_classifier(input_node,
                        sizes,
                        sparse_init_counts,
                        dropout_include_probabilities,
                        rng,
                        theano_rng):
    '''
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0, except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N-1 floats, where N := len(sizes)
      The dropout include probabilities for the outputs of each of the layers,
      except for the final one.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    '''
    assert_is_instance(input_node, Node)

    # pylint: disable=no-member
    assert_equal(input_node.output_format.dtype,
                 numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    '''
    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=('b', 'f'),
                                    shape=(-1, layer_output_size),
                                    dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)


    # Not used in this demo, but kept in case we want to start using it again.
#......... remainder of the code omitted .........
Developer: paulfun92, Project: project_code, Lines: 101, Source: GD.py

Example 12: make_h5_file

def make_h5_file(path,
                 partition_names,
                 partition_sizes,
                 tensor_names,
                 tensor_formats):
    '''
    Creates a h5py.File with groups that can be wrapped by H5Dataset.

    Usage
    -----

    h5_file = make_h5_file(file_path, p_names, p_sizes, t_names, t_formats)
      1: Call this function to create a h5py.File object
      2: Fill the h5py.File's data tensors with appropriate data.
      3: Close the h5py.File, then re-open it using H5Dataset,
         a read-only dataset interface.

    Parameters
    ----------
    partition_names: Sequence
      Names of the sub-datasets, e.g. ['train', 'test'].
      May only contain alphanumeric characters and underscores, as
      load_h5_dataset() uses these names as NamedTuple names.

    partition_sizes: Sequence
      Number of examples in each sub-dataset, e.g. [50000, 10000] for
      MNIST.

    tensor_names: Sequence
      Names of the data tensors, e.g. ['images', 'labels']. Each
      sub-tensor uses the same tensor_names.

    tensor_formats: Sequence
      The DenseFormats of the data tensors, e.g. (for MNIST):
      [DenseFormat(axes=['b', '0', '1'], shape=[-1, 28, 28], dtype='uint8'),
       DenseFormat(axes=['b'], shape=[-1], dtype='uint8')]

    The example parameter values above would create an h5py.File
    with the following hierarchical structure:

    h5py.File/
      'partition_names': an h5py.Dataset of strings, ['train', 'test']
      'tensor_names': an h5py.Dataset of strings, ['images', 'labels']
      'partitions': an h5py.Group with the following members:
        'train': an h5py.Group, with the following members:
          'images': an h5py.Dataset tensor, with shape given by
                    partition_sizes[0] and tensor_formats[0].
          'labels': an h5py.Dataset tensor, with shape given by
                    partition_sizes[0] and tensor_formats[1].
        'test': an h5py.Group, with the following members:
          'images': an h5py.Dataset tensor, with shape given by
                    partition_sizes[1] and tensor_formats[0].
          'labels': an h5py.Dataset tensor, with shape given by
                    partition_sizes[1] and tensor_formats[1].
    '''

    assert_is_instance(path, basestring)
    assert_equal(os.path.splitext(path)[1], '.h5')
    absolute_path = os.path.abspath(path)
    assert_true(absolute_path.startswith(simplelearn.data.data_path),
                ("{} is not a subdirectory of simplelearn.data.data_path "
                 "{}").format(absolute_path, simplelearn.data.data_path))

    assert_all_is_instance(partition_names, basestring)
    assert_equal(len(frozenset(partition_names)), len(partition_names))
    for partition_name in partition_names:
        for char in partition_name:
            if not (char.isalnum() or char == "_"):
                raise ValueError("Partition name {} must contain only "
                                 "alphanumeric characters or "
                                 "underscores.".format(partition_name))

    assert_all_integer(partition_sizes)
    assert_all_greater_equal(partition_sizes, 0)

    assert_all_is_instance(tensor_names, basestring)
    assert_equal(len(frozenset(tensor_names)), len(tensor_names))

    assert_all_is_instance(tensor_formats, DenseFormat)
    for tensor_format in tensor_formats:
        assert_in('b', tensor_format.axes)

    # Done sanity-checking args

    h5_file = h5py.File(absolute_path, mode='w')

    # Add ordered lists of tensor/partition names, since h5py.Group.keys()
    # can't be trusted to list group members in the order that they were
    # added in.

    def add_ordered_names(list_name, names, group):
        '''
        Adds a list of names to a group, as a h5py.Dataset of strings.
        '''
        max_name_length = max([len(n) for n in names])
        string_dtype = 'S{}'.format(max_name_length)
        result = group.create_dataset(list_name,
                                      (len(names), ),
                                      dtype=string_dtype)
        for n, name in enumerate(names):
#......... remainder of the code omitted .........
Developer: imclab, Project: simplelearn, Lines: 101, Source: h5_dataset.py
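
A hypothetical call, mirroring the docstring's MNIST example. The DenseFormat import path and the file location under simplelearn.data.data_path are assumptions:

import os
import simplelearn.data
from simplelearn.formats import DenseFormat  # assumed import path

h5_file = make_h5_file(
    path=os.path.join(simplelearn.data.data_path, 'mnist', 'mnist.h5'),
    partition_names=['train', 'test'],
    partition_sizes=[50000, 10000],
    tensor_names=['images', 'labels'],
    tensor_formats=[
        DenseFormat(axes=['b', '0', '1'], shape=[-1, 28, 28], dtype='uint8'),
        DenseFormat(axes=['b'], shape=[-1], dtype='uint8')])
# ... fill the h5py.Dataset tensors with data, then:
h5_file.close()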

Example 13: build_fc_classifier

def build_fc_classifier(input_node,
                        sizes,
                        sparse_init_counts,
                        dropout_include_probabilities,
                        rng,
                        theano_rng):
    '''
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0, except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N-1 floats, where N := len(sizes)
      The dropout include probabilities for the outputs of each of the layers,
      except for the final one.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    '''
    assert_is_instance(input_node, Node)
    assert_equal(input_node.output_format.dtype,
                 numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=('b', 'f'),
                                    shape=(-1, layer_output_size),
                                    dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)

    def init_sparse_bias(shared_variable, num_nonzeros, rng):
        '''
        Mimics the sparse initialization in
        pylearn2.models.mlp.Linear.set_input_space()
        '''

#......... remainder of the code omitted .........
Developer: paulfun92, Project: project_code, Lines: 101, Source: RMSprop_nesterov2_mnist_fully_connected.py

Example 14: make_instance_dataset

def make_instance_dataset(norb_name,
                          a_norb,
                          b_norb,
                          test_elevation_stride,
                          test_azimuth_stride,
                          objects=None):
    '''
    Creates instance recognition datasets from category recognition datasets.

    Merges two category recognition datasets (with disjoint object instances),
    and re-partitions them into instance recognition datasets (with disjoint
    camera views).

    The instance recognition dataset consists of a train and test set.

    All objects not selected by <objects> are ignored.

    Of the remaining images, the test set consists of all images that satisfy
    both test_elevation_stride and test_azimuth_stride. The other images
    are used for the training set.

    If the category dataset is in stereo, only the left stereo images are
    used.

    Parameters
    ----------
    norb_name: str
      The name of the category recognition dataset (e.g. 'big_norb'). Used to
      build the name of the instance recognition dataset. Alphanumeric
      characters and '_' only.

    a_norb: NORB Dataset
      One of the category recognition datasets (i.e. training set).

    b_norb: NORB Dataset
      The other category recognition dataset (i.e. testing set).

    test_elevation_stride: int
      Use every M'th elevation as a test image.

    test_azimuth_stride: int
      Use every N'th azimuth as a test image.

    objects: Sequence
      [(c0, i0), (c1, i1), ..., (cN, iN)]
      Each (cx, ix) pair specifies an object to include, by their
      class and instance labels cx and ix.

    Returns
    -------
    rval: str
      The path to the newly created .h5 file.
    '''

    assert_is_instance(norb_name, basestring)
    assert_all_true(c.isalnum() or c == '_' for c in norb_name)

    assert_is_instance(a_norb, Dataset)
    assert_is_instance(b_norb, Dataset)
    assert_all_equal(a_norb.names, b_norb.names)
    assert_all_equal(a_norb.formats, b_norb.formats)

    assert_integer(test_elevation_stride)
    assert_greater(test_elevation_stride, 0)

    assert_integer(test_azimuth_stride)
    assert_greater(test_azimuth_stride, 0)

    if objects is not None:
        assert_is_instance(objects, Sequence)
        for id_pair in objects:
            assert_equal(len(id_pair), 2)
            assert_all_integer(id_pair)
            assert_all_greater_equal(id_pair, 0)

    #
    # Done sanity-checking args
    #

    (category_index,
     instance_index,
     azimuth_index,
     elevation_index) = range(4)  # no need for lighting_index (= 4)

    def get_row_indices(labels,
                        test_elevation_stride,
                        test_azimuth_stride,
                        objects):
        '''
        Returns row indices of the training and testing sets.
        '''

        logical_and = numpy.logical_and

        if objects is not None:
            objects = numpy.asarray(objects)
            obj_cols = (category_index, instance_index)
            object_mask = (labels[:, obj_cols] == objects).all(axis=1)
        else:
            object_mask = numpy.ones(labels.shape[0], dtype=bool)

#......... remainder of the code omitted .........
Developer: imclab, Project: simplelearn, Lines: 101, Source: make_norb_instance_dataset.py
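
The camera-view split can be sketched with boolean masks over the label columns. The test below divides the azimuth label by 2 because NORB azimuth labels come in steps of 2; that reading of "every M'th elevation / N'th azimuth" is an assumption:

import numpy

(category_index, instance_index,
 azimuth_index, elevation_index) = range(4)     # as in the listing above

# Hypothetical label grid: one row per camera view.
labels = numpy.zeros((9 * 18, 5), dtype=int)
labels[:, elevation_index] = numpy.repeat(numpy.arange(9), 18)
labels[:, azimuth_index] = numpy.tile(numpy.arange(0, 36, 2), 9)

test_elevation_stride, test_azimuth_stride = 2, 3
test_mask = numpy.logical_and(
    labels[:, elevation_index] % test_elevation_stride == 0,
    (labels[:, azimuth_index] // 2) % test_azimuth_stride == 0)
train_mask = numpy.logical_not(test_mask)
print(test_mask.sum())    # 30 test views
print(train_mask.sum())   # 132 training views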


Note: The simplelearn.asserts.assert_all_integer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright belongs to the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.