

Python asserts.assert_all_greater Function Code Examples

This article collects typical usage examples of the simplelearn.asserts.assert_all_greater function in Python. If you are wondering what exactly assert_all_greater does, how to call it, or what real-world usage looks like, the curated code examples below should help.


Seven code examples of the assert_all_greater function are shown below, sorted by popularity by default.
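Before turning to the examples, here is a minimal usage sketch. Judging from the calls below, assert_all_greater(values, bound) checks element-wise that every entry of its first argument is strictly greater than the second argument (a scalar or a broadcastable sequence), raising an AssertionError otherwise; these semantics are inferred from the examples, not taken from simplelearn's documentation.

from simplelearn.asserts import assert_all_greater

# Scalar bound: every element must be strictly greater than 0.
sizes = [500, 500, 10]               # e.g. MLP layer output sizes
assert_all_greater(sizes, 0)         # passes

# Element-wise bound, as in Examples 6 and 7: compare an image's padded
# spatial shape against the padding on each side.
padded_shape = (30, 30)
pads = (1, 1)
assert_all_greater(padded_shape, pads)   # passes: 30 > 1 on both axes

# assert_all_greater([3, 0, 5], 0)       # would raise: 0 is not > 0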

Example 1: build_conv_classifier

def build_conv_classifier(input_node,
                          filter_shapes,
                          filter_counts,
                          filter_init_uniform_ranges,
                          pool_shapes,
                          pool_strides,
                          affine_output_sizes,
                          affine_init_stddevs,
                          dropout_include_rates,
                          conv_pads,
                          rng,
                          theano_rng):
    '''
    Builds a classification convnet on top of input_node.

    Returns
    -------
    rval: tuple
      (conv_nodes, affine_nodes, output_node), where:
         conv_nodes is a list of the Conv2d nodes.
         affine_nodes is a list of the AffineNodes.
         output_node is the final node, a Softmax.
    '''

    assert_is_instance(input_node, Lcn)

    conv_shape_args = (filter_shapes,
                       pool_shapes,
                       pool_strides)

    for conv_shapes in conv_shape_args:
        for conv_shape in conv_shapes:
            assert_all_integer(conv_shape)
            assert_all_greater(conv_shape, 0)

    conv_args = conv_shape_args + (filter_counts, filter_init_uniform_ranges)
    assert_all_equal([len(c) for c in conv_args])

    assert_equal(len(affine_output_sizes), len(affine_init_stddevs))

    assert_equal(len(dropout_include_rates),
                 len(filter_shapes) + len(affine_output_sizes))

    assert_equal(affine_output_sizes[-1], 10)  # for MNIST

    #assert_equal(input_node.output_format.axes, ('b', '0', '1'))

    #
    # Done sanity-checking args.
    #

    input_shape = input_node.output_format.shape

    # Converts from MNIST's ('b', '0', '1') to ('b', 'c', '0', '1')
    last_node = input_node

    conv_dropout_include_rates = \
        dropout_include_rates[:len(filter_shapes)]

    # Adds a dropout-conv-bias-relu-maxpool stack for each element in
    # filter_XXXX

    conv_layers = []

    def uniform_init(rng, params, init_range):
        '''
        Fills params with values uniformly sampled from
        [-init_range, init_range]
        '''

        assert_floating(init_range)
        assert_greater_equal(init_range, 0)

        values = params.get_value()
        values[...] = rng.uniform(low=-init_range,
                                  high=init_range,
                                  size=values.shape)
        params.set_value(values)

    for (filter_shape,
         filter_count,
         filter_init_range,
         pool_shape,
         pool_stride,
         conv_dropout_include_rate,
         conv_pad)                  in safe_izip(filter_shapes,
                                                 filter_counts,
                                                 filter_init_uniform_ranges,
                                                 pool_shapes,
                                                 pool_strides,
                                                 conv_dropout_include_rates,
                                                 conv_pads):
        if conv_dropout_include_rate != 1.0:
            last_node = Dropout(last_node,
                                conv_dropout_include_rate,
                                theano_rng)

        last_node = Conv2dLayer(last_node,
                                filter_shape,
                                filter_count,
#.........(rest of the code omitted).........
Developer: paulfun92, Project: project_code, Lines: 101, Source file: cifar10_conv3.py

Example 2: build_fc_classifier

def build_fc_classifier(input_node, sizes, sparse_init_counts, dropout_include_probabilities, rng, theano_rng):
    """
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0,
    except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N floats, where N = len(sizes).
      The dropout include probability applied to the input of each of the
      N layers, in order.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    """
    assert_is_instance(input_node, Node)

    # pylint: disable=no-member
    assert_equal(input_node.output_format.dtype, numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=("b", "f"), shape=(-1, layer_output_size), dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)

    # Not used in this demo, but kept in case we want to start using it
    # again.
    def init_sparse_bias(shared_variable, num_nonzeros, rng):
        """
        Mimics the sparse initialization in
        pylearn2.models.mlp.Linear.set_input_space()
        """

        params = shared_variable.get_value()
        assert_equal(params.shape[0], 1)

        assert_greater_equal(num_nonzeros, 0)
#.........(rest of the code omitted).........
Developer: paulfun92, Project: project_code, Lines: 101, Source file: SGD_nesterov.py
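The sparse initialization scheme described in the docstring above (each of the first N-1 layers gets a fixed number of nonzero weights drawn from a standard normal, with everything else set to 0.0) is cut off by the elision; only the opening lines of init_sparse_bias survive. Below is a rough numpy sketch of that scheme for a weight matrix. The function name and the (num_inputs, num_outputs) weight layout are illustrative assumptions, not taken from the project:

import numpy

def sparse_init_sketch(shape, num_nonzeros, rng):
    # Illustrative only: weights are assumed to have shape
    # (num_inputs, num_outputs). Each output unit (column) gets
    # num_nonzeros weights drawn from a normal distribution with
    # stddev=1.0; all other weights stay at 0.0.
    values = numpy.zeros(shape, dtype='float32')
    num_inputs = shape[0]
    for column in range(shape[1]):
        rows = rng.choice(num_inputs, size=num_nonzeros, replace=False)
        values[rows, column] = rng.standard_normal(num_nonzeros)
    return values

rng = numpy.random.RandomState(1234)
weights = sparse_init_sketch((784, 500), num_nonzeros=15, rng=rng)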

Example 3: build_conv_classifier

def build_conv_classifier(input_node,
                          filter_shapes,
                          filter_counts,
                          filter_init_uniform_ranges,
                          pool_shapes,
                          pool_strides,
                          affine_output_sizes,
                          affine_init_stddevs,
                          dropout_include_rates,
                          conv_pads,
                          rng,
                          theano_rng):
    '''
    Builds a classification convnet on top of input_node.

    Returns
    -------
    rval: tuple
      (conv_nodes, affine_nodes, output_node), where:
         conv_nodes is a list of the Conv2d nodes.
         affine_nodes is a list of the AffineNodes.
         output_node is the final node, a Softmax.
    '''

    assert_is_instance(input_node, Lcn)

    conv_shape_args = (filter_shapes,
                       pool_shapes,
                       pool_strides)

    for conv_shapes in conv_shape_args:
        for conv_shape in conv_shapes:
            assert_all_integer(conv_shape)
            assert_all_greater(conv_shape, 0)

    conv_args = conv_shape_args + (filter_counts, filter_init_uniform_ranges)
    assert_all_equal([len(c) for c in conv_args])

    assert_equal(len(affine_output_sizes), len(affine_init_stddevs))

    assert_equal(len(dropout_include_rates),
                 len(filter_shapes) + len(affine_output_sizes))

    assert_equal(affine_output_sizes[-1], 10)  # for MNIST

    #assert_equal(input_node.output_format.axes, ('b', '0', '1'))

    #
    # Done sanity-checking args.
    #

    input_shape = input_node.output_format.shape

    # Converts from MNIST's ('b', '0', '1') to ('b', 'c', '0', '1')
    last_node = input_node

    conv_dropout_include_rates = \
        dropout_include_rates[:len(filter_shapes)]

    # Adds a dropout-conv-bias-relu-maxpool stack for each element in
    # filter_XXXX

    conv_layers = []

    def uniform_init(rng, params, init_range):
        '''
        Fills params with values uniformly sampled from
        [-init_range, init_range]
        '''

        assert_floating(init_range)
        assert_greater_equal(init_range, 0)

        values = params.get_value()
        values[...] = rng.uniform(low=-init_range,
                                  high=init_range,
                                  size=values.shape)
        params.set_value(values)

    for (filter_shape,
         filter_count,
         filter_init_range,
         pool_shape,
         pool_stride,
         conv_dropout_include_rate,
         conv_pad)                  in safe_izip(filter_shapes,
                                                 filter_counts,
                                                 filter_init_uniform_ranges,
                                                 pool_shapes,
                                                 pool_strides,
                                                 conv_dropout_include_rates,
                                                 conv_pads):
        if conv_dropout_include_rate != 1.0:
            last_node = Dropout(last_node,
                                conv_dropout_include_rate,
                                theano_rng)

        last_node = Conv2dLayer(last_node,
                                filter_shape,
                                filter_count,
#.........(rest of the code omitted).........
Developer: paulfun92, Project: project_code, Lines: 101, Source file: LBFGS_mini_batch.py

Example 4: build_fc_classifier

def build_fc_classifier(input_node,
                        sizes,
                        sparse_init_counts,
                        dropout_include_probabilities,
                        rng,
                        theano_rng):
    '''
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0,
    except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N floats, where N = len(sizes).
      The dropout include probability applied to the input of each of the
      N layers, in order.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    '''
    assert_is_instance(input_node, Node)

    # pylint: disable=no-member
    assert_equal(input_node.output_format.dtype,
                 numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=('b', 'f'),
                                    shape=(-1, layer_output_size),
                                    dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)


    # Not used in this demo, but kept in case we want to start using it
    # again.
#.........(rest of the code omitted).........
Developer: paulfun92, Project: project_code, Lines: 101, Source file: GD.py

Example 5: build_fc_classifier

def build_fc_classifier(input_node,
                        sizes,
                        sparse_init_counts,
                        dropout_include_probabilities,
                        rng,
                        theano_rng):
    '''
    Builds a stack of fully-connected layers followed by a Softmax.

    Each hidden layer will be preceded by a ReLU.

    Initialization:

    Weights are initialized in the same way as in Pylearn2's MLP tutorial:
    pylearn2/scripts/tutorials/multilayer_perceptron/mlp_tutorial_part_3.yaml

    This means the following:

    Of the N affine layers, the weights of the first N-1 are all set to 0.0,
    except
    for k randomly-chosen elements, which are set to some random number drawn
    from the normal distribution with stddev=1.0.

    The biases are all initialized to 0.0.
    The last layer's weights and biases are both set to 0.0.

    Parameters
    ----------
    input_node: Node
      The node to build the stack on.

    sizes: Sequence
      A sequence of ints, indicating the output sizes of each layer.
      The last int is the number of classes.

    sparse_init_counts:
      A sequence of N-1 ints, where N = len(sizes).
      Used to initialize the weights of the first N-1 layers.
      If the n'th element is x, this means that the n'th layer
      will have x nonzeros, with the rest initialized to zeros.

    dropout_include_probabilities: Sequence
      A Sequence of N floats, where N = len(sizes).
      The dropout include probability applied to the input of each of the
      N layers, in order.
      If any of these probabilities is 1.0, the corresponding Dropout node
      will be omitted.

    rng: numpy.random.RandomState
      The RandomState to draw initial weights from.

    theano_rng: theano.tensor.shared_randomstreams.RandomStreams
      The RandomStreams to draw dropout masks from.

    Returns
    -------
    rval: tuple
      (affine_nodes, output_node), where affine_nodes is a list of the
      AffineNodes, in order, and output_node is the final node, a Softmax.
    '''
    assert_is_instance(input_node, Node)
    assert_equal(input_node.output_format.dtype,
                 numpy.dtype(theano.config.floatX))

    assert_greater(len(sizes), 0)
    assert_all_greater(sizes, 0)

    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_all_integer(sparse_init_counts)
    assert_all_greater(sparse_init_counts, 0)
    assert_all_less_equal(sparse_init_counts, sizes[:-1])

    assert_equal(len(dropout_include_probabilities), len(sizes))

    affine_nodes = []

    last_node = input_node

    for layer_index, layer_output_size in enumerate(sizes):
        # Add dropout, if asked for
        include_probability = dropout_include_probabilities[layer_index]
        if include_probability != 1.0:
            last_node = Dropout(last_node, include_probability, theano_rng)

        output_format = DenseFormat(axes=('b', 'f'),
                                    shape=(-1, layer_output_size),
                                    dtype=None)

        if layer_index < (len(sizes) - 1):
            last_node = AffineLayer(last_node, output_format)
        else:
            last_node = SoftmaxLayer(last_node, output_format)

        affine_nodes.append(last_node.affine_node)

    def init_sparse_bias(shared_variable, num_nonzeros, rng):
        '''
        Mimics the sparse initialization in
        pylearn2.models.mlp.Linear.set_input_space()
        '''

#.........(rest of the code omitted).........
Developer: paulfun92, Project: project_code, Lines: 101, Source file: RMSprop_nesterov2_mnist_fully_connected.py

Example 6: apply_subwindow_func

    def apply_subwindow_func(subwindow_func,
                             padded_images,
                             pads,
                             window_shape,
                             strides):
        '''
        Applies a sliding-window function to all subwindows of a feature map.

        Parameters
        ----------
        subwindow_func: function
          A function that takes a subwindow and returns a scalar.
          Input: tensor with shape [BATCH_SIZE, NUM_CHANNELS, ROWS, COLS]
          Output: tensor with shape [BATCH_SIZE, NUM_CHANNELS]

        padded_images: numpy.ndarray
          A feature map with shape [BATCH_SIZE, NUM_CHANNELS, ROWS, COLS].
          This has pads[0] rows and pads[1] columns of padding on each side.

        pads: Sequence
          [pad_rows, pad_columns], the # of padded rows and columns on each
          side of the image.

        window_shape: Sequence
          [NUM_ROWS, NUM_COLUMNS] of the sliding window.

        strides: Sequence
          [ROW_STRIDE, COLUMN_STRIDE], or how many rows/columns to skip
          between applications of the sliding window.

        '''
        assert_equal(padded_images.ndim, 4)
        assert_all_greater(padded_images.shape[2:], pads)
        _assert_is_shape2d(window_shape)
        _assert_is_shape2d(strides)

        pads, window_shape, strides = (numpy.asarray(a) for a in (pads,
                                                                  window_shape,
                                                                  strides))

        assert_all_greater(numpy.asarray(padded_images.shape[2:]), 2 * pads)

        # Check that pad region is full of the same value
        if pads[0] > 0:
            pad_value = padded_images[0, 0, 0, 0]
            assert_true(numpy.all(padded_images[:, :, :pads[0], :] ==
                                  pad_value))
            assert_true(numpy.all(padded_images[:, :, -pads[0]:, :] ==
                                  pad_value))

        if pads[1] > 0:
            pad_value = padded_images[0, 0, 0, 0]
            assert_true(numpy.all(padded_images[:, :, :, :pads[1]] ==
                                  pad_value))
            assert_true(numpy.all(padded_images[:, :, :, -pads[1]:] ==
                                  pad_value))

        rows, cols = (range(0,
                            padded_images.shape[i + 2] - window_shape[i] + 1,
                            strides[i])
                      for i in (0, 1))
        output_image = None

        for out_r, in_r in enumerate(rows):
            for out_c, in_c in enumerate(cols):
                subwindow = padded_images[:,
                                          :,
                                          in_r:(in_r + window_shape[0]),
                                          in_c:(in_c + window_shape[1])]
                output = subwindow_func(subwindow)
                assert_equal(output.ndim, 2)

                # check that subwindow_func preserved the batch size
                assert_equal(output.shape[0], padded_images.shape[0])
                assert_greater(output.shape[1], 0)

                if output_image is None:
                    output_image = numpy.zeros((output.shape[0],
                                                output.shape[1],
                                                len(rows),
                                                len(cols)),
                                               dtype=output.dtype)

                output_image[:, :, out_r, out_c] = output

        return output_image
Developer: paulfun92, Project: simplelearn, Lines: 79, Source file: test_nodes.py
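Because apply_subwindow_func is shown here in full, it can be exercised directly. The sketch below (which assumes the function has been lifted out of its enclosing test as a standalone helper) uses it to compute a max-pooling ground truth, padding with -numpy.inf so that the padding can never win the max; that pad value matches the pad_values convention described in Example 7:

import numpy

batch_size, channels, rows, cols = 2, 3, 6, 6
pads = numpy.asarray([1, 1])

rng = numpy.random.RandomState(0)
images = rng.uniform(size=(batch_size, channels, rows, cols)).astype('float32')

# Surround the images with a one-pixel border of -inf on each side.
padded = numpy.full((batch_size,
                     channels,
                     rows + 2 * pads[0],
                     cols + 2 * pads[1]),
                    -numpy.inf,
                    dtype='float32')
padded[:, :, pads[0]:pads[0] + rows, pads[1]:pads[1] + cols] = images

def max_pool_window(subwindow):
    # Max over the two spatial axes, keeping the batch and channel axes.
    return subwindow.max(axis=(2, 3))

expected = apply_subwindow_func(max_pool_window,
                                padded,
                                pads,
                                window_shape=(2, 2),
                                strides=(2, 2))
assert expected.shape == (batch_size, channels, 4, 4)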

Example 7: _sliding_window_2d_testimpl

def _sliding_window_2d_testimpl(expected_subwindow_funcs,
                                pad_values,
                                make_node_funcs,
                                make_pad_args_funcs,
                                rtol=None):
    '''
    Implementation of tests for 2D sliding-window nodes like Pool2D and Conv2d.

    Parameters
    ----------
    expected_subwindow_funcs: Sequence
      A Sequence of subwindow functions.
      These take a subwindow and return a scalar.
      Input: tensor with shape [BATCH_SIZE, NUM_CHANNELS, ROWS, COLS]
      Output: tensor with shape [BATCH_SIZE, NUM_CHANNELS]

    pad_values: Sequence
      A sequence of pad filler values to use for each of the
      expected_subwindow_funcs. For example, if expected_subwindow_funcs
      is [average_pool, max_pool], use [0.0, -numpy.inf].

    make_node_funcs: Sequence
      A Sequence of functions that create sliding-window Nodes to be tested
      against the ground-truth provided by the corresponding
      expected_subwindow_funcs. Its parameters are as follows:

      Parameters
      ----------
      input_node: Node
      window_shape: Sequence
        [NUM_ROWS, NUM_COLUMNS] of the sliding window.
      strides: Sequence
        [ROW_STRIDE, COLUMN_STRIDE], or how many rows/columns to skip between
        applications of the sliding window.

      pad: Sequence
        [ROW_PAD, COLUMN_PAD], or # of zero-padding rows/columns to add to each
        side of the image.

      axis_map: dict
        Maps strings to strings. Optional.
        If the node uses different axis names than 'b', 'c', '0', '1', this
        specifies the mapping from the node's axis names to 'b', 'c', '0', '1'.

    make_pad_args_funcs: Sequence
      A Sequence of functions that take a window_shape arg (2d array) and
      returns an Iterable of 'pad' arguments, which can be strings or 2d arrays
      of ints.
    '''

    assert_is_instance(expected_subwindow_funcs, Sequence)
    assert_is_instance(pad_values, Sequence)
    assert_is_instance(make_node_funcs, Sequence)

    # TODO: change this to construct a Toeplitz matrix out of padded_images,
    # so we get a giant stack of C X WR X WC matrices, which can then be fed
    # to subwindow_func as a single batch.
    # See scipy.linalg.toeplitz
    def apply_subwindow_func(subwindow_func,
                             padded_images,
                             pads,
                             window_shape,
                             strides):
        '''
        Applies a sliding-window function to all subwindows of a feature map.

        Parameters
        ----------
        subwindow_func: function
          A function that takes a subwindow and returns a scalar.
          Input: tensor with shape [BATCH_SIZE, NUM_CHANNELS, ROWS, COLS]
          Output: tensor with shape [BATCH_SIZE, NUM_CHANNELS]

        padded_images: numpy.ndarray
          A feature map with shape [BATCH_SIZE, NUM_CHANNELS, ROWS, COLS].
          This has pads[0] rows and pads[1] columns of padding on each side.

        pads: Sequence
          [pad_rows, pad_columns], the # of padded rows and columns on each
          side of the image.

        window_shape: Sequence
          [NUM_ROWS, NUM_COLUMNS] of the sliding window.

        strides: Sequence
          [ROW_STRIDE, COLUMN_STRIDE], or how many rows/columns to skip
          between applications of the sliding window.

        '''
        assert_equal(padded_images.ndim, 4)
        assert_all_greater(padded_images.shape[2:], pads)
        _assert_is_shape2d(window_shape)
        _assert_is_shape2d(strides)

        pads, window_shape, strides = (numpy.asarray(a) for a in (pads,
                                                                  window_shape,
                                                                  strides))

        assert_all_greater(numpy.asarray(padded_images.shape[2:]), 2 * pads)

        # Check that pad region is full of the same value
        if pads[0] > 0:
            pad_value = padded_images[0, 0, 0, 0]
            assert_true(numpy.all(padded_images[:, :, :pads[0], :] ==
                                  pad_value))
            assert_true(numpy.all(padded_images[:, :, -pads[0]:, :] ==
                                  pad_value))
#.........(rest of the code omitted).........
Developer: paulfun92, Project: simplelearn, Lines: 101, Source file: test_nodes.py
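Since the code that consumes make_pad_args_funcs is elided, here is a hypothetical example of one such function, based only on the docstring above. The docstring also allows string pad arguments, but the excerpt does not show which strings are valid, so this sketch sticks to integer arrays:

import numpy

def make_pad_args(window_shape):
    # Hypothetical make_pad_args_func: given a window_shape (a length-2
    # array of ints), yield a few 'pad' arguments worth testing.
    window_shape = numpy.asarray(window_shape)
    yield numpy.zeros(2, dtype=int)   # no padding
    yield numpy.ones(2, dtype=int)    # one row and one column of padding
    yield window_shape - 1            # largest padding for which every
                                      # window still overlaps the image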


Note: The simplelearn.asserts.assert_all_greater examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.