

Python numpy.void Function Code Examples

This article collects typical usage examples of the Python numpy.void function. If you have been wondering what exactly numpy.void does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.


Fifteen code examples of the void function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
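Before diving in, a quick orientation: numpy.void is the NumPy scalar type for opaque raw-byte data (it also represents structured-dtype scalars). Its most common practical use, and the one most of the examples below share, is wrapping an arbitrary binary blob, such as a pickled Python object, so that it can be stored in a container like HDF5. A minimal, self-contained sketch (not taken from any of the projects below):

import pickle
import numpy

# Wrap arbitrary bytes in an opaque void scalar; the dtype records only the size.
blob = numpy.void(b'\x00\xff raw bytes')
print(blob.dtype)  # |V12 -- an opaque 12-byte value

# Round-trip a pickled Python object through numpy.void.
payload = numpy.void(pickle.dumps({'answer': 42}))
restored = pickle.loads(payload.tobytes())
assert restored == {'answer': 42}

Most of the snippets below follow exactly this pattern: pickle (or otherwise encode) an object to bytes, wrap it in numpy.void, and hand it to h5py.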

Example 1: check_numpy_scalar_argument_return_void

 def check_numpy_scalar_argument_return_void(self):
     f = PyCFunction('foo')
     f += Variable('a1', numpy.void, 'in, out')
     f += Variable('a2', numpy.void, 'in, out')
     foo = f.build()
     args = ('he', 4)
     results = (numpy.void('he'), numpy.void(4))
     assert_equal(foo(*args), results)
Developer: dagss, Project: f2py-g3, Lines: 8, Source: test_py_support.py

Example 2: test_meta_nonempty

def test_meta_nonempty():
    df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
                        'B': list('abc'),
                        'C': 'bar',
                        'D': np.float32(1),
                        'E': np.int32(1),
                        'F': pd.Timestamp('2016-01-01'),
                        'G': pd.date_range('2016-01-01', periods=3,
                                           tz='America/New_York'),
                        'H': pd.Timedelta('1 hours', 'ms'),
                        'I': np.void(b' '),
                        'J': pd.Categorical([UNKNOWN_CATEGORIES] * 3)},
                       columns=list('DCBAHGFEIJ'))
    df2 = df1.iloc[0:0]
    df3 = meta_nonempty(df2)
    assert (df3.dtypes == df2.dtypes).all()
    assert df3['A'][0] == 'Alice'
    assert df3['B'][0] == 'foo'
    assert df3['C'][0] == 'foo'
    assert df3['D'][0] == np.float32(1)
    assert df3['D'][0].dtype == 'f4'
    assert df3['E'][0] == np.int32(1)
    assert df3['E'][0].dtype == 'i4'
    assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00')
    assert df3['G'][0] == pd.Timestamp('1970-01-01 00:00:00',
                                       tz='America/New_York')
    assert df3['H'][0] == pd.Timedelta('1', 'ms')
    assert df3['I'][0] == 'foo'
    assert df3['J'][0] == UNKNOWN_CATEGORIES

    s = meta_nonempty(df2['A'])
    assert s.dtype == df2['A'].dtype
    assert (df3['A'] == s).all()
Developer: rlugojr, Project: dask, Lines: 33, Source: test_utils_dataframe.py

Example 3: _convert_value

    def _convert_value(self, value):
        """Convert a string into a numpy object (scalar or array).

        The value is most of the time a string, but it can be a Python
        object, e.g. when it comes from the TIFF decoder.
        """
        if isinstance(value, list):
            # convert to a numpy array
            return numpy.array(value)
        if isinstance(value, dict):
            # convert to a numpy associative array
            key_dtype = numpy.min_scalar_type(list(value.keys()))
            value_dtype = numpy.min_scalar_type(list(value.values()))
            associative_type = [('key', key_dtype), ('value', value_dtype)]
            assert key_dtype.kind != "O" and value_dtype.kind != "O"
            return numpy.array(list(value.items()), dtype=associative_type)
        if isinstance(value, numbers.Number):
            dtype = numpy.min_scalar_type(value)
            assert dtype.kind != "O"
            return dtype.type(value)

        if isinstance(value, six.binary_type):
            try:
                value = value.decode('utf-8')
            except UnicodeDecodeError:
                return numpy.void(value)

        if " " in value:
            result = self._convert_list(value)
        else:
            result = self._convert_scalar_value(value)
        return result
Developer: vallsv, Project: silx, Lines: 32, Source: fabioh5.py

Example 4: test_meta_nonempty

def test_meta_nonempty():
    df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
                        'B': list('abc'),
                        'C': 'bar',
                        'D': 3.0,
                        'E': pd.Timestamp('2016-01-01'),
                        'F': pd.date_range('2016-01-01', periods=3,
                                           tz='America/New_York'),
                        'G': pd.Timedelta('1 hours'),
                        'H': np.void(b' ')},
                       columns=list('DCBAHGFE'))
    df2 = df1.iloc[0:0]
    df3 = meta_nonempty(df2)
    assert (df3.dtypes == df2.dtypes).all()
    assert df3['A'][0] == 'Alice'
    assert df3['B'][0] == 'foo'
    assert df3['C'][0] == 'foo'
    assert df3['D'][0] == 1.0
    assert df3['E'][0] == pd.Timestamp('1970-01-01 00:00:00')
    assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00',
                                       tz='America/New_York')
    assert df3['G'][0] == pd.Timedelta('1 days')
    assert df3['H'][0] == 'foo'

    s = meta_nonempty(df2['A'])
    assert s.dtype == df2['A'].dtype
    assert (df3['A'] == s).all()
Developer: ankravch, Project: dask, Lines: 27, Source: test_utils_dataframe.py

Example 5: _saveValue

 def _saveValue(self, group, name, value):
     # we pickle to a string and convert to numpy.void,
     # because HDF5 has some limitations as to which strings it can serialize
     # (see http://docs.h5py.org/en/latest/strings.html)
     pickled = numpy.void(pickle.dumps(value, 0))
     dset = group.create_dataset(name, data=pickled)
     dset.attrs['version'] = self._version
     self._failed_to_deserialize = False
Developer: ilastik, Project: ilastik, Lines: 8, Source: appletSerializer.py
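The snippet above only shows the save path. Reading the value back is symmetric: h5py returns the opaque dataset as a numpy.void scalar whose bytes can be unpickled. A minimal sketch, assuming the same group/name layout (the _loadValue name is hypothetical, not from the project):

 def _loadValue(self, group, name):
     # h5py returns opaque data as a numpy.void scalar; tobytes() recovers
     # the original pickled byte string.
     raw = group[name][()]
     return pickle.loads(raw.tobytes())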

Example 6: store

    def store(self, k, v):
        logging.info("{} storing {}".format(self.TAG, k))
        v_ = np.void(zlib.compress(cPickle.dumps(v, protocol=cPickle.HIGHEST_PROTOCOL)))

        if k in self.db:
            logging.error("{} Overwriting group {}!".format(self.TAG, k))
            del self.db[k]

        self.db[k] = [v_]
Developer: jonathanmasci, Project: EG16_tutorial, Lines: 9, Source: snapshotter.py

Example 7: serialize_hdf5

    def serialize_hdf5(self, h5py_group):
        logger.debug("Serializing")
        h5py_group[self.HDF5_GROUP_FILENAME] = self._filename
        h5py_group["pickled_type"] = pickle.dumps(type(self), 0)

        # HACK: can this be done more elegantly?
        with tempfile.TemporaryFile() as f:
            self._tiktorch_net.serialize(f)
            f.seek(0)
            h5py_group["classifier"] = numpy.void(f.read())
Developer: ilastik, Project: lazyflow, Lines: 10, Source: tiktorchLazyflowClassifier.py

Example 8: VideoToStringArray

def VideoToStringArray(video_array):
    """Converts a NCHW video array to a N length string array with
    JPEG encoded strings, to be able to store as h5 files.
    """
    nframes = video_array.shape[0]
    frames = np.split(np.transpose(video_array, (0, 2, 3, 1)), nframes, axis=0)
    # np.void from http://docs.h5py.org/en/latest/strings.html
    frames = np.array([np.void(cv2.imencode(
        '.jpg', frame[0])[1].tostring()) for frame in frames])
    return frames
Developer: TPNguyen, Project: DetectAndTrack, Lines: 10, Source: video_io.py
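Going the other way is a matter of decoding each JPEG byte string with cv2.imdecode and restoring the channel layout. A sketch under the same conventions (StringArrayToVideo is a hypothetical name, not from the original project):

import cv2
import numpy as np

def StringArrayToVideo(frames):
    """Hypothetical inverse of VideoToStringArray: JPEG strings back to NCHW."""
    decoded = [cv2.imdecode(np.frombuffer(buf.tobytes(), dtype=np.uint8),
                            cv2.IMREAD_COLOR) for buf in frames]
    # Stack to NHWC, then transpose back to the original NCHW layout.
    return np.transpose(np.stack(decoded), (0, 3, 1, 2))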

Example 9: save_dataset_as_hdf5

def save_dataset_as_hdf5(dataset, filename=None, variant=None):
    """
    Method to write simple datasets to an HDF5 file.

    :param dataset: The dataset to be stored as a dictionary of tuples.
        Each entry is one usage and contains (input_data, targets)
    :type dataset: dict[unicode, (numpy.ndarray, pylstm.targets.Targets)]

    :param filename: Filename/path of the file that should be written.
        Will overwrite if it already exists. Can be None if variant is given.
    :type filename: unicode

    :param variant: hdf5 group object the dataset will be saved to instead of
        writing it to a new file. Either this or filename has to be set.

    :rtype: None
    """
    hdffile = None
    if variant is None:
        assert filename is not None
        import h5py
        hdffile = h5py.File(filename, "w")
        variant = hdffile

    if 'description' in dataset:
        variant.attrs['description'] = dataset['description']
    for usage in ['training', 'validation', 'test']:
        if usage not in dataset:
            continue
        input_data, targets = dataset[usage]
        grp = variant.create_group(usage)

        grp.create_dataset('input_data', data=input_data,
                           chunks=get_chunksize(input_data),
                           compression="gzip")

        if targets.is_labeling():
            targets_encoded = np.void(cPickle.dumps(targets.data))
            targets_ds = grp.create_dataset('targets',
                                            data=targets_encoded,
                                            dtype=targets_encoded.dtype)
        else:
            targets_ds = grp.create_dataset(
                'targets',
                data=targets.data,
                chunks=get_chunksize(targets.data),
                compression="gzip"
            )
        targets_ds.attrs.create('targets_type', str(targets.targets_type[0]))
        targets_ds.attrs.create('binarize_to', targets.binarize_to or 0)
        if targets.mask is not None:
            grp.create_dataset('mask', data=targets.mask, dtype='u1')

    if hdffile is not None:
        hdffile.close()
Developer: Qwlouse, Project: pylstm, Lines: 55, Source: loader.py
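Loading the labeling targets back mirrors the write path; a minimal sketch (the file name is hypothetical, and the group layout is assumed from the code above rather than taken from the original loader module):

import pickle
import h5py

with h5py.File('dataset.hdf5', 'r') as f:
    # Labeling targets were written as a scalar numpy.void dataset.
    raw = f['training/targets'][()]
    targets_data = pickle.loads(raw.tobytes())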

Example 10: write_hdf5

    def write_hdf5(self, filename, dataset_name=None, info=None):
        r"""Writes ImageArray to hdf5 file.

        Parameters
        ----------
        filename: string
            The filename to create and write a dataset to

        dataset_name: string
            The name of the dataset to create in the file.

        info: dictionary
            A dictionary of supplementary info to append as attributes to
            the dataset.

        Examples
        --------
        >>> a = YTArray([1,2,3], 'cm')

        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}

        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
        ...              info=myinfo)

        """
        import h5py
        from yt.extern.six.moves import cPickle as pickle

        if info is None:
            info = {}

        info["units"] = str(self.units)
        info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))

        if dataset_name is None:
            dataset_name = "array_data"

        f = h5py.File(filename)
        if dataset_name in f.keys():
            d = f[dataset_name]
            # Overwrite without deleting if we can get away with it.
            if d.shape == self.shape and d.dtype == self.dtype:
                d[:] = self
                for k in d.attrs.keys():
                    del d.attrs[k]
            else:
                del f[dataset_name]
                d = f.create_dataset(dataset_name, data=self)
        else:
            d = f.create_dataset(dataset_name, data=self)

        for k, v in info.items():
            d.attrs[k] = v
        f.close()
Developer: danielgrassinger, Project: yt_new_frontend, Lines: 54, Source: yt_array.py

Example 11: metadata

    def metadata(self, value):
        try:
            del self.metadata
        except KeyError:
            pass

        dump = pickle.dumps(value)

        for i, start in enumerate(range(0, len(dump), MAX_ATTRIBUTE_SIZE)):
            self._group.attrs['_metadata{}'.format(i)] = np.void(
                dump[start : start + MAX_ATTRIBUTE_SIZE])
        self._group.attrs['_metadata_num'] = i + 1
Developer: arvidfm, Project: masters-thesis, Lines: 12, Source: dataset.py
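The snippet shows only the setter, which splits the pickle dump across several HDF5 attributes because a single attribute has a size limit. The matching getter has to reassemble the chunks in order before unpickling; a sketch under the same '_metadata{i}' / '_metadata_num' scheme (not copied from the project):

    @property
    def metadata(self):
        # Reassemble the pickled blob from the chunked attributes, then unpickle.
        num = self._group.attrs['_metadata_num']
        dump = b''.join(self._group.attrs['_metadata{}'.format(i)].tobytes()
                        for i in range(num))
        return pickle.loads(dump)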

Example 12: write_to

    def write_to(self, group, append=False):
        """Writes the properties to a `group`, or append it"""
        data = self.data
        if append is True:
            try:
                # concatenate original and new properties in a single list
                original = read_properties(group)
                data = original + data
            except EOFError:
                pass  # no former data to append on

        # h5py does not support embedded NULLs in strings ('\x00')
        data = pickle.dumps(data).replace(b'\x00', b'__NULL__')
        group['properties'][...] = np.void(data)
Developer: bootphon, Project: h5features, Lines: 14, Source: properties.py
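The read_properties function referenced above has to undo the NULL escaping before unpickling; a minimal sketch of what it presumably looks like:

def read_properties(group):
    # Hypothetical counterpart to write_to: unescape the NULL marker, then unpickle.
    raw = group['properties'][()].tobytes()
    return pickle.loads(raw.replace(b'__NULL__', b'\x00'))

Note that this escaping scheme is only safe as long as the pickled payload never happens to contain the literal marker b'__NULL__'.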

Example 13: save

 def save(self, hdf5_handle):
     g = hdf5_handle
     # Class settings
     g.attrs.update(self.settings)
     # Class attributes
     h = g.create_group("class")
     h.attrs["label"] = self.label
     if self.settings["store_cxx_serial"]:
         if self.verbose: self.log << "[h5] Writing cxx serial" << self.log.endl
         # Prune pid data if not required to compute gradients
         prune_pid_data = False if self.options['spectrum.gradients'] else True
         cxx_serial = self.spectrum.saves(prune_pid_data)
         h = g.create_dataset("cxx_serial", data=np.void(cxx_serial))
     if self.settings["store_cmap"]:
         if self.verbose: self.log << "[h5] Writing coefficient map" << self.log.endl
         h = g.create_group("cmap")
         for idx, cmap in enumerate(self.cmap):
             hh = h.create_group('%d' % idx)
             for key in cmap:
                 hh.create_dataset(key, data=cmap[key], compression='gzip')
     if self.settings["store_gcmap"]:
         if self.verbose: self.log << "[h5] Writing global coefficient map" << self.log.endl
         h = g.create_group("gcmap")
         for idx, gcmap in enumerate(self.gcmap):
             hh = h.create_group('%d' % idx)
             for key in gcmap:
                 hh.create_dataset(key, data=gcmap[key], compression='gzip')
     if self.settings["store_sdmap"]:
         if self.verbose: self.log << "[h5] Writing descriptor map" << self.log.endl
         h = g.create_group('sdmap')
         for idx, sdmap in enumerate(self.sdmap):
             hh = h.create_group('%d' % idx)
             for key in sdmap:
                 hh.create_dataset(key, data=sdmap[key], compression='gzip')
     if self.settings["store_gsdmap"]:
         if self.verbose: self.log << "[h5] Writing global descriptor map" << self.log.endl
         h = g.create_group('gsdmap')
         for idx, gsdmap in enumerate(self.gsdmap):
             hh = h.create_group('%d' % idx)
             for key in gsdmap:
                 hh.create_dataset(key, data=gsdmap[key], compression='gzip')
     if self.settings["store_sd"]:
         if self.verbose: self.log << "[h5] Writing descriptor matrix" << self.log.endl
         g.create_dataset('sd', data=self.sd, compression='gzip')
     if self.settings["store_gsd"]:
         if self.verbose: self.log << "[h5] Writing global descriptor matrix" << self.log.endl
         g.create_dataset('gsd', data=self.gsd, compression='gzip')
     return self
Developer: capoe, Project: soapxx, Lines: 48, Source: run.py

Example 14: _create_data

    def _create_data(self):
        """Initialize hold data by merging all headers of each frames.
        """
        headers = []
        types = set([])
        for fabio_frame in self.__fabio_reader.iter_frames():
            header = fabio_frame.header

            data = []
            for key, value in header.items():
                data.append("%s: %s" % (str(key), str(value)))

            data = "\n".join(data)
            try:
                line = data.encode("ascii")
                types.add(numpy.string_)
            except UnicodeEncodeError:
                try:
                    line = data.encode("utf-8")
                    types.add(numpy.unicode_)
                except UnicodeEncodeError:
                    # Fall back to numpy.void
                    line = numpy.void(data)
                    types.add(numpy.void)

            headers.append(line)

        if numpy.void in types:
            dtype = numpy.void
        elif numpy.unicode_ in types:
            dtype = numpy.unicode_
        else:
            dtype = numpy.string_

        if dtype == numpy.unicode_ and h5py is not None:
            # h5py only support vlen unicode
            dtype = h5py.special_dtype(vlen=six.text_type)

        return numpy.array(headers, dtype=dtype)
Developer: vallsv, Project: silx, Lines: 39, Source: fabioh5.py

Example 15: test_void_scalar_recursion

 def test_void_scalar_recursion(self):
     # gh-9345
     repr(np.void(b'test'))  # RecursionError ?
Developer: Juanlu001, Project: numpy, Lines: 3, Source: test_arrayprint.py


Note: The numpy.void examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.