当前位置: 首页>>代码示例>>Python>>正文


Python numpy.string_函数代码示例

本文整理汇总了Python中numpy.string_函数的典型用法代码示例。如果您正苦于以下问题：Python string_函数的具体用法？Python string_怎么用？Python string_使用的例子？那么恭喜您，这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了string_函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: setupimgh5

def setupimgh5(f, Nframetotal:int, Nrow:int, Ncol:int, dtype=np.uint16,
               writemode='r+', key='/rawimg', cmdlog:str=''):
    """
    Create an HDF5 image dataset laid out so HDFView renders it as a
    grayscale image series.

    Parameters
    ----------
    f : str, Path, or h5py.File
        Filename (a new file is created/opened, written, and closed) or an
        already-open HDF5 file handle.
    Nframetotal, Nrow, Ncol : int
        Dataset shape: number of frames, rows, columns.
    dtype : numpy dtype
        Pixel dtype of the dataset (default uint16).
    writemode : str
        h5py.File mode used when ``f`` is a filename.
    key : str
        Dataset path inside the file.
    cmdlog : str
        Optional command-line/log text stored under '/cmdlog'.

    Returns
    -------
    h : h5py.Dataset
        Dataset handle. NOTE(review): when ``f`` is a filename the file is
        closed before returning, so the handle is no longer usable then.
    """
    if isinstance(f, (str, Path)):  # assume new HDF5 file wanted
        print('creating', f)
        with h5py.File(f, writemode, libver='latest') as F:
            # BUG FIX: cmdlog was previously dropped on this recursive call,
            # so '/cmdlog' was never written when a filename was passed.
            return setupimgh5(F, Nframetotal, Nrow, Ncol, dtype, writemode,
                              key, cmdlog)
    elif isinstance(f, h5py.File):
        h = f.create_dataset(key,
                 shape =  (Nframetotal,Nrow,Ncol),
                 dtype=dtype,
                 chunks= (1,Nrow,Ncol), # each image is a chunk
                 compression='gzip',
                 compression_opts=1, #no difference in filesize from 1 to 5, except much faster to use lower numbers!
                 shuffle= True,
                 fletcher32= True,
                 track_times= True)
        # attributes that make HDFView display the dataset as an image stack
        h.attrs["CLASS"] = np.string_("IMAGE")
        h.attrs["IMAGE_VERSION"] = np.string_("1.2")
        h.attrs["IMAGE_SUBCLASS"] = np.string_("IMAGE_GRAYSCALE")
        h.attrs["DISPLAY_ORIGIN"] = np.string_("LL")
        h.attrs['IMAGE_WHITE_IS_ZERO'] = np.uint8(0)

        if cmdlog and isinstance(cmdlog, str):
            f['/cmdlog'] = cmdlog
    else:
        raise TypeError(f'{type(f)} is not correct, must be filename or h5py.File HDF5 file handle')

    return h
开发者ID:scienceopen,项目名称:histutils,代码行数:33,代码来源:__init__.py

示例2: store_image

def store_image(h5group, data, compression):
    """
    Write image frames into ``h5group["image"]``, growing the dataset as
    frames arrive.

    A 2-D array is treated as a single frame; 3-D input is a stack of
    frames (frame, row, col). On first call an extendible uint8 dataset is
    created with the HDFView image attributes; later calls append.
    """
    if len(data.shape) == 2:
        # promote a single event/frame to a stack of length one
        data = data.reshape(1, data.shape[0], data.shape[1])

    if "image" in h5group:
        # dataset already exists: extend along the frame axis and append
        dset = h5group["image"]
        start = dset.shape[0]
        dset.resize(start + data.shape[0], axis=0)
        dset[start:] = data
    else:
        # first call: unlimited frame axis, fixed frame geometry
        grow_shape = (None, data.shape[1], data.shape[2])
        dset = h5group.create_dataset("image",
                                      data=data,
                                      dtype=np.uint8,
                                      maxshape=grow_shape,
                                      chunks=True,
                                      fletcher32=True,
                                      compression=compression)
        # Create and set image attributes: HDFView recognizes this as a
        # series of images. Use np.string_ as per
        # http://docs.h5py.org/en/stable/strings.html#compatibility
        for attr_name, attr_value in (('CLASS', 'IMAGE'),
                                      ('IMAGE_VERSION', '1.2'),
                                      ('IMAGE_SUBCLASS', 'IMAGE_GRAYSCALE')):
            dset.attrs.create(attr_name, np.string_(attr_value))
开发者ID:ZELLMECHANIK-DRESDEN,项目名称:dclab,代码行数:25,代码来源:write_hdf5.py

示例3: _write_buffer

    def _write_buffer(self):
        """Serialize ``self.buffer`` into this HDF5-like container.

        Writes version/type metadata as attributes, each buffer quantity as
        a dataset with a 'unit' attribute where one exists, and one subgroup
        per available decomposition type.
        """
        self.attrs['version'] = np.string_(__version__)
        self.attrs['type'] = np.string_(type(self).__name__)

        self['charge'] = self.buffer.charge
        self['charge'].attrs['unit'] = self.buffer.charge_unit
        self['numMol'] = self.buffer.numMol
        # quantities that carry a matching '<name>_unit' attribute
        for name in ('volume', 'temperature', 'timeLags', 'nCorr'):
            self[name] = getattr(self.buffer, name)
            self[name].attrs['unit'] = getattr(self.buffer, name + '_unit')

        def _store_dec(dectype):
            # one subgroup per decomposition type, mirroring the buffer layout
            grp = self.require_group(dectype.value)
            buf = getattr(self.buffer, dectype.value)
            grp['decBins'] = buf.decBins
            grp['decCorr'] = buf.decCorr
            grp['decPairCount'] = buf.decPairCount

            grp['decBins'].attrs['unit'] = buf.decBins_unit
            grp['decCorr'].attrs['unit'] = buf.decCorr_unit

        for dectype in DecType:
            if getattr(self.buffer, dectype.value) is not None:
                _store_dec(dectype)
开发者ID:gitter-badger,项目名称:decond,代码行数:28,代码来源:analyze.py

示例4: write

    def write(self, struct_name, data_dict):
        """Write ``data_dict`` under the group ``struct_name`` in the open HDF5 file.

        The layout mimics Octave's HDF5 save format (type/value pairs with
        OCTAVE_* attributes) so the file can be loaded back from Octave.

        :param struct_name: the identification of the structure to write in the hdf5
        :param data_dict: the python dictionary containing the information to write
        """
        if self.file is None:
            info = "No file currently open"
            logger.info(info)
            return

        group_l1 = self.file.create_group(struct_name)
        group_l1.attrs['OCTAVE_GLOBAL'] = np.uint8(1)
        group_l1.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
        group_l1.create_dataset("type", data=np.string_('scalar struct'), dtype="|S14")
        group_l2 = group_l1.create_group('value')
        for ftparams in data_dict:
            group_l3 = group_l2.create_group(ftparams)
            group_l3.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
            # FIX: isinstance instead of `type(...) == str` — also accepts
            # str subclasses and is the idiomatic type check.
            if isinstance(data_dict[ftparams], str):
                group_l3.create_dataset("type", (), data=np.string_('sq_string'), dtype="|S10")
                # Octave < 3.8 expects a trailing pad character on strings
                if self.octave_targetted_version < 3.8:
                    group_l3.create_dataset("value", data=np.string_(data_dict[ftparams] + '0'))
                else:
                    group_l3.create_dataset("value", data=np.string_(data_dict[ftparams]))
            else:
                group_l3.create_dataset("type", (), data=np.string_('scalar'), dtype="|S7")
                group_l3.create_dataset("value", data=data_dict[ftparams])
开发者ID:vasole,项目名称:silx,代码行数:28,代码来源:octaveh5.py

示例5: new_entry

    def new_entry(self, entry="entry", program_name="pyFAI",
                  title="description of experiment",
                  force_time=None, force_name=False):
        """
        Create a new NXentry group in the HDF5 file.

        :param entry: name of the entry
        :param program_name: value of the field as string
        :param title: value of the field as string
        :param force_time: enforce the start_time (as string!)
        :param force_name: force the entry name as such, without numerical suffix.
        :return: the corresponding HDF5 group
        """
        if not force_name:
            # append a zero-padded index so repeated entries do not collide
            entry = "%s_%04i" % (entry, len(self.get_entries()))

        entry_grp = self.h5.require_group(entry)
        entry_grp.attrs["NX_class"] = numpy.string_("NXentry")

        for field, text in (("title", title),
                            ("program_name", program_name)):
            entry_grp[field] = numpy.string_(text)

        # caller-supplied start time wins; otherwise stamp with "now"
        start = force_time if force_time else get_isotime()
        entry_grp["start_time"] = numpy.string_(start)

        self.to_close.append(entry_grp)
        return entry_grp
开发者ID:jonwright,项目名称:pyFAI,代码行数:27,代码来源:io.py

示例6: test_roundtrip_strings_with_fill_value

    def test_roundtrip_strings_with_fill_value(self):
        """Round-trip a string variable with a _FillValue through the store.

        Expectations are patched per backend class, since netCDF4-based
        stores cannot preserve an empty '' _FillValue.
        """
        values = np.array(['ab', 'cdef', np.nan], dtype=object)
        encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
        original = Dataset({'x': ('t', values, {}, encoding)})
        expected = original.copy(deep=True)
        # with dtype S1 the stored strings come back as fixed-width bytes
        expected['x'][:2] = values[:2].astype('S')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)

        # second pass: NUL _FillValue, which some backends cannot represent
        original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
        if not isinstance(self, Only32BitTypes):
            # these stores can save unicode strings
            expected = original.copy(deep=True)
        if isinstance(self, BaseNetCDF4Test):
            # netCDF4 can't keep track of an empty _FillValue for VLEN
            # variables
            expected['x'][-1] = ''
        elif (isinstance(self, (NetCDF3ViaNetCDF4DataTest,
                                NetCDF4ClassicViaNetCDF4DataTest)) or
              (has_netCDF4 and type(self) is GenericNetCDFDataTest)):
            # netCDF4 can't keep track of an empty _FillValue for nc3, either:
            # https://github.com/Unidata/netcdf4-python/issues/273
            expected['x'][-1] = np.string_('')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)
开发者ID:gyenney,项目名称:Tools,代码行数:25,代码来源:test_backends.py

示例7: _convert_list

    def _convert_list(self, value):
        """Convert a space-separated string into a typed numpy array.

        Each token is converted via ``_convert_scalar_value``; the result
        dtype is the numpy common type of all tokens. If any token fails to
        convert, or the common type is a byte string, the raw input is
        returned as a numpy string.
        """
        try:
            converted = [self._convert_scalar_value(token)
                         for token in value.split(" ")]
            token_types = {v.dtype.type for v in converted}
            common = numpy.result_type(*token_types)

            if issubclass(common.type, (numpy.string_, six.binary_type)):
                # bytes dominate: keep the raw data as the result
                return numpy.string_(value)
            if issubclass(common.type, (numpy.unicode_, six.text_type)):
                # text dominates: keep the raw data as the result
                return numpy.unicode_(value)
            return numpy.array(converted, dtype=common)
        except ValueError:
            # at least one token is not a scalar: fall back to a raw string
            return numpy.string_(value)
开发者ID:vallsv,项目名称:silx,代码行数:26,代码来源:fabioh5.py

示例8: _write_dataset

def _write_dataset(f, ds):
    f[ds.name] = ds.data
    dsobj = f[ds.name]
    
    if ds.comment:
        dsobj.attrs['COMMENT'] = np.string_(ds.comment)
        
    if ds._display_name:
        dsobj.attrs['DISPLAY_NAME'] = np.string_(ds.display_name)
    
    if ds.quantity:
        dsobj.attrs['QUANTITY'] = np.string_(ds.quantity)
    
    if ds.unit:
        dsobj.attrs['UNIT'] = np.string_(ds.unit)
    
    if ds.display_unit != ds.unit:
        dsobj.attrs['DISPLAY_UNIT'] = np.string_(ds.display_unit)
       
    for ri in range(len(ds.scales)):
        s = ds.scales[ri]
        sobj = f[s.name]
        dsobj.dims.create_scale(sobj, s.scale_name)
        dsobj.dims[ri].attach_scale(sobj)
    return dsobj
开发者ID:PyWilhelm,项目名称:EDRIS_DS,代码行数:25,代码来源:__init__.py

示例9: setup_root_attr

def setup_root_attr(f, extension=None):
    """
    Write the openPMD root metadata for this file.

    Parameters
    ----------
    f : an h5py.File object
        The file in which to write the data
    extension : str, optional
        openPMD extension name; defaults to "ED-PIC".
    """

    if extension is None:
        extension = "ED-PIC"

    # Required attributes
    f.attrs["openPMD"] = numpy.string_("1.0.0")
    f.attrs["openPMDextension"] = ext_list[extension]
    for attr_name, attr_value in (("basePath", "/data/%T/"),
                                  ("meshesPath", "meshes/"),
                                  ("particlesPath", "particles/"),
                                  ("iterationEncoding", "groupBased"),
                                  ("iterationFormat", "/data/%T/")):
        f.attrs[attr_name] = numpy.string_(attr_value)

    # Recommended attributes
    timestamp = datetime.datetime.now(tzlocal()).strftime('%Y-%m-%d %H:%M:%S %z')
    for attr_name, attr_value in (("author", "NN"),
                                  ("software", "simex_platform"),
                                  ("softwareVersion", "0.2"),
                                  ("date", timestamp)):
        f.attrs[attr_name] = numpy.string_(attr_value)
开发者ID:eucall-software,项目名称:simex_platform,代码行数:27,代码来源:OpenPMDTools.py

示例10: _cal_cesaro

    def _cal_cesaro(self):
        """
        Calculate Cesaro data

        Double-integrates the correlation functions over the time lags and
        derives the Cesaro units from the correlation-unit strings (keeping
        only their first token; see the unit note below).
        """
        def cesaro_integrate(y, x):
            # double cumulative trapezoidal integration of y over x
            cesaro = integrate.cumtrapz(y, x, initial=0)
            cesaro = integrate.cumtrapz(cesaro, x, initial=0)
            return cesaro

        self.buffer.nDCesaro = cesaro_integrate(self.buffer.nCorr,
                                                self.buffer.timeLags)

        # Unit: nCorr (L^2 T^-2), nDCesaro (L^2)
        # the unit string is bytes; keep only its first token (length part)
        self.buffer.nDCesaro_unit = np.string_(
                self.buffer.nCorr_unit.decode().split()[0])

        # total = weighted sum over components with weights zz * ww
        # NOTE(review): zz/ww are set elsewhere in the class — confirm their
        # semantics (presumably charges and mole-fraction-like weights).
        self.buffer.nDTotalCesaro = np.sum(
                self.buffer.nDCesaro *
                (self.zz * self.ww)[:, np.newaxis], axis=0)

        self.buffer.nDTotalCesaro_unit = np.string_(
                self.buffer.nCorr_unit.decode().split()[0])

        def do_dec(buf):
            # same double integration, applied per decomposition buffer
            buf.decDCesaro = cesaro_integrate(
                    buf.decCorr, self.buffer.timeLags)
            buf.decDCesaro_unit = np.string_(
                    buf.decCorr_unit.decode().split()[0])

        # only process decomposition types that are actually present
        for type_ in DecType:
            buf = getattr(self.buffer, type_.value)
            if buf is not None:
                do_dec(buf)
开发者ID:gitter-badger,项目名称:decond,代码行数:33,代码来源:analyze.py

示例11: to_hdf5

    def to_hdf5(self, group):
        """Write reaction to an HDF5 group

        Stores the MT number, a human-readable label (when known), the
        Q-value and frame flag as attributes, then one subgroup per
        temperature with its cross section, and one per reaction product.

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to write to

        """

        group.attrs['mt'] = self.mt
        # prefer the registered reaction name; otherwise label with the MT number
        if self.mt in REACTION_NAME:
            label = REACTION_NAME[self.mt]
        else:
            label = self.mt
        group.attrs['label'] = np.string_(label)
        group.attrs['Q_value'] = self.q_value
        group.attrs['center_of_mass'] = 1 if self.center_of_mass else 0

        for T in self.xs:
            Tgroup = group.create_group(T)
            xs = self.xs[T]
            if xs is not None:
                dset = Tgroup.create_dataset('xs', data=xs.y)
                # threshold index is stored 1-based; default to 1 when absent
                dset.attrs['threshold_idx'] = getattr(xs, '_threshold_idx', 0) + 1

        for index, product in enumerate(self.products):
            pgroup = group.create_group('product_{}'.format(index))
            product.to_hdf5(pgroup)
开发者ID:mit-crpg,项目名称:openmc,代码行数:29,代码来源:reaction.py

示例12: get_roi_hdf5

def get_roi_hdf5(hdf5FileName, hdf5FileName_ROI, run, rois, detector_names, pede_thr=-1, dark_file=""):
    """Extract per-detector ROI data for one run into a new HDF5 file.

    Python 2 code (print statements, h5py's deprecated ``.value``).

    hdf5FileName: input HDF5 file with the raw run data
    hdf5FileName_ROI: output HDF5 file to create/append ROI data to
    run: run number used to build "/run_%06d" paths
    rois: one ROI per detector; [] means full frame for every detector.
          NOTE(review): the caller's list is mutated in place when empty.
    detector_names: detector names to extract (matched against detector_info)
    pede_thr: pedestal threshold passed to sacla_hdf5.get_roi_data (-1 disables)
    dark_file: optional HDF5 file with dark images for correction
    """

    # no ROI given: default to one empty ROI (full frame) per detector
    if rois == []:
        for d in detector_names:
            rois.append([])
    if len(rois) != len(detector_names):
        print "ERROR: please put one ROI per detector!"
        sys.exit(-1)

    f = h5py.File(hdf5FileName, 'r')

    f_out = h5py.File(hdf5FileName_ROI, 'a', )

    if dark_file != "":
        f_dark = h5py.File(dark_file, "r")
    run_dst = f["/run_%06d" % run]

    # keep only the 2D-detector groups whose registered name was requested
    detectors_list = []
    detector_dstnames = [i for i in run_dst.keys() if i.find("detector_2d") != -1]
    for d in detector_dstnames:
        if run_dst[d + "/detector_info/detector_name"].value in detector_names:
            detectors_list.append(d)

    tag_list = f["/run_%06d/event_info/tag_number_list" % run][:]
    # NOTE(review): DET_INFO_DSET is never used below — dead constant?
    DET_INFO_DSET = "/detector_info/detector_name"
    RUN_INFO_DST = ["event_info", "exp_info", "run_info"]
    # copy the file-level metadata verbatim into the output file
    file_info = f["file_info"]
    f.copy(file_info, f_out)
    try:
        f_out.create_group("/run_%06d" % run)
    except:
        # group may already exist when appending; just report and continue
        print sys.exc_info()[1]

    # copy the run-level info groups verbatim
    for info_dst in RUN_INFO_DST:
        info = run_dst[info_dst]
        f.copy(info, f_out["/run_%06d" % run])

    for i, dreal in enumerate(detectors_list):
        detector_dsetname = "/run_%06d/%s" % (run, dreal)
        print detector_dsetname
        try:
            fout_grp = f_out.create_group(detector_dsetname)
        except:
            # same as above: tolerate pre-existing groups
            print sys.exc_info()[1]
        info = f[detector_dsetname]["detector_info"]
        f.copy(info, f_out[detector_dsetname])

        if dark_file != "":
            print "With dark correction"
            # NOTE(review): dark_dset_names is not defined in this function —
            # presumably a module-level list parallel to detectors_list; confirm.
            sacla_hdf5.get_roi_data(f[detector_dsetname], f_out[detector_dsetname], tag_list, rois[i], pede_thr=pede_thr, dark_matrix=f_dark[dark_dset_names[i]][:])
            f_out[detector_dsetname].attrs['dark_filename'] = np.string_(dark_file.split("/")[-1])
            print np.string_(dark_file.split("/")[-1])

        else:
            sacla_hdf5.get_roi_data(f[detector_dsetname], f_out[detector_dsetname], tag_list, rois[i], pede_thr=pede_thr)
        #asciiList = [n.encode("ascii", "ignore") for n in strList]
        #f_out[detector_dsetname + "/dark_fname"] = np.string_("aaaaaaaa")
    f_out.close()
    print "Run %s done!" % str(run)
开发者ID:ivan-usov,项目名称:sacla,代码行数:59,代码来源:get_roi_hdf5.py

示例13: GetCheckPoint

	def GetCheckPoint (self) :
		"""
		Return a pickled snapshot of the current optimization iteration.

		The population and the hall of fame are serialized with pickle and
		wrapped in np.string_ (fixed-width bytes) so they can be stored
		directly in HDF5.
		"""
		snapshot = {}
		for key, state in (("population", self.optimization_pop),
				("halloffame", self.optimization_hof)):
			snapshot[key] = np.string_(pickle.dumps(state))
		return snapshot

##########################################################################
开发者ID:dibondar,项目名称:PyPhotonicReagents,代码行数:8,代码来源:deap_algorthims_tabs.py

示例14: init

    def init(self, lima_cfg):
        """
        Initializes the HDF5 file for writing. Part of prepareAcq.

        Builds the output filename from the Lima directory/prefix, opens the
        HDF5 file, creates the NXentry/NXdata group hierarchy, stores the
        acquisition configuration, and pre-allocates the chunked image
        dataset. Python 2 code (uses types.StringTypes).

        @param lima_cfg: dictionary with parameters coming from Lima at the "prepareAcq"
        """
        with self._sem:

            # output path: <directory>/<prefix>.h5
            self.filename = lima_cfg.get("directory")
            if not self.filename.endswith('/'): self.filename +=  '/'
            self.filename += lima_cfg.get("prefix")+'.h5'

            # silence non serious error messages, which are printed
            # because we use h5py in a new thread (not in the main one)
            # this is a bug seems to be fixed on newer version !!
            # see this h5py issue 206: https://code.google.com/p/h5py/issues/detail?id=206
            h5py._errors.silence_errors()

            try:
                self.hdf5 = h5py.File(self.filename, 'a')

            except IOError:
                # corrupted/unwritable file: remove it and recreate from scratch
                os.unlink(self.filename)
                print ("here e e ")
                self.hdf5 = h5py.File(self.filename)


            # entry name: <prefix>_<NNNN> where NNNN counts existing entries
            prefix = lima_cfg.get("prefix") or self.CONFIG_ITEMS["hpath"]
            if not prefix.endswith("_"):
                prefix+="_"
            # NOTE(review): this counts ALL top-level items (the comprehension
            # yields booleans, len ignores them) — confirm entries can only be
            # prefixed groups.
            entries = len([i.startswith(prefix) for i in self.hdf5])
            self.hpath = posixpath.join("%s%04d"%(prefix,entries),self.lima_grp)
            self.group = self.hdf5.require_group(self.hpath)
            self.group.parent.attrs["NX_class"] = "NXentry"
            self.group.attrs["NX_class"] = "NXdata"
            # store the Lima configuration next to the data
            cfg_grp = self.hdf5.require_group(posixpath.join(self.hpath, self.metadata_grp))
            cfg_grp["detector_name"] = numpy.string_(self.detector_name)
            for k, v in lima_cfg.items():
                if type(v) in types.StringTypes:
                    cfg_grp[k] = numpy.string_(v)
                else:
                    cfg_grp[k] = v
            # round the frame count up to a multiple of min_size (chunk length)
            # NOTE(review): min_size is clamped to >= 1 only AFTER being used
            # in this computation — confirm min_size can never be < 1 here.
            self.number_of_frames = (max(1, lima_cfg["number_of_frames"]) + self.min_size - 1) // self.min_size * self.min_size
            self.min_size = max(1, self.min_size)
            self.shape = (self.number_of_frames , lima_cfg.get("dimY", 1), lima_cfg.get("dimX", 1))
            self.chunk = (self.min_size, lima_cfg.get("dimY", 1), lima_cfg.get("dimX", 1))
            if "dtype" in lima_cfg:
                self.dtype = numpy.dtype(lima_cfg["dtype"])
            else:
                self.dtype = numpy.int32
            # frame axis is unlimited so the dataset can grow during acquisition
            self.dataset = self.group.require_dataset(self.dataset_name, self.shape, dtype=self.dtype, chunks=self.chunk,
                                                      maxshape=(None,) + self.chunk[1:])
            self.dataset.attrs["interpretation"] = "image"
            self.dataset.attrs["metadata"] = self.metadata_grp
            self.dataset.attrs["signal"] = "1"
            self.group.parent["title"] = numpy.string_("Raw frames")
            self.group.parent["program"] = numpy.string_("LImA HDF5 plugin")
            self.group.parent["start_time"] = numpy.string_(self.getIsoTime())
开发者ID:claustre,项目名称:UPBL09a,代码行数:58,代码来源:HDF5Sink.py

示例15: test_export_averages_hdf5

    def test_export_averages_hdf5(self):
        """Check that export_averages writes the time averages to HDF5
        with matching pixel data, channel names, and dimension labels."""
        time_avg_path = os.path.join(self.filepath, 'time_avg.h5')
        self.ds.export_averages(time_avg_path, fmt='HDF5', scale_values=False)

        h5_time_avg = h5py.File(time_avg_path, 'r')['time_average']
        # unscaled export: stored pixels equal the uint16 time averages
        assert_equal(self.ds.time_averages.astype('uint16'), h5_time_avg)
        # channel names round-trip as bytes; normalize both via np.string_
        assert_equal(np.string_(self.ds.channel_names),
                     np.string_(h5_time_avg.attrs['channel_names']))
        dim_labels = [dim.label for dim in h5_time_avg.dims]
        assert_equal(['z', 'y', 'x', 'c'], dim_labels)
开发者ID:MaximilianHoffmann,项目名称:sima,代码行数:10,代码来源:test_imaging.py


注:本文中的numpy.string_函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。