

Python file_sys.FileSystem Class Code Examples

This article collects typical usage examples of the Python class pyon.util.file_sys.FileSystem. If you have been wondering how the FileSystem class is used in practice, the curated examples below should help.


The following presents 15 code examples of the FileSystem class, drawn from open-source projects and sorted by popularity.
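Taken together, the examples exercise a small, consistent surface of the FileSystem helper: building file paths under managed directories (FS.TEMP, FS.CACHE), expanding RESOURCE:-prefixed locations into real paths, deleting individual files, and wiping the managed tree during test cleanup. The sketch below summarizes those calls as they appear in the snippets in this article; the signatures are inferred from the examples rather than from pyon's documentation, so treat it as an orientation aid, not a reference.

    from pyon.core.bootstrap import CFG
    from pyon.util.file_sys import FileSystem, FS

    # Build a path under the managed temp directory (cf. Examples 3 and 11).
    tmp_path = FileSystem.get_url(fs=FS.TEMP, filename='scratch', ext='.hdf5')

    # Build a path under the cache directory (cf. Example 7).
    cache_path = FileSystem.get_hierarchical_url(FS.CACHE, 'ABCD1234.hdf5')

    # Expand a RESOURCE:-prefixed location to an absolute path (cf. Examples 4, 8, 12).
    data_path = FileSystem.get_extended_url('RESOURCE:ext/pydap')

    # Delete a single managed file (cf. Examples 2, 6, 13).
    FileSystem.unlink(tmp_path)

    # Wipe everything FileSystem manages for this configuration
    # (cf. the _force_clean variants in Examples 1, 5, 15).
    FileSystem._clean(CFG)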

Example 1: _force_clean

    def _force_clean(cls, recreate=False, initial=False):
        # Database resources
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.datastore_common import DatastoreFactory
        datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
        if initial:
            datastore._init_database(datastore.database)

        dbs = datastore.list_datastores()
        clean_prefix = '%s_' % get_sys_name().lower()
        things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()

        # Broker resources
        from putil.rabbitmq.rabbit_util import RabbitManagementUtil
        rabbit_util = RabbitManagementUtil(CFG, sysname=get_sys_name())
        deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
        log.info("Deleted %s exchanges, %s queues" % (len(deleted_exchanges), len(deleted_queues)))

        # File system
        from pyon.util.file_sys import FileSystem
        FileSystem._clean(CFG)
Developer: edwardhunter, Project: scioncc, Lines: 29, Source: int_test.py

Example 2: tearDown

    def tearDown(self):
        """
        Cleanup: delete the temporary files created during the test
        """

        for fname in self.fnames:
            FileSystem.unlink(fname)
Developer: oldpatricka, Project: pyon, Lines: 7, Source: test_hdf_array_iterator.py

Example 3: create_known

        def create_known(dataset_name, rootgrp_name, grp_name):
            """
            A known array to compare against during tests
            """

            known_array = numpy.ones((10,20))

            filename = FileSystem.get_url(FS.TEMP,random_name(), ".hdf5")

            # Write an hdf file with known values to compare against
            h5pyfile = h5py.File(filename, mode = 'w', driver='core')
            grp = h5pyfile.create_group(rootgrp_name)
            subgrp = grp.create_group(grp_name)
            dataset = subgrp.create_dataset(dataset_name, known_array.shape, known_array.dtype.str, maxshape=(None,None))
            dataset.write_direct(known_array)
            h5pyfile.close()

            # convert the hdf file into a binary string
            f = open(filename, mode='rb')
            # read the binary string representation of the file
            known_hdf_as_string = f.read() # this is a known string to compare against during tests
            f.close()
            # cleaning up
            FileSystem.unlink(f.name)

            return known_array, known_hdf_as_string
Developer: dstuebe, Project: pyon, Lines: 26, Source: test_science_object_codec.py

Example 4: on_start

    def on_start(self):
        #these values should come in from a config file, maybe pyon.yml
        self.pydap_host = self.CFG.get_safe('server.pydap.host', 'localhost')
        self.pydap_port = self.CFG.get_safe('server.pydap.port', '8001')
        self.pydap_url  = 'http://%s:%s/' % (self.pydap_host, self.pydap_port)
        self.pydap_data_path = self.CFG.get_safe('server.pydap.data_path', 'RESOURCE:ext/pydap')
        self.datasets_xml_path = self.CFG.get_safe('server.pydap.datasets_xml_path', "RESOURCE:ext/datasets.xml")
        self.pydap_data_path = FileSystem.get_extended_url(self.pydap_data_path) + '/'

        filename = self.datasets_xml_path.split('/')[-1]
        base = '/'.join(self.datasets_xml_path.split('/')[:-1])
        real_path = FileSystem.get_extended_url(base)
        self.datasets_xml_path = os.path.join(real_path, filename)
        self.setup_filesystem(real_path)
Developer: mbarry02, Project: coi-services, Lines: 14, Source: registration_process.py

Example 5: _force_clean

    def _force_clean(cls, recreate=False):
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.couchdb.couchdb_standalone import CouchDataStore
        datastore = CouchDataStore(config=CFG)
        dbs = datastore.list_datastores()
        things_to_clean = filter(lambda x: x.startswith('%s_' % get_sys_name().lower()), dbs)
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()
        FileSystem._clean(CFG)
Developer: ateranishi, Project: pyon, Lines: 15, Source: int_test.py

Example 6: _get_time_index

    def _get_time_index(self, granule, timeval):
        '''
        @brief Obtains the index where a time's value is
        @param granule must be a complete dataset (hdf_string provided)
        @param timeval the vector value
        @return Index value for timeval or closest approx such that timeval is IN the subset
        '''
        assert isinstance(granule, StreamGranuleContainer), 'object is not a granule.'
        assert granule.identifiables[self.data_stream_id].values, 'hdf_string is not provided.'

        hdf_string = granule.identifiables[self.data_stream_id].values
        file_path = self._get_hdf_from_string(hdf_string)

        #-------------------------------------------------------------------------------------
        # Determine the field_id for the temporal coordinate vector (aka time)
        #-------------------------------------------------------------------------------------

        time_field = self.definition.identifiables[self.time_id].coordinate_ids[0]
        value_path = granule.identifiables[time_field].values_path or self.definition.identifiables[time_field].values_path
        record_count = granule.identifiables[self.element_count_id].value

        #-------------------------------------------------------------------------------------
        # Go through the time vector and get the indexes that correspond to the timeval
        # It will find a value such that
        # t_n <= i < t_(n+1), where i is the index
        #-------------------------------------------------------------------------------------


        var_name = value_path.split('/').pop()
        res = acquire_data([file_path], [var_name], record_count).next()
        time_vector = res[var_name]['values']
        retval = 0
        for i in xrange(len(time_vector)):
            if time_vector[i] == timeval:
                retval = i
                break
            elif i==0 and time_vector[i] > timeval:
                retval = i
                break
            elif (i+1) < len(time_vector): # not last val
                if time_vector[i] < timeval and time_vector[i+1] > timeval:
                    retval = i
                    break
            else: # last val
                retval = i
                break
        FileSystem.unlink(file_path)
        return retval
Developer: dstuebe, Project: coi-services, Lines: 48, Source: replay_process.py

Example 7: make_some_data

    def make_some_data(self):
        import numpy as np

        stream_id = 'I am very special'
        definition = SBE37_CDM_stream_definition()
        definition.stream_resource_id = stream_id

        self.couch.create(definition)

        total = 200
        n = 10 # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1,n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i+=1
                point_id = psc.add_point(time=i, location=(0,0,0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0,scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[definition.data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            with open(FileSystem.get_hierarchical_url(FS.CACHE, '%s.hdf5' % sha1), 'wb') as f:  # binary mode: hdf_string is raw HDF5 bytes
                f.write(hdf_string)
            granule.identifiables[definition.data_stream_id].values = ''
            self.couch.create(granule)
Developer: dstuebe, Project: coi-services, Lines: 30, Source: data_retriever_test.py

Example 8: get_datasets_xml_path

    def get_datasets_xml_path(cls, cfg):
        datasets_xml_path = cfg.get_safe('server.pydap.datasets_xml_path', 'RESOURCE:ext/datasets.xml')
        base, filename = os.path.split(datasets_xml_path)
        base = FileSystem.get_extended_url(base)
        path = os.path.join(base, filename)

        return path
Developer: n1ywb, Project: coi-services, Lines: 7, Source: registration_process.py

Example 9: _create_coverage

    def _create_coverage(self, dataset_id, description, parameter_dict, spatial_domain, temporal_domain):
        pdict = ParameterDictionary.load(parameter_dict)
        sdom = GridDomain.load(spatial_domain)
        tdom = GridDomain.load(temporal_domain)
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        scov = SimplexCoverage(file_root, dataset_id, description or dataset_id, parameter_dictionary=pdict,
                               temporal_domain=tdom, spatial_domain=sdom, inline_data_writes=self.inline_data_writes)
        return scov
Developer: blazetopher, Project: coi-services, Lines: 7, Source: dataset_management_service.py

Example 10: _splice_coverage

    def _splice_coverage(cls, dataset_id, scov):
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        vcov = cls._get_coverage(dataset_id, mode='a')
        scov_pth = scov.persistence_dir
        if isinstance(vcov.reference_coverage, SimplexCoverage):
            ccov = ComplexCoverage(file_root, uuid4().hex, 'Complex coverage for %s' % dataset_id,
                    reference_coverage_locs=[vcov.head_coverage_path,],
                    parameter_dictionary=ParameterDictionary(),
                    complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
            log.info('Creating Complex Coverage: %s', ccov.persistence_dir)
            ccov.append_reference_coverage(scov_pth)
            ccov_pth = ccov.persistence_dir
            ccov.close()
            vcov.replace_reference_coverage(ccov_pth)
        elif isinstance(vcov.reference_coverage, ComplexCoverage):
            log.info('Appending simplex coverage to complex coverage')
            #vcov.reference_coverage.append_reference_coverage(scov_pth)
            dir_path = vcov.reference_coverage.persistence_dir
            vcov.close()
            ccov = AbstractCoverage.load(dir_path, mode='a')
            ccov.append_reference_coverage(scov_pth)
            ccov.refresh()
            ccov.close()
        vcov.refresh()
        vcov.close()
Developer: edwardhunter, Project: coi-services, Lines: 25, Source: dataset_management_service.py

Example 11: __init__

    def __init__(self, name = None):
        """
        @param name The name of the dataset
        """
        # generate a random name for the filename if it has not been provided.
        self.filename = FileSystem.get_url(fs=FS.TEMP, filename=name or random_name(), ext='encoder.hdf5')

        # Using inline imports to put off making hdf/numpy required dependencies
        import h5py

        # open an hdf file on disk - in /tmp to write data to since we can't yet do in memory
        try:
            log.debug("Creating h5py file object for the encoder at %s" % self.filename)
            if os.path.isfile(self.filename):
                # if file exists, then append to it
                self.h5pyfile = h5py.File(self.filename, mode = 'r+', driver='core')
            else:
                # if file does not already exist, write a new one
                self.h5pyfile = h5py.File(self.filename, mode = 'w', driver='core')
            assert self.h5pyfile, 'No h5py file object created.'
        except IOError:
            log.debug("Error opening file for the HDFEncoder! ")
            raise HDFEncoderException("Error while trying to open file. ")
        except AssertionError as err:
            log.debug(err.message)
            raise HDFEncoderException(err.message)
Developer: dstuebe, Project: pyon, Lines: 26, Source: hdf_codec.py

Example 12: _get_cov

    def _get_cov(self, name, nt):
        path = CFG.get_safe('server.pydap.data_path', "RESOURCE:ext/pydap")
        ext_path = FileSystem.get_extended_url(path)
        cov, filename = _make_coverage(ext_path, "the_cov")
        cov.insert_timesteps(nt)
        cov.set_parameter_values("time", value=nt)
        return cov, filename
Developer: blazetopher, Project: coi-services, Lines: 7, Source: test_coverage_handler.py

Example 13: _slice

    def _slice(self,granule,slice_):
        '''
        @brief Creates a granule which is a slice of the granule parameter
        @param granule the superset
        @param slice_ The slice values for which to create the granule
        @return Crafted subset granule of the parameter granule.
        '''
        retval = copy.deepcopy(granule)
        fields = self._list_data(self.definition,granule)
        record_count = slice_.stop - slice_.start
        assert record_count > 0, 'slice is malformed'
        pairs = self._pair_up(granule)
        var_names = list([i[0] for i in pairs]) # Get the var_names from the pairs
        log.debug('var_names: %s',var_names)
        file_path = self._get_hdf_from_string(granule.identifiables[self.data_stream_id].values)
        codec = HDFEncoder()
        vectors = acquire_data([file_path],var_names,record_count,slice_ ).next()

        for row, value in vectors.iteritems():
            vp = self._find_vp(pairs, row)
            # Determine the range_id reverse dictionary lookup
            #@todo: improve this pattern
            for field,path in fields.iteritems():
                if vp==path:
                    range_id = field
                    break
            bounds_id = retval.identifiables[range_id].bounds_id
            # Recalculate the bounds for this fields and update the granule
            range = value['range']
            retval.identifiables[bounds_id].value_pair[0] = float(range[0])
            retval.identifiables[bounds_id].value_pair[1] = float(range[1])
            codec.add_hdf_dataset(vp, value['values'])
            record_count = len(value['values'])
            #----- DEBUGGING ---------
            log.debug('slice- row: %s', row)
            log.debug('slice- value_path: %s', vp)
            log.debug('slice- range_id: %s', range_id)
            log.debug('slice- bounds_id: %s', bounds_id)
            log.debug('slice- limits: %s', value['range'])
            #-------------------------


        retval.identifiables[self.element_count_id].value = record_count
        hdf_string = codec.encoder_close()
        self._patch_granule(retval, hdf_string)
        FileSystem.unlink(file_path)
        return retval
Developer: dstuebe, Project: coi-services, Lines: 47, Source: replay_process.py

Example 14: _create_coverage

    def _create_coverage(self, dataset_id, parameter_dict_id, time_dom, spatial_dom):
        pd = self.dataset_management_client.read_parameter_dictionary(parameter_dict_id)
        pdict = ParameterDictionary.load(pd)
        sdom = GridDomain.load(spatial_dom.dump())
        tdom = GridDomain.load(time_dom.dump())
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        scov = SimplexCoverage(file_root, dataset_id, dataset_id, parameter_dictionary=pdict,
                               temporal_domain=tdom, spatial_domain=sdom)
        return scov
Developer: wbollenbacher, Project: coi-services, Lines: 8, Source: test_stream_ingestion_worker.py

Example 15: _force_clean

    def _force_clean(cls, recreate=False):
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.datastore_common import DatastoreFactory
        datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
        #datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE)

        dbs = datastore.list_datastores()
        things_to_clean = filter(lambda x: x.startswith('%s_' % get_sys_name().lower()), dbs)
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()
        FileSystem._clean(CFG)
Developer: j2project, Project: pyon, Lines: 17, Source: int_test.py


Note: The pyon.util.file_sys.FileSystem examples in this article were compiled from open-source projects hosted on platforms such as GitHub. The code snippets remain the copyright of their original authors; consult each project's license before redistributing or reusing them.