

Python FileSystem.get_hierarchical_url Method Code Examples

This article collects typical usage examples of the Python method pyon.util.file_sys.FileSystem.get_hierarchical_url. If you are wondering what FileSystem.get_hierarchical_url does or how to call it, the hand-picked examples below should help. You can also explore further usage examples of the containing class, pyon.util.file_sys.FileSystem.


Below are 9 code examples of the FileSystem.get_hierarchical_url method, sorted by popularity by default.
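
Judging from the examples below, FileSystem.get_hierarchical_url(root, name, extension='') resolves a root constant such as FS.CACHE plus a (usually content-derived) file name into a concrete, immediately writable path. The following stand-in is a hypothetical sketch of that behavior, not pyon's actual implementation:

import os

def get_hierarchical_url_sketch(root, filename, extension=''):
    # Hypothetical stand-in for FileSystem.get_hierarchical_url:
    # fan files out into subdirectories keyed by the first characters of the
    # (typically sha1-derived) name, so no single directory grows unbounded.
    # The real pyon implementation may differ.
    subdir = os.path.join(root, filename[:2]) if len(filename) > 2 else root
    if not os.path.exists(subdir):
        os.makedirs(subdir)
    return os.path.join(subdir, filename + extension)

# e.g. get_hierarchical_url_sketch('/tmp/cache', 'ABCD1234', '.hdf5')
#      -> '/tmp/cache/AB/ABCD1234.hdf5'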

Example 1: make_some_data

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def make_some_data(self):
        # random and hashlib are used below; shown here so the snippet is
        # self-contained (the original presumably imports them at module level)
        import random
        import hashlib
        import numpy as np

        stream_id = 'I am very special'
        definition = SBE37_CDM_stream_definition()
        definition.stream_resource_id = stream_id

        self.couch.create(definition)

        total = 200
        n = 10  # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1, n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i += 1
                point_id = psc.add_point(time=i, location=(0, 0, 0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0, scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[definition.data_stream_id].values
            # Content-address the HDF payload by its uppercase SHA-1 digest
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            with open(FileSystem.get_hierarchical_url(FS.CACHE, '%s.hdf5' % sha1), 'w') as f:
                f.write(hdf_string)
            # Strip the bulk payload before persisting the granule document itself
            granule.identifiables[definition.data_stream_id].values = ''
            self.couch.create(granule)
Author: dstuebe | Project: coi-services | Lines: 32 | Source: data_retriever_test.py

Example 2: read_persisted_cache

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def read_persisted_cache(self, sha1, encoding):
        byte_string = None
        # Resolve the cache path for the content-addressed file
        path = FileSystem.get_hierarchical_url(FS.CACHE, sha1, '.%s' % encoding)
        try:
            with open(path, 'r') as f:
                byte_string = f.read()
        except IOError as e:
            raise BadRequest(e.message)
        return byte_string
Author: ooici-eoi | Project: coi-services | Lines: 11 | Source: replay_process_a.py

Example 3: process_stream

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def process_stream(self, packet, dset_config):
        """
        Accepts a stream packet and an instruction (a dset_config). According to the
        received dset_config it processes the stream, e.g. storing it in HDF
        storage and/or couch storage.
        @param packet The incoming data stream packet.
        @param dset_config The dataset config telling this method what to do with the incoming data stream.
        """
        ingestion_attributes = {'variables': [], 'number_of_records': -1,
                                'updated_metadata': False, 'updated_data': False}

        if dset_config is None:
            log.info('No dataset config for this stream!')
            return

        # Get back to the serialized form - the process receives only the IonObject
        # after the interceptor stack has decoded it...
        simple_dict = ion_serializer.serialize(packet)  # packet is an ion_object
        byte_string = msgpack.packb(simple_dict, default=encode_ion)

        encoding_type = 'ion_msgpack'

        # Persisted sha1 is crafted from the byte string msgpack creates
        calculated_sha1 = hashlib.sha1(byte_string).hexdigest().upper()

        dataset_granule = {
            'stream_id'      : dset_config.stream_id,
            'dataset_id'     : dset_config.dataset_id,
            'persisted_sha1' : calculated_sha1,
            'encoding_type'  : encoding_type,
            'ts_create'      : get_ion_ts()
        }

        self.persist_immutable(dataset_granule)

        filename = FileSystem.get_hierarchical_url(FS.CACHE, calculated_sha1, ".%s" % encoding_type)

        # The with-statement closes the file; no explicit close() is needed
        with open(filename, mode='wb') as f:
            f.write(byte_string)

        return ingestion_attributes
Author: ooici-eoi | Project: coi-services | Lines: 50 | Source: ingestion_worker_a.py
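
Examples 2 and 3 are two halves of the same content-addressed cache: the ingestion worker writes a byte string under its uppercase SHA-1 digest, and the replay process later reads it back by that digest. A minimal roundtrip sketch under the same assumptions (FileSystem and FS as imported above; a writable cache directory):

import hashlib

# Roundtrip sketch combining Examples 2 and 3; the payload is made up.
byte_string = 'packed payload bytes'
encoding = 'ion_msgpack'
sha1 = hashlib.sha1(byte_string).hexdigest().upper()

path = FileSystem.get_hierarchical_url(FS.CACHE, sha1, '.%s' % encoding)
with open(path, 'wb') as f:   # write side, as in Example 3
    f.write(byte_string)

with open(path, 'r') as f:    # read side, as in Example 2
    assert f.read() == byte_string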

Example 4: _parse_granule

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def _parse_granule(self, granule):
        '''
        @brief Ensures the granule is valid and gets some metadata from the granule for building the dataset
        @param granule raw granule straight from couch
        @return metadata in the granule, as well as the granule itself, if valid
        '''

        granule.stream_resource_id = self.stream_id

        element_count_id = self.element_count_id
        encoding_id = self.encoding_id

        record_count = granule.identifiables[element_count_id].value

        # If there are no records then this is not a proper granule
        if not (record_count > 0):
            log.debug('Granule had no record count; discarding.')
            return None

        # No encoding, no packet (check membership before dereferencing)
        if encoding_id not in granule.identifiables:
            log.debug('Granule had no encoding; discarding.')
            return None

        sha1 = granule.identifiables[encoding_id].sha1 or None
        if not sha1:
            log.debug('Granule had no sha1; discarding.')
            return None

        filepath = FileSystem.get_hierarchical_url(FS.CACHE, sha1, '.hdf5')

        if not os.path.exists(filepath):
            log.debug('File with sha1 does not exist; discarding.')
            return None

        return {
            'granule': granule,
            'records': record_count,
            'sha1': sha1
        }
Author: dstuebe | Project: coi-services | Lines: 43 | Source: replay_process.py

Example 5: persist_file

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def persist_file(self, file_data='', digest='', metadata=None):
        ds = self.container.datastore_manager.get_datastore(self.datastore_name, DS.DS_PROFILE.FILESYSTEM)
        validate_is_instance(file_data, basestring, "File or binary data must be a string.")
        validate_is_instance(metadata, File)

        if self.list_files(metadata.name + metadata.extension):
            raise BadRequest('%s%s already exists.' % (metadata.name, metadata.extension))

        digest_ = sha224(file_data).hexdigest()
        if digest:
            validate_equal(digest, digest_, "The provided digest does not match the file's digest. Ensure you are using sha224.")
        else:
            digest = digest_

        extension = metadata.extension
        if '.' in metadata.name:
            # Split a dotted name into (basename, extension)
            t = metadata.name.split('.')
            metadata.name, metadata.extension = ('.'.join(t[:-1]), '.' + t[-1])
        url = FileSystem.get_hierarchical_url(FS.CACHE, digest, extension)
        try:
            with open(url, 'w+b') as f:
                f.write(file_data)
        except Exception:
            log.exception('Failed to write %s', url)
            raise BadRequest('Could not successfully write file data')
        if metadata.name[0] != '/':
            metadata.name = '/' + metadata.name
        metadata.url = url
        metadata.digest = digest
        metadata.created_date = IonTime().to_string()
        metadata.modified_date = IonTime().to_string()
        metadata.size = len(file_data)

        doc_id, rev_id = ds.create(metadata)
        return doc_id
Author: kerfoot | Project: coi-services | Lines: 38 | Source: preservation_management_service.py
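
A note on the design in Example 5: persist_file stores the bytes under their sha224 digest, so identical content always lands at the same cache path, and a caller may pass a precomputed digest so the service can detect corruption in transit. A hedged calling sketch (the keyword-style File construction and the preservation_service handle are assumptions, not taken from the snippet):

from hashlib import sha224

file_data = 'contents of my data file'
digest = sha224(file_data).hexdigest()   # precomputed so the service can verify it

# 'File(...)' keyword construction and 'preservation_service' are hypothetical
metadata = File(name='profile_2012', extension='.dat')
doc_id = preservation_service.persist_file(file_data=file_data,
                                           digest=digest,
                                           metadata=metadata)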

Example 6: _get_coverage

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def _get_coverage(cls, dataset_id):
        # Coverages are cached on disk keyed by dataset id with a .cov extension
        filename = FileSystem.get_hierarchical_url(FS.CACHE, dataset_id, '.cov')
        coverage = SimplexCoverage.load(filename)
        return coverage
Author: pombredanne | Project: coi-services | Lines: 6 | Source: dataset_management_service.py

Example 7: _persist_coverage

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def _persist_coverage(cls, dataset_id, coverage):
        validate_is_instance(coverage, SimplexCoverage, 'Coverage is not an instance of SimplexCoverage: %s' % type(coverage))
        filename = FileSystem.get_hierarchical_url(FS.CACHE, dataset_id, '.cov')
        SimplexCoverage.save(coverage, filename, use_ascii=False)
Author: pombredanne | Project: coi-services | Lines: 6 | Source: dataset_management_service.py
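
Examples 6 and 7 form a save/load pair: a SimplexCoverage is written to a '.cov' file keyed by dataset id, then later reloaded from the same hierarchical path. A sketch of the pairing (the class name DatasetManagementService is inferred from the source file name and is an assumption, as is the pre-existing 'coverage' object):

# Hypothetical pairing of Examples 6 and 7
dataset_id = 'abc123'
DatasetManagementService._persist_coverage(dataset_id, coverage)  # writes abc123.cov under FS.CACHE
restored = DatasetManagementService._get_coverage(dataset_id)     # reloads from the same path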

Example 8: process_stream

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def process_stream(self, packet, dset_config):
        """
        Accepts a stream packet and an instruction (a dset_config). According to the
        received dset_config it processes the stream, e.g. storing it in HDF
        storage and/or couch storage.
        @param packet The incoming data stream packet.
        @param dset_config The dataset config telling this method what to do with the incoming data stream.
        """
        ingestion_attributes = {'variables': [], 'number_of_records': -1,
                                'updated_metadata': False, 'updated_data': False}

        if dset_config is None:
            log.info('No dataset config for this stream!')
            return

        values_string = ''
        sha1 = ''
        encoding_type = ''

        # Pull the payload, encoding, coverage and record-count elements out of the packet
        for key, value in packet.identifiables.iteritems():
            if isinstance(value, DataStream):
                values_string = value.values
                value.values = ''

            elif isinstance(value, Encoding):
                sha1 = value.sha1
                encoding_type = value.encoding_type

            elif isinstance(value, Coverage):
                ingestion_attributes['variables'].append(key)

            elif isinstance(value, CountElement):
                ingestion_attributes['number_of_records'] = value.value

        if dset_config.archive_metadata is True:
            log.debug("Persisting data....")
            ingestion_attributes['updated_metadata'] = True
            self.persist_immutable(packet)

        if dset_config.archive_data is True:
            #@todo - grab the filepath to save the hdf string somewhere..

            ingestion_attributes['updated_data'] = True
            if values_string:

                calculated_sha1 = hashlib.sha1(values_string).hexdigest().upper()

                filename = FileSystem.get_hierarchical_url(FS.CACHE, calculated_sha1, ".%s" % encoding_type)

                # Guard against corruption: the stored sha1 must match the recomputed one
                if sha1 != calculated_sha1:
                    raise IngestionWorkerException('The stored sha1 differs from the one calculated from the received hdf_string')

                with open(filename, mode='wb') as f:
                    f.write(values_string)
            else:
                log.warn("Nothing to write!")

        return ingestion_attributes
Author: seman | Project: coi-services | Lines: 64 | Source: ingestion_worker.py

Example 9: _merge

# Required import: from pyon.util.file_sys import FileSystem [as alias]
# Or: from pyon.util.file_sys.FileSystem import get_hierarchical_url [as alias]
    def _merge(self, msgs):
        '''
        @brief Merges all the granules and datasets into one large dataset (union)
        @param msgs raw granules from couch
        @return complete dataset
        @description
                 n
            D := U [ msgs_i ]
                i=0
        '''
        granule = None
        file_list = list()
        count = len(msgs)
        used_vals = list()

        #-------------------------------------------------------------------------------------
        # Merge each granule into the accumulated granule one by one.
        # After each merge operation, keep track of which files belong where on the timeline.
        #-------------------------------------------------------------------------------------

        for i in xrange(count):
            if i == 0:
                granule = msgs[0]['granule']
                psc = PointSupplementConstructor(point_definition=self.definition)

                res = ReplayProcess.merge_granule(definition=self.definition, granule1=granule, granule2=None)
                granule = res['granule']
                file_pair = res['files']
                log.debug('file_pair: %s', file_pair)

                if file_pair[0] not in file_list and file_pair[0][0] not in used_vals:
                    file_list.append(tuple(file_pair[0]))
                    used_vals.append(file_pair[0][0])

            else:
                res = ReplayProcess.merge_granule(definition=self.definition, granule1=granule, granule2=msgs[i]['granule'])

                granule = res['granule']
                file_pair = res['files']
                log.debug('file_pair: %s', file_pair)

                if file_pair[0] not in file_list and file_pair[0][0] not in used_vals:
                    file_list.append(tuple(file_pair[0]))
                    used_vals.append(file_pair[0][0])
                if file_pair[1] not in file_list and file_pair[1][0] not in used_vals:
                    file_list.append(tuple(file_pair[1]))
                    used_vals.append(file_pair[1][0])

        if not granule:
            return
        log.debug('file_list: %s', file_list)
        #-------------------------------------------------------------------------------------
        # Order the list using Python's stable sort (by the first value in each tuple),
        # then peel off just the file names,
        # then get the appropriate URL for each file using FileSystem.
        #-------------------------------------------------------------------------------------
        file_list.sort()
        file_list = list(i[1] for i in file_list)
        file_list = list([FileSystem.get_hierarchical_url(FS.CACHE, '%s' % i) for i in file_list])

        pairs = self._pair_up(granule)
        var_names = list([i[0] for i in pairs])

        record_count = granule.identifiables[self.element_count_id].value
        codec = HDFEncoder()
        log.debug('acquire_data:')
        log.debug('\tfile_list: %s', file_list)
        log.debug('\tfields: %s', var_names)
        log.debug('\trecords: %s', record_count)

        data = acquire_data(file_list, var_names, record_count).next()

        for row, value in data.iteritems():
            value_path = self._find_vp(pairs, row)
            codec.add_hdf_dataset(value_path, nparray=value['values'])
            #-------------------------------------------------------------------------------------
            # Debugging
            #-------------------------------------------------------------------------------------
            log.debug('row: %s', row)
            log.debug('value path: %s', value_path)
            log.debug('value: %s', value['values'])

        hdf_string = codec.encoder_close()
        self._patch_granule(granule, hdf_string)
        return granule
Author: dstuebe | Project: coi-services | Lines: 89 | Source: replay_process.py
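
The bookkeeping in Example 9 reduces to: collect (timestamp, filename) pairs, drop duplicates by timestamp, sort by timestamp, then resolve each remaining name through get_hierarchical_url. The same step in isolation, with invented pairs:

# Isolated, runnable sketch of Example 9's timeline bookkeeping
file_pairs = [(3, 'C.hdf5'), (1, 'A.hdf5'), (3, 'C.hdf5'), (2, 'B.hdf5')]

file_list, used_vals = [], []
for pair in file_pairs:
    if pair not in file_list and pair[0] not in used_vals:
        file_list.append(tuple(pair))
        used_vals.append(pair[0])

file_list.sort()                             # stable sort by timestamp
file_list = [name for _, name in file_list]  # peel off just the file names
# -> ['A.hdf5', 'B.hdf5', 'C.hdf5']; each would then be resolved via
#    FileSystem.get_hierarchical_url(FS.CACHE, name)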


Note: the pyon.util.file_sys.FileSystem.get_hierarchical_url examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub/MSDocs and similar platforms, selected from projects contributed by the open-source community. Copyright in the code snippets remains with their original authors; consult the corresponding project's License before using the code, and do not republish without permission.