

Python SD.attributes Method Code Examples

This article collects typical usage examples of the Python method pyhdf.SD.SD.attributes, drawn from real projects. If you are unsure what SD.attributes does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore further usage examples of pyhdf.SD.SD, the class this method belongs to.


Below are 15 code examples of SD.attributes, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
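
Before diving into the examples, here is a minimal sketch of the method itself: SD.attributes() returns the file's global attributes as a plain Python dict keyed by attribute name (the file name below is a placeholder, not taken from any example):

from pyhdf.SD import SD, SDC

# Open an HDF4 file read-only and print its global attributes.
sd = SD("example.hdf", SDC.READ)     # placeholder file name
for name, value in sd.attributes().items():
    print(name, "=", value)
sd.end()                             # close the file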

Example 1: read

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
    def read(self, filename, **kwargs):
        """Read the data"""
        from pyhdf.SD import SD
        import datetime

        #print "*** >>> Read the hdf-eos file!"
        root = SD(filename)
    
        # Get all the Attributes:
        # Common Attributes, Data Time,
        # Data Structure and Scene Coordinates
        for key, value in root.attributes().items():
            self._eoshdf_info[key] = value

        # Start Time - datetime object
        starttime = datetime.datetime.strptime(self._eoshdf_info['Start Time'][0:13], 
                                               "%Y%j%H%M%S")
        msec = float(self._eoshdf_info['Start Time'][13:16])/1000.
        self.starttime = starttime + datetime.timedelta(seconds=msec)
    
        # End Time - datetime object
        endtime = datetime.datetime.strptime(self._eoshdf_info['End Time'][0:13], 
                                             "%Y%j%H%M%S")
        msec = float(self._eoshdf_info['End Time'][13:16])/1000.
        self.endtime = endtime + datetime.timedelta(seconds=msec)

        # What is the leading 'H' doing here?
        sensor_name = self._eoshdf_info['Sensor Name'][1:-1].lower()
        try:
            self.satid = EOS_SATELLITE[sensor_name]
        except KeyError:
            LOG.error("Failed setting the satellite id - sat-name = ", 
                      sensor_name)
            
        self.orbit = self._eoshdf_info['Orbit Number']
        self.shape = (self._eoshdf_info['Number of Scan Control Points'],
                      self._eoshdf_info['Number of Pixel Control Points'])

        value = root.select(self.name)
        attr = value.attributes()
        data = value.get()

        self.attr = attr
        band = data
        if self.name in FLAGS_QUALITY:
            self.data = band
        else:
            nodata = attr['bad_value_scaled']
            self.data = (np.ma.masked_equal(band, nodata) *
                         attr['slope'] + attr['intercept'])

        value.endaccess()

        root.end()
        self.filled = True
Author: Pelgrum, Project: mpop, Lines of code: 61, Source file: modis_level2.py
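
Usage note: this read() method belongs to the ModisEosHdfLevel2 channel class; Example 12 below shows it in context, where ModisEosHdfLevel2(prodname).read(filename) is called for each requested product.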

Example 2: __init__

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
    def __init__(self, filename, filename_info, filetype_info):
        super(HDF4FileHandler, self).__init__(filename, filename_info, filetype_info)
        self.file_content = {}
        file_handle = SD(self.filename, SDC.READ)
        self._collect_attrs('', file_handle.attributes())
        for k, v in file_handle.datasets().items():
            self.collect_metadata(k, file_handle.select(k))
        del file_handle
Author: davidh-ssec, Project: satpy, Lines of code: 10, Source file: hdf4_utils.py

Example 3: open_file

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def open_file(filename):
    A = SD(filename)

    # retrieve data SDS
    d = A.datasets()
    sds_name = list(d.keys())[0]  # name of the first SDS (dict keys are not indexable in Python 3)
    sds = A.select(sds_name)
    pin = A.attributes()

    return sds, pin
Author: DataSounds, Project: OceanSound, Lines of code: 12, Source file: extract_pyhdf.py
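
A possible way to call this helper; the file name and attribute name below are placeholders, not taken from the project:

# Hypothetical usage of open_file().
sds, pin = open_file("chlor_a.hdf")   # placeholder file name
print(pin.get("Title"))               # read one global attribute, if present
data = sds.get()                      # load the SDS into a numpy array
sds.endaccess()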

Example 4: load_thin_modis

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def load_thin_modis(satscene, options):
    """Read modis data from file and load it into *satscene*.
    """
    filename = satscene.time_slot.strftime("thin_MYD021KM.A%Y%j.%H%M.005.NRT.hdf")
    filename = os.path.join(options["dir"], filename)
    
    data = SD(filename)

    datasets = ['EV_250_Aggr1km_RefSB',
                'EV_500_Aggr1km_RefSB',
                'EV_1KM_RefSB',
                'EV_1KM_Emissive']

    for dataset in datasets:
        subdata = data.select(dataset)
        band_names = subdata.attributes()["band_names"].split(",")
        if len(satscene.channels_to_load & set(band_names)) > 0:
            uncertainty = data.select(dataset+"_Uncert_Indexes")
            if dataset == 'EV_1KM_Emissive':
                array = calibrate_tb(subdata, uncertainty)
            else:
                array = calibrate_refl(subdata, uncertainty)
            for (i, band) in enumerate(band_names):
                if band in satscene.channels_to_load:
                    satscene[band] = array[i]

    mda = data.attributes()["CoreMetadata.0"]
    orbit_idx = mda.index("ORBITNUMBER")
    satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]

    lat, lon = get_lat_lon(satscene, None)
    from pyresample import geometry
    satscene.area = geometry.SwathDefinition(lons=lon, lats=lat)

    # trimming out dead sensor lines
    if satscene.satname == "aqua":
        for band in ["6", "27"]:
            if not satscene[band].is_loaded() or satscene[band].data.mask.all():
                continue
            width = satscene[band].data.shape[1]
            height = satscene[band].data.shape[0]
            indices = satscene[band].data.mask.sum(1) < width
            if indices.sum() == height:
                continue
            satscene[band] = satscene[band].data[indices, :]
            satscene[band].area = geometry.SwathDefinition(
                lons=satscene.area.lons[indices,:],
                lats=satscene.area.lats[indices,:])
            satscene[band].area.area_id = ("swath_" + satscene.fullname + "_"
                                           + str(satscene.time_slot) + "_"
                                           + str(satscene[band].shape) + "_"
                                           + str(band))
Author: 3Geo, Project: mpop, Lines of code: 54, Source file: thin_modis.py

Example 5: read_hdf4_info

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def read_hdf4_info(input_file_path) :
    """
    get information about variable names and attributes (both global and variable specific) from the
    given file. The file is assumed to exist and be a valid hdf4 file

    returns something in the form:

        {
            GLOBAL_ATTRS_KEY    : a dictionary of attribute values keyed by the attribute names
            VAR_LIST_KEY        : [list of variable names]
            VAR_INFO_KEY        :   {
                                        <var_name> :    {
                                                            SHAPE_KEY: (shape of variable data)
                                                            VAR_ATTRS_KEY: a dictionary of attribute values keyed by the attribute names
                                                        }
                                    }

        }

        TODO, depending on what changes need to be made for CF compliance this data structure may need to change a lot
    """

    file_info = { }

    # open the file
    file_object = SD(input_file_path, SDC.READ)

    # get information on the global attributes in the file
    global_attrs = file_object.attributes()
    file_info[GLOBAL_ATTRS_KEY] = global_attrs

    # get information on the variables in the file
    variable_list = file_object.datasets().keys()
    file_info[VAR_LIST_KEY] = variable_list

    # for each variable in a file, get more specific information about it
    file_info[VAR_INFO_KEY] = { }
    sets_temp = file_object.datasets()
    # datasets() returns a dictionary with an entry for each variable in the form
    #       <variable name>: ((dimension names), (data shape), type, index num)
    for var_name in variable_list :
        var_object = file_object.select(var_name)
        var_attrs  = var_object.attributes()
        file_info[VAR_INFO_KEY][var_name] = {
                                                SHAPE_KEY: sets_temp[var_name][1],
                                                VAR_ATTRS_KEY: var_attrs,
                                            }

    return file_info, file_object
Author: evas-ssec, Project: geo_converter, Lines of code: 51, Source file: convert.py
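
A sketch of how this function might be called; the *_KEY constants are module-level names in the source project, and the file name is a placeholder:

# Hypothetical usage; GLOBAL_ATTRS_KEY etc. come from the same module.
file_info, file_object = read_hdf4_info("input.hdf")
print(file_info[GLOBAL_ATTRS_KEY])    # dict of global attributes
for var_name in file_info[VAR_LIST_KEY]:
    print(var_name, file_info[VAR_INFO_KEY][var_name][SHAPE_KEY])
file_object.end()                     # the caller closes the file object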

Example 6: read_amsr_hdf4

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def read_amsr_hdf4(filename):
    from pyhdf.SD import SD, SDC
    from pyhdf.HDF import HDF, HC
    import pyhdf.VS 

    retv = AmsrObject()
    h4file = SD(filename, SDC.READ)
    datasets = h4file.datasets()
    attributes = h4file.attributes()
    #for idx,attr in enumerate(attributes.keys()):
    #    print idx, attr
    for sds in ["Longitude", "Latitude", "High_res_cloud"]:
        data = h4file.select(sds).get()
        if sds in ["Longitude", "Latitude"]:
            retv.all_arrays[sds.lower()] = data.ravel()
        elif sds in ["High_res_cloud"]:
            lwp_gain = h4file.select(sds).attributes()['Scale']
            retv.all_arrays["lwp_mm"] = data.ravel() * lwp_gain

        #print h4file.select(sds).info()
    h4file = HDF(filename, SDC.READ)
    vs = h4file.vstart()
    data_info_list = vs.vdatainfo()
    #print "1D data compound/Vdata"
    for item in data_info_list:
        #1D data compound/Vdata
        name = item[0]
        #print name
        if name in ["Time"]:
            data_handle = vs.attach(name)
            data = np.array(data_handle[:])
            retv.all_arrays["sec1993"] = data 
            data_handle.detach()
        else:
            pass
            #print name
        #data = np.array(data_handle[:])
        #attrinfo_dic = data_handle.attrinfo()
        #factor = data_handle.findattr('factor')
        #offset = data_handle.findattr('offset')
        #print data_handle.factor
        #data_handle.detach()
    #print data_handle.attrinfo()
    h4file.close()
    #for key in retv.all_arrays.keys():
    #    print key, retv.all_arrays[key]
    return retv
Author: adybbroe, Project: atrain_match, Lines of code: 49, Source file: amsr.py

Example 7: read_calipso_hdf4

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def read_calipso_hdf4(filename, retv):
    from pyhdf.SD import SD, SDC
    from pyhdf.HDF import HDF, HC
    import pyhdf.VS 
    def convert_data(data):
        if len(data.shape) == 2:
            if data.shape[1] == 1:
                return data[:, 0]
            elif data.shape[0] == 1:
                return data[0, :]
        return data
    if filename is not None:
        h4file = SD(filename, SDC.READ)
        datasets = h4file.datasets()
        attributes = h4file.attributes()
        singleshotdata = {}
        for idx, dataset in enumerate(datasets.keys()):
            # non-groups
            if dataset in scip_these_larger_variables_until_needed.keys():        
                continue
            elif dataset[0:8] == 'Surface_':
                continue
            if dataset in ["ssNumber_Layers_Found", 
                           "ssLayer_Base_Altitude", 
                           "ssLayer_Top_Altitude"]:
                singleshotdata[dataset] = h4file.select(dataset).get()
            if dataset[0:2] == "ss":
                # already saved temporarily what we need
                continue            
            name = dataset.lower()
            #print idx, dataset
            if dataset in atrain_match_names.keys():
                name = atrain_match_names[dataset]
            data = np.array(h4file.select(dataset).get())
            setattr(retv, name, data) 
        if "ssNumber_Layers_Found" in singleshotdata.keys():
            # Extract number of cloudy single shots (max 15)
            # plus average cloud base and top
            # in 5 km FOV
            logger.info("Reading single shot information")
            retv = rearrange_calipso_the_single_shot_info(
                retv,
                singleshotdata)
    return retv 
Author: adybbroe, Project: atrain_match, Lines of code: 46, Source file: calipso.py

Example 8: _read_hdf

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
    def _read_hdf(self, fieldname='l3m_data'):
        sd = SD(self.filename, SDC.READ)
        ds = sd.select(fieldname)
        # Lower-case all attribute keys; iterate over a list copy, since
        # mutating a dict while iterating it raises RuntimeError in Python 3.
        attrdict = ds.attributes()
        for d in list(attrdict):
            attrdict[d.lower()] = attrdict.pop(d)
        field      = ds[self.j1:self.j2, self.i1:self.i2].copy()
        intercept  = attrdict['intercept']
        slope      = attrdict['slope']
        try:
            nanval = attrdict['fill']
        except KeyError:
            nanval = attrdict['bad_value_scaled']
        try:
            base   = attrdict['base']
        except KeyError:
            base   = -999
        dstr = sd.attributes()['Start Time']
        self.jd = pl.datestr2num(dstr[:4] + "-1-1") + float(dstr[4:7]) - 1
        return field, base, intercept, slope, nanval
Author: brorfred, Project: satmap, Lines of code: 21, Source file: l2.py
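
A minimal sketch of how the returned values are usually applied. The reader instance is hypothetical, and the log-scaling branch for a positive base is an assumption based on ocean-color level-3 conventions, not something this example does itself:

import numpy as np

# Hypothetical application of the values returned by _read_hdf().
field, base, intercept, slope, nanval = reader._read_hdf('l3m_data')
masked = np.ma.masked_equal(field, nanval)   # hide fill values
physical = masked * slope + intercept        # linear scaling
if base > 0:                                 # assumed: log-scaled product
    physical = base ** physical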

Example 9: write_interpolated

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def write_interpolated(filename, f0, f1, fact, datasetNames):
    '''
    interpolate two hdf files f0 and f1 using factor fact, and
    write the result to filename
    '''

    hdf = SD(filename, SDC.WRITE|SDC.CREATE)
    for datasetName in datasetNames:

        try:
            info = SD(f0).select(datasetName).info()
        except Exception:
            print('Error loading %s in %s' % (datasetName, f0), file=stderr)
            raise

        typ  = info[3]
        shp  = info[2]
        sds_in1 = SD(f0).select(datasetName)
        met0 = sds_in1.get()
        met1 = SD(f1).select(datasetName).get()

        interp = (1-fact)*met0 + fact*met1

        interp = interp.astype({
                SDC.INT16: 'int16',
                SDC.FLOAT32: 'float32',
                SDC.FLOAT64: 'float64',
            }[typ])

        # write
        sds = hdf.create(datasetName, typ, shp)
        sds[:] = interp[:]

        # copy attributes
        attr = sds_in1.attributes()
        if len(attr) > 0:
            for name in attr.keys():
                setattr(sds, name, attr[name])
        sds.endaccess()

    hdf.end()
Author: bcdev, Project: oc-cci, Lines of code: 43, Source file: polymer-get_meteo_calvalus.py
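
A possible invocation, assuming two consecutive meteorological files and a midpoint weight; all file and dataset names are placeholders:

# Hypothetical usage: blend two analyses at the halfway time (fact = 0.5).
write_interpolated('met_interp.hdf', 'met_06.hdf', 'met_12.hdf', 0.5,
                   ['z_wind', 'm_wind'])   # placeholder dataset names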

Example 10: load_generic

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def load_generic(satscene, filename, resolution, cores):
    """Read modis data, generic part.
    """

    try:
        data = SD(str(filename))
    except HDF4Error as err:
        logger.warning("Could not load data from " + str(filename)
                       + ": " + str(err))
        return

    datadict = {
        1000: ['EV_250_Aggr1km_RefSB',
               'EV_500_Aggr1km_RefSB',
               'EV_1KM_RefSB',
               'EV_1KM_Emissive'],
        500: ['EV_250_Aggr500_RefSB',
              'EV_500_RefSB'],
        250: ['EV_250_RefSB']}

    datasets = datadict[resolution]

    loaded_bands = []

    # process by dataset, reflective and emissive datasets separately

    for dataset in datasets:
        subdata = data.select(dataset)
        band_names = subdata.attributes()["band_names"].split(",")
        if len(satscene.channels_to_load & set(band_names)) > 0:
            # get the relative indices of the desired channels
            indices = [i for i, band in enumerate(band_names)
                       if band in satscene.channels_to_load]
            uncertainty = data.select(dataset + "_Uncert_Indexes")
            if dataset.endswith('Emissive'):
                array = calibrate_tb(subdata, uncertainty, indices, band_names)
            else:
                array = calibrate_refl(subdata, uncertainty, indices)
            for (i, idx) in enumerate(indices):
                satscene[band_names[idx]] = array[i]
                # fix the resolution to match the loaded data.
                satscene[band_names[idx]].resolution = resolution
                loaded_bands.append(band_names[idx])

    # Get the orbit number
    if not satscene.orbit:
        mda = data.attributes()["CoreMetadata.0"]
        orbit_idx = mda.index("ORBITNUMBER")
        satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]

    # Get the geolocation
    # if resolution != 1000:
    #    logger.warning("Cannot load geolocation at this resolution (yet).")
    #    return

    lat, lon = get_lat_lon(satscene, resolution, filename, cores)
    area = geometry.SwathDefinition(lons=lon, lats=lat)
    for band_name in loaded_bands:
        satscene[band_name].area = area

    # Trimming out dead sensor lines (detectors) on aqua:
    # (in addition, channel 21 is noisy)
    if satscene.satname == "aqua":
        for band in ["6", "27", "36"]:
            if not satscene[band].is_loaded() or satscene[band].data.mask.all():
                continue
            width = satscene[band].data.shape[1]
            height = satscene[band].data.shape[0]
            indices = satscene[band].data.mask.sum(1) < width
            if indices.sum() == height:
                continue
            satscene[band] = satscene[band].data[indices, :]
            satscene[band].area = geometry.SwathDefinition(
                lons=satscene[band].area.lons[indices, :],
                lats=satscene[band].area.lats[indices, :])
            satscene[band].area.area_id = ("swath_" + satscene.fullname + "_"
                                           + str(satscene.time_slot) + "_"
                                           + str(satscene[band].shape) + "_"
                                           + str(band))

    # Trimming out dead sensor lines (detectors) on terra:
    # (in addition, channels 27, 30, 34, 35, and 36 are noisy)
    if satscene.satname == "terra":
        for band in ["29"]:
            if not satscene[band].is_loaded() or satscene[band].data.mask.all():
                continue
            width = satscene[band].data.shape[1]
            height = satscene[band].data.shape[0]
            indices = satscene[band].data.mask.sum(1) < width
            if indices.sum() == height:
                continue
            satscene[band] = satscene[band].data[indices, :]
            satscene[band].area = geometry.SwathDefinition(
                lons=satscene[band].area.lons[indices, :],
                lats=satscene[band].area.lats[indices, :])
            satscene[band].area.area_id = ("swath_" + satscene.fullname + "_"
                                           + str(satscene.time_slot) + "_"
                                           + str(satscene[band].shape) + "_"
                                           + str(band))
Author: meteoswiss-mdr, Project: mpop, Lines of code: 101, Source file: hdfeos_l1b.py

Example 11: ModisReader

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]

#......... part of the code omitted here .........
        datadict = {
            1000: ['EV_250_Aggr1km_RefSB',
                   'EV_500_Aggr1km_RefSB',
                   'EV_1KM_RefSB',
                   'EV_1KM_Emissive'],
            500: ['EV_250_Aggr500_RefSB',
                  'EV_500_RefSB'],
            250: ['EV_250_RefSB']}

        loaded_bands = []

        # process by dataset, reflective and emissive datasets separately

        resolutions = [250, 500, 1000]

        for res in resolutions:
            if res < resolution:
                continue
            logger.debug("Working on resolution %d", res)
            self.filename = self.datafiles[res]

            logger.debug("Using " + str(cores) + " cores for interpolation")

            try:
                self.data = SD(str(self.filename))
            except HDF4Error as err:
                logger.warning("Could not load data from " + str(self.filename)
                               + ": " + str(err))
                continue

            datasets = datadict[res]
            for dataset in datasets:
                subdata = self.data.select(dataset)
                band_names = subdata.attributes()["band_names"].split(",")
                if len(satscene.channels_to_load & set(band_names)) > 0:
                    # get the relative indices of the desired channels
                    indices = [i for i, band in enumerate(band_names)
                               if band in satscene.channels_to_load]
                    uncertainty = self.data.select(dataset + "_Uncert_Indexes")
                    if dataset.endswith('Emissive'):
                        array = calibrate_tb(
                            subdata, uncertainty, indices, band_names)
                    else:
                        array = calibrate_refl(subdata, uncertainty, indices)
                    for (i, idx) in enumerate(indices):
                        if band_names[idx] in loaded_bands:
                            continue
                        satscene[band_names[idx]] = array[i]
                        # fix the resolution to match the loaded data.
                        satscene[band_names[idx]].resolution = res
                        loaded_bands.append(band_names[idx])

        # Get the orbit number
        if not satscene.orbit:
            mda = self.data.attributes()["CoreMetadata.0"]
            orbit_idx = mda.index("ORBITNUMBER")
            satscene.orbit = mda[orbit_idx + 111:orbit_idx + 116]

        # Get the geolocation
        # if resolution != 1000:
        #    logger.warning("Cannot load geolocation at this resolution (yet).")
        #    return

        for band_name in loaded_bands:
            lon, lat = self.get_lonlat(satscene[band_name].resolution, cores)
            area = geometry.SwathDefinition(lons=lon, lats=lat)
Author: meteoswiss-mdr, Project: mpop, Lines of code: 70, Source file: hdfeos_l1b.py

Example 12: load

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def load(satscene, **kwargs):
    """Read data from file and load it into *satscene*.  Load data into the
    *channels*. *Channels* is a list or a tuple containing channels we will
    load data into. If None, all channels are loaded.
    """    
    del kwargs

    conf = ConfigParser()
    conf.read(os.path.join(CONFIG_PATH, satscene.fullname + ".cfg"))
    options = {}
    for option, value in conf.items(satscene.instrument_name+"-level3",
                                    raw = True):
        options[option] = value

    pathname = os.path.join(options["dir"], options['filename'])    
    filename = satscene.time_slot.strftime(pathname)
    
    for prodname in GEO_PHYS_PRODUCTS + FLAGS_QUALITY:
        if prodname in satscene.channels_to_load:
            
            prod_chan = ModisEosHdfLevel2(prodname)
            prod_chan.read(filename)
            prod_chan.satid = satscene.satname.capitalize()
            prod_chan.resolution = 1000.0
            prod_chan.shape = prod_chan.data.shape

            # All this for the netCDF writer:
            prod_chan.info['var_name'] = prodname
            prod_chan.info['var_data'] = prod_chan.data
            resolution_str = str(int(prod_chan.resolution))+'m'
            prod_chan.info['var_dim_names'] = ('y'+resolution_str,
                                               'x'+resolution_str)
            prod_chan.info['long_name'] = prod_chan.attr['long_name'][:-1]
            try:
                prod_chan.info['standard_name'] = prod_chan.attr['standard_name'][:-1]
            except KeyError:
                pass
            valid_min = np.min(prod_chan.data)
            valid_max = np.max(prod_chan.data)
            prod_chan.info['valid_range'] = np.array([valid_min, valid_max])
            prod_chan.info['resolution'] = prod_chan.resolution

            if prodname == 'l2_flags':
                # l2 flags definitions
                for i in range(1, 33):
                    key =  "f%02d_name"%i
                    prod_chan.info[key] = prod_chan.attr[key][:-1]

            satscene.channels.append(prod_chan)
            if prodname in CHANNELS:
                satscene[prodname].info['units'] = '%'
            else:
                satscene[prodname].info['units'] = prod_chan.attr['units'][:-1]

            LOG.info("Loading modis lvl2 product '%s' done"%prodname)

    # Check if there are any bands to load:
    channels_to_load = False
    for bandname in CHANNELS:
        if bandname in satscene.channels_to_load:
            channels_to_load = True
            break

    if channels_to_load:
        #print "FILE: ", filename
        eoshdf = SD(filename)
        # Get all the Attributes:
        # Common Attributes, Data Time,
        # Data Structure and Scene Coordinates
        info = {}
        for key, value in eoshdf.attributes().items():
            info[key] = value

        dsets = eoshdf.datasets()
        selected_dsets = []

        for bandname in CHANNELS:
            if (bandname in satscene.channels_to_load and
                bandname in dsets):

                value = eoshdf.select(bandname)
                selected_dsets.append(value)
        
                # Get only the selected datasets
                attr = value.attributes()
                band = value.get()

                nodata = attr['bad_value_scaled']
                mask = np.equal(band, nodata)
                satscene[bandname] = (np.ma.masked_where(mask, band) * 
                                      attr['slope'] + attr['intercept'])

                satscene[bandname].info['units'] = '%'
                satscene[bandname].info['long_name'] = attr['long_name'][:-1]

        for dset in selected_dsets:
            dset.endaccess()  

        LOG.info("Loading modis lvl2 Remote Sensing Reflectances done")
        eoshdf.end()
#......... part of the code omitted here .........
Author: Pelgrum, Project: mpop, Lines of code: 103, Source file: modis_level2.py

Example 13: run

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def run(FILE_NAME):
    
    DATAFIELD_NAME = 'NDVI_TOA'
    if USE_GDAL:    

        # Gdal
        import gdal

        GRID_NAME = 'WELD_GRID'
        gname = 'HDF4_EOS:EOS_GRID:"{0}":{1}:{2}'.format(FILE_NAME,
                                                         GRID_NAME,
                                                         DATAFIELD_NAME)

        # Scale down the data by a factor of 5 so that low-memory machines
        # can handle it.
        gdset = gdal.Open(gname)
        data = gdset.ReadAsArray().astype(np.float64)[::5, ::5]
    
        # Get any needed attributes.
        meta = gdset.GetMetadata()
        scale = float(meta['scale_factor'])        # np.float was removed from numpy; use the builtin
        fillvalue = float(meta['_FillValue'])
        valid_range = [float(x) for x in meta['valid_range'].split(', ')]
        units = meta['units']
    
        # Construct the grid.
        x0, xinc, _, y0, _, yinc = gdset.GetGeoTransform()
        ny, nx = (gdset.RasterYSize // 5, gdset.RasterXSize // 5)  # integer division: linspace needs int sizes
        x = np.linspace(x0, x0 + xinc*5*nx, nx)
        y = np.linspace(y0, y0 + yinc*5*ny, ny)
        xv, yv = np.meshgrid(x, y)

        del gdset

    else:
        if USE_NETCDF:

            from netCDF4 import Dataset

            # Scale down the data by a factor of 5 so that low-memory machines
            # can handle it.
            nc = Dataset(FILE_NAME)
            ncvar = nc.variables[DATAFIELD_NAME]
            ncvar.set_auto_maskandscale(False)
            data = ncvar[::5, ::5].astype(np.float64)

            # Get any needed attributes.
            scale = ncvar.scale_factor
            fillvalue = ncvar._FillValue
            valid_range = ncvar.valid_range
            units = ncvar.units
            gridmeta = getattr(nc, 'StructMetadata.0')
        else:
            from pyhdf.SD import SD, SDC
            hdf = SD(FILE_NAME, SDC.READ)

            # Read dataset.
            data2D = hdf.select(DATAFIELD_NAME)
            data = data2D[:,:].astype(np.double)

            # Scale down the data by a factor of 6 so that low-memory machines
            # can handle it.
            data = data[::6, ::6]

        
            # Read attributes.
            attrs = data2D.attributes(full=1)
            vra=attrs["valid_range"]
            valid_range = vra[0]
            fva=attrs["_FillValue"]
            fillvalue = fva[0]
            sfa=attrs["scale_factor"]
            scale = sfa[0]        
            ua=attrs["units"]
            units = ua[0]
            fattrs = hdf.attributes(full=1)
            ga = fattrs["StructMetadata.0"]
            gridmeta = ga[0]
        # Construct the grid.  The needed information is in a global attribute
        # called 'StructMetadata.0'.  Use regular expressions to tease out the
        # extents of the grid.  

        ul_regex = re.compile(r'''UpperLeftPointMtrs=\(
                                  (?P<upper_left_x>[+-]?\d+\.\d+)
                                  ,
                                  (?P<upper_left_y>[+-]?\d+\.\d+)
                                  \)''', re.VERBOSE)
        match = ul_regex.search(gridmeta)
        x0 = float(match.group('upper_left_x'))
        y0 = float(match.group('upper_left_y'))

        lr_regex = re.compile(r'''LowerRightMtrs=\(
                                  (?P<lower_right_x>[+-]?\d+\.\d+)
                                  ,
                                  (?P<lower_right_y>[+-]?\d+\.\d+)
                                  \)''', re.VERBOSE)
        match = lr_regex.search(gridmeta)
        x1 = float(match.group('lower_right_x'))
        y1 = float(match.group('lower_right_y'))
        
#......... part of the code omitted here .........
Author: hdfeos, Project: zoo_python, Lines of code: 103, Source file: CONUS_annual_2012_h01v06_doy007to356_v1_5.py

Example 14: run

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
def run(FILE_NAME):

    # Identify the data field.
    DATAFIELD_NAME = "TbOceanRain"

    if USE_GDAL:
        import gdal

        GRID_NAME = "MonthlyRainTotal_GeoGrid"
        gname = 'HDF4_EOS:EOS_GRID:"{0}":{1}:{2}'.format(FILE_NAME, GRID_NAME, DATAFIELD_NAME)
        gdset = gdal.Open(gname)
        data = gdset.ReadAsArray().astype(np.float64)

        # Apply the attributes information.
        meta = gdset.GetMetadata()

        # Construct the grid.  The projection is GEO, so this immediately
        # gives us latitude and longitude.
        x0, xinc, _, y0, _, yinc = gdset.GetGeoTransform()
        nx, ny = (gdset.RasterXSize, gdset.RasterYSize)
        x = np.linspace(x0, x0 + xinc * nx, nx)
        y = np.linspace(y0, y0 + yinc * ny, ny)
        longitude, latitude = np.meshgrid(x, y)
        del gdset

    else:
        from pyhdf.SD import SD, SDC

        hdf = SD(FILE_NAME, SDC.READ)

        # Read dataset.
        data2D = hdf.select(DATAFIELD_NAME)
        data = data2D[:, :].astype(np.float64)

        # Read global attribute.
        fattrs = hdf.attributes(full=1)
        ga = fattrs["StructMetadata.0"]
        gridmeta = ga[0]

        # Construct the grid.  The needed information is in a global attribute
        # called 'StructMetadata.0'.  Use regular expressions to tease out the
        # extents of the grid.  In addition, the grid is in packed decimal
        # degrees, so we need to normalize to degrees.

        ul_regex = re.compile(
            r"""UpperLeftPointMtrs=\(
                                  (?P<upper_left_x>[+-]?\d+\.\d+)
                                  ,
                                  (?P<upper_left_y>[+-]?\d+\.\d+)
                                  \)""",
            re.VERBOSE,
        )
        match = ul_regex.search(gridmeta)
        x0 = np.float(match.group("upper_left_x")) / 1e6
        y0 = np.float(match.group("upper_left_y")) / 1e6

        lr_regex = re.compile(
            r"""LowerRightMtrs=\(
                                  (?P<lower_right_x>[+-]?\d+\.\d+)
                                  ,
                                  (?P<lower_right_y>[+-]?\d+\.\d+)
                                  \)""",
            re.VERBOSE,
        )
        match = lr_regex.search(gridmeta)
        x1 = np.float(match.group("lower_right_x")) / 1e6
        y1 = np.float(match.group("lower_right_y")) / 1e6

        ny, nx = data.shape
        x = np.linspace(x0, x1, nx)
        y = np.linspace(y0, y1, ny)
        longitude, latitude = np.meshgrid(x, y)

    # Apply the attributes information.
    data[data == -1] = np.nan
    data = np.ma.masked_array(data, np.isnan(data))

    long_name = DATAFIELD_NAME
    units = "mm"

    m = Basemap(projection="cyl", resolution="l", lon_0=0, llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90, 91, 45), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(-180, 181, 45), labels=[0, 0, 0, 1])
    m.pcolormesh(longitude, latitude, data, latlon=True)
    cb = m.colorbar()
    cb.set_label(units)

    basename = os.path.basename(FILE_NAME)
    plt.title("{0}\n{1}".format(basename, long_name))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.png".format(basename)
    fig.savefig(pngfile)
Author: hdfeos, Project: zoo_python, Lines of code: 96, Source file: AMSR_E_L3_RG_TbOceanRain.py
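
The regular-expression parsing of 'StructMetadata.0' used above is easy to test in isolation; here is a self-contained sketch with a made-up metadata fragment:

import re

# Made-up StructMetadata.0 fragment, for illustration only.
gridmeta = 'UpperLeftPointMtrs=(-180000000.000000,90000000.000000)'
ul_regex = re.compile(r'UpperLeftPointMtrs=\((?P<x>[+-]?\d+\.\d+),(?P<y>[+-]?\d+\.\d+)\)')
match = ul_regex.search(gridmeta)
x0 = float(match.group('x')) / 1e6   # packed decimal degrees -> degrees
y0 = float(match.group('y')) / 1e6
print(x0, y0)                        # -180.0 90.0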

Example 15: hdf

# Required import: from pyhdf.SD import SD [as alias]
# Or alternatively: from pyhdf.SD.SD import attributes [as alias]
class hdf (object):
    """wrapper for HDF4 dataset for comparison
    __call__ yields sequence of variable names
    __getitem__ returns individual variables ready for slicing to numpy arrays
    """
    
    _hdf = None
    
    def __init__(self, filename, allowWrite=False):
        
        if pyhdf is None:
            LOG.error('pyhdf is not installed and is needed in order to read hdf4 files')
            assert(pyhdf is not None)
        mode = SDC.READ
        if allowWrite:
            mode = mode | SDC.WRITE
        
        self._hdf = SD(filename, mode)
        self.attributeCache = CaseInsensitiveAttributeCache(self)

    def __call__(self):
        "yield names of variables to be compared"
        return self._hdf.datasets().keys()
    
    # this returns a numpy array with a copy of the full, scaled
    # data for this variable, if the data type must be changed to allow
    # for scaling it will be (so the return type may not reflect the
    # type found in the original file)
    def __getitem__(self, name):
        # defaults
        scale_factor = 1.0
        add_offset = 0.0
        data_type = None 
        scaling_method = None
        
        # get the variable object and use it to
        # get our raw data and scaling info
        variable_object = self.get_variable_object(name)
        raw_data_copy = variable_object[:]
        try :
            # TODO, this currently won't work with geocat data, work around it for now
            scale_factor, scale_factor_error, add_offset, add_offset_error, data_type = SDS.getcal(variable_object)
        except HDF4Error:
            # load just the scale factor and add offset information by hand
            temp = self.attributeCache.get_variable_attributes(name)
            if ADD_OFFSET_STR in temp.keys() :
                add_offset = temp[ADD_OFFSET_STR]
                data_type = np.dtype(type(add_offset))
            if SCALE_FACTOR_STR in temp.keys() :
                scale_factor = temp[SCALE_FACTOR_STR]
                data_type = np.dtype(type(scale_factor))
            if SCALE_METHOD_STR in temp.keys() :
                scaling_method = temp[SCALE_METHOD_STR]
        SDS.endaccess(variable_object)
        
        # don't do lots of work if we don't need to scale things
        if (scale_factor == 1.0) and (add_offset == 0.0) :
            return raw_data_copy
        
        # at the moment geocat has several scaling methods that don't match the normal standards for hdf
        """
        please see constant.f90 for a more up to date version of this information:
            INTEGER(kind=int1) :: NO_SCALE              ! 0
            INTEGER(kind=int1) :: LINEAR_SCALE          ! 1
            INTEGER(kind=int1) :: LOG_SCALE             ! 2
            INTEGER(kind=int1) :: SQRT_SCALE            ! 3 
        """
        if (scaling_method == 0) :
            return raw_data_copy
        if not ((scaling_method is None) or (int(scaling_method) <= 1)) :
            LOG.warn ('Scaling method of \"' + str(scaling_method) + '\" will be ignored in favor of hdf standard method. '
                      + 'This may cause problems with data consistency')
        
        # if we don't have a data type something strange has gone wrong
        assert(not (data_type is None))
        
        # get information about where the data is the missing value
        missing_val = self.missing_value(name)
        missing_mask = np.zeros(raw_data_copy.shape, dtype=bool)  # np.bool was removed from numpy; use the builtin
        missing_mask[raw_data_copy == missing_val] = True
        
        # create the scaled version of the data
        scaled_data_copy                = np.array(raw_data_copy, dtype=data_type)
        scaled_data_copy[~missing_mask] = (scaled_data_copy[~missing_mask] * scale_factor) + add_offset #TODO, type truncation issues?
        
        return scaled_data_copy 
    
    def get_variable_object(self, name):
        return self._hdf.select(name)
    
    def missing_value(self, name):
        
        return self.get_attribute(name, fillValConst1)
    
    def create_new_variable(self, variablename, missingvalue=None, data=None, variabletocopyattributesfrom=None):
        """
        create a new variable with the given name
        optionally set the missing value (fill value) and data to those given
        
        the created variable will be returned, or None if a variable could not
#......... part of the code omitted here .........
Author: evas-ssec, Project: uwglance, Lines of code: 103, Source file: io.py
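
A sketch of how this wrapper might be used; the file and variable names are placeholders, and scaled reads rely on the module constants (ADD_OFFSET_STR and friends) referenced above:

# Hypothetical usage of the hdf wrapper class.
h = hdf("granule.hdf")        # placeholder file name
for var_name in h():          # __call__ yields variable names
    print(var_name)
data = h["brightness_temp"]   # scaled numpy array (placeholder variable name)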


Note: The pyhdf.SD.SD.attributes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license; do not reproduce without permission.