本文整理汇总了Python中pyhdf.SD.SD.datasets方法的典型用法代码示例。如果您正苦于以下问题:Python SD.datasets方法的具体用法?Python SD.datasets怎么用?Python SD.datasets使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyhdf.SD.SD
的用法示例。
在下文中一共展示了SD.datasets方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: read_hdf4_info
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def read_hdf4_info(input_file_path):
    """Collect variable and attribute metadata from an HDF4 file.

    The file is assumed to exist and be a valid HDF4 file.

    Returns a tuple (file_info, file_object) where file_info has the form:
    {
        GLOBAL_ATTRS_KEY : a dictionary of attribute values keyed by the attribute names
        VAR_LIST_KEY : [list of variable names]
        VAR_INFO_KEY : {
            <var_name> : {
                SHAPE_KEY: (shape of variable data)
                VAR_ATTRS_KEY: a dictionary of attribute values keyed by the attribute names
            }
        }
    }
    The open file_object is returned so the caller can read data; the caller
    is responsible for closing it.

    TODO, depending on what changes need to be made for CF compliance this
    data structure may need to change a lot.
    """
    file_info = {}
    # open the file
    file_object = SD(input_file_path, SDC.READ)
    # get information on the global attributes in the file
    file_info[GLOBAL_ATTRS_KEY] = file_object.attributes()
    # datasets() returns a dictionary with entries for each variable in the
    # form <variable name>: ((dimension names), (data shape), type, index num);
    # call it once and reuse it for both the name list and the shapes
    sets_temp = file_object.datasets()
    # materialize as a real list so the stored value is stable on Python 3
    # (dict.keys() is a live view there, not a list)
    variable_list = list(sets_temp.keys())
    file_info[VAR_LIST_KEY] = variable_list
    # for each variable in the file, record its shape and attributes
    file_info[VAR_INFO_KEY] = {}
    for var_name in variable_list:
        var_object = file_object.select(var_name)
        file_info[VAR_INFO_KEY][var_name] = {
            SHAPE_KEY: sets_temp[var_name][1],
            VAR_ATTRS_KEY: var_object.attributes(),
        }
    return file_info, file_object
示例2: get_variable_names
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def get_variable_names(self, filenames, data_type=None):
    """Return the set of variable names found in the given HDF files.

    Collects both VData (VD) names and scientific dataset (SD) names.
    """
    try:
        from pyhdf.SD import SD
        from pyhdf.HDF import HDF
    except ImportError:
        raise ImportError("HDF support was not installed, please reinstall with pyhdf to read HDF files.")
    found = set()
    for fname in filenames:
        # VD (vdata) variables
        hdf_file = HDF(fname)
        vs_interface = hdf_file.vstart()
        vd_infos = vs_interface.vdatainfo()
        # Assumes that latitude shape == longitude shape (it should):
        # dim_length = [info[3] for info in vd_infos if info[0] == 'Latitude'][0]
        for info in vd_infos:
            # if info[3] == dim_length:
            found.add(info[0])
        # SD variables:
        sd_sets = SD(fname).datasets()
        # if 'Height' in sd_sets:
        #     valid_shape = sd_sets['Height'][1]
        for name in sd_sets:
            # if sd_sets[name][1] == valid_shape:
            found.add(name)
    return found
示例3: read_rrc
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def read_rrc(inpath):
    """Read Rayleigh-corrected reflectance (rrc) data m*n from an HDF file.

    Band selection depends on the mission inferred from the first letter of
    the file name:
      'A'/'T': MODIS (b1-5; b13-16 Rrc)
      'V': VIIRS (Rrc_443-862, Rrc_1238)
      anything else: every dataset in the file.
    Returns an (m, n, n_bands) float64 array.
    """
    hdf = SD(inpath, SDC.READ)
    #dts = sorted(hdf.datasets().keys())
    modis_key = ['CorrRefl_01','CorrRefl_02','CorrRefl_03','CorrRefl_04','CorrRefl_05',
                 'CorrRefl_13','CorrRefl_14','CorrRefl_15','CorrRefl_16']
    viirs_key = ['Rrc_443','Rrc_486','Rrc_551','Rrc_671','Rrc_745','Rrc_862','Rrc_1238']
    mission = os.path.basename(inpath)[0]
    if mission == 'A' or mission == 'T':
        keys = modis_key
    elif mission == 'V':
        keys = viirs_key
    else:
        keys = hdf.datasets().keys()
    rrc = None
    for i, dt in enumerate(keys):
        print(i, dt)
        band = hdf.select(dt)[:, :]
        if rrc is None:
            # allocate the output cube once, sized from the first band;
            # np.float was removed in NumPy 1.24 -- the builtin float is float64
            rrc = np.zeros((band.shape[0], band.shape[1], len(keys)), dtype=float)
        rrc[:, :, i] = band
    hdf.end()
    print(rrc.shape)
    return rrc
示例4: load
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def load(self, fldname, **kwargs):
    """Load Cali Current fields ('chl' or 'sst') for a given day.

    Reads the mapped HDF file for the current yr/yd (downloading it first if
    missing), decodes the 8-bit counts and stores the scaled field as an
    attribute named after fldname, cropped to [j1:j2, i1:i2].
    """
    self._timeparams(**kwargs)
    if fldname == 'chl':
        filename = "/C%04i%03i_chl_mapped.hdf" % (self.yr, self.yd)
        #ncfieldname = 'chl_%04i_%03i' % (yr,yd)
        def scale(PV):
            # decode log-scaled chlorophyll counts
            return 10 ** (PV * 0.015 - 2)
    elif fldname == 'sst':
        filename = "/M%04i%03i_sst_mapped.hdf" % (self.yr, self.yd)
        #ncfieldname = 'sst_%04i_%03i' % (yr,yd)
        def scale(PV):
            # decode linearly scaled SST counts
            return PV * 0.15000001 - 3
    if not os.path.isfile(self.datadir + filename):
        # print() form works on both Python 2 and 3
        print("Downloading " + filename)
        self.download(fldname, self.jd)
    h = SD(self.datadir + filename, SDC.READ)
    # the file holds a single dataset; .keys()[0] is not valid on Python 3
    # (dict views are not subscriptable), next(iter(...)) works on both
    ncfieldname = next(iter(h.datasets()))
    fld = h.select(ncfieldname)
    # np.float was removed in NumPy 1.24; builtin float maps to float64
    PV = fld[:].astype(float)
    # counts were read as signed 8-bit; shift negatives back into 0-255
    PV[PV < 0] = PV[PV < 0] + 256
    # 0 and 255 are fill/invalid values
    PV[PV == 0] = np.nan
    PV[PV == 255] = np.nan
    setattr(self, fldname, scale(PV)[self.j1:self.j2, self.i1:self.i2])
示例5: export_multi_fluid_LFM
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def export_multi_fluid_LFM(argv):
    """Split a multi-timestep LFM HDF file into per-timestep exports.

    argv: [input_filename, output_filename_prefix, step(optional)].
    When a step is given, only the 'time_step_<step>' dataset is exported;
    otherwise every 'time_step_*' dataset is exported.
    """
    if (len(argv) >= 2):
        input_filename = argv[0]
        output_filename = argv[1]
        print(input_filename)
        sd = SD(input_filename, SDC.READ)
        grid = get_corners(sd)
        timesteps = 0
        # step = 1640000
        for key in sd.datasets().keys():
            # only dataset names that literally start with 'time_step'
            shift = key.find('time_step')
            if shift == 0:
                if len(argv) == 3:
                    step = argv[2]
                    if key == 'time_step_' + str(step):
                        export_timestep(sd, output_filename, key, grid)
                else:
                    export_timestep(sd, output_filename, key, grid)
                    # NOTE(review): the counter is only updated when no step
                    # filter is given, so a filtered run reports 0 timesteps
                    # -- confirm whether that is intended
                    timesteps += 1
        print('timesteps found in file:', timesteps)
    else:
        print('usage: python lfm_split.py input_multi_timestep_hdf output_filename_prefix step(optional)')
示例6: main
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def main():
    """Convert TRMM HDF4 fields to RPN files, one output file per input."""
    varname_to_rpn_name = {
        "precipitation": "PR",
        "relativeError": "RERR",
    }
    target_dir = "/skynet3_rech1/huziy/from_hdf4"
    source_dir = "/st1_fs2/winger/Validation/TRMM/HDF_format"
    for f_name in os.listdir(source_dir):
        # skip anything that is not an HDF file
        if not f_name.endswith("HDF"):
            continue
        source_path = os.path.join(source_dir, f_name)
        ds = SD(source_path)
        print(ds.datasets())
        r_obj = RPN(os.path.join(target_dir, f_name + ".rpn"), mode="w")
        for hdf_name, rpn_name in varname_to_rpn_name.items():
            # first index selects the single time slice of the 3-D dataset
            field = ds.select(hdf_name)[0, :, :]
            r_obj.write_2D_field(
                name=rpn_name,
                data=field, label=hdf_name, grid_type="L",
                ig=[25, 25, 4013, 18012])
        r_obj.close()
示例7: print_dataset_2A12
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def print_dataset_2A12(*arg):
    """Print the names of all SDS datasets in a TRMM granule.

    arg[0]: directory/prefix, arg[1]: granule suffix.
    NOTE(review): despite the 2A12 name this opens a '1B01.' file --
    confirm the product prefix is intended.
    """
    FILE_NAME = arg[0] + '1B01.' + arg[1]
    hdf = SD(FILE_NAME, SDC.READ)
    # list available SDS datasets
    for ds in hdf.datasets():
        print(ds)
示例8: __init__
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def __init__(self, filename, filename_info, filetype_info):
    """Open the HDF4 file and collect global attributes and per-dataset metadata."""
    super(HDF4FileHandler, self).__init__(filename, filename_info, filetype_info)
    # flat store of collected file content, filled by the collect helpers
    self.file_content = {}
    file_handle = SD(self.filename, SDC.READ)
    # collect global file attributes (empty prefix)
    self._collect_attrs('', file_handle.attributes())
    # collect metadata for every SDS dataset in the file
    for k, v in file_handle.datasets().items():
        self.collect_metadata(k, file_handle.select(k))
    # NOTE(review): relies on refcounting to close the SD file; an explicit
    # file_handle.end() may be safer, but confirm collect_metadata does not
    # keep lazy references into the open file first
    del file_handle
示例9: print_dataset_1C21
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def print_dataset_1C21(*arg):
    """Print the names of all SDS datasets in a TRMM 1C21 granule.

    arg[0]: directory/prefix, arg[1]: granule suffix.
    """
    FILE_NAME = arg[0] + '1C21.' + arg[1]
    print(FILE_NAME)
    hdf = SD(FILE_NAME, SDC.READ)
    # list available SDS datasets
    for ds in hdf.datasets():
        print(ds)
示例10: landmask
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def landmask(self):
    """Lazily download and load the land mask; True where the value is -1 (land)."""
    if not hasattr(self, "_landmask"):
        filename = os.path.basename(self.landurl)
        # fetch the mask file on first use
        # NOTE(review): urllib.urlretrieve is Python 2 only; on Python 3 this
        # would need urllib.request.urlretrieve
        if not os.path.isfile(self.datadir + filename):
            urllib.urlretrieve(self.dataurl + self.landurl,
                               self.datadir + filename)
        h = SD(self.datadir + filename, SDC.READ)
        # the file holds a single dataset; take its name
        # NOTE(review): .keys()[0] is Python 2 only -- dict views are not
        # subscriptable on Python 3
        ncfieldname = h.datasets().keys()[0]
        self._landmask = h.select(ncfieldname)[:] == -1
    return self._landmask
示例11: open_file
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def open_file(filename):
    """Open an HDF4 file and return (first SDS object, global attributes dict)."""
    A = SD(filename)
    # retrieve data SDS
    d = A.datasets()
    # name of the first sds; d.keys()[0] fails on Python 3 because dict
    # views are not subscriptable -- next(iter(...)) works on both 2 and 3
    sds_name = next(iter(d))
    sds = A.select(sds_name)
    pin = A.attributes()
    return sds, pin
示例12: get_variable_names
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def get_variable_names(self, filenames, data_type=None):
    """Return names of variables whose shape matches the file's 3-D midpoint grid."""
    try:
        from pyhdf.SD import SD
    except ImportError:
        raise ImportError("HDF support was not installed, please reinstall with pyhdf to read HDF files.")
    # Determine the valid shape for variables from the first file's
    # midpoint axes (dataset info[1] is the shape tuple)
    reference = SD(filenames[0]).datasets()
    valid_shape = (reference['Latitude_Midpoint'][1][1],
                   reference['Longitude_Midpoint'][1][1],
                   reference['Altitude_Midpoint'][1][1])
    matching = set()
    for fname in filenames:
        for name, info in SD(fname).datasets().items():
            if info[1] == valid_shape:
                matching.add(name)
    return matching
示例13: load_vision
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def load_vision(filename, var=None, T=1):
    """Load a Vision log file and return its content in a dict.

    filename: path to the HDF4 log file.
    var: None to load every non-timestamp signal, a single signal name, or
         an iterable of names; names match by substring against dataset keys.
    T: sample period of the common time axis the signals are interpolated to.

    Returns {'t': new_time, <signal name>: interpolated samples, ...}.
    """
    assert exists(filename), 'Invalid filename.'
    f = SD(filename, SDC.READ)
    # New time axis, spanning up to the last timestamp of group 0
    end = ceil(f.select('ts_group_0').get()[-1])
    new_time = np.arange(0, end, T)
    # Initialize dict
    req_data = {'t': new_time}
    # Loop over variable list and loaded signals to search for matches
    if not var:
        # everything except the timestamp ('ts_*') datasets
        req_data.update({key.split('.')[-1]: _select_interp(new_time, f, key)
                         for key in f.datasets().keys()
                         if not key.startswith('ts_')})
    elif isinstance(var, str):
        # basestring is Python 2 only and raises NameError on Python 3;
        # str covers both cases there
        first_match = next((key for key in f.datasets().keys() if var in key),
                           None)
        req_data.update({var: _select_interp(new_time, f, first_match)})
    else:
        # pair each requested signal with the first matching dataset key
        first_match = zip(var,
                          [next((key for key in f.datasets().keys()
                                 if sig in key), None)
                           for sig in var])
        req_data.update({sig: _select_interp(new_time, f, key)
                         for sig, key in first_match})
    f.end()
    return req_data
示例14: get_variable_names
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def get_variable_names(self, filenames, data_type=None):
    """Return names of variables on the ("YDim:mod08", "XDim:mod08") grid."""
    try:
        from pyhdf.SD import SD
    except ImportError:
        raise ImportError("HDF support was not installed, please reinstall with pyhdf to read HDF files.")
    variables = set()
    for filename in filenames:
        sd = SD(filename)
        # dict.iteritems() is Python 2 only; items() iterates the same
        # pairs on both 2 and 3
        for var_name, var_info in sd.datasets().items():
            # Check that the dimensions are correct
            # (var_info[0] is the tuple of dimension names)
            if var_info[0] == ("YDim:mod08", "XDim:mod08"):
                variables.add(var_name)
    return variables
示例15: parseMetadata
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import datasets [as 别名]
def parseMetadata(self, filepath):
    """Extract space/time metadata and variable names from a matching HDF file.

    Returns a metadata dict (left empty if the file name does not match
    FILENAME_PATTERN), populated via storeMetadata with the per-pixel
    longitudes, latitudes, UTC datetimes and the variable name list.
    """
    metadata = {}
    dir, filename = os.path.split(filepath)
    if re.match(FILENAME_PATTERN, filename):
        logging.info("Parsing HDF file=%s" % filepath)
        # open HDF file
        try:
            hdfFile = SD(filepath, SDC.READ)
        except HDF4Error as e:
            logging.info(e)
            raise e
        # variables
        variables = hdfFile.datasets().keys()
        # time fields (2-D arrays indexed [scan, position])
        year = hdfFile.select('Year')[:]
        month = hdfFile.select('Month')[:]
        day = hdfFile.select('Day')[:]
        hour = hdfFile.select('Hour')[:]
        minute = hdfFile.select('Minute')[:]
        second = hdfFile.select('Seconds')[:]
        # space fields
        lon = hdfFile.select('Longitude')[:]
        lat = hdfFile.select('Latitude')[:]
        datetimes = []
        lats = []
        lons = []
        # assumes a fixed 22x15 scan grid -- TODO confirm against the product spec
        for t in range(22):
            for x in range(15):
                # -9999 marks fill/invalid scan positions
                if year[t,x] != -9999:
                    datetimes.append( dt.datetime(year[t,x],month[t,x],day[t,x],hour[t,x],minute[t,x],second[t,x], tzinfo=tzutc()) )
                    lons.append( lon[t,x] )
                    lats.append( lat[t,x] )
        # store metadata values
        storeMetadata(metadata, np.asarray(lons), np.asarray(lats), np.asarray(datetimes), variables)
        # close HDF file
        hdfFile.end()
    return metadata