本文整理汇总了Python中pyhdf.SD.SD.close方法的典型用法代码示例。如果您正苦于以下问题:Python SD.close方法的具体用法?Python SD.close怎么用?Python SD.close使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyhdf.SD.SD的用法示例。
在下文中一共展示了SD.close方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: read_amsr_hdf4
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import close [as 别名]
def read_amsr_hdf4(filename):
    """Read an AMSR HDF4 file and return a populated AmsrObject.

    Reads the 2-D SDS datasets Longitude, Latitude and High_res_cloud
    via the SD interface, and the 1-D "Time" Vdata via the VS interface.

    Parameters
    ----------
    filename : str
        Path to the AMSR HDF4 file.

    Returns
    -------
    AmsrObject
        With all_arrays entries: "longitude", "latitude",
        "lwp_mm" (scaled liquid water path) and "sec1993" (raw Time Vdata).
    """
    from pyhdf.SD import SD, SDC
    from pyhdf.HDF import HDF, HC
    import pyhdf.VS
    retv = AmsrObject()

    # --- 2-D datasets via the SD (scientific dataset) interface ---
    sd_file = SD(filename, SDC.READ)
    for sds in ["Longitude", "Latitude", "High_res_cloud"]:
        data = sd_file.select(sds).get()
        if sds in ["Longitude", "Latitude"]:
            retv.all_arrays[sds.lower()] = data.ravel()
        elif sds in ["High_res_cloud"]:
            # 'Scale' attribute converts raw counts to liquid water path.
            lwp_gain = sd_file.select(sds).attributes()['Scale']
            retv.all_arrays["lwp_mm"] = data.ravel() * lwp_gain
    # BUG FIX: the SD handle was previously leaked (rebound to the HDF
    # handle without being closed). SD's close method is end().
    sd_file.end()

    # --- 1-D compound (Vdata) records via the VS interface ---
    # BUG FIX: open with HC.READ (HDF constant), not SDC.READ.
    h4file = HDF(filename, HC.READ)
    vs = h4file.vstart()
    for item in vs.vdatainfo():
        name = item[0]  # Vdata name is the first tuple element
        if name in ["Time"]:
            data_handle = vs.attach(name)
            # Time is seconds since 1993-01-01 (TAI93) — stored as-is.
            retv.all_arrays["sec1993"] = np.array(data_handle[:])
            data_handle.detach()
    # BUG FIX: terminate the VS interface before closing the file.
    vs.end()
    h4file.close()
    return retv
示例2: read_cloudsat_hdf4
# 需要导入模块: from pyhdf.SD import SD [as 别名]
# 或者: from pyhdf.SD.SD import close [as 别名]
def read_cloudsat_hdf4(filename):
    """Read a CloudSat HDF4 file and return a populated CloudsatObject.

    2-D SDS datasets are read via the SD interface; 1-D compound Vdata
    records via the VS interface. Only fields whose converted name
    (clsat_name_conversion) already exists in retv.all_arrays are kept,
    to save RAM. Finally converts TAI-1993 profile times to seconds
    since 1970 (UTC) in retv.sec_1970.

    Parameters
    ----------
    filename : str
        Path to the CloudSat HDF4 granule.

    Returns
    -------
    CloudsatObject

    Raises
    ------
    MatchupError
        If a Vdata field carries a non-default scale factor/offset
        (scaling support is deliberately not implemented yet).
    """
    from pyhdf.SD import SD, SDC
    from pyhdf.HDF import HDF, HC
    import pyhdf.VS

    def convert_data(data):
        # Squeeze a degenerate 2-D (n,1) or (1,n) array down to 1-D.
        if len(data.shape) == 2:
            if data.shape[1] == 1:
                return data[:, 0]
            elif data.shape[0] == 1:
                return data[0, :]
        return data

    retv = CloudsatObject()

    # --- 2-D datasets via the SD interface ---
    sd_file = SD(filename, SDC.READ)
    for sds in sd_file.datasets().keys():
        data = sd_file.select(sds).get()
        am_name = clsat_name_conversion(sds, retv)
        if am_name in retv.all_arrays.keys():
            retv.all_arrays[am_name] = convert_data(data)
    # BUG FIX: the SD handle was previously leaked (rebound to the HDF
    # handle without being closed). SD's close method is end().
    sd_file.end()

    # --- 1-D compound (Vdata) records via the VS interface ---
    # BUG FIX: open with HC.READ (HDF constant), not SDC.READ.
    h4file = HDF(filename, HC.READ)
    vs = h4file.vstart()
    for item in vs.vdatainfo():
        name = item[0]  # Vdata name is the first tuple element
        data_handle = vs.attach(name)
        data = np.array(data_handle[:])
        factor = data_handle.findattr('factor')
        offset = data_handle.findattr('offset')
        am_name = clsat_name_conversion(name, retv)
        if am_name in retv.all_arrays.keys():
            # To save RAM and disk, only read what we use!
            # BUG FIX: np.float was removed in NumPy 1.24 — use float().
            if factor is None and offset is None:
                retv.all_arrays[am_name] = convert_data(data)
            elif float(factor.get()) == 1.0 and float(offset.get()) == 0.0:
                retv.all_arrays[am_name] = convert_data(data)
            else:
                # Scaling is not implemented; fail loudly rather than
                # silently return unscaled data.
                raise MatchupError("Not default offset and factor. Fix code")
        data_handle.detach()
    # BUG FIX: terminate the VS interface before closing the file.
    vs.end()
    h4file.close()

    # Convert from TAI time (seconds since 1993-01-01) to UTC seconds
    # since 1970: mktime of the epoch minus the local timezone offset.
    dsec = time.mktime((1993, 1, 1, 0, 0, 0, 0, 0, 0)) - time.timezone
    retv.sec_1970 = retv.Profile_time.ravel() + retv.TAI_start + dsec
    return retv