This article collects typical usage examples of the Python method geodata.netcdf.DatasetNetCDF.hasVariable. If you have been wondering what exactly DatasetNetCDF.hasVariable does, how to call it, or where to find worked examples, the curated code samples here should help. You can also explore further usage examples of the containing class, geodata.netcdf.DatasetNetCDF.
Three code examples of the DatasetNetCDF.hasVariable method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
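Before the three full examples, here is a minimal sketch of the pattern they all rely on: open a dataset and query a variable by name with hasVariable before using it. The folder, file name, and variable name below are hypothetical placeholders, not part of the original examples.
# Minimal sketch of the DatasetNetCDF.hasVariable pattern (placeholder names throughout)
from geodata.netcdf import DatasetNetCDF

dataset = DatasetNetCDF(name='example', folder='/data/example/',  # hypothetical folder
                        filelist=['example_monthly.nc'])  # hypothetical file
if dataset.hasVariable('precip'):  # look up a variable by its name string
    print(dataset.precip)  # attribute access to the variable, as in the examples below
else:
    print("variable 'precip' is not in the dataset")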
Example 1: loadCFSR_TS
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or alternatively: from geodata.netcdf.DatasetNetCDF import hasVariable [as alias]
def loadCFSR_TS(name=dataset_name, grid=None, varlist=None, varatts=None, resolution='hires',
                filelist=None, folder=None, lautoregrid=None):
    ''' Get a properly formatted CFSR dataset with monthly mean time-series. '''
    # N.B.: this excerpt assumes the surrounding module provides numpy as np, Axis,
    #       addGDALtoDataset, translateVarNames, loadObservations, checkGridRes, and the
    #       module-level constants (orig_ts_folder, tsvaratts, hiresfiles, grid_folder, etc.)
    if grid is None:
        # load from original time-series files
        if folder is None: folder = orig_ts_folder
        # translate varlist
        if varatts is None: varatts = tsvaratts.copy()
        if varlist is None:
            if resolution in ('hires', '03', '031'): varlist = varlist_hires
            elif resolution in ('lowres', '05'): varlist = varlist_lowres
        if varlist and varatts: varlist = translateVarNames(varlist, varatts)
        if filelist is None: # generate default filelist
            if resolution in ('hires', '03', '031'):
                files = [hiresfiles[var] for var in varlist if var in hiresfiles]
            elif resolution in ('lowres', '05'):
                files = [lowresfiles[var] for var in varlist if var in lowresfiles]
        else: # otherwise 'files' would be undefined below
            files = filelist
        # load dataset
        dataset = DatasetNetCDF(name=name, folder=folder, filelist=files, varlist=varlist, varatts=varatts,
                                check_override=['time'], multifile=False, ncformat='NETCDF4_CLASSIC')
        # load static data
        files = [] # default static files are only added when no explicit filelist was given
        if filelist is None: # generate default filelist
            if resolution in ('hires', '03', '031'):
                files = [hiresstatic[var] for var in varlist if var in hiresstatic]
            elif resolution in ('lowres', '05'):
                files = [lowresstatic[var] for var in varlist if var in lowresstatic]
        # load constants, if any (with a singleton time axis)
        if len(files) > 0:
            staticdata = DatasetNetCDF(name=name, folder=folder, filelist=files, varlist=varlist, varatts=varatts,
                                       axes=dict(lon=dataset.lon, lat=dataset.lat), multifile=False,
                                       check_override=['time'], ncformat='NETCDF4_CLASSIC')
            # N.B.: the axes have to be overridden so that the two datasets are consistent
            if len(staticdata.variables) > 0:
                for var in staticdata.variables.values():
                    if not dataset.hasVariable(var.name):
                        var.squeeze() # remove the singleton time dimension
                        dataset.addVariable(var, copy=False) # no need to copy... but we can't write to the NetCDF file!
        # replace the time axis with the number of months since Jan 1979
        data = np.arange(0, len(dataset.time), 1, dtype='int16') # months since 1979 (Jan 1979 == 0)
        timeAxis = Axis(name='time', units='month', coord=data, atts=dict(long_name='Month since 1979-01'))
        dataset.replaceAxis(dataset.time, timeAxis, asNC=False, deepcopy=False)
        # add projection
        dataset = addGDALtoDataset(dataset, projection=None, geotransform=None, gridfolder=grid_folder)
        # N.B.: the projection should be auto-detected as geographic
    else:
        # load from neatly formatted and regridded time-series files
        if folder is None: folder = avgfolder
        grid, resolution = checkGridRes(grid, resolution)
        dataset = loadObservations(name=name, folder=folder, projection=None, resolution=resolution, grid=grid,
                                   period=None, varlist=varlist, varatts=varatts, filepattern=tsfile,
                                   filelist=filelist, lautoregrid=lautoregrid, mode='time-series')
    # return formatted dataset
    return dataset
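A hypothetical invocation of the loader above, for illustration only: with grid=None (the default) it assembles the original time-series files, and 'lowres' selects the same file set as '05' (presumably denoting 0.5 degrees).
# Hypothetical usage of loadCFSR_TS on the native grid with the low-resolution file set
cfsr = loadCFSR_TS(resolution='lowres')  # grid=None -> original time-series files
if cfsr.hasVariable('landmask'):  # variable name borrowed from Example 3; may not be present
    print(cfsr.landmask)
print(cfsr)  # summary of the formatted dataset (axes, variables, projection)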
Example 2: computeClimatology
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or alternatively: from geodata.netcdf.DatasetNetCDF import hasVariable [as alias]
#......... part of the code is omitted here .........
assert os.path.exists(expfolder)
filepath = expfolder + filename
tmpfilepath = expfolder + tmpfilename
lskip = False # else just go ahead
if os.path.exists(filepath):
    if not loverwrite:
        age = datetime.fromtimestamp(os.path.getmtime(filepath))
        # if the sink file is newer than the source file, skip (do not recompute)
        if age > sourceage and os.path.getsize(filepath) > 1e6: lskip = True
        # N.B.: NetCDF files smaller than 1 MB are usually incomplete header fragments from a previous crash
        #print sourceage, age
    if not lskip: os.remove(filepath)
# depending on the file's last modification time and the overwrite setting, start the computation or skip
if lskip:
    # print message
    skipmsg = "\n{:s} >>> Skipping: file '{:s}' in dataset '{:s}' already exists and is newer than the source file.".format(pidstr, filename, dataset_name)
    skipmsg += "\n{:s} >>> ('{:s}')\n".format(pidstr, filepath)
    logger.info(skipmsg)
else:
    ## begin actual computation
    beginmsg = "\n{:s} <<< Computing '{:s}' (d{:02d}) Climatology from {:s}".format(
        pidstr, dataset_name, domain, periodstr)
    if griddef is None: beginmsg += " >>> \n"
    else: beginmsg += " ('{:s}' grid) >>> \n".format(griddef.name)
    logger.info(beginmsg)
    ## actually load datasets
    if source is None:
        source = loadWRF_TS(experiment=experiment, filetypes=[filetype], domains=domain) # comes out as a tuple...
    if not lparallel and ldebug: logger.info('\n' + str(source) + '\n')
    # prepare sink
    if os.path.exists(tmpfilepath): os.remove(tmpfilepath) # remove old temp files
    sink = DatasetNetCDF(name='WRF Climatology', folder=expfolder, filelist=[tmpfilename], atts=source.atts.copy(), mode='w')
    sink.atts.period = periodstr
    # initialize processing
    lregrid = griddef is not None # temporary storage is only needed for regridding
    CPU = CentralProcessingUnit(source, sink, varlist=varlist, tmp=lregrid, feedback=ldebug) # no need for lat/lon
    # start processing climatology
    if shift != 0:
        logger.info('{0:s} (shifting climatology by {1:d} months, to start with January) \n'.format(pidstr, shift))
    CPU.Climatology(period=period, offset=offset, shift=shift, flush=False)
    # N.B.: immediate flushing should not be necessary for climatologies, since they are much smaller!
    # reproject and resample (regrid) the dataset
    if lregrid:
        CPU.Regrid(griddef=griddef, flush=True)
        logger.info('{0:s} --- {1:s} --- \n'.format(pidstr, str(griddef.geotransform)))
    # sync temporary storage with the output dataset (sink)
    CPU.sync(flush=True)
    # add Geopotential Height Variance
    if 'GHT_Var' in sink and 'Z_var' not in sink:
        data_array = (sink['GHT_Var'].data_array - sink['Z'].data_array**2)**0.5
        atts = dict(name='Z_var', units='m', long_name='Square Root of Geopotential Height Variance')
        sink += Variable(axes=sink['Z'].axes, data=data_array, atts=atts)
    # add (relative) Vorticity Variance
    if 'Vorticity_Var' in sink and 'zeta_var' not in sink:
        data_array = (sink['Vorticity_Var'].data_array - sink['zeta'].data_array**2)**0.5
        atts = dict(name='zeta_var', units='1/s', long_name='Square Root of Relative Vorticity Variance')
        sink += Variable(axes=sink['zeta'].axes, data=data_array, atts=atts)
    # add names and lengths of months
    sink.axisAnnotation('name_of_month', name_of_month, 'time',
                        atts=dict(name='name_of_month', units='', long_name='Name of the Month'))
    if not sink.hasVariable('length_of_month'):
        sink += Variable(name='length_of_month', units='days', axes=(sink.time,), data=days_per_month,
                         atts=dict(name='length_of_month', units='days', long_name='Length of Month'))
    # close... and write results to file
    sink.sync()
    sink.close()
    writemsg = "\n{:s} >>> Writing to file '{:s}' in dataset {:s}".format(pidstr, filename, dataset_name)
    writemsg += "\n{:s} >>> ('{:s}')\n".format(pidstr, filepath)
    logger.info(writemsg)
    # rename the file to its proper name
    if os.path.exists(filepath): os.remove(filepath) # remove old file
    os.rename(tmpfilepath, filepath) # this will overwrite the old file
    # print dataset
    if not lparallel and ldebug:
        logger.info('\n' + str(sink) + '\n')
    # clean up (not sure if this is necessary, but there seems to be a memory leak...)
    del sink, CPU; gc.collect() # get rid of these objects immediately
# clean up and return
if source is not None: source.unload(); del source
# N.B.: the source is only loaded once for all periods
# N.B.: garbage is also collected in the multi-processing wrapper
# return
return 0 # so far there is no measure of success, hence: no crash counts as success...
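Both '_Var' blocks in Example 2 recover a standard deviation from the moment identity Var[X] = E[X**2] - (E[X])**2: the '_Var' source fields evidently store the climatological mean of the squared quantity, and the squared mean field is subtracted before taking the square root. A self-contained numpy check of that identity, independent of the GeoPy classes:
import numpy as np

# Verify the identity used for Z_var and zeta_var: std(x) == sqrt(mean(x**2) - mean(x)**2)
rng = np.random.default_rng(0)
x = rng.normal(loc=5.0, scale=2.0, size=100_000)  # arbitrary synthetic sample
std_from_moments = np.sqrt(np.mean(x**2) - np.mean(x)**2)
assert np.isclose(std_from_moments, x.std())  # matches the population std (ddof=0)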
Example 3: CentralProcessingUnit
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or alternatively: from geodata.netcdf.DatasetNetCDF import hasVariable [as alias]
# determine averaging interval
offset = source.time.getIndex(period[0]-1979)/12 # origin of the monthly time-series is January 1979
# initialize processing
CPU = CentralProcessingUnit(source, sink, tmp=True)
# start processing climatology
CPU.Climatology(period=period[1]-period[0], offset=offset, flush=False)
# shift the longitude axis by 180 degrees to the left (i.e. 0..360 -> -180..180)
CPU.Shift(lon=-180, flush=False)
# sync temporary storage with the output (sink) variables; do not flush!
CPU.sync(flush=False)
# make new masks
if sink.hasVariable('landmask'):
    sink.mask(sink.landmask, maskSelf=False, varlist=['snow','snowh','zs'], invert=True, merge=False)
# add names and lengths of months
sink.axisAnnotation('name_of_month', name_of_month, 'time',
                    atts=dict(name='name_of_month', units='', long_name='Name of the Month'))
#print ' === month === '
#sink += VarNC(sink.dataset, name='length_of_month', units='days', axes=(sink.time,), data=days_per_month,
#              atts=dict(name='length_of_month', units='days', long_name='Length of Month'))
# close...
sink.sync()
sink.close()
# print dataset
print('')
print(sink)
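The CPU.Shift(lon=-180, ...) call above rolls a global grid from the 0..360 to the -180..180 longitude convention. Below is a standalone numpy sketch of the same index bookkeeping on a plain coordinate array and a dummy field; it is independent of the CentralProcessingUnit class and only illustrates the operation.
import numpy as np

# Roll a 0..360 longitude grid (and a field defined on it) to -180..180
lon = np.arange(0.0, 360.0, 2.5)  # 144 points: 0.0 .. 357.5
field = np.cos(np.deg2rad(lon))[None, :]  # dummy field with a trailing lon axis
split = np.searchsorted(lon, 180.0)  # first index at or beyond the date line
lon_shifted = np.concatenate([lon[split:] - 360.0, lon[:split]])
field_shifted = np.roll(field, field.shape[-1] - split, axis=-1)
assert lon_shifted[0] == -180.0 and lon_shifted[-1] == 177.5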