This page collects typical usage examples of the Python method geodata.netcdf.DatasetNetCDF.close. If you are unsure what DatasetNetCDF.close does or how to use it, the curated code samples below may help; you can also explore the containing class, geodata.netcdf.DatasetNetCDF, further.
The following shows 3 code examples of the DatasetNetCDF.close method, sorted by popularity by default.
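To set the stage, here is a minimal sketch of the open/close life cycle that all three examples rely on. The folder and file names are made up for illustration, and the keyword arguments are inferred from the examples below rather than taken from the package documentation:

from geodata.netcdf import DatasetNetCDF

# open an existing NetCDF dataset (hypothetical folder and file name)
dataset = DatasetNetCDF(folder='/data/avg/', filelist=['example_clim_1979-1989.nc'], mode='r')
print(dataset)   # inspect variables and attributes
dataset.close()  # release the underlying netCDF4 file handle(s)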
Example 1: computeClimatology
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import close [as alias]
def computeClimatology(experiment, filetype, domain, periods=None, offset=0, griddef=None, varlist=None,
                       ldebug=False, loverwrite=False, lparallel=False, pidstr='', logger=None):
  ''' worker function to compute climatologies for given file parameters. '''
  # input type checks
  if not isinstance(experiment,Exp): raise TypeError
  if not isinstance(filetype,basestring): raise TypeError
  if not isinstance(domain,(np.integer,int)): raise TypeError
  if periods is not None and not (isinstance(periods,(tuple,list)) and isInt(periods)): raise TypeError
  if not isinstance(offset,(np.integer,int)): raise TypeError
  if not isinstance(loverwrite,(bool,np.bool)): raise TypeError
  if griddef is not None and not isinstance(griddef,GridDefinition): raise TypeError
  #if pidstr == '[proc01]': raise TypeError # to test error handling
  # load source
  dataset_name = experiment.name
  fileclass = fileclasses[filetype] # used for target file name
  tsfile = fileclass.tsfile.format(domain,'')
  expfolder = experiment.avgfolder
  filepath = '{:s}/{:s}'.format(expfolder, tsfile)
  logger.info('\n\n{0:s} *** Processing Experiment {1:<15s} *** '.format(pidstr,"'{:s}'".format(dataset_name)) +
              '\n{0:s} *** {1:^37s} *** \n'.format(pidstr,"'{:s}'".format(tsfile)))
  # check file and read begin/end dates
  if not os.path.exists(filepath):
    #raise IOError("Source file '{:s}' does not exist!".format(filepath))
    # print message and skip
    skipmsg  = "\n{:s} >>> File '{:s}' in dataset '{:s}' is missing --- skipping!".format(pidstr,tsfile,dataset_name)
    skipmsg += "\n{:s} >>> ('{:s}')\n".format(pidstr,filepath)
    logger.warning(skipmsg)
    # N.B.: this can cause a lot of error messages, when not all files are present
  else: # if monthly source file exists
    import netCDF4 as nc
    ncfile = nc.Dataset(filepath,mode='r')
    begintuple = ncfile.begin_date.split('-')
    endtuple = ncfile.end_date.split('-')
    ncfile.close() # close the plain netCDF4 file again
    # N.B.: at this point we don't want to initialize a full GDAL-enabled dataset, since we don't
    #       even know if we need it, and it creates a lot of overhead
    # determine age of source file
    if not loverwrite: sourceage = datetime.fromtimestamp(os.path.getmtime(filepath))
    # figure out start date
    filebegin = int(begintuple[0]) # first element is the year
    fileend = int(endtuple[0]) # first element is the year
    begindate = offset + filebegin
    if not ( filebegin <= begindate <= fileend ): raise DateError
    # handle cases where the first month in the record is not January
    firstmonth = int(begintuple[1]) # second element is the month
    shift = firstmonth - 1 # will be zero for January (01)
    ## loop over periods
    if periods is None: periods = [fileend - begindate + 1] # default: one period spanning the entire record
    #periods.sort(reverse=True) # reverse, so that largest chunk is done first
    source = None # will later be assigned to the source dataset
    for period in periods:
      # figure out period
      enddate = begindate + period
      if filebegin > enddate: raise DateError('End date earlier than begin date.')
      if enddate-1 > fileend: # if filebegin is 1979 and the simulation is 10 years, fileend will be 1988, not 1989!
        # if end date is not available, skip period
        endmsg  = "\n{:s} --- Invalid Period for '{:s}': End Date {:4d} not in File! --- \n".format(pidstr,dataset_name,enddate)
        endmsg += "{:s} --- ('{:s}')\n".format(pidstr,filepath)
        logger.info(endmsg)
      else: ## perform averaging for selected period
        # determine if sink file already exists, and what to do about it
        periodstr = '{0:4d}-{1:4d}'.format(begindate,enddate)
        gridstr = '' if griddef is None or griddef.name == 'WRF' else '_'+griddef.name
        filename = fileclass.climfile.format(domain,gridstr,'_'+periodstr)
        if ldebug: filename = 'test_' + filename
        if lparallel: tmppfx = 'tmp_wrfavg_{:s}_'.format(pidstr[1:-1])
        else: tmppfx = 'tmp_wrfavg_' # no process ID to insert in serial mode
        tmpfilename = tmppfx + filename
        assert os.path.exists(expfolder)
        filepath = expfolder + filename
        tmpfilepath = expfolder + tmpfilename
        lskip = False # else just go ahead
        if os.path.exists(filepath):
          if not loverwrite:
            age = datetime.fromtimestamp(os.path.getmtime(filepath))
            # if sink file is newer than source file, skip (do not recompute)
            if age > sourceage and os.path.getsize(filepath) > 1e6: lskip = True
            # N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crash
            #print sourceage, age
          if not lskip: os.remove(filepath)
        # depending on last modification time of file or overwrite setting, start computation, or skip
        if lskip:
          # print message
          skipmsg  = "\n{:s} >>> Skipping: file '{:s}' in dataset '{:s}' already exists and is newer than source file.".format(pidstr,filename,dataset_name)
          skipmsg += "\n{:s} >>> ('{:s}')\n".format(pidstr,filepath)
          logger.info(skipmsg)
        else:
          ## begin actual computation
          beginmsg = "\n{:s} <<< Computing '{:s}' (d{:02d}) Climatology from {:s}".format(
#......... remainder of this example omitted .........
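The source page truncates Example 1 before the climatology sink is created and closed. Judging from the temporary-file pattern that Example 2 below shows in full, the omitted portion presumably ends by syncing the sink, closing it, and renaming the temporary file. The following is only a hedged sketch of that ending under this assumption, not the original code:

# hedged sketch of the omitted ending of computeClimatology (assumed, not original):
sink = DatasetNetCDF(folder=expfolder, filelist=[tmpfilename], mode='w')
# ... perform the actual monthly averaging into 'sink' ...
sink.sync(); sink.close(); del sink  # flush buffers and release the file handle
os.rename(tmpfilepath, filepath)     # rename only after close: an open file can't be renamed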
Example 2: performExtraction
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import close [as alias]
#......... beginning of this example omitted .........
  ## extract meta data from arguments
  module, dataargs, loadfct, filepath, datamsgstr = getMetaData(dataset, mode, dataargs)
  dataset_name = dataargs.dataset_name; periodstr = dataargs.periodstr; avgfolder = dataargs.avgfolder
  # load template dataset
  stndata = stnfct() # load station dataset from function
  if not isinstance(stndata, Dataset): raise TypeError
  # N.B.: the loading function is necessary, because DatasetNetCDF instances do not pickle well
  # determine age of source file
  if not loverwrite: sourceage = datetime.fromtimestamp(os.path.getmtime(filepath))
  # get filename for target dataset and do some checks
  filename = getTargetFile(stndata.name, dataset, mode, module, dataargs, lwrite)
  if ldebug: filename = 'test_' + filename
  if not os.path.exists(avgfolder): raise IOError("Dataset folder '{:s}' does not exist!".format(avgfolder))
  lskip = False # else just go ahead
  if lwrite:
    if lreturn:
      tmpfilename = filename # no temporary file if dataset is passed on (can't rename the file while it is open!)
    else:
      if lparallel: tmppfx = 'tmp_exstns_{:s}_'.format(pidstr[1:-1])
      else: tmppfx = 'tmp_exstns_' # no process ID to insert in serial mode
      tmpfilename = tmppfx + filename
    filepath = avgfolder + filename
    tmpfilepath = avgfolder + tmpfilename
    if os.path.exists(filepath):
      if not loverwrite:
        age = datetime.fromtimestamp(os.path.getmtime(filepath))
        # if source file is newer than sink file or if sink file is a stub, recompute, otherwise skip
        if age > sourceage and os.path.getsize(filepath) > 1e5: lskip = True
        # N.B.: NetCDF files smaller than 100kB are usually incomplete header fragments from a previous crash
      if not lskip: os.remove(filepath) # recompute
  # depending on last modification time of file or overwrite setting, start computation, or skip
  if lskip:
    # print message
    skipmsg  = "\n{:s} >>> Skipping: file '{:s}' in dataset '{:s}' already exists and is newer than source file.".format(pidstr,filename,dataset_name)
    skipmsg += "\n{:s} >>> ('{:s}')\n".format(pidstr,filepath)
    logger.info(skipmsg)
  else:
    ## actually load datasets
    source = loadfct() # load source
    # check period
    if 'period' in source.atts and dataargs.periodstr != source.atts.period: # a NetCDF attribute
      raise DateError("Specified period is inconsistent with netcdf records: '{:s}' != '{:s}'".format(periodstr,source.atts.period))
    # print message
    if lclim: opmsgstr = "Extracting '{:s}'-type Point Data from Climatology ({:s})".format(stndata.name, periodstr)
    elif lts: opmsgstr = "Extracting '{:s}'-type Point Data from Time-series".format(stndata.name)
    else: raise NotImplementedError("Unrecognized Mode: '{:s}'".format(mode))
    # print feedback to logger
    logger.info('\n{0:s} *** {1:^65s} *** \n{0:s} *** {2:^65s} *** \n'.format(pidstr,datamsgstr,opmsgstr))
    if not lparallel and ldebug: logger.info('\n'+str(source)+'\n')
    ## create new sink/target file
    # set attributes
    atts = source.atts.copy()
    atts['period'] = dataargs.periodstr if dataargs.periodstr else 'time-series'
    atts['name'] = dataset_name; atts['station'] = stndata.name
    atts['title'] = '{:s} (Stations) from {:s} {:s}'.format(stndata.title,dataset_name,mode.title())
    # make new dataset
    if lwrite: # write to NetCDF file
      if os.path.exists(tmpfilepath): os.remove(tmpfilepath) # remove old temp files
      sink = DatasetNetCDF(folder=avgfolder, filelist=[tmpfilename], atts=atts, mode='w')
    else: sink = Dataset(atts=atts) # only create dataset in memory
    # initialize processing
    CPU = CentralProcessingUnit(source, sink, varlist=varlist, tmp=False, feedback=ldebug)
    # extract data at station locations
    CPU.Extract(template=stndata, flush=True)
    # get results
    CPU.sync(flush=True)
    # print dataset
    if not lparallel and ldebug:
      logger.info('\n'+str(sink)+'\n')
    # write results to file
    if lwrite:
      sink.sync()
      writemsg  = "\n{:s} >>> Writing to file '{:s}' in dataset {:s}".format(pidstr,filename,dataset_name)
      writemsg += "\n{:s} >>> ('{:s}')\n".format(pidstr,filepath)
      logger.info(writemsg)
      # rename file to proper name
      if not lreturn:
        sink.unload(); sink.close(); del sink # destroy all references
        if os.path.exists(filepath): os.remove(filepath) # remove old file
        os.rename(tmpfilepath,filepath)
        # N.B.: there is no temporary file if the dataset is returned, because an open file can't be renamed
    # clean up and return
    source.unload(); del source #, CPU
    if lreturn:
      return sink # return dataset for further use (netcdf file still open!)
    else:
      return 0 # "exit code"
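The function signature of performExtraction is elided above, so the exact call is an assumption; the body shows, however, that with lreturn=True the sink dataset is returned with its NetCDF file still open, and the caller then owns the close() call. A hypothetical invocation could look like this (loadStations is a stand-in name for whatever station-loading function is actually used):

# hypothetical caller; argument names inferred from the function body above
sink = performExtraction(dataset='WRF', mode='climatology', dataargs=dataargs,
                         stnfct=lambda: loadStations(), varlist=None,
                         loverwrite=False, lwrite=True, lreturn=True,
                         ldebug=False, lparallel=False, pidstr='', logger=logger)
try:
    print(sink)  # the underlying NetCDF file is still open here
finally:
    sink.unload(); sink.close()  # the caller must close the returned dataset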
Example 3: addLandMask
# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import close [as alias]
#print sink.dataset
addLandMask(sink) # create landmask from precip mask
#sink.stations.mask(sink.landmask) # mask all fields using the new landmask
# add length and names of month
addLengthAndNamesOfMonth(sink, noleap=False)
# newvar = sink.precip
# print(newvar.name, newvar.masked)
# print(newvar.fillValue)
# print(newvar.data_array.__class__)
# sync everything to disk and close the file
sink.sync()
sink.close()
# print dataset
print('')
print(sink)
del sink
print('')
# # print time coordinate
# dataset = loadGPCC(grid=grid,resolution=res,period=period)
# print(dataset)
# print(dataset.time)
# print(dataset.time.data_array)