

Python DatasetNetCDF.sync Method Code Examples

This article collects typical usage examples of the DatasetNetCDF.sync method from the Python module geodata.netcdf. If you are wondering how exactly to use DatasetNetCDF.sync, or what it is good for, the curated examples below may help. You can also explore further usage examples of geodata.netcdf.DatasetNetCDF, the class this method belongs to.


Three code examples of the DatasetNetCDF.sync method are shown below; by default they are ordered by popularity.
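Before the full examples, here is a minimal sketch of the typical DatasetNetCDF.sync workflow, distilled from the examples below. The folder, file name, and variable values are hypothetical placeholders; the DatasetNetCDF and Variable constructors follow the usage shown in Example 1, while the Axis class, its module path, and its 'coord' keyword are assumptions based on the GeoPy project layout:

import numpy as np
from geodata.base import Axis, Variable  # module path and Axis signature assumed from GeoPy layout
from geodata.netcdf import DatasetNetCDF

# create a NetCDF-backed dataset in write mode (hypothetical folder/file)
sink = DatasetNetCDF(name='demo', folder='/tmp/', filelist=['demo.nc'], mode='w')
sink.atts.period = '1979-1988' # attributes can be set before syncing

# add a simple variable along a new time axis (mirrors Example 1)
time = Axis(name='time', units='month', coord=np.arange(12))
sink += Variable(name='length_of_month', units='days', axes=(time,),
                 data=np.array([31,28,31,30,31,30,31,31,30,31,30,31]))

# sync() flushes in-memory data and attributes to the NetCDF file; close() releases it
sink.sync()
sink.close()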

Example 1: computeClimatology

# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import sync [as alias]

#.........some code omitted here.........
        assert os.path.exists(expfolder)
        filepath = expfolder+filename
        tmpfilepath = expfolder+tmpfilename
        lskip = False # else just go ahead
        if os.path.exists(filepath): 
          if not loverwrite: 
            age = datetime.fromtimestamp(os.path.getmtime(filepath))
            # if sink file is newer than source file, skip (do not recompute)
            if age > sourceage and os.path.getsize(filepath) > 1e6: lskip = True
            # N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crash
            #print sourceage, age
          if not lskip: os.remove(filepath) 
        
        # depending on last modification time of file or overwrite setting, start computation, or skip
        if lskip:        
          # print message
          skipmsg =  "\n{:s}   >>>   Skipping: file '{:s}' in dataset '{:s}' already exists and is newer than source file.".format(pidstr,filename,dataset_name)
          skipmsg += "\n{:s}   >>>   ('{:s}')\n".format(pidstr,filepath)
          logger.info(skipmsg)              
        else:
           
          ## begin actual computation
          beginmsg = "\n{:s}   <<<   Computing '{:s}' (d{:02d}) Climatology from {:s}".format(
                      pidstr,dataset_name,domain,periodstr)
          if griddef is None: beginmsg += "  >>>   \n" 
          else: beginmsg += " ('{:s}' grid)  >>>   \n".format(griddef.name)
          logger.info(beginmsg)
  
          ## actually load datasets
          if source is None:
            source = loadWRF_TS(experiment=experiment, filetypes=[filetype], domains=domain) # comes out as a tuple... 
          if not lparallel and ldebug: logger.info('\n'+str(source)+'\n')
  
          # prepare sink
          if os.path.exists(tmpfilepath): os.remove(tmpfilepath) # remove old temp files
          sink = DatasetNetCDF(name='WRF Climatology', folder=expfolder, filelist=[tmpfilename], atts=source.atts.copy(), mode='w')
          sink.atts.period = periodstr 
          
          # initialize processing
          if griddef is None: lregrid = False
          else: lregrid = True
          CPU = CentralProcessingUnit(source, sink, varlist=varlist, tmp=lregrid, feedback=ldebug) # no need for lat/lon
          
          # start processing climatology
          if shift != 0: 
            logger.info('{0:s}   (shifting climatology by {1:d} month, to start with January)   \n'.format(pidstr,shift))
          CPU.Climatology(period=period, offset=offset, shift=shift, flush=False)
          # N.B.: immediate flushing should not be necessary for climatologies, since they are much smaller!
          
          # reproject and resample (regrid) dataset
          if lregrid:
            CPU.Regrid(griddef=griddef, flush=True)
            logger.info('{:s}    ---   {:s}   ---   \n'.format(pidstr, str(griddef.geotransform)))
          
          # sync temporary storage with output dataset (sink)
          CPU.sync(flush=True)
          
          # add Geopotential Height Variance
          if 'GHT_Var' in sink and 'Z_var' not in sink:
            data_array = ( sink['GHT_Var'].data_array - sink['Z'].data_array**2 )**0.5
            atts = dict(name='Z_var',units='m',long_name='Square Root of Geopotential Height Variance')
            sink += Variable(axes=sink['Z'].axes, data=data_array, atts=atts)
            
          # add (relative) Vorticity Variance
          if 'Vorticity_Var' in sink and 'zeta_var' not in sink:
            data_array = ( sink['Vorticity_Var'].data_array - sink['zeta'].data_array**2 )**0.5
            atts = dict(name='zeta_var',units='1/s',long_name='Square Root of Relative Vorticity Variance')
            sink += Variable(axes=sink['zeta'].axes, data=data_array, atts=atts)
            
          # add names and length of months
          sink.axisAnnotation('name_of_month', name_of_month, 'time', 
                              atts=dict(name='name_of_month', units='', long_name='Name of the Month'))        
          if not sink.hasVariable('length_of_month'):
            sink += Variable(name='length_of_month', units='days', axes=(sink.time,), data=days_per_month,
                          atts=dict(name='length_of_month',units='days',long_name='Length of Month'))
          
          # close... and write results to file
          sink.sync()
          sink.close()
          writemsg =  "\n{:s}   >>>   Writing to file '{:s}' in dataset {:s}".format(pidstr,filename,dataset_name)
          writemsg += "\n{:s}   >>>   ('{:s}')\n".format(pidstr,filepath)
          logger.info(writemsg)      
          # rename file to proper name
          if os.path.exists(filepath): os.remove(filepath) # remove old file
          os.rename(tmpfilepath,filepath) # this will overwrite the old file
          
          # print dataset
          if not lparallel and ldebug:
            logger.info('\n'+str(sink)+'\n')
          
          # clean up (not sure if this is necessary, but there seems to be a memory leak...)
          del sink, CPU; gc.collect() # get rid of these guys immediately
          
    # clean up and return
    if source is not None: source.unload(); del source
    # N.B.: source is only loaded once for all periods    

  # N.B.: garbage is collected in multi-processing wrapper as well
  # return
  return 0 # so far, there is no measure of success, hence, if there is no crash...
Developer: EdwardBetts, Project: GeoPy, Lines of code: 104, Source file: wrfavg.py
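A detail worth highlighting in the example above: results are written to a temporary file and only renamed to the final name after sink.sync() and sink.close() have succeeded, so a crash mid-computation never leaves a truncated file under the real name (which is why the size checks earlier treat very small files as stubs). A minimal sketch of that pattern, with hypothetical paths:

import os

tmpfilepath = '/tmp/tmp_wrfavg_demo.nc' # hypothetical temporary path
filepath = '/tmp/wrfavg_demo.nc' # hypothetical final path

# ... compute, write, and sync the dataset to tmpfilepath ...

if os.path.exists(filepath): os.remove(filepath) # remove stale output first
os.rename(tmpfilepath, filepath) # rename is atomic within one filesystem on POSIX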

Example 2: performExtraction

# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import sync [as alias]

#.........some code omitted here.........
  
  ## extract meta data from arguments
  module, dataargs, loadfct, filepath, datamsgstr = getMetaData(dataset, mode, dataargs)
  dataset_name = dataargs.dataset_name; periodstr = dataargs.periodstr; avgfolder = dataargs.avgfolder

  # load template dataset
  stndata = stnfct() # load station dataset from function
  if not isinstance(stndata, Dataset): raise TypeError
  # N.B.: the loading function is necessary, because DatasetNetCDF instances do not pickle well
            
  # determine age of source file
  if not loverwrite: sourceage = datetime.fromtimestamp(os.path.getmtime(filepath))    
          
  # get filename for target dataset and do some checks
  filename = getTargetFile(stndata.name, dataset, mode, module, dataargs, lwrite)
  if ldebug: filename = 'test_' + filename
  if not os.path.exists(avgfolder): raise IOError("Dataset folder '{:s}' does not exist!".format(avgfolder))
  lskip = False # else just go ahead
  if lwrite:
    if lreturn: 
      tmpfilename = filename # no temporary file if dataset is passed on (can't rename the file while it is open!)
    else: 
      if lparallel: tmppfx = 'tmp_exstns_{:s}_'.format(pidstr[1:-1])
      else: tmppfx = 'tmp_exstns_' # no PID prefix needed in serial mode
      tmpfilename = tmppfx + filename      
    filepath = avgfolder + filename
    tmpfilepath = avgfolder + tmpfilename
    if os.path.exists(filepath): 
      if not loverwrite: 
        age = datetime.fromtimestamp(os.path.getmtime(filepath))
        # if source file is newer than sink file or if sink file is a stub, recompute, otherwise skip
        if age > sourceage and os.path.getsize(filepath) > 1e5: lskip = True
        # N.B.: NetCDF files smaller than 100kB are usually incomplete header fragments from a previous crash
      if not lskip: os.remove(filepath) # recompute
  
  # depending on last modification time of file or overwrite setting, start computation, or skip
  if lskip:        
    # print message
    skipmsg =  "\n{:s}   >>>   Skipping: file '{:s}' in dataset '{:s}' already exists and is newer than source file.".format(pidstr,filename,dataset_name)
    skipmsg += "\n{:s}   >>>   ('{:s}')\n".format(pidstr,filepath)
    logger.info(skipmsg)              
  else:
          
    ## actually load datasets
    source = loadfct() # load source 
    # check period
    if 'period' in source.atts and dataargs.periodstr != source.atts.period: # a NetCDF attribute
      raise DateError("Specified period is inconsistent with netcdf records: '{:s}' != '{:s}'".format(periodstr,source.atts.period))
  
    # print message
    if lclim: opmsgstr = "Extracting '{:s}'-type Point Data from Climatology ({:s})".format(stndata.name, periodstr)
    elif lts: opmsgstr = "Extracting '{:s}'-type Point Data from Time-series".format(stndata.name)
    else: raise NotImplementedError("Unrecognized Mode: '{:s}'".format(mode))
    # print feedback to logger
    logger.info('\n{0:s}   ***   {1:^65s}   ***   \n{0:s}   ***   {2:^65s}   ***   \n'.format(pidstr,datamsgstr,opmsgstr))
    if not lparallel and ldebug: logger.info('\n'+str(source)+'\n')  
    
    ## create new sink/target file
    # set attributes   
    atts=source.atts.copy()
    atts['period'] = dataargs.periodstr if dataargs.periodstr else 'time-series' 
    atts['name'] = dataset_name; atts['station'] = stndata.name
    atts['title'] = '{:s} (Stations) from {:s} {:s}'.format(stndata.title,dataset_name,mode.title())
    # make new dataset
    if lwrite: # write to NetCDF file 
      if os.path.exists(tmpfilepath): os.remove(tmpfilepath) # remove old temp files 
      sink = DatasetNetCDF(folder=avgfolder, filelist=[tmpfilename], atts=atts, mode='w')
    else: sink = Dataset(atts=atts) # only create dataset in memory
    
    # initialize processing
    CPU = CentralProcessingUnit(source, sink, varlist=varlist, tmp=False, feedback=ldebug)
  
    # extract data at station locations
    CPU.Extract(template=stndata, flush=True)
    # get results    
    CPU.sync(flush=True)
    
    # print dataset
    if not lparallel and ldebug:
      logger.info('\n'+str(sink)+'\n')   
    # write results to file
    if lwrite:
      sink.sync()
      writemsg =  "\n{:s}   >>>   Writing to file '{:s}' in dataset {:s}".format(pidstr,filename,dataset_name)
      writemsg += "\n{:s}   >>>   ('{:s}')\n".format(pidstr,filepath)
      logger.info(writemsg)      
      
      # rename file to proper name
      if not lreturn:
        sink.unload(); sink.close(); del sink # destroy all references 
        if os.path.exists(filepath): os.remove(filepath) # remove old file
        os.rename(tmpfilepath,filepath)
      # N.B.: there is no temporary file if the dataset is returned, because an open file can't be renamed
        
    # clean up and return
    source.unload(); del source#, CPU
    if lreturn:      
      return sink # return dataset for further use (netcdf file still open!)
    else:            
      return 0 # "exit code"
Developer: EdwardBetts, Project: GeoPy, Lines of code: 104, Source file: exstns.py

Example 3: addLandMask

# Required import: from geodata.netcdf import DatasetNetCDF [as alias]
# Or: from geodata.netcdf.DatasetNetCDF import sync [as alias]

#.........some code omitted here.........
      #sink.mask(sink.landmask)
      #print sink.dataset
      addLandMask(sink) # create landmask from precip mask
      #sink.stations.mask(sink.landmask) # mask all fields using the new landmask
      # add length and names of month
      addLengthAndNamesOfMonth(sink, noleap=False) 
                    
#       newvar = sink.precip
#       print
#       print newvar.name, newvar.masked
#       print newvar.fillValue
#       print newvar.data_array.__class__
#       print
      
      # close...
      sink.sync()
      sink.close()
      # print dataset
      print('')
      print(sink)
      del sink     
      print('')

#       # print time coordinate
#       dataset = loadGPCC(grid=grid,resolution=res,period=period)      
#       print dataset
#       print
#       print dataset.time
#       print
#       print dataset.time.data_array
    
Developer: xiefengy, Project: GeoPy, Lines of code: 32, Source file: GPCC.py


Note: The geodata.netcdf.DatasetNetCDF.sync method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; the source code copyright remains with the original authors, and redistribution and use must follow the corresponding project's license. Do not repost without permission.