This article collects typical usage examples of the netCDF4.Dataset class in Python. If you are unsure what Dataset is for or how to use it, the curated class examples below should help.
The following shows 15 code examples of the Dataset class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
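Before the examples, here is a minimal sketch of the core Dataset workflow for orientation; the file name 'example.nc' is a hypothetical placeholder.

from netCDF4 import Dataset

# Open a file read-only and inspect its structure; Dataset works as a
# context manager, so the file is closed automatically.
with Dataset('example.nc', mode='r') as ds:
    print(ds.dimensions)         # dimension name -> Dimension object
    print(ds.variables.keys())   # variable names
    print(ds.ncattrs())          # global attribute names
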
Example 1: get_level

def get_level(resource, level):
    from flyingpigeon.ocgis_module import call
    from netCDF4 import Dataset
    from flyingpigeon.utils import get_variable
    from numpy import squeeze
    data = None  # initialized so the function returns None on failure
    try:
        level_data = call(resource, level_range=[int(level), int(level)])
        if type(resource) == list:
            resource.sort()
        variable = get_variable(level_data)
        logger.info('found %s in file' % variable)  # logger is assumed module-level
        # Rewrite the 4D variable as a 3D one named 'z<level>', with the
        # level axis squeezed out.
        ds = Dataset(level_data, mode='a')
        var = ds.variables.pop(variable)
        dims = var.dimensions
        new_var = ds.createVariable('z%s' % level, var.dtype,
                                    dimensions=(dims[0], dims[2], dims[3]))
        new_var[:, :, :] = squeeze(var[:, 0, :, :])
        ds.close()
        logger.info('level %s extracted' % level)
        data = call(level_data, variable='z%s' % level)
    except Exception as e:
        logger.error('failed to extract level: %s' % e)
    return data

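A call might look like the sketch below; the input path and level value are hypothetical, and the function depends on the flyingpigeon helpers imported above.

# Extract the 850 hPa level from a hypothetical 4D input file.
level_file = get_level('tas_4d.nc', level=850)
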
Example 2: read_netcdf

def read_netcdf(nc_file, variables=None, coords=None):
    """
    Read data from an input netCDF file. Reads all variables if none are
    specified, and also returns all variable and global attributes.
    Variable data and variable attributes are returned as dictionaries
    keyed by variable name; global attributes as a third dictionary.
    """
    f = Dataset(nc_file, 'r')
    if not variables:
        variables = f.variables.keys()
    if not coords:
        coords = slice(None)
    log.debug('Reading input data variables: %s, from file: %s', variables,
              nc_file)
    d = {}  # variable name -> data array
    a = {}  # variable name -> attribute dict
    g = {}  # global attribute name -> value
    for var in variables:
        d[var] = f.variables[var][coords]
        a[var] = f.variables[var].__dict__
    for attr in f.ncattrs():
        g[attr] = getattr(f, attr)
    f.close()
    return d, a, g

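A usage sketch, assuming a file 'forcing.nc' with variables 'prec' and 'temp' (all three names are placeholders):

data, attrs, global_attrs = read_netcdf('forcing.nc', variables=['prec', 'temp'])
print(data['prec'].shape)   # array read for 'prec'
print(attrs['prec'])        # attribute dict of 'prec'
print(list(global_attrs))   # global attribute names
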
Example 3: runTest

def runTest(self):
    """testing NETCDF3_64BIT_DATA format (CDF-5)"""
    f = Dataset(self.netcdf_file, 'r')
    assert f.dimensions['dim'].size == dimsize
    assert_array_equal(arrdata, f.variables['var'][:ndim])
    assert type(f.int64_attr) == np.int64
    f.close()

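For context, a hedged sketch of how a CDF-5 file matching what this test reads could be written; the file name, dimension size and attribute value are assumptions, not the original test fixture.

import numpy as np
from netCDF4 import Dataset

dimsize = 10
arrdata = np.arange(dimsize, dtype=np.int64)
with Dataset('cdf5_example.nc', 'w', format='NETCDF3_64BIT_DATA') as f:
    f.createDimension('dim', dimsize)
    var = f.createVariable('var', np.int64, ('dim',))
    var[:] = arrdata
    f.int64_attr = np.int64(2 ** 62)  # CDF-5 supports 64-bit integer attributes
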
Example 4: setUp

def setUp(self):
    """ Check that the AIMS system or this script hasn't been modified.
    This function checks that a downloaded file still has the same md5.
    """
    logging_aims()
    channel_id = '8365'
    from_date = '2008-09-30T00:27:27Z'
    thru_date = '2008-09-30T00:30:00Z'
    level_qc = 1
    aims_rss_val = 100
    xml_url = 'http://data.aims.gov.au/gbroosdata/services/rss/netcdf/level%s/%s' % (str(level_qc), str(aims_rss_val))

    aims_xml_info = parse_aims_xml(xml_url)
    channel_id_info = aims_xml_info[channel_id]
    self.netcdf_tmp_file_path = download_channel(channel_id, from_date, thru_date, level_qc)
    modify_soop_trv_netcdf(self.netcdf_tmp_file_path, channel_id_info)

    # Force values of attributes which change all the time, so the md5 stays stable.
    netcdf_file_obj = Dataset(self.netcdf_tmp_file_path, 'a', format='NETCDF4')
    netcdf_file_obj.date_created = "1970-01-01T00:00:00Z"
    netcdf_file_obj.history = 'data validation test only'
    netcdf_file_obj.close()

    shutil.move(self.netcdf_tmp_file_path, remove_creation_date_from_filename(self.netcdf_tmp_file_path))
    self.netcdf_tmp_file_path = remove_creation_date_from_filename(self.netcdf_tmp_file_path)

Example 5: test_3d

def test_3d(self):
    """testing variable slicing"""
    f = Dataset(self.file, 'r')
    v = f.variables['data']
    vu = f.variables['datau']

    # test return of array scalar.
    assert_equal(v[0, 0, 0].shape, ())
    assert_array_equal(v[:], datarev)
    # test reading of slices.
    # negative value means count back from end.
    assert_array_equal(v[:-1, :-2, :-3], datarev[:-1, :-2, :-3])
    # every other element (positive step)
    assert_array_equal(v[2:-1:2, 2:-2:2, 2:-3:2], datarev[2:-1:2, 2:-2:2, 2:-3:2])
    # every other element (negative step)
    assert_array_equal(v[-1:2:-2, -2:2:-2, -3:2:-2], datarev[-1:2:-2, -2:2:-2, -3:2:-2])
    # read elements in reverse order
    assert_array_equal(v[:, ::-1, :], data)
    assert_array_equal(v[::-1, :, ::-1], datarev[::-1, :, ::-1])
    assert_array_equal(v[xdim-1::-3, :, zdim-1::-3], datarev[xdim-1::-3, :, zdim-1::-3])
    # ellipsis slice.
    assert_array_equal(v[..., 2:], datarev[..., 2:])
    # variable with an unlimited dimension.
    assert_array_equal(vu[:], data[:, ::-1, :])
    # read data in reverse order
    assert_array_equal(vu[:, ::-1, :], data)
    # index using an integer array scalar
    i = NP.ones(1, 'i4')[0]
    assert_array_equal(v[i], datarev[1])
    f.close()

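For reference, a hedged sketch of a setUp that would produce a file this test could read; the shapes and random data are assumptions inferred from the assertions above.

import numpy as np
from netCDF4 import Dataset

xdim, ydim, zdim = 9, 10, 11
data = np.random.uniform(size=(xdim, ydim, zdim))
datarev = data[:, ::-1, :]
with Dataset('slicing_test.nc', 'w') as f:
    f.createDimension('x', xdim)
    f.createDimension('y', ydim)
    f.createDimension('z', zdim)
    f.createDimension('t', None)  # unlimited dimension
    v = f.createVariable('data', 'f8', ('x', 'y', 'z'))
    v[:] = datarev
    vu = f.createVariable('datau', 'f8', ('t', 'y', 'z'))
    vu[0:xdim] = datarev  # writing along the unlimited dimension grows it
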
Example 6: NetCDFData

class NetCDFData(Data):
    def __init__(self, url):
        self._dataset = None
        self.__timestamp_cache = TTLCache(1, 3600)
        super(NetCDFData, self).__init__(url)

    def __enter__(self):
        self._dataset = Dataset(self.url, 'r')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._dataset.close()

    @property
    def timestamps(self):
        if self.__timestamp_cache.get("timestamps") is None:
            var = None
            for v in ['time', 'time_counter']:
                if v in self._dataset.variables:
                    var = self._dataset.variables[v]
                    break

            t = netcdftime.utime(var.units)
            # A list comprehension rather than map(), so this also works on
            # Python 3, where map() returns an iterator instead of a list.
            timestamps = np.array([
                t.num2date(ts).replace(tzinfo=pytz.UTC) for ts in var[:]
            ])
            timestamps.flags.writeable = False
            self.__timestamp_cache["timestamps"] = timestamps

        return self.__timestamp_cache.get("timestamps")

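A usage sketch; the file name is a placeholder for a local file or OPeNDAP URL, and the Data base class, TTLCache, netcdftime, pytz and np are assumed to come from the enclosing module.

with NetCDFData('giops_daily.nc') as nc:
    ts = nc.timestamps
    print(ts[0], ts[-1])
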
Example 7: monreduce

def monreduce(filein):
    ''' Averages the files down to a month.
    Creates a new temporary file per month.
    '''
    # glob, Pool, np and the helpers create_temp_nc/splitmonthcalc are
    # assumed to come from the enclosing module.
    directory = '/group_workspaces/jasmin/hiresgw/mj07/monthly_means/'
    month = filein[:-7]
    create_temp_nc(month)

    # Get the 3 files for each month
    files = glob(directory+'temp_files/'+filein[-22:-7]+'??.temp.v.nc')
    fileblob = ''
    for filename in files:
        fileblob += filename+','
    splitind = range(0, 1024, 128)

    # Compute the mean over 8 index slabs in parallel, then stitch the
    # results back together along the third axis.
    pool = Pool(processes=8)
    TASKS = [(fileblob, n) for n in splitind]
    meansection = [pool.apply_async(splitmonthcalc, t) for t in TASKS]
    mean = np.concatenate((meansection[0].get(), meansection[1].get(),
                           meansection[2].get(), meansection[3].get(),
                           meansection[4].get(), meansection[5].get(),
                           meansection[6].get(), meansection[7].get()), 2)
    print('done for %s' % month)

    # Save mean in file
    filename = directory+'temp_files/'+filein[-22:-7]+'.temp.v.nc'
    f = Dataset(filename, 'a')
    u = f.variables['v']
    u[:] = mean[:]
    f.close()

Example 8: get_indices_from_file

def get_indices_from_file(path='data/streamflows/hydrosheds_euler9/aex_discharge_1970_01_01_00_00.nc'):
    fpin = Dataset(path)
    vars = fpin.variables
    x, y = vars['x-index'][:], vars['y-index'][:]
    fpin.close()
    return x, y

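Usage is a one-liner; the call below relies on the default path baked into the signature.

x_indices, y_indices = get_indices_from_file()
print(x_indices.shape, y_indices.shape)
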
Example 9: open

def open(filename):
    '''Import netCDF output file as OpenDrift object of correct class'''
    import os
    import logging
    import pydoc
    from netCDF4 import Dataset

    if not os.path.exists(filename):
        logging.info('File does not exist, trying to retrieve from URL')
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2
        try:
            urlretrieve(filename, 'opendrift_tmp.nc')
            filename = 'opendrift_tmp.nc'
        except Exception:
            raise ValueError('%s does not exist' % filename)

    n = Dataset(filename)
    try:
        module_name = n.opendrift_module
        class_name = n.opendrift_class
    except AttributeError:
        raise ValueError(filename + ' does not contain '
                         'necessary global attributes '
                         'opendrift_module and opendrift_class')
    n.close()

    cls = pydoc.locate(module_name + '.' + class_name)
    if cls is None:
        from models import oceandrift3D
        cls = oceandrift3D.OceanDrift3D
    o = cls()
    o.io_import_file(filename)
    logging.info('Returning ' + str(type(o)) + ' object')
    return o

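A usage sketch; 'opendrift_output.nc' is a placeholder for a previous OpenDrift run's output file.

# Note that this module-level open() shadows the builtin of the same name.
o = open('opendrift_output.nc')
print(type(o))
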
Example 10: __init__

def __init__(self, coord_file="coordinates.nc"):
    ds = Dataset(coord_file)
    self.target_lons = ds.variables["glamt"][:]
    self.target_lats = ds.variables["gphit"][:]
    print("target lons shape = ", self.target_lons.shape)
    ds.close()

Example 11: FileBuffer

class FileBuffer(object):
    """ Class that encapsulates and manages deferred access to file data. """

    def __init__(self, filename, dimensions):
        self.filename = filename
        self.dimensions = dimensions  # Dict with dimension keys for file data
        self.dataset = None

    def __enter__(self):
        self.dataset = Dataset(str(self.filename), 'r', format="NETCDF4")
        return self

    def __exit__(self, type, value, traceback):
        self.dataset.close()

    @property
    def lon(self):
        lon = self.dataset[self.dimensions['lon']]
        return lon[0, :] if len(lon.shape) > 1 else lon[:]

    @property
    def lat(self):
        lat = self.dataset[self.dimensions['lat']]
        return lat[:, 0] if len(lat.shape) > 1 else lat[:]

    @property
    def data(self):
        if len(self.dataset[self.dimensions['data']].shape) == 3:
            return self.dataset[self.dimensions['data']][:, :, :]
        else:
            # 4D field: take the first index along the second (e.g. depth) axis
            return self.dataset[self.dimensions['data']][:, 0, :, :]

    @property
    def time(self):
        if self.time_units is not None:
            dt = num2date(self.dataset[self.dimensions['time']][:],
                          self.time_units, self.calendar)
            dt -= num2date(0, self.time_units, self.calendar)
            return list(map(timedelta.total_seconds, dt))
        else:
            return self.dataset[self.dimensions['time']][:]

    @property
    def time_units(self):
        """ Derive time_units if the time dimension has units """
        try:
            return self.dataset[self.dimensions['time']].units
        except (AttributeError, KeyError, IndexError):
            try:
                return self.dataset[self.dimensions['time']].Unit
            except (AttributeError, KeyError, IndexError):
                return None

    @property
    def calendar(self):
        """ Derive calendar if the time dimension has calendar """
        try:
            return self.dataset[self.dimensions['time']].calendar
        except (AttributeError, KeyError, IndexError):
            return 'standard'

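A usage sketch; the file path and the dimension-name mapping are placeholders (NEMO-style names chosen purely for illustration).

dims = {'lon': 'nav_lon', 'lat': 'nav_lat', 'time': 'time_counter', 'data': 'U'}
with FileBuffer('ocean_u.nc', dims) as fb:
    print(fb.lon.shape, fb.lat.shape)
    print(fb.time_units, fb.calendar)
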
Example 12: piomasReader

def piomasReader(directory, month, years):
    """
    Reads PIOMAS data for sea ice thickness over 1979-2015
    """
    ### Enter filename
    filename = 'piomas_regrid_sit_19792015.nc'

    ### Month/years extracted ('now' is assumed to be a module-level datetime)
    dateyr = now.year
    datemo = datetime.date(dateyr, month+1, 1).strftime('%B')

    yearsp = np.arange(1979, 2016)
    yearmin = years.min()
    yearmax = years.max()
    yearnone = 2010
    yearslice = np.where((yearsp <= yearmax) & (yearsp >= yearmin) & \
                         (yearsp != yearnone))[0]

    ### Retrieve data
    data = Dataset(directory + filename)
    latp = data.variables['lat'][:]
    lonp = data.variables['lon'][:]
    thk_p = data.variables['newthickness'][yearslice, month, :, :]
    data.close()

    print('Completed: PIOMAS data read (%s)!' % datemo)
    return latp, lonp, thk_p

Example 13: mooring_2dvar

def mooring_2dvar(ncfile, level):
    """Standard EcoFOCI Mooring .nc files with two-dimensional parameters as a
    function of time (such as ein - echo intensity).
    Timestep of data is assumed to be in fractions of a day."""

    ### nc readin/out
    nchandle = Dataset(ncfile, 'r')
    params = ['time', 'time2', 'depth', 'latitude', 'longitude', 'AGC1_1221']
    time = nchandle.variables[params[0]][:]
    time2 = nchandle.variables[params[1]][:]
    lat = nchandle.variables[params[3]][:]
    lon = nchandle.variables[params[4]][:]
    depth = nchandle.variables[params[2]][:]
    ncdata = nchandle.variables[params[5]][:, :, 0, 0]
    nchandle.close()

    time_base = 'hours'
    pytime = util.EPICdate2udunits(time, time2)
    xx = ncdata[:, level]
    dt = 24. * (1. / pytime['interval_min'])  # e.g. 4 samples per day
    print(dt)
    time = pytime['timeint']

    # normalize
    variance = np.var(xx)
    print('Variance = %s ' % variance)
    x = (xx - np.mean(xx)) / np.sqrt(variance)
    variance = np.var(x)

    return (ncdata, x, dt, np.array(time) * 24., variance, time_base, depth)

Example 14: set_basic_md

def set_basic_md(resource):
    """
    Sets basic metadata on a netCDF file.

    :param resource: netCDF file where basic metadata should be set
    """
    import sys
    from datetime import datetime as dt

    py_version = sys.version
    creation_date = dt.now().strftime('%Y-%m-%dT%H:%M:%S')

    md_basic = {
        'activity': 'birdhouse project',
        'software': 'flyingpigeon v 0.1',
        'software_project': 'birdhouse',
        'software_reference': 'https://github.com/bird-house/',
        'software_platform': 'PYTHON %s' % py_version,
        'contact_mail_1': '[email protected]',
        'contact_mail_2': '[email protected]',
        'creation_date': creation_date,
    }

    ds = Dataset(resource, mode='a')
    ds.setncatts(md_basic)
    ds.close()
    return resource

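A quick check that the attributes were written, assuming 'result.nc' is an existing netCDF file (placeholder name):

from netCDF4 import Dataset

set_basic_md('result.nc')
with Dataset('result.nc') as ds:
    print(ds.getncattr('creation_date'))
    print(ds.getncattr('software'))
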
Example 15: ReadFile

def ReadFile(self):
    '''
    Read TCCON data. There is one file per station; if a station
    directory contains several files, only one result is kept.
    '''
    if not self.FileList:
        return

    for fname in self.FileList:
        ncFile = Dataset(fname, 'r', format='NETCDF3_CLASSIC')
        ncTime = ncFile.variables['time'][:]
        xco2_ppm = ncFile.variables['xco2_ppm'][:]
        ncFile.close()

        # 'time' is stored as fractional days; convert to a timestamp string.
        for i in range(len(ncTime)):
            seconds = ncTime[i] * 24 * 60 * 60
            strTime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds))
            self.FileLine.append([strTime, xco2_ppm[i]])

    title = ['time', 'xco2']
    dtype = ['S19'] + ['f4']
    ary = np.core.records.fromarrays(np.array(self.FileLine).transpose(),
                                     names=','.join(title),
                                     formats=','.join(dtype))
    # Keep only physically plausible XCO2 values.
    condition = np.logical_and(ary['xco2'] > 0, ary['xco2'] < 600)
    self.FileData = ary[np.where(condition)]