本文整理汇总了Python中netCDF4.Dataset.version方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.version方法的具体用法?Python Dataset.version怎么用?Python Dataset.version使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类netCDF4.Dataset
的用法示例。
在下文中一共展示了Dataset.version方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_netcdf
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
def create_netcdf(
    a_netcdf_filename, a_lat_points, a_lon_points, a_nb_levels, a_celerity_arr, a_u_arr, a_v_arr, a_time, a_loc_names
):
    """Create a NetCDF file holding CTBTO infrasound wind/celerity profiles.

    CDL layout of the file produced (corrected: the u and v variables carry
    their own units/coordinates attributes, not celerity's):

    dimensions:
        altitude = 401;
        profile = 1 ;
    variables:
        float altitude(altitude) ;
            altitude:long_name = "height above mean sea level" ;
            altitude:units = "km" ;
            altitude:positive = "up" ;
        double time(profile);
            time:long_name = "time" ;
            time:units = "days since 1970-01-01 00:00:00" ;
        string loc_name(profile) ;
            loc_name:units = "-" ;
            loc_name:long_name = "Location name" ;
        float lon(profile);
            lon:long_name = "longitude" ;
            lon:units = "degrees_east" ;
        float lat(profile);
            lat:long_name = "latitude" ;
            lat:units = "degrees_north" ;
        float celerity(profile, altitude) ;
            celerity:long_name = "celerity" ;
            celerity:units = "m s**-1" ;
            celerity:coordinates = "time lon lat altitude" ;
        float u(profile, altitude) ;
            u:long_name = "U velocity" ;
            u:units = "m s**-1" ;
            u:coordinates = "time lon lat altitude" ;
        float v(profile, altitude) ;
            v:long_name = "V velocity" ;
            v:units = "m s**-1" ;
            v:coordinates = "time lon lat altitude" ;
    attributes:
        :CF\:featureType = "profile";
    """
    print ("In create_netcdf %s" % (a_netcdf_filename))
    # Project configuration decides the on-disk format
    # (defaults to NETCDF3_CLASSIC when the key is absent).
    conf = Conf.get_instance()
    netcdf_format = conf.get("NETCDF", "produced_format", "NETCDF3_CLASSIC")
    # create the output file
    dataset = Dataset(a_netcdf_filename, "w", format=netcdf_format)
    # create dimensions: one vertical axis plus one record per profile location
    dataset.createDimension("altitude", a_nb_levels)
    dataset.createDimension("profile", len(a_lat_points))
    # fixed-width char dimension for location names — NetCDF3 has no
    # variable-length string type, so names are stored as char arrays
    loc_name_len = dataset.createDimension("loc_name_len", 5)
    # create basic coordinate variables
    the_time = dataset.createVariable("time", "f8", ("profile"))
    lat = dataset.createVariable("latitude", "f4", ("profile"))
    lon = dataset.createVariable("longitude", "f4", ("profile"))
    altitudes = dataset.createVariable("altitude", "f4", ("altitude"))
    # create loc_name
    # In netcdf4 it would be
    # loc_names = dataset.createVariable('loc_name', str,('profile'))
    if netcdf_format == "NETCDF3_CLASSIC":
        # NetCDF3: 2-D char array (profile x loc_name_len)
        loc_names = dataset.createVariable("loc_name", "c", ("profile", "loc_name_len"))
    else:
        # NetCDF4: native variable-length strings are available
        loc_names = dataset.createVariable("loc_name", str, ("profile"))
    # create param variables
    # u and v wind components
    u = dataset.createVariable("u", "f4", ("profile", "altitude"))
    v = dataset.createVariable("v", "f4", ("profile", "altitude"))
    # celerity
    c = dataset.createVariable("c", "f4", ("profile", "altitude"))
    # dataset.sync()
    # add global attributes (file provenance / versioning)
    dataset.description = "CTBTO Infrasound wind profiles"
    dataset.history = "Created " + time.ctime(time.time()) + " by infra-profile-generator-v1.2.2"
    dataset.source = "infra-profile-generator-v1.2.2"
    dataset.version = "infrasound profile v1.0-20090801"
    # dataset.station = 'IS42'
    lat.units = "degrees north"
    lat.long_name = "Latitude"
    lon.units = "degrees east"
    lon.long_name = "Longitude"
    altitudes.units = "m"
    altitudes.long_name = "Altitude"
    loc_names.units = "-"
    # NOTE(review): the remainder of this function (filling the variables and
    # closing the dataset) is omitted in this excerpt of the source listing.
#.........这里部分代码省略.........
开发者ID:pombredanne,项目名称:java-balivernes,代码行数:103,代码来源:old_run_g2s_and_create_trajectory_profiles_netcdf.py
示例2: create_netcdf
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
def create_netcdf(a_netcdf_filename, a_lat_points, a_lon_points, a_nb_levels, a_celerity_arr, a_u_arr, a_v_arr, a_time, a_loc_names):
    """Create a NetCDF file holding CTBTO infrasound wind/celerity profiles.

    CDL layout of the file produced (corrected: the long_name of the v
    variable documents v, not u):

    dimensions:
        altitude = 401;
        profile = 1 ;
    variables:
        float altitude(altitude) ;
            altitude:long_name = "height above mean sea level" ;
            altitude:units = "km" ;
            altitude:positive = "up" ;
        double time(profile);
            time:long_name = "time" ;
            time:units = "days since 1970-01-01 00:00:00" ;
        string loc_name(profile) ;
            loc_name:units = "-" ;
            loc_name:long_name = "Location name" ;
        float lon(profile);
            lon:long_name = "longitude" ;
            lon:units = "degrees_east" ;
        float lat(profile);
            lat:long_name = "latitude" ;
            lat:units = "degrees_north" ;
        float celerity(profile, altitude) ;
            celerity:long_name = "celerity" ;
            celerity:units = "m s**-1" ;
            celerity:coordinates = "time lon lat altitude" ;
        float u(profile, altitude) ;
            u:long_name = "U velocity" ;
            u:units = "m s**-1" ;
            u:coordinates = "time lon lat altitude" ;
        float v(profile, altitude) ;
            v:long_name = "V velocity" ;
            v:units = "m s**-1" ;
            v:coordinates = "time lon lat altitude" ;
    attributes:
        :CF\:featureType = "profile";
    """
    print("In create_netcdf %s" %(a_netcdf_filename))
    # Project configuration decides the on-disk format
    # (defaults to NETCDF3_CLASSIC when the key is absent).
    conf = Conf.get_instance()
    netcdf_format = conf.get('NETCDF', 'produced_format', 'NETCDF3_CLASSIC')
    # create the output file
    dataset = Dataset(a_netcdf_filename, 'w', format=netcdf_format)
    # create dimensions: one vertical axis plus one record per profile location
    dataset.createDimension('altitude', a_nb_levels)
    dataset.createDimension('profile', len(a_lat_points))
    # fixed-width char dimension for location names — NetCDF3 has no
    # variable-length string type, so names are stored as char arrays
    loc_name_len = dataset.createDimension('loc_name_len', 5)
    # create basic coordinate variables
    the_time = dataset.createVariable('time', 'f8', ('profile'))
    lat = dataset.createVariable('latitude', 'f4', ('profile'))
    lon = dataset.createVariable('longitude', 'f4', ('profile'))
    altitudes = dataset.createVariable('altitude', 'f4', ('altitude'))
    # create loc_name
    # In netcdf4 it would be
    #loc_names = dataset.createVariable('loc_name', str,('profile'))
    if netcdf_format == 'NETCDF3_CLASSIC':
        # NetCDF3: 2-D char array (profile x loc_name_len)
        loc_names = dataset.createVariable('loc_name', 'c', ('profile','loc_name_len') )
    else:
        # NetCDF4: native variable-length strings are available
        loc_names = dataset.createVariable('loc_name', str, ('profile') )
    # create param variables
    # u and v wind components
    u = dataset.createVariable('u', 'f4', ('profile', 'altitude'))
    v = dataset.createVariable('v', 'f4', ('profile', 'altitude'))
    # celerity
    c = dataset.createVariable('c', 'f4', ('profile','altitude'))
    #dataset.sync()
    # add global attributes (file provenance / versioning)
    dataset.description = 'CTBTO Infrasound wind profiles'
    dataset.history = 'Created ' + time.ctime(time.time()) + ' by infra-profile-generator-v1.2.2'
    dataset.source = 'infra-profile-generator-v1.2.2'
    dataset.version = 'infrasound profile v1.0-20090801'
    #dataset.station = 'IS42'
    lat.units = 'degrees north'
    lat.long_name = 'Latitude'
    lon.units = 'degrees east'
    lon.long_name = 'Longitude'
    altitudes.units = 'm'
    altitudes.long_name = 'Altitude'
    loc_names.units = '-'
    loc_names.long_name = 'Location name'
    # NOTE(review): the remainder of this function (filling the variables and
    # closing the dataset) is omitted in this excerpt of the source listing.
#.........这里部分代码省略.........
示例3: usage
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
# NOTE(review): fragment of a command-line driver script written in Python 2
# (bare `print` statements). The enclosing getopt loop that defines `opt`,
# and the initialisation of `overwrite`/`doget` to 0, are outside this
# excerpt — presumably they precede these lines; verify against the full
# script before reuse.
usage(sys.argv[0])
if opt[0]=="-o":
    overwrite=1
if opt[0]=="-g":
    doget=1
# -o (overwrite the reference) and -g (fetch it) are mutually exclusive
if overwrite and doget:
    print "Unable overwrite the file while getting it"
    print "Make a better selection of command line options"
    sys.exit(1)
# If we must overwrite the reference file, we need to
# create it, first, otherwise, just open it for reading
refname="reference.cdf"
if overwrite:
    nc=Dataset(refname,"w")
    # record the interpreter and platform that produced the reference file
    # as global attributes, for later provenance checks
    nc.version=sys.version
    nc.platform=sys.platform
    # sys.byteorder only exists from Python 2.0 onwards
    if 'byteorder' in dir(sys):
        nc.byteorder=sys.byteorder
    else:
        nc.byteorder="Unknown, Python older than 2.0??"
else:
    # If there is no local copy of the file, get it from
    # its URL
    if os.access(refname,os.F_OK)==0 or doget:
        thedir="http://starship.python.net/crew/jsaenz/pyclimate/"
        thedir=thedir+"references/"
        theurl=thedir+pyclimate.tools.pyclimateversion()+"/"+refname
        print "There is no local copy of:",refname
        print "Do you want me to get it from"
        print theurl,"?[no]/yes"
        # NOTE(review): listing truncated here — the interactive download
        # logic continues in the original script.
示例4: Funceme
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
# NOTE(review): fragment of a CHIRPS-conversion script. `foo` (the output
# netCDF4 Dataset), `pcp` (a (time, lat, lon) precipitation array), `lat`
# and `lon` are created earlier in the original script and are not visible
# in this excerpt.
foo.createDimension('time', None)  # unlimited record dimension
foo.createDimension('latitude', pcp.shape[1])
foo.createDimension('longitude', pcp.shape[2])
# CHIRPS v2.0 provenance metadata, stored as global attributes
foo.institution = 'Climate Hazards Group. University of California at Santa Barbara'
foo.creator_name = 'Pete Peterson'
foo.history = 'created by Climate Hazards Group - Modified by Funceme (NetCDF3 - South America)'
foo.title = 'CHIRPS Version 2.0'
foo.creator_email = '[email protected].edu'
foo.documentation = 'http://pubs.usgs.gov/ds/832/'
foo.comments = 'time variable denotes the first day of the given month.'
foo.ftp_url = 'ftp://chg-ftpout.geog.ucsb.edu/pub/org/chg/products/CHIRPS-latest/'
foo.website = 'http://chg.geog.ucsb.edu/data/chirps/index.html'
foo.faq = 'http://chg-wiki.geog.ucsb.edu/wiki/CHIRPS_FAQ'
foo.version = 'Version 2.0'
foo.date_created = '2015-12-02'
# latitude coordinate variable, copied from the input grid
lats = foo.createVariable('latitude', 'f4', ('latitude'))
lats.units = 'degrees_north'
lats.long_name = 'latitude'
lats.axis = "Y"
lats[:] = lat[:]
# longitude coordinate variable, copied from the input grid
lons = foo.createVariable('longitude', 'f4', ('longitude'))
lons.units = 'degrees_east'
lons.long_name = 'longitude'
lons.axis = "X"
lons[:] = lon[:]
times = foo.createVariable('time', 'f4', ('time'))
# NOTE(review): listing truncated here — time attributes and the data
# variable are written in the part of the script not shown.
示例5: write_exodus_file
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
def write_exodus_file(filename, cells, vertices, shape="SHELL4"):
    """
    Write Exodus-II file compatible with CUBIT.

    cells is a 0-based array (ncells, ncorners); connectivity is converted
    to the 1-based numbering Exodus-II expects.
    vertices is (nvertices, dim), dim may be 2 or 3.
    shape is the Exodus element type string stored on the connectivity
    variable (default "SHELL4").

    All cells are placed in a single block.
    Requires netCDF4 module.
    """
    import numpy
    from netCDF4 import Dataset
    # Exodus-II fixed string width: 32 characters plus NUL terminator
    len_string = 33
    root = Dataset(filename, 'w', format='NETCDF3_CLASSIC')
    # Set global attributes (Exodus-II API/file version stamps expected by CUBIT)
    root.api_version = 4.98
    root.version = 4.98
    root.floating_point_word_size = 8
    root.file_size = 0
    root.title = "cubit"
    # Setup dimensions
    # Generic information
    root.createDimension('len_string', len_string)
    root.createDimension('len_line', 81)
    root.createDimension('four', 4)
    root.createDimension('num_qa_rec', 1)
    root.createDimension('time_step', None)
    # Mesh specific information
    (ncells, ncorners) = cells.shape
    (nvertices, dim) = vertices.shape
    root.createDimension('num_dim', dim)
    root.createDimension('num_el_blk', 1)
    root.createDimension('num_nod_per_el1', ncorners)
    root.createDimension('num_att_in_blk1', 1)
    root.createDimension('num_nodes', nvertices)
    root.createDimension('num_elem', ncells)
    root.createDimension('num_el_in_blk1', ncells)
    # Setup variables
    connect1 = root.createVariable('connect1', numpy.int32,
                                   ('num_el_in_blk1', 'num_nod_per_el1',))
    coord = root.createVariable('coord', numpy.float64,
                                ('num_dim', 'num_nodes',))
    time_whole = root.createVariable('time_whole', numpy.float64,
                                     ('time_step',))
    coor_names = root.createVariable('coor_names', 'S1',
                                     ('num_dim', 'len_string',))
    qa_records = root.createVariable('qa_records', 'S1',
                                     ('num_qa_rec', 'four', 'len_string',))
    eb_names = root.createVariable('eb_names', 'S1',
                                   ('num_el_blk', 'len_string',))
    elem_map = root.createVariable('elem_map', numpy.int32,
                                   ('num_elem',))
    eb_status = root.createVariable('eb_status', numpy.int32,
                                    ('num_el_blk',))
    eb_prop1 = root.createVariable('eb_prop1', numpy.int32,
                                   ('num_el_blk',))
    attrib1 = root.createVariable('attrib1', numpy.float64,
                                  ('num_el_in_blk1', 'num_att_in_blk1',))
    # Set variable values
    # Exodus-II connectivity is 1-based, input cells are 0-based
    connect1[:] = 1+cells[:]
    connect1.elem_type = shape
    # Exodus stores coordinates as (dim, nvertices), hence the transpose
    coord[:] = vertices.transpose()[:]
    from netCDF4 import stringtoarr
    # axis names as fixed-width char arrays
    if dim == 2:
        coor_names[0,:] = stringtoarr("x", len_string)
        coor_names[1,:] = stringtoarr("y", len_string)
    elif dim == 3:
        coor_names[0,:] = stringtoarr("x", len_string)
        coor_names[1,:] = stringtoarr("y", len_string)
        coor_names[2,:] = stringtoarr("z", len_string)
    # QA record: program, version, date, time (Exodus convention)
    qa_records[0,0,:] = stringtoarr("CUBIT", len_string)
    qa_records[0,1,:] = stringtoarr("11.0", len_string)
    qa_records[0,2,:] = stringtoarr("01/01/2000", len_string)
    qa_records[0,3,:] = stringtoarr("12:00:00", len_string)
    # identity element map, 1-based
    elem_map[:] = numpy.arange(1, ncells+1, dtype=numpy.int32)[:]
    # NOTE(review): listing truncated here — the original continues by
    # filling eb_status/eb_prop1/attrib1 and closing the file.
#.........这里部分代码省略.........
示例6: ConvertNCCF
# 需要导入模块: from netCDF4 import Dataset [as 别名]
# 或者: from netCDF4.Dataset import version [as 别名]
def ConvertNCCF(TheFileIn,TheFileOut,TheTimes,TheDaysArray,TheCLats,TheCLongs,TheClimPeriod,TheMissing,TheType):
    '''Copy a HadISDH NetCDF file to a CF-compliant NetCDF3 file.

    Reads every global attribute, dimension and variable from TheFileIn,
    then writes TheFileOut, keeping any standard global attribute that is
    already present and filling in a HadISDH-specific default otherwise.
    TheType is interpolated into the default description/title strings.

    NOTE(review): this is Python 2 code (uses xrange); listing is truncated
    before the dimension/variable writing stage.
    '''
    ''' Open and read in all bits '''
    ''' Write out in cf compliant style '''
    ncf=Dataset(TheFileIn,'r')
    nc_dims = list(ncf.dimensions) # list of dimensions [dim for dim in ncf.dimensions]
    nc_vars = list(ncf.variables) # list of nc variables [var for var in ncf.variables]
    nc_attrs = ncf.ncattrs() # list of global attributes
    ndims=len(nc_dims)
    nvars=len(nc_vars)
    ngatts=len(nc_attrs)
    # Get all global attributes
    TheGAtts=np.empty(ngatts,dtype=object) # an empty array with the right number of string elements
    for (noo,att) in enumerate(nc_attrs): # enumerate and use elements of the list
        TheGAtts[noo]=ncf.getncattr(att) # get each global attribute and populate array
    # Get all dimensions
    TheDims=np.empty(ndims) # an empty array with the right number of string elements
    for (noo,dim) in enumerate(nc_dims): # enumerate and use elements of the list
        TheDims[noo]=len(ncf.dimensions[dim]) # get length of each dimension
    # NO DIMENSION ATTRIBUTES -
    # TheDimAttrNames=[[] for i in xrange(ndims)]	# create list of lists - one for the attribute names of each dimension
    # TheDimAttrs=[[] for i in xrange(ndims)]	# create list of lists - one for the attributes of each dimension
    # for (noo,dim) in enumerate(nc_dims):	# enumerate and use elements of the list
    #     TheDimAttrNames[noo]=ncf.dimensions[dim].ncattrs()	# fill names
    #     for (nee,nats) in enumerate(TheDimAttrNames[noo]):	# loop through each name and get the attribute
    #         TheDimAttrs[noo][nee]=f.dimensions[dim].getncattr(nats)
    # Get all variables, and their attributes
    TheVarAttrNames=[[] for i in xrange(nvars)] # create list of lists - one for the attribute names of each dimension
    TheVarAttrs=[[] for i in xrange(nvars)] # create list of lists - one for the attributes of each dimension
    TheVars=[[] for i in xrange(nvars)] # create list of lists - one for the attributes of each dimension
    for (noo,var) in enumerate(nc_vars): # enumerate and use elements of the list
        TheVarAttrNames[noo]=ncf.variables[var].ncattrs() # fill names
        for (nee,nats) in enumerate(TheVarAttrNames[noo]): # loop through each name and get the attribute
            TheVarAttrs[noo].append(ncf.variables[var].getncattr(nats))
        TheVars[noo]=ncf.variables[nc_vars[noo]][:]
    # Now write out, checking if the standard stuff is not there, and if not, then add in
    ncfw=Dataset(TheFileOut,'w',format='NETCDF3_CLASSIC')
    # Set up the global attributes
    # NOTE(review): each moo below is the tuple returned by np.where; the
    # truth test `moo[0] >= 0` relies on legacy NumPy truth-testing of the
    # (empty or single-element) index array — verify on modern NumPy.
    # Is there a description?
    moo=np.where(np.array(nc_attrs) == 'description')
    if (moo[0] >= 0):
        ncfw.description=TheGAtts[moo[0]]
    else:
        ncfw.description="HadISDH monthly mean land surface "+TheType+" climate monitoring product from 1973 onwards. Quality control, homogenisation, uncertainty estimation, averaging over gridboxes (no smoothing or interpolation)."
    # Is there a title?
    moo=np.where(np.array(nc_attrs) == 'title')
    if (moo[0] >= 0):
        ncfw.title=TheGAtts[moo[0]]
    else:
        ncfw.title="HadISDH monthly mean land surface "+TheType+" climate monitoring product from 1973 onwards."
    # Is there an institution?
    moo=np.where(np.array(nc_attrs) == 'institution')
    if (moo[0] >= 0):
        ncfw.institution=TheGAtts[moo[0]]
    else:
        ncfw.institution="Met Office Hadley Centre (UK), National Climatic Data Centre (USA), Climatic Research Unit (UK), National Physical Laboratory (UK), Bjerknes Centre for Climate Research (Norway)"
    # Is there a history?
    moo=np.where(np.array(nc_attrs) == 'history')
    if (moo[0] >= 0):
        ncfw.history=TheGAtts[moo[0]]
    else:
        ncfw.history="Updated 4 February 2014"
    # Is there a source?
    moo=np.where(np.array(nc_attrs) == 'source')
    if (moo[0] >= 0):
        ncfw.source=TheGAtts[moo[0]]
    else:
        ncfw.source="HadISD.1.0.2.2013f (Dunn et al., 2012)"
    # Is there a comment?
    moo=np.where(np.array(nc_attrs) == 'comment')
    if (moo[0] >= 0):
        ncfw.comment=TheGAtts[moo[0]]
    else:
        ncfw.comment=""
    # Is there a reference?
    moo=np.where(np.array(nc_attrs) == 'reference')
    if (moo[0] >= 0):
        ncfw.reference=TheGAtts[moo[0]]
    else:
        ncfw.reference="Willett, K. M., Dunn, R. J. H., Thorne, P. W., Bell, S., de Podesta, M., Parker, D. E., Jones, P. D., and Williams Jr., C. N.: HadISDH land surface multi-variable humidity and temperature record for climate monitoring, Clim. Past, 10, 1983-2006, doi:10.5194/cp-10-1983-2014, 2014."
    # Is there a version?
    moo=np.where(np.array(nc_attrs) == 'version')
    if (moo[0] >= 0):
        ncfw.version=TheGAtts[moo[0]]
    else:
        ncfw.version="HadISDH.2.0.0.2013p"
    # Is there a Conventions?
    moo=np.where(np.array(nc_attrs) == 'Conventions')
    if (moo[0] >= 0):
        ncfw.Conventions=TheGAtts[moo[0]]
    else:
        ncfw.Conventions="CF-1.0"
    # NOTE(review): listing truncated here — dimensions and variables are
    # written in the part of the function not shown.
#.........这里部分代码省略.........