This page collects typical code examples of the Python method netCDF4.Dataset.ncattrs. If you have been wondering what Dataset.ncattrs does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further examples of the containing class, netCDF4.Dataset.
The 15 code examples of Dataset.ncattrs below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
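Before the examples, here is a minimal sketch of the method itself: ncattrs() returns the names of a file's global attributes, and getncattr() retrieves their values. The file name 'sample.nc' is hypothetical.

from netCDF4 import Dataset

# List all global (file-level) attributes of a netCDF file.
# 'sample.nc' is a placeholder; substitute any existing netCDF file.
ds = Dataset('sample.nc', 'r')
for name in ds.ncattrs():
    print(name, '=', ds.getncattr(name))
ds.close()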
Example 1: ode_littlerock
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def ode_littlerock():
    # Assumes module-level imports: numpy as np, matplotlib.pyplot as plt,
    # scipy.integrate.ode, and netCDF4.Dataset. nudge, thetaep, F and the
    # constants object c are helpers defined elsewhere in this module.
    filename = 'littlerock.nc'
    print('reading file: %s\n' % filename)
    nc_file = Dataset(filename)
    var_names = list(nc_file.variables.keys())
    print(nc_file.ncattrs())
    print(nc_file.units)
    print(nc_file.col_names)
    sound_var = nc_file.variables[var_names[3]]
    press = sound_var[:, 0]
    height = sound_var[:, 1]
    temp = sound_var[:, 2]
    dewpoint = sound_var[:, 3]
    # height must have unique values
    newHeight = nudge(height)
    # Tenv and TdEnv interpolators return temp. in deg C, given height in m
    # Press interpolator returns pressure in hPa, given height in m
    interpTenv = lambda zVals: np.interp(zVals, newHeight, temp)
    interpTdEnv = lambda zVals: np.interp(zVals, newHeight, dewpoint)
    interpPress = lambda zVals: np.interp(zVals, newHeight, press)
    p900_level = np.where(abs(900 - press) < 2.)
    p800_level = np.where(abs(800 - press) < 7.)
    thetaeVal = thetaep(dewpoint[p900_level] + c.Tc, temp[p900_level] + c.Tc, press[p900_level] * 100.)
    height_800 = height[p800_level]
    yinit = [0.5, height_800]  # (initial velocity = 0.5 m/s, initial height in m)
    tinit = 0
    tfin = 2500
    dt = 10
    # integrate F using an integrator equivalent to MATLAB's ode45
    r = ode(F).set_integrator('dopri5')
    r.set_f_params(thetaeVal, interpTenv, interpTdEnv, interpPress)
    r.set_initial_value(yinit, tinit)
    y = np.array(yinit)
    t = np.array(tinit)
    # stop integration when the parcel changes direction, or time runs out
    while r.successful() and r.t < tfin and r.y[0] > 0:
        # find y at the next time step
        # (r.integrate(t) updates the fields r.y and r.t so that r.y = F(t)
        # and r.t = t, where F is the function being integrated)
        r.integrate(r.t + dt)
        # keep track of y at each time step
        y = np.vstack((y, r.y))
        t = np.vstack((t, r.t))
    wvel = y[:, 0]
    height = y[:, 1]
    plt.figure(1)
    plt.plot(wvel, height)
    plt.xlabel('vertical velocity (m/s)')
    plt.ylabel('height above surface (m)')
    plt.show()
Example 2: attribCDF
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def attribCDF(pathDirFile):
    import numpy as np
    from netCDF4 import Dataset
    print('File: ', pathDirFile)
    origFile = Dataset(pathDirFile, 'a', format='NETCDF4')
    # print root group attributes
    print("Dataset attributes")
    for att in origFile.ncattrs():
        print(':::GlobalAtt:', att, ' Val:', getattr(origFile, att))
    # print root group dimensions
    print("Dataset dimensions")
    for key in origFile.dimensions:
        val = len(origFile.dimensions[key])
        print(':::GlobalDim:', key, ' Val:', val)
    print('')
    # walk variables, variable dimensions, and attributes
    print("Dataset variables")
    # print(origFile.variables)
    for var in origFile.variables:
        myVar = origFile.variables.get(var)
        print('Variable : ', var)
        print(10 * ' ', 'size : ', myVar.size)
        # print(10 * ' ', myVar.maskandscale)
        print(10 * ' ', 'type : ', myVar.datatype)
        # print(10 * ' ', myVar.fill_value)
        print(10 * ' ', 'number dims : ', myVar.ndim)
        print(10 * ' ', 'dimension : ', myVar.dimensions)
        print(10 * ' ', 'shape : ', myVar.shape)
        if var == 'time' or var == 'yearday':
            print(myVar[:])
        for attr in myVar.ncattrs():
            print(20 * ' ', attr, ' : ', myVar.getncattr(attr))
            if var == 'yearday' and attr == 'valid_range':
                # myVar.setncattr(attr, [1, 366])
                print(myVar.getncattr(attr))
                # myVar.setncattr('valid_range', [1])
        # if var == list(origFile.variables.keys())[0]:
        #     print('Variable: ', var)
        #     print(origFile.variables[var])
        # else:
        #     print('Variable: ', var)
        #     print(origFile.variables[var])
        #     print(origFile.variables[var].dtype)
        #     print(origFile.variables[var].maskandscale)
        # print('M&S:', var.maskandscale)
        # copy variable attributes
        # for att in origFile.variables[var].ncattrs():
        #     print(':::VarAtt:', att, ' Val:', getattr(origFile.variables[var], att))
        # copy var dimensions
        # for i in range(len(origFile.variables[var].dimensions)):
        #     if origFile.variables[var].dimensions[i] == 'time':
        #         print(':::VarDim', origFile.variables[var].dimensions[i])
        print('')
    # for key in origFile.variables:
    #     print(key)
    origFile.close()
Example 3: wrf_copy_attributes
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def wrf_copy_attributes(infilename, outfilename, nlevs):
    '''
    Copies the netCDF attributes of one file into another file
    that is created by this function, so that information such as
    the model start date, dx, and namelist options is carried over
    as attributes into post-processed netCDF files. This assumes that
    the grid domain in both files is the same, and uses the west-east
    and south-north values from the input file.

    Parameters
    ----------
    infilename: The name/path of the input file to be read
    outfilename: The name/path of the output file to be written
    nlevs: The number of vertical levels that the output file should have
    '''
    ## open the files
    infile = Dataset(infilename)
    outfile = Dataset(outfilename, 'w', format='NETCDF4')
    ## create dimensions
    level = outfile.createDimension('bottom_top', nlevs)
    time = outfile.createDimension('time', None)
    lat = outfile.createDimension('south_north', infile.getncattr('SOUTH-NORTH_PATCH_END_UNSTAG'))
    lon = outfile.createDimension('west_east', infile.getncattr('WEST-EAST_PATCH_END_UNSTAG'))
    ## copy the global attributes to the new file
    inattrs = infile.ncattrs()
    for attr in inattrs:
        outfile.setncattr(attr, infile.getncattr(attr))
    ## close both files
    infile.close()
    outfile.close()
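A hedged usage sketch of the function above; the file names and level count are hypothetical placeholders, not values from the original code.

# Usage sketch (hypothetical paths and level count): copy all global
# attributes from a WRF output file into a 40-level post-processing file.
wrf_copy_attributes('wrfout_d01_2010-06-01.nc', 'post_processed.nc', nlevs=40)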
Example 4: load_everything
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def load_everything(fname, basket_dest=''):
    # expand_path, get_ext, loadbasket, loadpickle, LidarDataset, warner
    # and globald are helpers defined elsewhere in this module.
    try:
        fname = expand_path(fname)
        ext = get_ext(fname)
        if ext == '.npy':
            return np.load(fname), "npy"
        elif ext in ('.nc', '.ncf'):
            data = Dataset(fname, mode='r')
            if 'lidarname' in data.ncattrs():
                return LidarDataset(fname), "LidarDataset"
            else:
                return data, "netcdf"
        elif ext in ('.h5', '.h5f', '.hdf', '.hdf5'):
            return pd.HDFStore(fname, mode='r'), "pd.HDFStore"
        elif ext in ('.csv',):
            return pd.DataFrame.from_csv(fname), "pd.DataFrame"  # pd.read_csv in modern pandas
        elif ext in ('.zip',):
            if basket_dest:
                globald[basket_dest] = dict()
                varnames = loadbasket(fname, dest=globald[basket_dest])
            else:
                varnames = loadbasket(fname)
            return varnames, "basket"
        elif ext in ('.pickle', '.pic'):
            return loadpickle(fname), "pickle"
        elif ext in ('.txt',):
            return np.loadtxt(fname), "txt"
    except Exception as e:
        warner.write("Error while loading : %s \n" % fname)
        warner.write(str(e))
        warner.write('\n')
Example 5: read_netcdf
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def read_netcdf(nc_file, variables=None, coords=None):
    """
    Read data from an input netCDF file. Reads all variables if none are
    specified, and also returns all variable and global attributes.
    Data and attributes are returned as dictionaries keyed by variable name.
    """
    f = Dataset(nc_file, 'r')
    if not variables:
        variables = f.variables.keys()
    if not coords:
        coords = slice(None)
    log.debug('Reading input data variables: %s, from file: %s', variables,
              nc_file)
    d = {}
    a = {}
    g = {}
    for var in variables:
        d[var] = f.variables[var][coords]
        a[var] = f.variables[var].__dict__
    for attr in f.ncattrs():
        g[attr] = getattr(f, attr)
    f.close()
    return d, a, g
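A brief usage sketch; the file name and variable names are hypothetical.

# Usage sketch: returns dicts of data, per-variable attributes,
# and global attributes, keyed by variable/attribute name.
data, var_attrs, global_attrs = read_netcdf('forcing.nc', variables=['prec', 'wind'])
print(global_attrs)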
Example 6: read_one_file
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def read_one_file(self, fname, **kwargs):
    """read one file to initialize self"""
    # _std_datetime_fmt, _std_datetime_units and _special_avermethods are
    # module-level helpers; num2date comes from netCDF4.
    self.init_clean()
    f = Dataset(fname, **kwargs)
    for d in f.dimensions:
        self.dims[str(d)] = len(f.dimensions[d])
    if 'TIME' not in self.dims or self.dims['TIME'] == 0:
        raise RuntimeError("%s contains no data" % fname)
    for v in f.variables:
        strv = str(v)
        ncv = f.variables[v]
        v_arr = ncv[:]
        # datetime handling
        if v == 'datetime':
            datetime_type = type(v_arr[0])
            if datetime_type in (np.string_, str, bytes):  # 'unicode' in the original Python 2 code
                v_arr = np.array([datetime.strptime(datestr, _std_datetime_fmt) for datestr in v_arr])
            else:
                v_arr = num2date(v_arr, units=_std_datetime_units)
        self.vars[strv] = v_arr
        self.var_dims[strv] = tuple(str(dimname) for dimname in ncv.dimensions)
        # aver_method
        if 'aver_method' in ncv.ncattrs():
            self.var_aver_methods[strv] = str(ncv.getncattr('aver_method'))
        elif v in _special_avermethods:
            self.var_aver_methods[strv] = _special_avermethods[v]
        else:
            self.var_aver_methods[strv] = 'mean'
    for a in f.ncattrs():
        self.attrs[str(a)] = f.getncattr(a)
    self.recheck_time()
    f.close()
Example 7: netcdf_to_parameter
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def netcdf_to_parameter(inputFileName, outputFileName, event_emitter=None, **kwargs):
    # find_dimensions, find_variables_from_file, find_variables,
    # find_number_of_parameter_values, find_count_of_dimensions,
    # find_size_of_latitude_variable and write_variable_data_to_file
    # are helpers defined elsewhere in this module.
    fileHandle = Dataset(inputFileName, 'r')
    temporaryFileHandle = open(outputFileName, 'w')
    # global attributes
    attributes = fileHandle.ncattrs()
    for attribute in attributes:
        if attribute == 'title':
            attributeValue = repr(str(fileHandle.getncattr(attribute))).replace("'", "")
            temporaryFileHandle.write(attributeValue + '\n')
        if attribute == 'version':
            attributeValue = repr(str(fileHandle.getncattr(attribute))).replace("'", "")
            temporaryFileHandle.write(attributeValue + '\n')
        if attribute == 'number_of_hrus':
            numberOfHruCells = int(repr(str(fileHandle.getncattr(attribute))).replace("'", ""))
    # dimensions
    dim = find_dimensions(fileHandle)
    dimensionNames = dim[0]
    dimensionValues = dim[1]
    temporaryFileHandle.write('** Dimensions **\n')
    for index in range(len(dimensionNames)):
        temporaryFileHandle.write('####\n' + dimensionNames[index] + '\n' + str(dimensionValues[index]) + '\n')
    # variables from file
    varFromFile = find_variables_from_file(fileHandle)
    variableNamesFromFile = varFromFile[0]
    variableDimensionsFromFile = varFromFile[1]
    variableTypesFromFile = varFromFile[2]
    # variables
    var = find_variables(variableNamesFromFile, variableDimensionsFromFile, variableTypesFromFile)
    variableNames = var[0]
    variableDimensions = var[1]
    variableTypes = var[2]
    numberOfParameterValues = find_number_of_parameter_values(variableDimensions, dimensionNames, dimensionValues)
    countOfDimensions = find_count_of_dimensions(variableDimensions)
    sizeOfLatitudeVariable = find_size_of_latitude_variable(fileHandle)
    kwargs['event_name'] = 'nc_to_parameter'
    kwargs['event_description'] = 'creating input parameter file from netcdf file'
    kwargs['progress_value'] = 0.00
    if event_emitter:
        event_emitter.emit('progress', **kwargs)
    write_variable_data_to_file(temporaryFileHandle, fileHandle, variableNames,
                                variableDimensions, countOfDimensions, sizeOfLatitudeVariable,
                                numberOfParameterValues, variableTypes, numberOfHruCells,
                                event_emitter=event_emitter, **kwargs)
    kwargs['event_name'] = 'nc_to_parameter'
    kwargs['event_description'] = 'creating input parameter file from output netcdf file'
    kwargs['progress_value'] = 100
    if event_emitter:
        event_emitter.emit('progress', **kwargs)
Example 8: getInputDataFileStartYear
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def getInputDataFileStartYear(inputNetCDF_File):
    rootGrp = Dataset(inputNetCDF_File, 'r', format='NETCDF3_CLASSIC')
    dataYear = None
    for gAttribute in rootGrp.ncattrs():
        if gAttribute == 'start_year':
            dataYear = getattr(rootGrp, gAttribute)
            break
    rootGrp.close()
    return dataYear
Example 9: main
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def main(args):
    dsin = Dataset(args.infile)
    dsout = Dataset(args.outfile, 'w')
    # Copy global attributes
    for att in dsin.ncattrs():
        dsout.setncattr(att, dsin.getncattr(att))
    # Determine variables to copy
    if args.variable:
        if not set(args.variable).issubset(set(dsin.variables.keys())):
            raise AssertionError('Specified variables are not available in the input file')
        vars_to_copy = set(args.variable)
        # Treat the listed vars as an exclusion list?
        if args.exclude:
            vars_to_copy = set(dsin.variables.keys()).difference(vars_to_copy)
    else:
        vars_to_copy = dsin.variables.keys()
    # Determine dimensions to copy
    dims_to_copy = set()
    for v in vars_to_copy:
        dims_to_copy = dims_to_copy.union(set(dsin.variables[v].dimensions))
    # Add associated dimvars (assumes dimvars have the same name as their dimension)
    if not all([x in dsin.variables.keys() for x in dims_to_copy]):
        raise AssertionError('Not all dimensions being copied have associated dimension variables')
    print('Copying variables: {}'.format(vars_to_copy))
    print('Copying dimensions: {}'.format(dims_to_copy))
    # Copy dimensions
    for dname, dim in dsin.dimensions.items():
        if dname not in dims_to_copy:
            continue
        print(dname, len(dim))
        dsout.createDimension(dname, len(dim) if not dim.isunlimited() else None)
    # Copy variables
    for v_name, varin in dsin.variables.items():
        if v_name not in vars_to_copy:
            continue
        outVar = dsout.createVariable(v_name, varin.datatype, varin.dimensions)
        print(v_name, varin.datatype, varin.dimensions, varin.shape, len(varin.shape))
        # Copy all variable attributes
        outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
        # Iteratively write variables with 3+ dimensions
        if len(varin.shape) > 2:
            count = float(varin.shape[0])
            for i in range(varin.shape[0]):
                if args.progress:
                    sys.stdout.write("\r{:.2%}".format(i / count))
                outVar[i, :, :] = varin[i, :, :]
        else:
            outVar[:] = varin[:]
    dsout.close()
Example 10: get_global_attributes
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def get_global_attributes(self):
    attributes = {}
    if self.patch_files:
        patch_files = sorted(glob(join(self.path, "{0}/{0}_".format(self.wrf_filename) + "[0-9]" * 5)))
        wrf_data = Dataset(patch_files[0])
    else:
        wrf_data = Dataset(join(self.path, self.wrf_filename))
    for attr in wrf_data.ncattrs():
        attributes[attr] = getattr(wrf_data, attr)
    wrf_data.close()
    return attributes
Example 11: process
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def process(input, output, direction, collapse):
    """Process the file 'input', expanding or collapsing data according to
    'collapse' and 'direction'. Saves the result in 'output'."""
    # CDF is the alias used here for netCDF4.Dataset; copy_attributes,
    # copy_dim, collapse_var and expand_var are helpers defined elsewhere.
    # asctime comes from time, argv from sys.
    try:
        nc = CDF(input)
    except Exception:
        print("ERROR: Can't open %s" % input)
        exit(1)
    try:
        out = CDF(output, 'w', format="NETCDF3_CLASSIC")
    except Exception:
        print("ERROR: Can't open %s" % output)
        exit(1)
    copy_attributes(nc, out)
    for name in nc.dimensions.keys():
        copy_dim(nc, out, name, direction)
    if collapse:
        for name in nc.variables.keys():
            if name == direction:
                continue
            collapse_var(nc, out, name, direction)
        message = "Collapsed using flowline.py"
    else:
        out.createDimension(direction, 3)
        if direction == 'x':
            dim = 'y'
        else:
            dim = 'x'
        var1 = nc.variables[dim]
        delta = np.diff(var1[:])[0]
        var2 = out.createVariable(direction, 'f8', (direction,))
        var2.axis = "%s" % direction.upper()
        var2.long_name = "%s-coordinate in Cartesian system" % direction.upper()
        var2.standard_name = "projection_%s_coordinate" % direction
        var2.units = var1.units
        var2[:] = [-delta, 0, delta]
        for name in nc.variables.keys():
            expand_var(nc, out, name, direction)
        message = asctime() + ': ' + ' '.join(argv) + '\n'
    if 'history' in out.ncattrs():
        out.history = message + out.history  # prepend to the history string
    else:
        out.history = message
    out.close()
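A hedged call sketch for the function above; the file names are hypothetical, and the helper functions (copy_attributes, copy_dim, expand_var) must be defined as in the surrounding module.

# Call sketch (hypothetical arguments): expand 'input.nc' along a new
# y dimension of length 3 and write the result to 'output.nc'.
process('input.nc', 'output.nc', 'y', collapse=False)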
Example 12: extract_row_column_hru_information
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def extract_row_column_hru_information(parameterFile):
    fileHandle = Dataset(parameterFile, 'r')
    attributes = fileHandle.ncattrs()
    for attribute in attributes:
        if attribute == 'number_of_hrus':
            numberOfHruCells = int(repr(str(fileHandle.getncattr(attribute))).replace("'", ""))
        if attribute == 'number_of_rows':
            numberOfRows = int(repr(str(fileHandle.getncattr(attribute))).replace("'", ""))
        if attribute == 'number_of_columns':
            numberOfColumns = int(repr(str(fileHandle.getncattr(attribute))).replace("'", ""))
    fileHandle.close()
    return numberOfHruCells, numberOfRows, numberOfColumns
Example 13: nc_to_dataframe
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def nc_to_dataframe(nc_fname,
                    columns=slice(None)):
    """
    Return a pandas data frame containing the information in the
    netCDF file *nc_fname*, together with a mapping of the header
    metadata. Use *columns* to select columns (via a list of column
    names).
    """
    # PD is pandas; fromJ2000 is a helper (defined elsewhere) that
    # converts the file's time values to datetimes.
    root = Dataset(nc_fname)
    data = {}
    data.update({dim: root[dim][:] for dim in root.dimensions if dim != 'time'})
    index = data['time'] = list(map(fromJ2000, root['time'][:]))
    return (PD.DataFrame(data=data, index=index)[columns],
            {x: getattr(root, x) for x in root.ncattrs()})
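A brief usage sketch; the file name and column name are hypothetical.

# Usage sketch: the second return value maps each global attribute
# name (from ncattrs) to its value.
df, header = nc_to_dataframe('timeseries.nc', columns=['temperature'])
print(header)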
Example 14: copy_gridded_ioapi_to_empty
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def copy_gridded_ioapi_to_empty(input_path, output_path):
    '''Copy a GRIDDED IOAPI NetCDF file (NETCDF3_CLASSIC)
    to an empty NetCDF file that has the same structure,
    but whose variables are empty of data.'''
    # open input NetCDF file
    fin = Dataset(input_path, 'r', format='NETCDF3_CLASSIC')
    # read variables
    fin_var = {}
    for var in fin.variables:
        fin_var[var] = [var, fin.variables[var].units, fin.variables[var].var_desc]
    # read attributes
    fin_attr = {}
    for attr in fin.ncattrs():
        fin_attr[attr] = getattr(fin, attr)
    # open output NetCDF file
    fout = Dataset(output_path, 'w', format='NETCDF3_CLASSIC')
    # create the 6 GRIDDED IOAPI dimensions
    TSTEP = fout.createDimension('TSTEP', None)
    DATE_TIME = fout.createDimension('DATE-TIME', 2)
    LAY = fout.createDimension('LAY', fin.NLAYS)
    VAR = fout.createDimension('VAR', fin.NVARS)
    ROW = fout.createDimension('ROW', fin.NROWS)
    COL = fout.createDimension('COL', fin.NCOLS)
    # close input file
    fin.close()
    # variable and attribute definitions
    TFLAG = fout.createVariable('TFLAG', 'i4', ('TSTEP', 'VAR', 'DATE-TIME'))
    TFLAG.units = '<YYYYDDD,HHMMSS>'
    TFLAG.long_name = 'TFLAG'
    TFLAG.var_desc = 'Timestep-valid flags: (1) YYYYDDD or (2) HHMMSS'
    # remaining variables and attribute definitions
    for key in fin_var:
        species = key
        if species == 'TFLAG':
            continue
        fout.createVariable(species, 'f4', ('TSTEP', 'LAY', 'ROW', 'COL'))
        fout.variables[species].long_name = species
        fout.variables[species].units = fin_var[species][1]
        fout.variables[species].var_desc = fin_var[species][2]
    # global attributes
    for name in fin_attr:
        setattr(fout, name, fin_attr[name])
    fout.close()
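A hedged usage sketch of the function above; both file paths are hypothetical.

# Usage sketch: create an empty template with the same IOAPI structure
# (dimensions, variables, global attributes) as an existing gridded file.
copy_gridded_ioapi_to_empty('emis_mole_20160701.nc', 'emis_empty_template.nc')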
Example 15: _parse
# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import ncattrs [as alias]
def _parse(self, filepath):
    # NCDataset is an alias for netCDF4.Dataset; SPATIAL_VARIABLES,
    # dateparser, logger and self._add_attribute are defined elsewhere.
    filepath = os.path.abspath(filepath)
    logger.debug("parse %s", filepath)
    try:
        ds = NCDataset(filepath, 'r')
        # loop over global attributes
        for attname in ds.ncattrs():
            attvalue = getattr(ds, attname)
            if 'date' in attname.lower():
                # must format dates in Solr format, if possible
                try:
                    solr_dt = dateparser.parse(attvalue)
                    self._add_attribute(attname, solr_dt.strftime('%Y-%m-%dT%H:%M:%SZ'))
                except Exception:
                    pass  # disregard this attribute
            else:
                self._add_attribute(attname, attvalue)
        # loop over dimensions
        for key, dim in ds.dimensions.items():
            self._add_attribute('dimension', "%s:%s" % (key, len(dim)))
        # loop over variables and their attributes
        for key, variable in ds.variables.items():
            if key.lower() in ds.dimensions:
                # skip dimension variables
                continue
            if '_bnds' in key.lower():
                continue
            if key.lower() in SPATIAL_VARIABLES:
                continue
            self._add_attribute('variable', key)
            self._add_attribute('variable_long_name', getattr(variable, 'long_name', None))
            cf_standard_name = getattr(variable, 'standard_name', None)
            if cf_standard_name is not None:
                self._add_attribute('cf_standard_name', cf_standard_name)
            self._add_attribute('units', getattr(variable, 'units', None))
    except Exception as e:
        logger.error(e)
    finally:
        try:
            ds.close()
        except Exception:
            pass