本文整理汇总了Python中netCDF4.Dataset方法的典型用法代码示例。如果您正苦于以下问题:Python netCDF4.Dataset方法的具体用法?Python netCDF4.Dataset怎么用?Python netCDF4.Dataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类netCDF4的用法示例。
在下文中一共展示了netCDF4.Dataset方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: dataValidation
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def dataValidation(self, in_nc, messages):
    """Check the necessary dimensions and global attributes in the input
    netCDF data.

    Parameters:
        in_nc (str): path to the input netCDF file.
        messages: ArcGIS messages object used to report errors.

    Raises:
        arcpy.ExecuteError: if a required dimension or global attribute
            is missing.
    """
    data_nc = NET.Dataset(in_nc)
    # try/finally fixes a resource leak: the original left the dataset
    # open whenever validation raised arcpy.ExecuteError.
    try:
        dims = data_nc.dimensions.keys()
        globalattrs = data_nc.__dict__.keys()
        for each in self.dimensions:
            if each not in dims:
                messages.addErrorMessage(self.errorMessages[1].format(each))
                raise arcpy.ExecuteError
        for each in self.globalattributes:
            if each not in globalattrs:
                messages.addErrorMessage(self.errorMessages[2].format(each))
                raise arcpy.ExecuteError
    finally:
        data_nc.close()
    return
示例2: dataValidation
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def dataValidation(self, in_nc, messages):
    """Check the necessary variables (and their dimension ordering) in
    the input netCDF data.

    Parameters:
        in_nc (str): path to the input netCDF file.
        messages: ArcGIS messages object used to report errors.

    Raises:
        arcpy.ExecuteError: if a variable of interest is missing or has
            unexpected dimensions.
    """
    data_nc = NET.Dataset(in_nc)
    # try/finally fixes a resource leak: the original left the dataset
    # open whenever validation raised arcpy.ExecuteError.  The local
    # was also renamed so it no longer shadows the builtin `vars`.
    try:
        var_names = data_nc.variables.keys()
        for each in self.vars_oi:
            if each not in var_names:
                messages.addErrorMessage(self.errorMessages[3].format(each))
                raise arcpy.ExecuteError
            # variable exists; verify its dimension tuple matches
            if self.dims_var != data_nc.variables[each].dimensions:
                messages.addErrorMessage(self.errorMessages[4].format(each))
                raise arcpy.ExecuteError
    finally:
        data_nc.close()
    return
示例3: updateMessages
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def updateMessages(self, parameters):
    """Modify the messages created by internal validation for each tool
    parameter. This method is called after internal validation.

    Flags parameter 0 when it cannot be opened as a netCDF dataset and
    parameter 1 when the output weight table path is not a .csv file.
    """
    if parameters[0].altered:
        in_nc = parameters[0].valueAsText
        try:
            data_nc = NET.Dataset(in_nc)
            data_nc.close()
        except Exception as e:
            # Bug fix: Exception.message was removed in Python 3
            # (PEP 352); str(e) works on both Python 2 and 3.
            parameters[0].setErrorMessage(str(e))
    if parameters[1].altered:
        (dirnm, basenm) = os.path.split(parameters[1].valueAsText)
        if not basenm.endswith(".csv"):
            parameters[1].setErrorMessage("The weight table must be in CSV format")
    return
示例4: createUniqueIDTable
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def createUniqueIDTable(self, in_nc, out_table):
    """Create a table of unique stream IDs"""
    dataset = NET.Dataset(in_nc)
    id_field = self.vars_oi[0]
    id_values = dataset.variables[id_field][:]
    # reshape into an (n, 1) column so transpose() yields one row array
    id_values = id_values.reshape(len(id_values), 1)
    arcpy.AddMessage(id_values.transpose())
    arcpy.AddMessage(id_field)
    # wrap the IDs in a one-field numpy structured array so they can be
    # handed to the geoprocessing table writer
    structured = NUM.core.records.fromarrays(
        id_values.transpose(),
        NUM.dtype([(id_field, NUM.int32)]))
    arcpy.da.NumPyArrayToTable(structured, out_table)
    dataset.close()
    return
示例5: __init__
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def __init__(self, **kwargs):
    """Initialise a Dataset object.

    All keyword arguments will be translated into attributes.
    Does not take positional arguments.

    Note that if you create a dataset with a name that already exists,
    the existing object is returned, but __init__ is still called
    (Python does this, see
    https://docs.python.org/3.7/reference/datamodel.html#object.__new__).
    """
    self.mandatory_fields = set()
    # mirror every keyword argument onto the instance
    for attr_name, attr_value in kwargs.items():
        setattr(self, attr_name, attr_value)
    self.setlocal()
    if self.my_pseudo_fields is None:
        self.my_pseudo_fields = collections.OrderedDict()
示例6: _apply_limits_and_filters
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def _apply_limits_and_filters(self, cont, limits, simple_filters):
    """Apply limits and simple filter callables to the content.

    xarray Datasets only support filters (limits raise
    NotImplementedError); ndarrays get limits applied first, then the
    filters.  Returns the (possibly reduced) content.
    """
    if isinstance(cont, xarray.Dataset):
        if len(limits)>0:
            raise NotImplementedError(
                "limits not implemented on xarray datasets")
        size_before = cont[self.time_field].size
        for filt in simple_filters:
            cont = filt(cont)
        logger.debug("Filters reduced number from "
            "{:d} to {:d}".format(size_before, cont[self.time_field].size))
        return cont
    size_before = cont.size
    cont = tpmath.array.limit_ndarray(cont, limits)
    for filt in simple_filters:
        cont = filt(cont)
    if cont.size < size_before:
        logger.debug("Applying limitations, reducing "
            "{:d} to {:d}".format(size_before, cont.size))
    return cont
示例7: _add_cont_to_arr
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def _add_cont_to_arr(self, arr, N, cont):
    """Changes arr in-situ, does not return"""
    # guard clause: the ndarray case is the only supported path
    if not isinstance(cont, xarray.Dataset):
        # copy the content into the pre-allocated slot starting at N
        arr[N:(N+cont.size)] = cont
        #arr = self._finalise_arr(arr, N)
        return
    # xarray case: we should already know it's large enough; for
    # arr[self.time_field] we would start at N, but the other time
    # coordinates lag N at a relative "speed" that is not guaranteed to
    # be regular, so each would need individual tracking (or on-the-fly
    # inspection).  That approximation may do for pre-allocation, but
    # actual storage would have to check when each time coordinate
    # "dies".
    raise NotImplementedError("This is not used for xarrays. "
        "But see comment in source-code for some thoughts.")
示例8: get_additional_field
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def get_additional_field(self, M, fld):
    """Get additional field.

    Get field from other dataset, original objects, or otherwise.
    To be implemented by subclass implementations; exact fields depend
    on the subclass.

    Arguments:
        M (ndarray): ndarray with existing data; a (masked) array with
            a dtype such as returned from `self.read <Dataset.read>`.
        fld (str): Additional field to read from original data.

    Returns:
        ndarray with fields of M + fld.
    """
    raise NotImplementedError("Must be implemented by child-class")
示例9: read
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def read(self, file_info, fields=None, **kwargs):
    """Read a CSV file and return an xarray.Dataset with its content

    Args:
        file_info: Path and name of the file as string or FileInfo object.
        fields: Field that you want to extract from the file. If not given,
            all fields are going to be extracted.
        **kwargs: Additional keyword arguments for the pandas function
            `pandas.read_csv`. See for more details:
            https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html

    Returns:
        A xarray.Dataset object.
    """
    dataset = pd.read_csv(file_info.path, **kwargs).to_xarray()
    if fields is not None:
        return dataset[fields]
    return dataset
示例10: __init__
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def __init__(self, filename):
    """Open a netCDF file and cache its grid and time metadata."""
    self.data = netCDF4.Dataset(filename)
    # data keys are the variables that are not themselves dimensions
    variable_names = set(self.data.variables.keys())
    dimension_names = set(self.data.dimensions.keys())
    self.keys = tuple(variable_names - dimension_names)
    # size of lat/lon dimensions
    self.lat_size = self.data.dimensions['latitude'].size
    self.lon_size = self.data.dimensions['longitude'].size
    # spatial resolution in degrees; latitude runs north -> south
    # (negative step), longitude west -> east over the full circle
    self.delta_lat = -180.0 / (self.lat_size - 1)
    self.delta_lon = 360.0 / self.lon_size
    # time resolution in hours, derived from first/last time stamps
    self.time_size = self.data.dimensions['time'].size
    self.start_time = self.data['time'][0]
    self.stop_time = self.data['time'][-1]
    self.time_range = self.stop_time - self.start_time
    self.delta_time = self.time_range / (self.time_size - 1)
示例11: __init__
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def __init__(self, filename, mode='w'):
    """Create NetCDF storage layer, creating or appending to an existing file.

    Parameters
    ----------
    filename : str
        Name of storage file to bind to.
    mode : str, optional, default='w'
        File open mode, 'w' for (over)write, 'a' for append.
    """
    self._filename = filename
    self._ncfile = netcdf.Dataset(self._filename, mode=mode)
    self._envname = None
    self._modname = None
    # Ensure the standard dimensions exist: an unlimited iteration
    # axis and a fixed 3-component spatial axis.
    for dim_name, dim_size in (('iterations', None), ('spatial', 3)):
        if dim_name not in self._ncfile.dimensions:
            self._ncfile.createDimension(dim_name, size=dim_size)
示例12: nc_var_has_attr_vals
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def nc_var_has_attr_vals(ds, var_name, att_dict):
    """
    Checks that the variable, var_name, has the attributes (and values)
    in the att_dict.

    Parameters:
        ds: an open netCDF4.Dataset, or a path/URL to open one from.
        var_name (str): name of the variable to inspect.
        att_dict (dict): attribute name -> expected value.

    Returns:
        bool: True if every attribute exists with the expected value.
    """
    # Bug fix: when this function opens the dataset itself, it must
    # also close it (the original leaked the handle on every path).
    opened_here = False
    if not isinstance(ds, netCDF4.Dataset):
        ds = netCDF4.Dataset(ds)
        opened_here = True
    try:
        for key, val in att_dict.items():
            try:
                if val != getattr(ds.variables[var_name], key):
                    return False
            except AttributeError:
                # attribute missing entirely
                return False
        return True
    finally:
        if opened_here:
            ds.close()
示例13: load_grid
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def load_grid(nc):
    """
    Get a SGrid object from a netCDF4.Dataset or file/URL.

    :param str or netCDF4.Dataset nc: a netCDF4 Dataset or URL/filepath
                                      to the netCDF file
    :return: SGrid object
    :rtype: sgrid.SGrid
    """
    # open the file read-only unless an open Dataset was handed in
    dataset = nc if isinstance(nc, Dataset) else Dataset(nc, 'r')
    return SGrid.load_grid(dataset)
示例14: get_dataset
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def get_dataset(ncfile, dataset=None):
    """
    Utility to create a netCDF4 Dataset from a filename, list of filenames,
    or just pass it through if it's already a netCDF4.Dataset

    if dataset is not None, it should be a valid netCDF4 Dataset object,
    and it will simply be returned
    """
    # guard clauses, checked in priority order
    if dataset is not None:
        return dataset
    if isinstance(ncfile, nc4.Dataset):
        return ncfile
    # a one-element iterable (checked before the string case) unwraps
    # to a single-file Dataset
    if isinstance(ncfile, Iterable) and len(ncfile) == 1:
        return nc4.Dataset(ncfile[0])
    if isstring(ncfile):
        return nc4.Dataset(ncfile)
    # anything else is treated as a multi-file aggregation
    return nc4.MFDataset(ncfile)
示例15: get_writable_dataset
# 需要导入模块: import netCDF4 [as 别名]
# 或者: from netCDF4 import Dataset [as 别名]
def get_writable_dataset(ncfile, format="netcdf4"):
    """
    Utility to create a writable netCDF4 Dataset from a filename, or pass
    it through if it's already a netCDF4.Dataset.

    :param ncfile: path of the file to create, or an already-open
        nc4.Dataset (returned as-is).
    :param format: netCDF file format for newly created files,
        case-insensitive (e.g. "netcdf4", "netcdf4_classic").

    :raises ValueError: if ncfile is neither a string path nor a Dataset.
    """
    if isinstance(ncfile, nc4.Dataset):
        # fixme: check for writable
        return ncfile
    elif isstring(ncfile):  # Fixme: should be pathlike...
        print("filename is:", ncfile)
        # Bug fix: honor the caller-supplied `format` instead of the
        # previously hard-coded "NETCDF4" (the default still maps to
        # "NETCDF4", so existing callers are unaffected).
        return nc4.Dataset(ncfile,
                           mode="w",
                           clobber=True,
                           format=format.upper())
    else:
        raise ValueError("Must be a string path or a netcdf4 Dataset")