

Python Dataset.setncatts Method Code Examples

This article collects typical usage examples of the Python method netCDF4.Dataset.setncatts from open-source projects. If you are looking for concrete answers to questions such as what Dataset.setncatts does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples for netCDF4.Dataset, the class this method belongs to.


Below are 15 code examples of Dataset.setncatts, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
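Before the examples, here is a minimal orientation sketch (an editorial addition, not taken from any of the projects below): setncatts takes a Python dict and writes each key/value pair as a netCDF attribute, either globally on the Dataset or on an individual variable. The file name and attribute values are illustrative only.

from netCDF4 import Dataset

ds = Dataset('example.nc', 'w', format='NETCDF4')    # hypothetical output file
ds.setncatts({'title': 'demo file',                  # global attributes, set in one call
              'Conventions': 'CF-1.6'})

ds.createDimension('time', None)                     # unlimited time dimension
t = ds.createVariable('time', 'f8', ('time',))
t.setncatts({'standard_name': 'time',                # variable attributes, same mechanism
             'units': 'seconds since 1970-01-01 00:00:00 UTC'})
ds.close()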

Example 1: updateNCattrs_single

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
    def updateNCattrs_single(self, ncName):
        """on a single file: run when ONLY nc METADATA needs updating, NOT any data

        :param str ncName: filename of netCDF to be made, with path

        .. note::
            Function uses metaDict variables set in various levels of __init__.

        .. todo::
            - Shouldn't delete global attributes set in ``NCtimeMeta`` :
            time_coverage_duration, date_issued, date_modified, time_coverage_start, time_coverage_end
            - Except SASS object now sets ``metaDict`` in ``createNCshell``. Therefore,
            these global attributes won't work with this function.
            These include: title, date_created, history, geospatial_lat/lon/vertical_min/max,
            institution, comment.
        """
        print ncName
        ncfile = Dataset(ncName, 'a', format='NETCDF4')
        #print ncfile.variables.keys()
        #print ncfile.ncattrs()
        print ncfile.__dict__
        ncfile.setncatts(self.metaDict)
        print "EDITED"
        #print ncfile.__dict__.keys()
        #take out attributes that are no longer in the meta dictionary
        for k in ncfile.__dict__.keys():
            if k not in self.metaDict:
                print 'DELETED', k
                ncfile.delncattr(k)
        self.NCtimeMeta(ncfile)
        print "DONE"
        print ncfile.__dict__#.keys()
        ncfile.close()
Developer: sarahheim, Project: ncObjects, Lines: 35, Source: nc.py

Example 2: make_test_file

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
def make_test_file(filename, attributes={}, **variables):
    """Create a netcdf file with the given global and variable
    attributes. Variables are created as dimensionless doubles.

    For example this:

        make_test_file(testfile,
                       {'title':'test file', 'site_code':'NRSMAI'},
                       TEMP = {'standard_name':'sea_water_temperature'},
                       PSAL = {'standard_name':'sea_water_salinity'}
        )

    will create (in cdl):

        netcdf testfile {
        variables:
            double PSAL ;
                    PSAL:standard_name = "sea_water_salinity" ;
            double TEMP ;
                    TEMP:standard_name = "sea_water_temperature" ;

        // global attributes:
                    :site_code = "NRSMAI" ;
                    :title = "test file" ;
        }

    """
    ds = Dataset(filename, 'w')
    ds.setncatts(attributes)
    for name, adict in variables.iteritems():
        var = ds.createVariable(name, float)
        var.setncatts(adict)
    ds.close()
Developer: aodn, Project: data-services, Lines: 35, Source: test_file_classifier.py

Example 3: set_basic_md

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
def set_basic_md(resource):
  """
  basic metadata
  :param resource: netCDF file on which the basic metadata should be set
  """
  import sys
  from datetime import datetime as dt 
  
  py_version = sys.version
  creation_date = dt.strftime( dt.now(), format='%Y-%m-%dT%H:%M:%S')
  
  md_basic = {
     'activity': 'birdhouse project',
     'software':'flyingpigeon v 0.1', 
     'software_project': 'birdhouse',
     'software_reference':'https://github.com/bird-house/',
     'software_platform': 'PYTHON %s' % py_version,
     'contact_mail_1':'[email protected]',
     'contact_mail_2':'[email protected]',
     'creation_date': creation_date ,
     }
  
  ds = Dataset(resource, mode='a')
  ds.setncatts(md_basic)
  ds.close()
  
  return(resource)
Developer: KatiRG, Project: flyingpigeon, Lines: 29, Source: metadata.py
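A hedged usage sketch for set_basic_md above (an editorial addition, not part of the flyingpigeon source): the function opens the file in append mode, stamps the provenance attributes onto it, and returns the path, so it can be called directly on an existing netCDF file. The path below is illustrative.

nc_path = set_basic_md('/tmp/tas_sample.nc')   # hypothetical file; edited in place
print(nc_path)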

Example 4: ncSaveGrid

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]

#......... part of the code omitted here .........
                                    'units':
                                    ...} },
                             ...}
    
        The value for the 'dims' key must be a tuple that is a subset of
        the dimensions specified above.
    
    :param float nodata: Value to assign to missing data, default is -9999.
    :param str datatitle: Optional title to give the stored dataset.
    :param gatts: Optional dictionary of global attributes to include in the file.
    :type gatts: `dict` or None
    :param dtype: The data type of the missing value. If not given, infer from other input arguments.
    :type dtype: :class:`numpy.dtype`
    :param bool writedata: If true, then the function will write the provided data
        (passed in via the variables dict) to the file. Otherwise, no data is
        written.
    
    :param bool keepfileopen:  If True, return a netcdf object and keep the file open, so that data
        can be written by the calling program. Otherwise, flush data to disk and close the file.

    :param bool zlib: If true, compresses data in variables using gzip compression.

    :param integer complevel: Value between 1 and 9, describing level of compression desired.
         Ignored if zlib=False.

    :param integer lsd: Variable data will be truncated to this number of significant digits.

    :return: `netCDF4.Dataset` object (if keepfileopen=True)
    :rtype: :class:`netCDF4.Dataset`

    :raises KeyError: If input dimension or variable dicts do not have required keys.
    :raises IOError: If output file cannot be created.
    :raises ValueError: if there is a mismatch between dimensions and shape of values to write.
    
    """

    try:
        ncobj = Dataset(filename, 'w', format='NETCDF4', clobber=True)
    except IOError:
        raise IOError("Cannot open {0} for writing".format(filename))

    # Dict keys required for dimensions and variables
    dimkeys = set(['name', 'values', 'dtype', 'atts'])
    varkeys = set(['name', 'values', 'dtype', 'dims', 'atts'])
    
    dims = ()
    for d in dimensions.itervalues():
        missingkeys = [x for x in dimkeys if x not in d.keys()]
        if len(missingkeys) > 0:
            ncobj.close()
            raise KeyError("Dimension dict missing key '{0}'".
                           format(missingkeys))
        
        ncCreateDim(ncobj, d['name'], d['values'], d['dtype'], d['atts'])
        dims = dims + (d['name'],)

    for v in variables.itervalues():
        missingkeys = [x for x in varkeys if x not in v.keys()]
        if len(missingkeys) > 0:
            ncobj.close()
            raise KeyError("Variable dict missing key '{0}'".
                           format(missingkeys))

        if v['values'] is not None:
            if (len(v['dims']) != v['values'].ndim):
                ncobj.close()
                raise ValueError("Mismatch between shape of "
                                 "variable and dimensions")
        if v.has_key('least_significant_digit'):
            varlsd = v['least_significant_digit']
        else:
            varlsd = lsd

        var = ncobj.createVariable(v['name'], v['dtype'],
                                   v['dims'], 
                                   zlib=zlib,
                                   complevel=complevel,
                                   least_significant_digit=varlsd,
                                   fill_value=nodata)

        if (writedata and v['values'] is not None):
            var[:] = np.array(v['values'], dtype=v['dtype'])
    
        var.setncatts(v['atts'])

    # Additional global attributes:
    gatts['created_on'] = time.strftime(ISO_FORMAT, time.localtime())
    gatts['created_by'] = getpass.getuser()
    gatts['Conventions'] = 'CF-1.6'
       
    ncobj.setncatts(gatts)

    if datatitle:
        ncobj.setncattr('title', datatitle)
        
    if keepfileopen:
        return ncobj
    else:
        ncobj.close()
        return
Developer: squireg, Project: tcrm, Lines: 104, Source: nctools.py
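To make the nested dict layout that ncSaveGrid expects more concrete, here is a hedged sketch of how the dimensions and variables arguments might be assembled. This is an editorial sketch only: the keyword names are taken from the function body above, while the coordinate values, attribute contents, and output file name are illustrative.

import numpy as np

lats = np.arange(-10.0, 0.0, 0.5)
lons = np.arange(110.0, 120.0, 0.5)

dimensions = {
    0: {'name': 'lat', 'values': lats, 'dtype': 'f8',
        'atts': {'long_name': 'Latitude', 'units': 'degrees_north'}},
    1: {'name': 'lon', 'values': lons, 'dtype': 'f8',
        'atts': {'long_name': 'Longitude', 'units': 'degrees_east'}}
}

variables = {
    0: {'name': 'wspd', 'values': np.zeros((len(lats), len(lons))),
        'dtype': 'f4', 'dims': ('lat', 'lon'),
        'atts': {'long_name': 'Wind speed', 'units': 'm s-1'}}
}

ncSaveGrid(filename='grid_example.nc', dimensions=dimensions,
           variables=variables, gatts={'title': 'example grid'})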

Example 5:

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
        # #    print '\thistPre', histPre
        # histAdj = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.strptime(histPre))
        # print '\tnew "history" date:', histAdj
        # dtCrPre = ncfile.getncattr('date_modified')
        # #    print '\tdtCrPre', dtCrPre
        # dtCrAdj = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.strptime(dtCrPre))
        # print '\tnew "date_modified"', dtCrAdj

        # print ncfile.__dict__

        #global attr
        newMeta = {
            "geospatial_vertical_min": 0.9,
            "geospatial_vertical_max": 0.9
        }
        ncfile.setncatts(newMeta)

        # #variable attributes
        # print '\tpre-Standard_name:',ncfile.variables['pCO2_atm'].getncattr('standard_name')
        # ncfile.variables['pCO2_atm'].setncatts(              { 'standard_name': 'surface_partial_pressure_of_carbon_dioxide_in_sea_water' })
        # ncfile.variables['pCO2_atm_flagPrimary'].setncatts(  { 'standard_name': 'surface_partial_pressure_of_carbon_dioxide_in_sea_water' })
        # ncfile.variables['pCO2_atm_flagSecondary'].setncatts({ 'standard_name': 'surface_partial_pressure_of_carbon_dioxide_in_sea_water' })

        #depth variable attribute and value
        ncfile.variables['depth'].setncatts({
            'valid_min': 0.9,
            'valid_max': 0.9
        })
        ncfile.variables['depth'][0] = 0.9
        # ncfile.variables['depth'][0] = self.staMeta['depth']!!!
Developer: sarahheim, Project: ncObjects, Lines: 32, Source: man_editMeta_caf.py

Example 6: add_vars_to_grp

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]

### Create some attributes

att_dict = {
    'foo'           : 'bar',
    'foo_byte'      : numpy.iinfo(numpy.int8).max,
    'foo_short'     : numpy.iinfo(numpy.int16).max,
    'foo_int'       : numpy.iinfo(numpy.int32).max,
    'foo_float'     : numpy.float32(numpy.pi),
    'foo_long'      : numpy.iinfo(numpy.int64).max,
    'foo_double'    : numpy.float64(numpy.pi),
    'foo funny name': 'fun [email protected]#$%^<>&*()_-+""{}'
}

rootgrp.setncatts(att_dict)
#create some attributes in a group
group3.setncatts(att_dict)


### Create some variables (with atts)

def add_vars_to_grp(grp,types, **kwargs):
    v = grp.createVariable(kwargs.get('var1','var1'),numpy.int8)
    v[:] = numpy.int8(8)
    v.foo = 'bar'
    
    v = grp.createVariable(kwargs.get('var2','var2'),numpy.int8, (dim3._name,), fill_value=5)
    v[:] = numpy.int8(8)
    v.foo = 'bar'
    
Developer: benjwadams, Project: petulant-bear, Lines: 31, Source: create_test_nc_file.py

Example 7: _nc3tonc4

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
def _nc3tonc4(filename3,filename4,unpackshort=True,
    zlib=True,complevel=6,shuffle=True,fletcher32=False,
    clobber=False,lsd_dict=None,nchunk=10,quiet=False,classic=0,
    vars=None,istart=0,istop=-1):
    """convert a netcdf 3 file (filename3) to a netcdf 4 file
    The default format is 'NETCDF4', but can be set
    to NETCDF4_CLASSIC if classic=1.
    If unpackshort=True, variables stored as short
    integers with a scale and offset are unpacked to floats
    in the netcdf 4 file.  If lsd_dict is not None, the data of variables
    named by the keys of the dict will be truncated to the decimal place
    specified by the corresponding values.  This improves compression by
    making it 'lossy'.
    If vars is not None, only variable names in the list
    will be copied (plus all the dimension variables).
    The zlib, complevel and shuffle keywords control
    how the compression is done."""

    from netCDF4 import Dataset

    ncfile3 = Dataset(filename3,'r')
    if classic:
        ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4_CLASSIC')
    else:
        ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4')
    mval = 1.e30 # missing value if unpackshort=True
    # create dimensions. Check for unlimited dim.
    unlimdimname = False
    unlimdim = None
    # create global attributes.
    if not quiet: sys.stdout.write('copying global attributes ..\n')
    #for attname in ncfile3.ncattrs():
    #    setattr(ncfile4,attname,getattr(ncfile3,attname))
    ncfile4.setncatts(ncfile3.__dict__)
    if not quiet: sys.stdout.write('copying dimensions ..\n')
    for dimname,dim in ncfile3.dimensions.items():
        if dim.isunlimited():
            unlimdimname = dimname
            unlimdim = dim
            ncfile4.createDimension(dimname,None)
            if istop == -1: istop=len(unlimdim)
        else:
            ncfile4.createDimension(dimname,len(dim))
    # create variables.
    if vars is None:
       varnames = ncfile3.variables.keys()
    else:
       # variables to copy specified
       varnames = vars
       # add dimension variables
       for dimname in ncfile3.dimensions.keys():
           if dimname in ncfile3.variables.keys() and\
              dimname not in varnames:
               varnames.append(dimname)
    for varname in varnames:
        ncvar = ncfile3.variables[varname]
        if not quiet: sys.stdout.write('copying variable %s\n' % varname)
        # quantize data?
        if lsd_dict is not None and varname in lsd_dict:
            lsd = lsd_dict[varname]
            if not quiet: sys.stdout.write('truncating to least_significant_digit = %d\n'%lsd)
        else:
            lsd = None # no quantization.
        # unpack short integers to floats?
        if unpackshort and hasattr(ncvar,'scale_factor') and hasattr(ncvar,'add_offset'):
            dounpackshort = True
            datatype = 'f4'
        else:
            dounpackshort = False
            datatype = ncvar.dtype
        # is there an unlimited dimension?
        if unlimdimname and unlimdimname in ncvar.dimensions:
            hasunlimdim = True
        else:
            hasunlimdim = False
        if dounpackshort:
            if not quiet: sys.stdout.write('unpacking short integers to floats ...\n')
            sys.stdout.write('')
        # is there missing value?
        if hasattr(ncvar, '_FillValue'):
            fillvalue3 = ncvar._FillValue
        elif hasattr(ncvar, 'missing_value'):
            fillvalue3 = ncvar.missing_value
        else:
            fillvalue3 = None
        if fillvalue3 is not None:
            fillvalue4 = fillvalue3 if not dounpackshort else mval
        else:
            fillvalue4 = None
        var = ncfile4.createVariable(varname,datatype,ncvar.dimensions, fill_value=fillvalue4, least_significant_digit=lsd,zlib=zlib,complevel=complevel,shuffle=shuffle,fletcher32=fletcher32)
        # fill variable attributes.
        attdict = ncvar.__dict__
        if '_FillValue' in attdict: del attdict['_FillValue']
        if dounpackshort and 'add_offset' in attdict:
            del attdict['add_offset']
        if dounpackshort and 'scale_factor' in attdict:
            del attdict['scale_factor']
        if dounpackshort and 'missing_value' in attdict:
            attdict['missing_value'] = fillvalue4
        var.setncatts(attdict)
#......... part of the code omitted here .........
Developer: cmarquardt, Project: netcdf4-python, Lines: 103, Source: utils.py

Example 8: range

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
import os, time
from netCDF4 import Dataset

staMeta = {
'stearns_wharf' : {'abbr': 'UCSB'},
'newport_pier' : {'abbr': 'UCI'},
'santa_monica_pier' : {'abbr': 'UCLA'},
'scripps_pier' : {'abbr': 'UCSD'}
}

print staMeta
for i in range(5,17):
    for sta in staMeta:
       fn = sta+'-20'+str(i).zfill(2)+'.nc'
       print fn
       print staMeta[sta]['abbr']
       ncName = os.path.join('./SASS_copy/', fn)
       print os.path.isfile(ncName)
       if os.path.isfile(ncName):
           ncfile = Dataset(ncName, 'a', format='NETCDF4')
           #print ncfile.__dict__
           newMeta = {'contributor_name': staMeta[sta]['abbr']+'/SCCOOS, SCCOOS/IOOS/NOAA, SCCOOS'}
           ncfile.setncatts(newMeta)
           nowStr = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
           ncfile.setncatts({
           "date_modified": nowStr,
           "date_issued": nowStr,
           })
           print 'EDITED'
Developer: sarahheim, Project: ncObjects, Lines: 31, Source: man_editMeta.py

Example 9: _post_proc_thredds

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
    def _post_proc_thredds(self, tmp_filename, out_filename, bands, band_metadata, time, global_metadata):
        """Post processing of file for THREDDS (add time variable and metadata)"""
        # open files for input and output
        nc_inp = Dataset(tmp_filename, 'r')
        nc_out = Dataset(out_filename, 'w')

        grid_mapping_name, grid_mapping_var_name = self._create_dimensions(nc_inp, nc_out, time)

        # recreate file
        for inp_var_key in nc_inp.variables.keys():
            inp_var = nc_inp.variables[inp_var_key]
            if 'name' in inp_var.ncattrs():
                inp_var_name = inp_var.getncattr('name')
            else:
                inp_var_name = inp_var.name
            # create projection var
            if inp_var_name == grid_mapping_var_name:
                out_var = Exporter._copy_nc_var(inp_var, nc_out, grid_mapping_name,
                                                inp_var.dtype.str, inp_var.dimensions)
                continue

            # create simple x/y variables
            if inp_var_name in ['x', 'y', 'lon', 'lat']:
                out_var = Exporter._copy_nc_var(inp_var, nc_out, inp_var_name,
                                                '>f4', inp_var.dimensions)
            # create data var
            elif inp_var_name in band_metadata:
                fill_value = None
                if '_FillValue' in inp_var.ncattrs():
                    fill_value = inp_var._FillValue
                if '_FillValue' in band_metadata[inp_var_name]:
                    fill_value = band_metadata['_FillValue']
                dimensions = ('time', ) + inp_var.dimensions
                out_var = Exporter._copy_nc_var(inp_var, nc_out, inp_var_name,
                                                band_metadata[inp_var_name]['type'],
                                                dimensions, fill_value=fill_value)

            # copy array from input data
            data = inp_var[:]

            # copy rounded data from x/y
            if inp_var_name in ['x', 'y']:
                out_var[:] = np.floor(data).astype('>f4')
                # add axis=X or axis=Y
                out_var.axis = {'x': 'X', 'y': 'Y'}[inp_var_name]

            # copy data from lon/lat
            if inp_var_name in ['lon', 'lat']:
                out_var[:] = data.astype('>f4')

            # copy data from variables in the list
            if inp_var_name in band_metadata:
                # add offset and scale attributes
                scale = band_metadata[inp_var_name]['scale']
                offset = band_metadata[inp_var_name]['offset']
                if not (offset == 0.0 and scale == 1.0):
                    out_var.setncattr('add_offset', offset)
                    out_var.setncattr('scale_factor', scale)
                    data = (data - offset) / scale

                out_var[:] = data.astype(band_metadata[inp_var_name]['type'])

                # add custom attributes from input parameter bands
                if inp_var_name in bands:
                    for newAttr in bands[inp_var_name]:
                        if newAttr not in Exporter.UNWANTED_METADATA:
                            out_var.setncattr(newAttr, bands[inp_var_name][newAttr])
                    # add grid_mapping info
                    if grid_mapping_name is not None:
                        out_var.setncattr('grid_mapping', grid_mapping_name)

        # copy (some) global attributes
        for globAttr in nc_inp.ncattrs():
            if not(globAttr.strip().startswith('GDAL')):
                nc_out.setncattr(globAttr, nc_inp.getncattr(globAttr))

        # add common and custom global attributes
        nc_out.setncatts(global_metadata)

        # write output file
        nc_out.close()

        # close original files
        nc_inp.close()

        # Delete the temprary netCDF file
        os.remove(tmp_filename)

        return 0
Developer: nansencenter, Project: nansat, Lines: 91, Source: exporter.py

Example 10: nc3tonc4

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
def nc3tonc4(filename3,filename4,unpackshort=True,zlib=True,complevel=6,shuffle=True,fletcher32=False,clobber=False,lsd_dict=None,nchunk=10,quiet=False,classic=0,istart=0,istop=-1):
    """convert a netcdf 3 file (filename3) to a netcdf 4 file
    The default format is 'NETCDF4', but can be set
    to NETCDF4_CLASSIC if classic=1.
    If unpackshort=True, variables stored as short
    integers with a scale and offset are unpacked to floats
    in the netcdf 4 file.  If lsd_dict is not None, the data of variables
    named by the keys of the dict will be truncated to the decimal place
    specified by the corresponding values.  This improves compression by
    making it 'lossy'.
    The zlib, complevel and shuffle keywords control
    how the compression is done."""

    ncfile3 = Dataset(filename3,'r')
    if classic:
        ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4_CLASSIC')
    else:
        ncfile4 = Dataset(filename4,'w',clobber=clobber,format='NETCDF4')
    mval = 1.e30 # missing value if unpackshort=True
    # create dimensions. Check for unlimited dim.
    unlimdimname = False
    unlimdim = None
    # create global attributes.
    if not quiet: sys.stdout.write('copying global attributes ..\n')
    #for attname in ncfile3.ncattrs():
    #    setattr(ncfile4,attname,getattr(ncfile3,attname))
    ncfile4.setncatts(ncfile3.__dict__) 
    if not quiet: sys.stdout.write('copying dimensions ..\n')
    for dimname,dim in ncfile3.dimensions.items():
        if dim.isunlimited():
            unlimdimname = dimname
            unlimdim = dim
            ncfile4.createDimension(dimname,None)
            if istop == -1: istop=len(unlimdim)
        else:
            ncfile4.createDimension(dimname,len(dim))
    # create variables.
#    varnamelist=['temp','u','v','zeta']
    varnamelist=['ocean_time','lon_rho','lat_rho','h',
    'zeta','s_rho','s_w','theta_s','theta_b','hc','u','v',
    'lat_v','lat_u','lon_u','lon_v','temp']
    vars = [ncfile3.variables[i] for i in varnamelist]
    myvarlist = zip(varnamelist,vars)
#    for varname,ncvar in ncfile3.variables.items():
    for varname,ncvar in myvarlist:

        if not quiet: sys.stdout.write('copying variable %s\n' % varname)
        # quantize data?
        if lsd_dict is not None and lsd_dict.has_key(varname):
            lsd = lsd_dict[varname]
            if not quiet: sys.stdout.write('truncating to least_significant_digit = %d\n'%lsd)
        else:
            lsd = None # no quantization.
        # unpack short integers to floats?
        if unpackshort and hasattr(ncvar,'scale_factor') and hasattr(ncvar,'add_offset'):
            dounpackshort = True
            datatype = 'f4'
        else:
            dounpackshort = False
            datatype = ncvar.dtype
        # is there an unlimited dimension?
        if unlimdimname and unlimdimname in ncvar.dimensions:
            hasunlimdim = True
        else:
            hasunlimdim = False
        if dounpackshort:
            if not quiet: sys.stdout.write('unpacking short integers to floats ...\n')
            sys.stdout.write('')
        if hasattr(ncvar, '_FillValue'):
            FillValue = ncvar._FillValue
        else:
            FillValue = None 
        var = ncfile4.createVariable(varname,datatype,ncvar.dimensions, fill_value=FillValue, least_significant_digit=lsd,zlib=zlib,complevel=complevel,shuffle=shuffle,fletcher32=fletcher32)
        # fill variable attributes.
        attdict = ncvar.__dict__
        if '_FillValue' in attdict: del attdict['_FillValue']
        if dounpackshort and 'add_offset' in attdict: del attdict['add_offset']
        if dounpackshort and 'scale_factor' in attdict: del attdict['scale_factor']
        if dounpackshort and 'missing_value' in attdict: attdict['missing_value']=mval
        var.setncatts(attdict)
            #for attname in ncvar.ncattrs():
        #    if attname == '_FillValue': continue
        #    if dounpackshort and attname in ['add_offset','scale_factor']: continue
        #    if dounpackshort and attname == 'missing_value':
        #        setattr(var,attname,mval)
        #    else:
        #        setattr(var,attname,getattr(ncvar,attname))
        # fill variables with data.
        hasz = False
        try:
            iz = ncvar.dimensions.index('s_rho')
            hasz = True
            nlev = len(ncfile3.dimensions['s_rho'])
        except:
            print('no s_rho')
            
        try:
            iz = ncvar.dimensions.index('s_w')
            hasz = True
            nlev = len(ncfile3.dimensions['s_w'])
#......... part of the code omitted here .........
Developer: rsignell-usgs, Project: fvcom_utils, Lines: 103, Source: nc3tonc4_lp.py
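A hedged call sketch for the converter above (an editorial addition, not from the fvcom_utils source): the keyword arguments come from the visible signature, the file names are illustrative, and note that this variant copies only the ROMS variables hard-coded in varnamelist.

nc3tonc4('ocean_his_0001.nc', 'ocean_his_0001_nc4.nc',   # hypothetical input/output files
         unpackshort=False, zlib=True, complevel=4, clobber=True)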

Example 11: save_rain_class

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
    def save_rain_class(self, ruta, ExtraVar=None, ArrayVar1=None,
                        ArrayVar2=None):
        """Save processed radar data.

        Parameters
        ----------
        ruta : path where the file will be saved.
        ExtraVar : extra simulation variables, given as a dictionary of the form
            {"varName": {"Data": vector[ncells], "type": "type"}}.
        ArrayVar1, ArrayVar2 : one-dimensional array variables to be saved, given as
            {"varName": {"Data": vector, "type": "type"}}.
            Variable types are: float "f4", integer "i4".

        Returns
        -------
        self : with the variables initialised.
        """
        gr = Dataset(ruta, 'w', format='NETCDF4')
        # Dictionary of grid properties (RadProp is presumably defined elsewhere in the module)
        Dict = {'ncols': RadProp[0],
                'nrows': RadProp[1],
                'xll': RadProp[2],
                'yll': RadProp[3],
                'dx': RadProp[4]}
        # Set the dimension sizes
        DimNcol = gr.createDimension('ncols', self.ConvStra.shape[0])
        DimNfil = gr.createDimension('nrows', self.ConvStra.shape[1])
        # Dimensions for the array variables
        if type(ArrayVar1) is dict:
            k = ArrayVar1.keys()[0]
            DimArray = gr.createDimension('narray1', ArrayVar1[k]['Data'].size)
        if type(ArrayVar2) is dict:
            k = ArrayVar2.keys()[0]
            DimArray = gr.createDimension('narray2', ArrayVar2[k]['Data'].size)
        # Create variables
        ClasStruct = gr.createVariable('Conv_Strat', 'i4', ('ncols', 'nrows'), zlib=True)
        ClasRain = gr.createVariable('Rain', 'i4', ('ncols', 'nrows'), zlib=True)
        ClasRainHigh = gr.createVariable('Rhigh', 'i4', ('ncols', 'nrows'), zlib=True)
        ClasRainLow = gr.createVariable('Rlow', 'i4', ('ncols', 'nrows'), zlib=True)
        # Assign values to the variables
        ClasStruct[:] = self.ConvStra
        # Mean rainfall
        ppt = np.copy(self.ppt['media']) * 1000
        ppt = ppt.astype(float)
        ClasRain[:] = ppt
        # High rainfall
        ppt = np.copy(self.ppt['alta']) * 1000
        ppt = ppt.astype(float)
        ClasRainHigh[:] = ppt
        # Low rainfall
        ppt = np.copy(self.ppt['baja']) * 1000
        ppt = ppt.astype(float)
        ClasRainLow[:] = ppt
        # Extra variables
        if type(ExtraVar) is dict:
            for k in ExtraVar.keys():
                Var = gr.createVariable(k, ExtraVar[k]['type'], ('ncols', 'nrows'), zlib=True)
                Var[:] = ExtraVar[k]['Data']
        # Array variables
        if type(ArrayVar1) is dict:
            for k in ArrayVar1.keys():
                var = gr.createVariable(k, ArrayVar1[k]['type'], ('narray1',), zlib=True)
                var[:] = ArrayVar1[k]['Data']
        if type(ArrayVar2) is dict:
            for k in ArrayVar2.keys():
                var = gr.createVariable(k, ArrayVar2[k]['type'], ('narray2',), zlib=True)
                var[:] = ArrayVar2[k]['Data']
        # Write the property dict as global attributes and close the file
        gr.setncatts(Dict)
        gr.close()
Developer: nicolas998, Project: Radar, Lines: 71, Source: radar.py

Example 12: createNCshell

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
    def createNCshell(self, ncName, ignore):
        #NOT using: 'pH_aux', 'O2', 'O2sat'
        print "CAF createNCshell"
        ncfile = Dataset(ncName, 'w', format='NETCDF4')
        self.metaDict.update({
            'id':ncName.split('/')[-1], #filename
            'date_created': self.tupToISO(time.gmtime())
        })
        ncfile.setncatts(self.metaDict)
        #Move to NC/SCCOOS class???
        flagPrim_flag_values = bytearray([1, 2, 3, 4, 9]) # 1UB, 2UB, 3UB, 4UB, 9UB ;
        flagPrim_flag_meanings = 'GOOD_DATA UNKNOWN SUSPECT BAD_DATA MISSING'
        flagSec_flag_values = bytearray([0, 1, 2, 3]) # 1UB, 2UB, 3UB, 4UB, 9UB ;
        flagSec_flag_meanings = 'UNSPECIFIED RANGE FLAT_LINE SPIKE'
        dup_varatts = {
            'source':'insitu observations',
            'cell_methods': 'time: point',
            'grid_mapping':'crs',
            'coordinates':'time lat lon depth',
            'platform':'platform1'
        }
        dup_flagatts = {
            'source':'QC results',
            'comment': "Quality Control test are based on IOOS's Quality Control of Real-Time Ocean Data (QARTOD))"
        }

        # Create Dimensions
        # unlimited axis (can be appended to).
        time_dim = ncfile.createDimension('time', None)
#        name_dim = ncfile.createDimension('name_strlen', size=25)

        #Create Variables
        time_var = ncfile.createVariable(
            'time', np.int32, ('time'), zlib=True)  # int64? Gives error
        time_var.setncatts({
            'axis':"T",
            'calendar':'julian',
            'comment':'also known as Epoch or Unix time',
            'long_name':'time',
            'standard_name':'time',
            'units':'seconds since 1970-01-01 00:00:00 UTC'})

        temperature = ncfile.createVariable('temperature', 'f4', ('time'), zlib=True)
        temperature.setncatts({
            'long_name':'sea water temperature',
            'standard_name':'sea_water_temperature',
            'units':'celsius',
            'instrument':'instrument2',
            'coverage_content_type':'physicalMeasurement'})
        temperature.setncatts(self.qc_meta('temperature', self.qc_values['temperature']))
        temperature.setncatts(dup_varatts)
        temperature_flagPrim = ncfile.createVariable(
            'temperature_flagPrimary', 'B', ('time'), zlib=True)
        temperature_flagPrim.setncatts({
            'long_name':'sea water temperature, qc primary flag',
            'standard_name':"sea_water_temperature status_flag",
            'flag_values':flagPrim_flag_values,
            'flag_meanings':flagPrim_flag_meanings})
        temperature_flagPrim.setncatts(dup_flagatts)
        temperature_flagSec = ncfile.createVariable(
            'temperature_flagSecondary', 'B', ('time'), zlib=True)
        temperature_flagSec.setncatts({
            'long_name': 'sea water temperature, qc secondary flag',
            'standard_name':"sea_water_temperature status_flag",
            'flag_values': flagSec_flag_values,
            'flag_meanings': flagSec_flag_meanings})
        temperature_flagSec.setncatts(dup_flagatts)

        salinity = ncfile.createVariable('salinity', 'f4', ('time'), zlib=True)
        salinity.setncatts({
            'standard_name':'sea_water_salinity',
            'long_name':'sea water salinity',
            'units':'psu',
            'instrument':'instrument2',
            'coverage_content_type':'physicalMeasurement'}) #?
        salinity.setncatts(self.qc_meta('salinity', self.qc_values['salinity']))
        salinity.setncatts(dup_varatts)
        salinity_flagPrim = ncfile.createVariable(
            'salinity_flagPrimary', 'B', ('time'), zlib=True)
        salinity_flagPrim.setncatts({
            'long_name':'sea water salinity, qc primary flag',
            'standard_name':"sea_water_practical_salinity status_flag",
            'flag_values':flagPrim_flag_values,
            'flag_meanings':flagPrim_flag_meanings})
        salinity_flagPrim.setncatts(dup_flagatts)
        salinity_flagSec = ncfile.createVariable(
            'salinity_flagSecondary', 'B', ('time'), zlib=True)
        salinity_flagSec.setncatts({
            'long_name':'sea water salinity, qc secondary flag',
            'standard_name':"sea_water_practical_salinity status_flag",
            'flag_values':flagSec_flag_values,
            'flag_meanings':flagSec_flag_meanings})
        salinity_flagSec.setncatts(dup_flagatts)

        pCO2_atm = ncfile.createVariable('pCO2_atm', 'f4', ('time'), zlib=True)
        pCO2_atm.setncatts({
            'standard_name':'surface_partial_pressure_of_carbon_dioxide_in_sea_water', #SUBsurface?
            'long_name':'partial pressure of carbon dioxide',
            'units':'uatm',
            'instrument':'instrument1',
#......... part of the code omitted here .........
Developer: sarahheim, Project: ncObjects, Lines: 103, Source: caf.py

Example 13: createNCshell

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
    def createNCshell(self, ncName, lookup):
        sn = lookup['sn']
        # dpmt = lookup['dpmt']
        #NOT using: 'pH_aux', 'O2', 'O2sat'
        print "CAF createNCshell"
        ncfile = Dataset(ncName, 'w', format='NETCDF4')
        self.metaDict.update({
            'id':ncName.split('/')[-1], #filename
            'date_created': self.tupToISO(time.gmtime()),
            "geospatial_vertical_min": self.instrDict[sn][self.dpmt]['m'],
            "geospatial_vertical_max": self.instrDict[sn][self.dpmt]['m']
        })
        ncfile.setncatts(self.metaDict)
        #Move to NC/SCCOOS class???
        flagPrim_flag_values = bytearray([1, 2, 3, 4, 9]) # 1UB, 2UB, 3UB, 4UB, 9UB ;
        flagPrim_flag_meanings = 'GOOD_DATA UNKNOWN SUSPECT BAD_DATA MISSING'
        flagSec_flag_values = bytearray([0, 1, 2, 3]) # 1UB, 2UB, 3UB, 4UB, 9UB ;
        flagSec_flag_meanings = 'UNSPECIFIED RANGE FLAT_LINE SPIKE'
        dup_varatts = {
            'source':'insitu observations',
            'cell_methods': 'time: point longitude: point latitude: point',
            'grid_mapping':'crs',
            'coordinates':'time lat lon depth',
            'platform':'platform1',
            'instrument':'instrument1'
        }
        dup_flagatts = {
            'source':'QC results',
            'comment': "Quality Control test are based on IOOS's Quality Control of Real-Time Ocean Data (QARTOD))"
        }

        # for dI, d in enumerate(self.depArr):
        # for m in self.depArr: #for ~~groups~~
        #     for i in self.instrDict:
        # a depth could change instruments
        # if self.instrDict[sn]['m'] == m:
        ncGrp = str(int(self.instrDict[sn][self.dpmt]['m']))+'m'#+str(self.instrDict[sn]['d'])+'d'
        #instrument variables are in the root group
        inst = ncfile.createVariable('instrument1', 'c')
        inst.setncatts(self.instrDict[sn]['meta'])
        # inst.setncatts({
        #     "comment": "serial number: "+str(sn), #What if this changes???
        #     "geospatial_vertical_min": self.instrDict[sn][self.dpmt]['m'],
        #     "geospatial_vertical_max": self.instrDict[sn][self.dpmt]['m'],
        # })

        # #Create group for each depth/deployment
        # dep = ncfile.createGroup(ncGrp)

        # Create Dimensions
        # unlimited axis (can be appended to).
        time_dim = ncfile.createDimension('time', None)

        #Create Variables
        time_var = ncfile.createVariable(
            'time', np.int32, ('time'), zlib=True)  # int64? Gives error
        time_var.setncatts({
            'axis':"T",
            'calendar':'julian',
            'comment':'also known as Epoch or Unix time',
            'long_name':'time',
            'standard_name':'time',
            'units':'seconds since 1970-01-01 00:00:00 UTC'})

        # test = dep.createVariable(
        #     'test', np.int32, ('time'), zlib=True)  # np.int32

        temperature = ncfile.createVariable('temperature', 'f4', ('time'), zlib=True)
        temperature.setncatts({
            'long_name':'sea water temperature',
            'standard_name':'sea_water_temperature',
            'units':'celsius',
            'instrument':'instrument1'})
        temperature.setncatts(self.qc_meta('temperature', self.instrDict[sn][self.dpmt]['qc']['temperature']))
        temperature.setncatts(dup_varatts)
        temperature_flagPrim = ncfile.createVariable(
            'temperature_flagPrimary', 'B', ('time'), zlib=True)
        temperature_flagPrim.setncatts({
            'long_name':'sea water temperature, qc primary flag',
            'standard_name':"sea_water_temperature status_flag",
            'flag_values':flagPrim_flag_values,
            'flag_meanings':flagPrim_flag_meanings})
        temperature_flagPrim.setncatts(dup_flagatts)
        temperature_flagSec = ncfile.createVariable(
            'temperature_flagSecondary', 'B', ('time'), zlib=True)
        temperature_flagSec.setncatts({
            'long_name': 'sea water temperature, qc secondary flag',
            'standard_name':"sea_water_temperature status_flag",
            'flag_values': flagSec_flag_values,
            'flag_meanings': flagSec_flag_meanings})
        temperature_flagSec.setncatts(dup_flagatts)

        salinity = ncfile.createVariable('salinity', 'f4', ('time'), zlib=True)
        salinity.setncatts({
            'standard_name':'sea_water_salinity',
            'long_name':'sea water salinity',
            'units':'psu',
            'instrument':'instrument1'})
        salinity.setncatts(self.qc_meta('salinity', self.instrDict[sn][self.dpmt]['qc']['salinity']))
        salinity.setncatts(dup_varatts)
#......... part of the code omitted here .........
Developer: sarahheim, Project: ncObjects, Lines: 103, Source: dm_mooring.py

Example 14: set_metadata_segetalflora

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]
def set_metadata_segetalflora(resource):
  """
  :param resource: input file
  """
  # gather the set_metadata
  
  dic_segetalflora = {
    'keywords' : 'Segetalflora', 
    'tier': '2',
    'in_var' : 'tas',
    'description':'Number of European segetalflora species', 
    'method':'regression equation',
    'institution':'Julius Kuehn-Institut (JKI) Federal Research Centre for Cultivated Plants', 
    'institution_url':'www.jki.bund.de',
    'institute_id' : "JKI",
    'contact_mail_3':'[email protected]',
    'version' : '1.0',
     }
  
  dic_climatetype = {
    '1' : 'cold northern species group', 
    '2' : 'warm northern species group',
    '3' : 'moderate warm-toned species group',
    '4' : 'moderate warm-toned to mediterranean species group',
    '5' : 'mediterranean species group',
    '6' : 'climate-indifferent species',
    '7' : 'climate-undefinable species',
    'all' : 'species of all climate types'
      }
  
  try:
    set_basic_md(resource)
  except Exception as e: 
    logger.error(e)
  
  try:
    set_dynamic_md(resource)
  except Exception as e: 
    logger.error(e)
  
  #set the segetalflora specific metadata
  try:
    ds = Dataset(resource, mode='a')
    ds.setncatts(dic_segetalflora)
    ds.close()
  except Exception as e: 
    logger.error(e)
    # set the variable attributes: 
  from flyingpigeon.utils import get_variable
  
  try:
    ds = Dataset(resource, mode='a')
    var = get_variable(resource)
    if 'all' in var: 
      climat_type = 'all'
    else: 
      climat_type = var[-1]

    culture_type = var.strip('sf').strip(climat_type)  
    
    sf = ds.variables[var]
    sf.setncattr('units',1)
    sf.setncattr('standard_name', 'sf%s%s' % (culture_type, climat_type))  
    sf.setncattr('long_name', 'Segetal flora %s land use for %s' % (culture_type, dic_climatetype['%s' % climat_type]))
    ds.close()
  except Exception as e: 
    logger.error('failed to set sf attributes %s ' % e)
  # sort the attributes: 
  try:
    ds = Dataset(resource, mode='a')
    att = ds.ncattrs()
    att.sort()
    for a in att: 
      entry = ds.getncattr(a)
      ds.setncattr(a,entry)
    history = '%s , Segetalflora Impact Model V1.0' % (ds.history) 
    ds.setncattr('history',history)
    ds.close()
  except Exception as e: 
    logger.error('failed to sort attributes %s ' % e)
  
  return resource
Developer: KatiRG, Project: flyingpigeon, Lines: 84, Source: metadata.py

Example 15: L2_Process

# Required import: from netCDF4 import Dataset [as alias]
# Or: from netCDF4.Dataset import setncatts [as alias]

#......... part of the code omitted here .........
        self.nc_dims = self.ncFile.dimensions
        self.nc_dimnames = [dim for dim in self.ncFile.dimensions]

        # Read in Variable names
        self.ncvars = self.ncFile.variables

        # Grab the metadata stored in global attributes as a dictionary
        self.metadata = self.ncFile.__dict__  # Gets entire attribute info
        self.globattr = self.ncFile.ncattrs() # Gets only attribute names

    ############################
    ##  WRITE GLOBAL METHODS  ##
    ############################

    def write_dimensions(self):
        # Write the dimension information
        for dimname in self.ncFile.dimensions:
            self.outfile.createDimension(dimname, len(self.ncFile.dimensions[dimname]))

    def write_glob_attr(self):
        '''Write the global attributes.'''
        self.global_atts = {
                            'description': "UWKA Level 2 Data",
                            'documentation': "http://flights.uwyo.edu/n2uw/users/",
                            'created_UTC': datetime.datetime.utcnow().isoformat(),
                            'coordinates': "longitude latitude altitude time",
                            'latitude_coordinate': "latitude",
                            'longitude_coordinate': "longitude",
                            'zaxis_coordinate': "altitude",
                            'time_coordinate': "time",
                            'wind_field': "wind_dir wind_spd w_wind",
                            }
        for attname in GLOB_ATTS:
            self.global_atts[attname] = str(getattr(self.ncFile, attname))
        self.outfile.setncatts(self.global_atts)

    def add_glob_attr(self, attname, attval):
        '''Add a global attribute from setup file.'''
        self.outfile.setncattr(attname, attval)

    ##########################
    ##  WRITE FILE METHODS  ##
    ##########################       
    def write_time_var(self, tIn, tOut):
        '''Create a time variable for user file'''
        self.outfile.createVariable(tOut, \
                                    self.ncvars[tIn].datatype, \
                                    self.ncvars[tIn].dimensions)
        self.outfile.variables[tOut].long_name = "time of measurement"
        self.outfile.variables[tOut].standard_name = "time"
        self.outfile.variables[tOut].units = self.ncvars['time'].units
        self.outfile.variables[tOut].strptime_format = "seconds since %F %T %z"
        self.outfile.variables[tOut][:] = self.ncvars[tIn][:]

    def write_var(self, VarIn, VarOut):
        '''Create output variable'''
        # Get the attributes associated with a variable
        InputVaratts = self.ncFile.variables[VarIn].ncattrs()

        # Check for _FillValue
        if hasattr(self.ncvars[VarIn], '_FillValue'):
            fillval = self.ncvars[VarIn]._FillValue
        elif hasattr(self.ncvars[VarIn], 'missing_value'):
            fillval = self.ncvars[VarIn].missing_value
        else:
            fillval = None

        self.outfile.createVariable(VarOut, \
                                       self.ncvars[VarIn].datatype, \
                                       self.ncvars[VarIn].dimensions,\
                                       fill_value=fillval)

        # Copy the variable attributes
        self.outfile.variables[VarOut].setncatts({k: self.ncvars[VarIn].getncattr(k) for k in self.ncvars[VarIn].ncattrs()})
        
        # Fill the data values
        self.outfile.variables[VarOut][:] = self.ncvars[VarIn][:]

        # Add the category attribute
#        self.outfile.variables[VarOut].Category = self.CAT

        # Set which attribute list to use
#        if self.CAT == 'Cloud_physics':
#            attribute_list = CLD_PHYS_ATTS

#        else:
#            attribute_list =  STD_ATTS

#        for name in attribute_list:
#            # If the attribute is there then assign
#            if hasattr(self.ncFile.variables[VarIn], name):
#                attval = self.ncFile.variables[VarIn].__getattribute__(name)
#                self.outfile.variables[VarOut].__setattr__(name, attval)
#            else:
#                print("'%s' attribute not found - need to assign"%name)


    def add_attr(self, Var, Attname, Attvalue):
        Var.__setattr__(Attname, Attvalue)
        self.print_msg("   '%s' added"%Attname)
Developer: nguy, Project: uwkingair, Lines: 104, Source: make_uwka_l2_flight.py


Note: The netCDF4.Dataset.setncatts examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any use or redistribution should follow the license of the corresponding project. Please do not republish without permission.