当前位置: 首页>>代码示例>>Python>>正文


Python xarray.open_mfdataset函数代码示例

本文整理汇总了Python中xarray.open_mfdataset函数的典型用法代码示例。如果您正苦于以下问题:Python open_mfdataset函数的具体用法?Python open_mfdataset怎么用?Python open_mfdataset使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了open_mfdataset函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_subset_variables

    def test_subset_variables(self):
        """subset_variables and the onlyvars shortcut must both restrict the
        dataset to exactly the requested variables, and the year offset must
        be applied when decoding times."""
        fileName = str(self.datadir.join('example_jan.nc'))
        timestr = ['xtime_start', 'xtime_end']
        varList = ['time_avg_avgValueWithinOceanRegion_avgSurfaceTemperature']

        # first, test loading the whole data set and then calling
        # subset_variables explicitly
        ds = xr.open_mfdataset(
            fileName,
            preprocess=lambda x: mpas_xarray.preprocess_mpas(x,
                                                             timestr=timestr,
                                                             yearoffset=1850))
        ds = mpas_xarray.subset_variables(ds, varList)
        self.assertEqual(sorted(ds.data_vars.keys()), sorted(varList))
        self.assertEqual(pd.Timestamp(ds.Time.values[0]),
                         pd.Timestamp('1855-01-16 12:22:30'))

        # next, test the same with the onlyvars argument
        ds = xr.open_mfdataset(
            fileName,
            preprocess=lambda x: mpas_xarray.preprocess_mpas(x,
                                                             timestr=timestr,
                                                             onlyvars=varList,
                                                             yearoffset=1850))
        # compare as sorted lists: on Python 3 a dict_keys view is never
        # equal to a list, so the original bare keys()-vs-list assertion
        # could not pass; this also matches the comparison above
        self.assertEqual(sorted(ds.data_vars.keys()), sorted(varList))
开发者ID:toddringler,项目名称:MPAS-Analysis,代码行数:25,代码来源:test_mpas_xarray.py

示例2: scaleVSpower

def scaleVSpower():
    """Scatter mean wavelet power against each distinct surface scale.

    Reads the daily LSTA power and scale maps, finds every unique finite
    scale value in the first 300 time steps, and plots the mean power over
    all pixels carrying that scale.
    """
    power = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/power_maps/' \
                           'lsta_daily_power*.nc')

    scale = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/scale_maps/' \
                           'lsta_daily_scale*.nc')

    # unique finite scale values within the first 300 time steps
    unique_scales = np.unique(scale['LSTA'].values[0:300, :, :])
    unique_scales = unique_scales[np.isfinite(unique_scales)]

    power_slice = power['LSTA'][0:300]
    scale_slice = scale['LSTA'][0:300]

    mean_powers = []
    for current_scale in unique_scales:
        print('Doing '+str(current_scale))
        # mean power over all pixels whose scale equals the current value
        masked = power_slice.where(scale_slice.values == current_scale)
        mean_powers.append(np.nanmean(masked.values))

    fig = plt.figure()
    plt.scatter(unique_scales, mean_powers)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:27,代码来源:surfaceScales_Powerdistribution.py

示例3: file_loop

def file_loop():
    """Compare daily LSTA anomalies against the scale maps.

    Plots a histogram of the valid LSTA anomalies and then, per scale bin,
    the 90th percentile of the anomalies falling in that bin.
    """
    lsta = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/scale_maps/' \
                           'lsta_daily_scale_*.nc')

    lsta_check = xr.open_mfdataset('/users/global/cornkle/data/OBS/modis_LST/modis_netcdf/' \
                           'lsta_daily_*.nc')

    # restrict the raw LSTA data to the lat/lon window covered by the
    # scale maps
    lsta_check = lsta_check.sel(lat=slice(lsta['lat'].values.min(),lsta['lat'].values.max()), lon=slice(lsta['lon'].values.min(),lsta['lon'].values.max()))

    # mask fill values (anything <= -800), then keep only time steps that
    # are present in both datasets
    lsta_checks = lsta_check['LSTA'].where(lsta_check['LSTA']>-800)
    lsta_checks = lsta_checks.where(lsta.time==lsta_checks.time)

    bins = np.arange(-20,20,2)
    f=plt.figure()
    plt.hist(lsta_checks.values[np.isfinite(lsta_checks.values)], bins=bins, edgecolor='k')

    bins = np.arange(-140, 141, 10)

    ll = []

    for i, b in enumerate(bins[0:-1]):

        b1 = bins[i+1]

        # NOTE(review): np.percentile propagates NaN from the masked pixels,
        # so this likely yields NaN per bin — np.nanpercentile may be what
        # was intended; confirm before changing behavior
        lmean = np.percentile(lsta_checks.where((lsta['LSTA'].values>=b) &  (lsta['LSTA'].values<b1)), 90)

        ll.append(lmean)

    # removed leftover pdb.set_trace() debugging breakpoint that halted
    # every run here
    f = plt.figure()
    plt.scatter(bins[1::], ll)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:34,代码来源:surfaceScales_distribution.py

示例4: main

def main(era_filesearch, cesm_base_filesearch, bias_output):
    """Write the bias between ERA and CESM per-time-step statistics.

    Opens both datasets, takes their standard deviation over time,
    vertically interpolates the ERA result onto the CESM levels, and saves
    the difference to *bias_output*.
    """
    print("opening data")
    era_data = xr.open_mfdataset(era_filesearch, concat_dim='time')
    base_cesm_data = xr.open_mfdataset(cesm_base_filesearch, concat_dim='time')

    print("loading data")
    era_data.load()
    base_cesm_data.load()

    # NOTE(review): despite the message printed below, these are standard
    # deviations over time, not means
    print("compute means")
    era_stat = era_data.std(dim="time")
    cesm_stat = base_cesm_data.std(dim="time")

    print("creating data")
    interpolated_era = xr.zeros_like(cesm_stat)
    print("loading data")
    interpolated_era.load()

    # interpolate the ERA statistics onto CESM levels using each dataset's
    # time-mean geopotential height "z"
    z_interp_all_vars(era_stat,
                      interpolated_era,
                      era_data["z"].mean(dim="time"),
                      base_cesm_data["z"].mean(dim="time"),
                      vars_to_correct)
    interpolated_era.to_netcdf("era_interpolated_std.nc")

    print("Computing Bias")
    bias = interpolated_era - cesm_stat

    print("writing")
    bias.to_netcdf(bias_output)
开发者ID:gutmann,项目名称:scripted_sufferin_succotash,代码行数:27,代码来源:correct_forcing.py

示例5: test_deterministic_names

 def test_deterministic_names(self):
     """Repeated opens of the same file must produce identical dask names,
     and every name must embed both the variable name and the file path."""
     with create_tmp_file() as tmp:
         create_test_data().to_netcdf(tmp)
         with open_mfdataset(tmp) as ds:
             original_names = {k: v.data.name for k, v in ds.items()}
         with open_mfdataset(tmp) as ds:
             repeat_names = {k: v.data.name for k, v in ds.items()}
         for var_name, dask_name in original_names.items():
             self.assertIn(var_name, dask_name)
             self.assertIn(tmp, dask_name)
         self.assertEqual(original_names, repeat_names)
开发者ID:ashang,项目名称:xarray,代码行数:12,代码来源:test_backends.py

示例6: read_nc_files

def read_nc_files(dir, bounds=None):
    """Open every reanalysis netCDF file under *dir* as one dataset.

    Parameters
    ----------
    dir : str
        Directory searched by ``get_reanalysis_file_paths``.
    bounds : optional
        Passed through to ``assert_bounds`` for each file.

    Returns
    -------
    xarray Dataset

    Raises
    ------
    IOError
        If no .nc files are found.
    """
    files = get_reanalysis_file_paths(dir)
    if not files:
        raise IOError("There are no .nc files in that directory.")
    # the original len(files) > 1 and len(files) == 1 branches were
    # byte-identical, so a single call suffices; the dead rmheight helper
    # (its body was commented out) is removed as well
    return xarray.open_mfdataset(files, preprocess=lambda d: assert_bounds(d, bounds))
开发者ID:tjvandal,项目名称:pydownscale,代码行数:13,代码来源:data.py

示例7: test_lock

 def test_lock(self):
     """Chunked netCDF reads must carry a lock in their dask task, while
     the scipy engine must not."""
     original = Dataset({'foo': ('x', np.random.randn(10))})
     lock_type = type(Lock())
     with create_tmp_file() as tmp:
         original.to_netcdf(tmp, format='NETCDF3_CLASSIC')
         with open_dataset(tmp, chunks=10) as ds:
             task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertIsInstance(task[-1], lock_type)
         with open_mfdataset(tmp) as ds:
             task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertIsInstance(task[-1], lock_type)
         with open_mfdataset(tmp, engine='scipy') as ds:
             task = ds.foo.data.dask[ds.foo.data.name, 0]
             self.assertNotIsInstance(task[-1], lock_type)
开发者ID:ashang,项目名称:xarray,代码行数:13,代码来源:test_backends.py

示例8: test_open_and_do_math

 def test_open_and_do_math(self):
     """Arithmetic on a lazily opened dataset must round-trip the data."""
     original = Dataset({'foo': ('x', np.random.randn(10))})
     with create_tmp_file() as tmp:
         original.to_netcdf(tmp)
         with open_mfdataset(tmp) as ds:
             # multiplying by 1.0 forces computation without changing values
             self.assertDatasetAllClose(original, 1.0 * ds)
开发者ID:ashang,项目名称:xarray,代码行数:7,代码来源:test_backends.py

示例9: retrieve

    def retrieve(path, isel='all', lazy=True):
        """Load saved simulation fields and metadata from *path*.

        Parameters
        ----------
        path : str or Path
            Directory containing ``data.nc`` (or ``data*.nc`` shards) and
            ``metadata.yml`` (or a legacy ``Treant.*.json``).
        isel : 'all', 'last', dict, or index (default 'all')
            Time selection applied to the data.
        lazy : bool (default True)
            If False, fully load the data into memory before returning.

        Returns
        -------
        FieldsData
        """
        path = Path(path)
        try:
            data = open_dataset(path / "data.nc")
            # a single merged file stays lazy regardless of the argument
            lazy = True
        except FileNotFoundError:
            data = open_mfdataset(path / "data*.nc",
                                  concat_dim="t").sortby("t")
        try:
            with open(Path(path) / 'metadata.yml', 'r') as yaml_file:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated in PyYAML >= 5.1 and unsafe on untrusted files;
                # consider yaml.safe_load
                metadata = yaml.load(yaml_file)
        except FileNotFoundError:
            # Ensure retro-compatibility with older version.
            # Path.glob returns a generator, which is not subscriptable, so
            # the original glob(...)[0] raised TypeError; take the first
            # match with next() instead
            with open(next(path.glob("Treant.*.json"))) as f:
                metadata = json.load(f)["categories"]

        if isel == 'last':
            data = data.isel(t=-1)
        elif isel == 'all':
            pass
        elif isinstance(isel, dict):
            data = data.isel(**isel)
        else:
            data = data.isel(t=isel)

        if not lazy:
            return FieldsData(data=data.load(),
                              metadata=AttrDict(**metadata))

        return FieldsData(data=data,
                          metadata=AttrDict(**metadata))
开发者ID:celliern,项目名称:triflow,代码行数:31,代码来源:container.py

示例10: test_variable_map

    def test_variable_map(self):
        """preprocess_mpas must rename file variables to their mapped names
        and keep exactly the requested (mapped) variables."""
        fileName = str(self.datadir.join('example_jan.nc'))

        # each desired name lists the candidate on-disk names it may have;
        # 'Time' may additionally be built from a pair of xtime strings
        varMap = dict(
            avgSurfaceTemperature=[
                'time_avg_avgValueWithinOceanRegion_avgSurfaceTemperature',
                'other_string',
                'yet_another_string'],
            daysSinceStartOfSim=[
                'time_avg_daysSinceStartOfSim',
                'xtime',
                'something_else'],
            avgLayerTemperature=[
                'time_avg_avgValueWithinOceanLayerRegion_avgLayerTemperature',
                'test1',
                'test2'],
            Time=[['xtime_start', 'xtime_end'],
                  'time_avg_daysSinceStartOfSim'])

        varList = ['avgSurfaceTemperature', 'avgLayerTemperature',
                   'refBottomDepth', 'daysSinceStartOfSim']

        # preprocess_mpas will use varMap to map the variable names from
        # their values in the file to the desired values in varList
        dataset = xr.open_mfdataset(
            fileName,
            preprocess=lambda x: mpas_xarray.preprocess_mpas(
                x,
                timestr='Time',
                onlyvars=varList,
                yearoffset=1850,
                varmap=varMap))

        # make sure the remapping happened as expected
        self.assertEqual(sorted(dataset.data_vars.keys()), sorted(varList))
开发者ID:toddringler,项目名称:MPAS-Analysis,代码行数:34,代码来源:test_mpas_xarray.py

示例11: month_count_concat

def month_count_concat():
    """Concatenate the monthly GridSat count files into one compressed
    netCDF aggregate."""
    out_dir = cnst.GRIDSAT
    out_name = 'aggs/gridsat_WA_-65_monthly_count_-40base_15-21UTC_1000km2.nc'

    monthly = xr.open_mfdataset(cnst.GRIDSAT + 'gridsat_WA_-40_1000km2_15-21UTC*_monthSum.nc')

    # compress the brightness-temperature variable on write
    encoding = {'tir': {'complevel': 5, 'zlib': True}}
    monthly.to_netcdf(out_dir + out_name, encoding=encoding)
开发者ID:cornkle,项目名称:proj_CEH,代码行数:7,代码来源:gridsat_postproc.py

示例12: open_cchdo_as_mfdataset

def open_cchdo_as_mfdataset(paths, target_pressure,
                            pressure_coord='pressure',
                            concat_dim='time'):
    """Open CCHDO hydrographic netCDF casts, interpolate each one onto the
    given pressures, and combine them into a single xarray dataset.

    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form "path/to/my/files/*.nc" or an
        explicit list of files to open.
    target_pressure : arraylike
        Target pressure to which all casts are interpolated
    pressure_coord : str
        Name of the coordinate variable for pressure
    concat_dim : str
        Name of the dimension along which to concatenate casts

    Returns
    -------
    ds : xarray Dataset
    """
    # per-file pipeline, applied right-to-left by compose: add a time
    # coordinate if missing, rename 0-d coords for concatenation, then
    # interpolate onto the target pressures
    preprocess = compose(
        functools.partial(interp_coordinate,
                          interp_coord=pressure_coord,
                          interp_data=target_pressure),
        functools.partial(rename_0d_coords, new_dim=concat_dim),
        _maybe_add_time_coord)
    return xr.open_mfdataset(paths, concat_dim=concat_dim, preprocess=preprocess)
开发者ID:rabernat,项目名称:ctd2xray,代码行数:34,代码来源:cchdo.py

示例13: _load_data_from_disk

def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
                         data_vars='minimal', coords='minimal',
                         grid_attrs=None, **kwargs):
    """Load a Dataset from a list or glob-string of files.

    Datasets from files are concatenated along time, and all grid
    attributes are renamed to their aospy internal names.

    Parameters
    ----------
    file_set : list or str
        List of paths to files or glob-string
    preprocess_func : function (optional)
        Custom function to call before applying any aospy logic
        to the loaded dataset
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to
        ``xr.open_mfdataset``
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    grid_attrs : dict
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.

    Returns
    -------
    Dataset
    """
    apply_preload_user_commands(file_set)
    # wrap the user preprocessing with the grid-attribute renaming step
    preprocess = _preprocess_and_rename_grid_attrs(preprocess_func,
                                                   grid_attrs, **kwargs)
    return xr.open_mfdataset(file_set,
                             preprocess=preprocess,
                             concat_dim=TIME_STR,
                             decode_times=False,
                             decode_coords=False,
                             mask_and_scale=True,
                             data_vars=data_vars,
                             coords=coords)
开发者ID:spencerahill,项目名称:aospy,代码行数:35,代码来源:data_loader.py

示例14: saveMonthly18

def saveMonthly18():
    """Write monthly means of the 18 UTC GridSat fields, keeping only
    values in the [-110, -40] range."""
    msg_folder = '/users/global/cornkle/data/OBS/gridsat/gridsat_netcdf/z18_panAfrica/'

    ds = xr.open_mfdataset(msg_folder+'gridsat_WA_*18UTC.nc')
    # mask everything outside the [-110, -40] window before averaging
    ds = ds.where((ds<=-40) & (ds>=-110))
    ds = ds.resample('m', dim='time', how='mean')
    ds.to_netcdf(msg_folder+'gridsat_monthly_18UTC.nc')
开发者ID:cornkle,项目名称:proj_CEH,代码行数:7,代码来源:saveGridsat_panAf.py

示例15: data

 def data(self):
     """Return the stored fields, preferring on-disk data over the cache;
     None when neither source is readable."""
     try:
         if not self.path:
             return self._concat_fields(self._cached_data)
         return open_mfdataset(self.path / "data*.nc")
     except OSError:
         # unreadable or missing files: signal "no data" with None
         return None
开发者ID:celliern,项目名称:triflow,代码行数:7,代码来源:container.py


注:本文中的xarray.open_mfdataset函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。