

Python Dataset.to_netcdf Method Code Examples

This article collects typical usage examples of the Python method xarray.Dataset.to_netcdf. If you are wondering how Dataset.to_netcdf works or how to use it in practice, the curated examples below may help. You can also explore other usage examples from the xarray.Dataset class.


Below are 7 code examples of the Dataset.to_netcdf method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
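Before diving into the examples, here is a minimal sketch of the method's basic use (the file name example.nc is illustrative):

import numpy as np
from xarray import Dataset

# Build a small in-memory dataset and write it to a NetCDF file.
ds = Dataset({'foo': ('x', np.random.randn(10))})
ds.to_netcdf('example.nc')  # xarray picks a default engine and format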

Example 1: test_open_and_do_math

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_open_and_do_math(self):
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_mfdataset(tmp) as ds:
                actual = 1.0 * ds
                self.assertDatasetAllClose(original, actual)
Developer: ashang, Project: xarray, Lines: 9, Source: test_backends.py

Example 2: test_coordinates_encoding

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_coordinates_encoding(self):
        def equals_latlon(obj):
            return obj == 'lat lon' or obj == 'lon lat'

        original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
                           {'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(actual, original)
        with create_tmp_file() as tmp_file:
            original.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
                self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
                self.assertNotIn('coordinates', ds.attrs)
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)

        modified = original.drop(['temp', 'precip'])
        with self.roundtrip(modified) as actual:
            self.assertDatasetIdentical(actual, modified)
        with create_tmp_file() as tmp_file:
            modified.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds.attrs['coordinates']))
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
Developer: ashang, Project: xarray, Lines: 28, Source: test_backends.py

Example 3: test_weakrefs

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_weakrefs(self):
        example = Dataset({'foo': ('x', np.arange(5.0))})
        expected = example.rename({'foo': 'bar', 'x': 'y'})

        with create_tmp_file() as tmp_file:
            example.to_netcdf(tmp_file, engine='scipy')
            on_disk = open_dataset(tmp_file, engine='pynio')
            actual = on_disk.rename({'foo': 'bar', 'x': 'y'})
            del on_disk  # trigger garbage collection
            self.assertDatasetIdentical(actual, expected)
Developer: ashang, Project: xarray, Lines: 12, Source: test_backends.py

Example 4: test_preprocess_mfdataset

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_preprocess_mfdataset(self):
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)

            def preprocess(ds):
                return ds.assign_coords(z=0)

            expected = preprocess(original)
            with open_mfdataset(tmp, preprocess=preprocess) as actual:
                self.assertDatasetIdentical(expected, actual)
Developer: ashang, Project: xarray, Lines: 13, Source: test_backends.py

Example 5: test_open_dataset

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_open_dataset(self):
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_dataset(tmp, chunks={'x': 5}) as actual:
                self.assertIsInstance(actual.foo.variable.data, da.Array)
                self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
                self.assertDatasetIdentical(original, actual)
            with open_dataset(tmp, chunks=5) as actual:
                self.assertDatasetIdentical(original, actual)
            with open_dataset(tmp) as actual:
                self.assertIsInstance(actual.foo.variable.data, np.ndarray)
                self.assertDatasetIdentical(original, actual)
Developer: ashang, Project: xarray, Lines: 15, Source: test_backends.py

Example 6: test_lock

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
    def test_lock(self):
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp, format='NETCDF3_CLASSIC')
            with open_dataset(tmp, chunks=10) as ds:
                task = ds.foo.data.dask[ds.foo.data.name, 0]
                self.assertIsInstance(task[-1], type(Lock()))
            with open_mfdataset(tmp) as ds:
                task = ds.foo.data.dask[ds.foo.data.name, 0]
                self.assertIsInstance(task[-1], type(Lock()))
            with open_mfdataset(tmp, engine='scipy') as ds:
                task = ds.foo.data.dask[ds.foo.data.name, 0]
                self.assertNotIsInstance(task[-1], type(Lock()))
Developer: ashang, Project: xarray, Lines: 15, Source: test_backends.py

Example 7: ensembles2dataset_dask

# Required module: from xarray import Dataset [as alias]
# Or: from xarray.Dataset import to_netcdf [as alias]
# (This excerpt also uses numpy as np, pandas' Timedelta, xarray's
# IndexVariable, dask's delayed, dask.bag's from_sequence, and private
# helpers from the same module.)
def ensembles2dataset_dask(ensdict, ncfpath, dsattrs={}, chunks=10,
                           verbose=True, print_every=1000):
    """
    Convert a dictionary of ensembles into an xarray Dataset object
    using dask.delayed to keep memory usage feasible.
    """
    mms2ms = 1e-3  # Millimeters per second to meters per second.
    n = 0

    # Find the first valid (non-None) ensemble.
    ensdict0 = None
    while ensdict0 is None:
        ensdict0 = ensdict[n].compute()
        n += 1
    nz = ensdict0['fixed_leader_janus']['number_of_cells']

    fixj = ensdict0['fixed_leader_janus'].compute()
    fix5 = ensdict0['fixed_leader_beam5'].compute()

    # Add ping offset to get beam 5's timestamps.
    dt5 = fix5['ping_offset_time'] # In milliseconds.
    dt5 = np.array(Timedelta(dt5, unit='ms'))

    th = fixj['beam_angle']
    assert th == 25  # The Janus beam angle is always 25 degrees.
    th = th*np.pi/180.  # Degrees to radians.
    Cth = np.cos(th)

    # Construct along-beam/vertical axes.
    cm2m = 1e-2
    r1janus = fixj['bin_1_distance']*cm2m
    r1b5 = fix5['bin_1_distance']*cm2m
    ncj = fixj['number_of_cells']
    nc5 = fix5['number_of_cells']
    lcj = fixj['depth_cell_length']*cm2m
    lc5 = fix5['depth_cell_length']*cm2m
    Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
    L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).

    rb = r1janus + np.arange(0, Lj, lcj)  # Along-beam distance from the xducer head (Janus).
    zab = Cth*rb                          # Vertical distance from the xducer head (Janus).
    zab5 = r1b5 + np.arange(0, L5, lc5)   # Distance from the xducer head; also depth for the vertical beam.

    rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
    zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
    zab5 = IndexVariable('z5', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})

    ensdict = from_sequence(ensdict)
    tjanus = ensdict.map_partitions(_alloc_timestamp_parts)
    t5 = _addtarr(tjanus, dt5)

    if verbose: print("Unpacking timestamps.")
    time = IndexVariable('time', tjanus.compute(), attrs={'long_name':'timestamps for beams 1-4 (Janus)'})
    time5 = IndexVariable('time5', t5.compute(), attrs={'long_name':'timestamps for beam 5 (vertical)'})
    if verbose: print("Done unpacking timestamps.")

    coords0 = dict(time=time)
    coords = dict(z=zab, time=time, rb=rb)
    coords5 = dict(z5=zab5, time5=time5)
    dims = ['z', 'time']
    dims5 = ['z5', 'time5']
    dims0 = ['time']

    coordsdict = coords0
    if verbose: print("Allocating heading, pitch, roll.")
    svars = ['heading', 'pitch', 'roll']
    long_names = svars
    units = ['degrees']*3
    grp = 'variable_leader_janus'
    vars1d = dict()
    for vname, lname, unit in zip(svars, long_names, units):
        if verbose: print(vname)
        # Per-variable metadata; lname and unit come from the loop.
        kwda = dict(coords=coordsdict, dims=dims0,
                    attrs=dict(units=unit, long_name=lname))
        wrk = ensdict.map_partitions(_alloc_hpr, grp, vname)
        wrk2 = delayed(_bag2DataArray)(wrk, chunks)(**kwda)
        vars1d.update({vname: wrk2})
    del wrk, wrk2

    ds2hpr = Dataset(data_vars=vars1d, coords=coordsdict)
    ds2hpr = ds2hpr.to_netcdf(ncfpath, compute=False, mode='w')
    if verbose: print("Saving heading, pitch, roll.")
    ds2hpr.compute()
    if verbose: print("Done saving heading, pitch, roll.")
    del ds2hpr

    coordsdict = coords5
    # Load beam 5 variables into memory to
    # be able to put them in a chunked DataArray.
    if verbose: print("Allocating beam 5 variables.")
    grps = ['velocity_beam5', 'correlation_beam5', 'echo_intensity_beam5']
    long_names = ['Beam 5 velocity', 'Beam 5 correlation', 'Beam 5 echo amplitude']
    units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
    vars5 = dict()
    for grp, lname, unit in zip(grps, long_names, units):
#......... remainder of this function omitted .........
Developer: USF-COT, Project: trdi_adcp_readers, Lines: 103, Source: readers.py
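Example 7 ends by calling to_netcdf with compute=False and triggering the write later with .compute(). Here is a minimal, self-contained sketch of that delayed-write pattern (the chunk size and file name are illustrative, and dask must be installed):

import numpy as np
import xarray as xr

# Chunking makes the variables dask-backed, so the write can be
# represented as a delayed task instead of running eagerly.
ds = xr.Dataset({'foo': ('x', np.random.randn(100))}).chunk({'x': 10})

# With compute=False, to_netcdf returns a dask delayed object
# instead of writing immediately.
write_job = ds.to_netcdf('delayed_write.nc', compute=False, mode='w')

write_job.compute()  # Trigger the actual write when convenient.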


Note: The xarray.Dataset.to_netcdf method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to each project's license before distributing or using the code. Do not reproduce without permission.