当前位置: 首页>>代码示例>>Python>>正文


Python Dataset.compute方法代码示例

本文整理汇总了Python中xarray.Dataset.compute方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.compute方法的具体用法?Python Dataset.compute怎么用?Python Dataset.compute使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在xarray.Dataset的用法示例。


在下文中一共展示了Dataset.compute方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_dataset_pickle

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
 def test_dataset_pickle(self):
     """Verify that pickling a dask-backed Dataset does not load the
     underlying arrays into memory or re-evaluate the dask graph."""
     ds1 = Dataset({'a': DataArray(build_dask_array())})
     # compute() returns a new object; ds1 itself must remain lazy.
     ds1.compute()
     self.assertFalse(ds1['a']._in_memory)
     # assertEquals is a deprecated alias of assertEqual (removed in
     # Python 3.12); use the canonical name.
     self.assertEqual(kernel_call_count, 1)
     ds2 = pickle.loads(pickle.dumps(ds1))
     # Round-tripping through pickle must not trigger computation.
     self.assertEqual(kernel_call_count, 1)
     self.assertDatasetIdentical(ds1, ds2)
     self.assertFalse(ds1['a']._in_memory)
     self.assertFalse(ds2['a']._in_memory)
开发者ID:SixtyCapital,项目名称:xarray,代码行数:12,代码来源:test_dask.py

示例2: test_basic_compute

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
def test_basic_compute():
    """Smoke-test ``.compute()`` on Dataset, DataArray and Variable
    under each available dask scheduler interface."""
    chunked = Dataset({'foo': ('x', range(5)),
                       'bar': ('x', range(5))}).chunk({'x': 2})
    schedulers = (dask.threaded.get,
                  dask.multiprocessing.get,
                  dask.local.get_sync,
                  None)
    for scheduler in schedulers:
        with dask.set_options(get=scheduler):
            chunked.compute()
            chunked.foo.compute()
            chunked.foo.variable.compute()
开发者ID:jcmgray,项目名称:xarray,代码行数:13,代码来源:test_dask.py

示例3: test_to_dask_dataframe_no_coordinate

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
    def test_to_dask_dataframe_no_coordinate(self):
        """A Dataset without an index coordinate converts to a dask
        DataFrame both with and without ``set_index``."""
        arr = da.from_array(np.random.randn(10), chunks=4)
        ds = Dataset({'x': ('dim_0', arr)})

        # Default: the dimension becomes a plain column (reset_index).
        actual = ds.to_dask_dataframe()
        assert isinstance(actual, dd.DataFrame)
        assert_frame_equal(ds.compute().to_dataframe().reset_index(),
                           actual.compute())

        # set_index=True: the dimension becomes the dataframe index.
        actual = ds.to_dask_dataframe(set_index=True)
        assert isinstance(actual, dd.DataFrame)
        assert_frame_equal(ds.compute().to_dataframe(),
                           actual.compute())
开发者ID:jcmgray,项目名称:xarray,代码行数:15,代码来源:test_dask.py

示例4: test_basic_compute

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
def test_basic_compute():
    """Smoke-test ``.compute()`` on Dataset, DataArray and Variable
    under every scheduler interface supported by the installed dask.

    Scheduler selection across dask versions:
    - >= 0.19.4: ``dask.config.set(scheduler=...)`` (kwarg renamed)
    - >= 0.18.0: ``dask.config.set(get=...)``
    - older:     ``dask.set_options(get=...)``

    The original middle branch repeated ``scheduler=get``, which made
    the 0.18.0 version check dead code; fixed to use ``get=get``.
    """
    ds = Dataset({'foo': ('x', range(5)),
                  'bar': ('x', range(5))}).chunk({'x': 2})
    for get in [dask.threaded.get,
                dask.multiprocessing.get,
                dask.local.get_sync,
                None]:
        if LooseVersion(dask.__version__) >= LooseVersion('0.19.4'):
            ctx = dask.config.set(scheduler=get)
        elif LooseVersion(dask.__version__) >= LooseVersion('0.18.0'):
            ctx = dask.config.set(get=get)
        else:
            ctx = dask.set_options(get=get)
        with ctx:
            ds.compute()
            ds.foo.compute()
            ds.foo.variable.compute()
开发者ID:benbovy,项目名称:xarray,代码行数:17,代码来源:test_dask.py

示例5: test_dataset_pickle

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
 def test_dataset_pickle(self):
     """Pickling/unpickling must not convert the dask backend to numpy
     in either the data variables or the non-index coordinates."""
     data = build_dask_array('data')
     nonindex_coord = build_dask_array('coord')
     before = Dataset(data_vars={'a': ('x', data)},
                      coords={'y': ('x', nonindex_coord)})
     before.compute()
     # compute() returned a new object; the original stays lazy, and
     # each of the two kernels ran exactly once.
     for key in ('a', 'y'):
         assert not before[key]._in_memory
     assert kernel_call_count == 2
     after = pickle.loads(pickle.dumps(before))
     # The pickle round-trip must not re-evaluate the dask graph.
     assert kernel_call_count == 2
     assert_identical(before, after)
     for ds in (before, after):
         for key in ('a', 'y'):
             assert not ds[key]._in_memory
开发者ID:jcmgray,项目名称:xarray,代码行数:20,代码来源:test_dask.py

示例6: test_to_dask_dataframe_2D_set_index

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
    def test_to_dask_dataframe_2D_set_index(self):
        """Convert a 2-D Dataset to a dask DataFrame with
        ``set_index=True``.

        This will fail until dask implements MultiIndex support.
        """
        arr = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
        ds = Dataset({'w': (('x', 'y'), arr)})
        ds['x'] = ('x', np.array([0, 1], np.int64))
        ds['y'] = ('y', list('abc'))

        result = ds.to_dask_dataframe(set_index=True)
        assert isinstance(result, dd.DataFrame)
        assert_frame_equal(ds.compute().to_dataframe(),
                           result.compute())
开发者ID:jcmgray,项目名称:xarray,代码行数:13,代码来源:test_dask.py

示例7: ensembles2dataset_dask

# 需要导入模块: from xarray import Dataset [as 别名]
# 或者: from xarray.Dataset import compute [as 别名]
def ensembles2dataset_dask(ensdict, ncfpath, dsattrs={}, chunks=10,
                           verbose=True, print_every=1000):
    """
    Convert a dictionary of ensembles into an xarray Dataset object
    using dask.delayed to keep memory usage feasible.
    """
    mms2ms = 1e-3
    n=0
    # fbadens = np.array(ensdict_aux)==None
    # nt = len(ensdict) - np.sum(fbadens)
    # embed()

    ensdict0 = None
    while ensdict0 is None:
        ensdict0 = ensdict[n].compute()
        n+=1
    nz = ensdict0['fixed_leader_janus']['number_of_cells']

    fixj = ensdict0['fixed_leader_janus'].compute()
    fix5 = ensdict0['fixed_leader_beam5'].compute()

    # Add ping offset to get beam 5's timestamps.
    dt5 = fix5['ping_offset_time'] # In milliseconds.
    dt5 = np.array(Timedelta(dt5, unit='ms'))

    th = fixj['beam_angle']
    assert th==25 # Always 25 degrees.
    th = th*np.pi/180.
    Cth = np.cos(th)

    # Construct along-beam/vertical axes.
    cm2m = 1e-2
    r1janus = fixj['bin_1_distance']*cm2m
    r1b5 = fix5['bin_1_distance']*cm2m
    ncj = fixj['number_of_cells']
    nc5 = fix5['number_of_cells']
    lcj = fixj['depth_cell_length']*cm2m
    lc5 = fix5['depth_cell_length']*cm2m
    Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
    L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).

    rb = r1janus + np.arange(0, Lj, lcj) # Distance from xducer head
                                         # (Janus).
    zab = Cth*rb                         # Vertical distance from xducer head
                                         # (Janus).
    zab5 = r1b5 + np.arange(0, L5, lc5)  # Distance from xducer head, also
                                         # depth for the vertical beam.

    rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
    zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
    zab5 = IndexVariable('z5', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})

    ensdict = from_sequence(ensdict)
    tjanus = ensdict.map_partitions(_alloc_timestamp_parts)
    t5 = _addtarr(tjanus, dt5)

    if verbose: print("Unpacking timestamps.")
    time = IndexVariable('time', tjanus.compute(), attrs={'long_name':'timestamps for beams 1-4 (Janus)'})
    time5 = IndexVariable('time5', t5.compute(), attrs={'long_name':'timestamps for beam 5 (vertical)'})
    if verbose: print("Done unpacking timestamps.")

    coords0 = dict(time=time)
    coords = dict(z=zab, time=time, rb=rb)
    coords5 = dict(z5=zab5, time5=time5)
    dims = ['z', 'time']
    dims5 = ['z5', 'time5']
    dims0 = ['time']

    coordsdict = coords0
    if verbose: print("Allocating heading, pitch, roll.")
    kwda = dict(coords=coordsdict, dims=dims0, attrs=dict(units=unit, long_name=lname))
    svars = ['heading', 'pitch', 'roll']
    long_names = svars
    units = ['degrees']*3
    grp = 'variable_leader_janus'
    vars1d = dict()
    for vname,lname,unit in zip(svars,long_names,units):
        if verbose: print(vname)
        wrk = ensdict.map_partitions(_alloc_hpr, grp, vname)
        # wrk = darr.from_array(np.array(wrk.compute()), chunks=chunks)
        wrk2 = delayed(_bag2DataArray)(wrk, chunks)(**kwda)
        vars1d.update({vname:wrk2})
    del(wrk, wrk2)

    ds2hpr = Dataset(data_vars=vars1d, coords=coordsdict)
    ds2hpr = ds2hpr.to_netcdf(ncfpath, compute=False, mode='w')
    if verbose: print("Saving heading, pitch, roll.")
    ds2hpr.compute()
    if verbose: print("Done saving heading, pitch, roll.")
    del(ds2hpr)

    coordsdict = coords5
    # Load beam 5 variables into memory to
    # be able to put them in a chunked DataArray.
    if verbose: print("Allocating beam 5 variables.")
    grps = ['velocity_beam5', 'correlation_beam5', 'echo_intensity_beam5']
    long_names = ['Beam 5 velocity', 'Beam 5 correlation', 'Beam 5 echo amplitude']
    units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
    vars5 = dict()
    for grp,lname,unit in zip(grps,long_names,units):
#.........这里部分代码省略.........
开发者ID:USF-COT,项目名称:trdi_adcp_readers,代码行数:103,代码来源:readers.py


注:本文中的xarray.Dataset.compute方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。