当前位置: 首页>>代码示例>>Python>>正文


Python Context.map方法代码示例

本文整理汇总了Python中pysparkling.Context.map方法的典型用法代码示例。如果您正苦于以下问题:Python Context.map方法的具体用法?Python Context.map怎么用?Python Context.map使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pysparkling.Context的用法示例。


在下文中一共展示了Context.map方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_cache

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_cache():
    """Regression test: chained map()/cache() crashed in version 0.2.28."""
    prefixed = (Context()
                .textFile('tests/*textFil*.py')
                .map(lambda line: '-' + line)
                .cache())
    print(len(prefixed.collect()))
    result = (prefixed
              .map(lambda line: '+' + line)
              .map(lambda line: '-' + line)
              .cache()
              .collect())
    print(result)
    assert '-+-from pysparkling import Context' in result
开发者ID:hvsarma,项目名称:pysparkling,代码行数:12,代码来源:test_textFile.py

示例2: test_lazy_execution_threadpool

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_lazy_execution_threadpool():
    """Run map() through a thread pool; cached results feed a second map()."""
    def indent_line(text):
        return '--- ' + text

    with futures.ThreadPoolExecutor(4) as pool:
        rdd = Context(pool=pool).textFile('tests/test_multiprocessing.py')
        rdd = rdd.map(indent_line).cache()
        rdd.collect()
        collected = rdd.map(indent_line).collect()
        # ThreadPool is not lazy although it returns generators.
        print(collected)
        assert '--- --- from pysparkling import Context' in collected
开发者ID:nicoheidtke,项目名称:pysparkling,代码行数:15,代码来源:test_multiprocessing.py

示例3: test_lazy_execution_processpool

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_lazy_execution_processpool():
    """Run map() through a process pool using dill for (de)serialization."""
    with futures.ProcessPoolExecutor(4) as pool:
        rdd = Context(
            pool=pool,
            serializer=dill.dumps,
            deserializer=dill.loads,
        ).textFile('tests/test_multiprocessing.py')
        rdd = rdd.map(indent_line).cache()
        rdd.collect()
        collected = rdd.map(indent_line).collect()
        # ProcessPool is not lazy although it returns generators.
        print(collected)
        assert '--- --- from pysparkling import Context' in collected
开发者ID:gitter-badger,项目名称:pysparkling,代码行数:16,代码来源:test_multiprocessing.py

示例4: test_lazy_execution

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_lazy_execution():
    """map() must not execute until collect() forces evaluation."""
    rdd = Context().textFile('tests/test_multiprocessing.py').map(indent_line)
    ran_before = INDENT_WAS_EXECUTED
    # at this point, no map() or foreach() should have been executed
    rdd.collect()
    ran_after = INDENT_WAS_EXECUTED
    assert not ran_before and ran_after
开发者ID:gitter-badger,项目名称:pysparkling,代码行数:10,代码来源:test_multiprocessing.py

示例5: test_cache

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_cache():
    """cache() memoizes partitions: first() runs one, collect() the rest."""
    squares = Context().parallelize([1, 2, 3, 4], 2).map(lambda n: n * n).cache()
    print('no exec until here')
    print(squares.first())
    print('executed map on first partition only')
    print(squares.collect())
    print('now map() was executed on all partitions and should '
          'not be executed again')
    print(squares.collect())
    collected = squares.collect()
    assert len(collected) == 4 and 16 in collected
开发者ID:gitter-badger,项目名称:pysparkling,代码行数:13,代码来源:test_rdd_unit.py

示例6: test_lazy_execution_processpool

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_lazy_execution_processpool():
    """Run map() through a process pool using cloudpickle/pickle."""
    def indent_line(text):
        return '--- ' + text

    with futures.ProcessPoolExecutor(4) as pool:
        rdd = Context(
            pool=pool,
            serializer=cloudpickle.dumps,
            deserializer=pickle.loads,
        ).textFile('tests/test_multiprocessing.py')  # .take(10)
        print(rdd.collect())
        rdd = rdd.map(indent_line)
        print(rdd.collect())
        rdd = rdd.cache()
        print(rdd.collect())
        collected = rdd.map(indent_line).collect()
        # ProcessPool is not lazy although it returns generators.
        print(collected)
        assert '--- --- from pysparkling import Context' in collected
开发者ID:nicoheidtke,项目名称:pysparkling,代码行数:22,代码来源:test_multiprocessing.py

示例7: test_processpool_distributed_cache

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_processpool_distributed_cache():
    """A second collect() must reuse cached results across pool workers."""
    with futures.ProcessPoolExecutor(4) as pool:
        rdd = Context(
            pool=pool,
            serializer=cloudpickle.dumps,
            deserializer=pickle.loads,
        ).parallelize(range(3), 3)
        # Each element sleeps 0.1s; after cache(), re-collect should be fast.
        rdd = rdd.map(lambda _: time.sleep(0.1)).cache()
        rdd.collect()

        start = time.time()
        print(rdd.collect())
        elapsed = time.time() - start
        assert elapsed < 0.3
开发者ID:nicoheidtke,项目名称:pysparkling,代码行数:16,代码来源:test_multiprocessing.py

示例8: test_lazy_execution

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_lazy_execution():
    """map() over a bound method must stay lazy until collect()."""

    class Recorder(object):
        """Records whether its mapper method has ever been invoked."""

        def __init__(self):
            self.executed = False

        def indent_line(self, line):
            self.executed = True
            return '--- ' + line

    rdd = Context().textFile('tests/test_multiprocessing.py')
    recorder = Recorder()

    rdd = rdd.map(recorder.indent_line)
    ran_before = recorder.executed
    # at this point, no map() or foreach() should have been executed
    rdd = rdd.map(recorder.indent_line).cache()
    print(rdd.collect())
    rdd = rdd.map(recorder.indent_line)
    rdd.collect()
    ran_after = recorder.executed
    print((ran_before, ran_after))
    assert not ran_before and ran_after
开发者ID:nicoheidtke,项目名称:pysparkling,代码行数:26,代码来源:test_multiprocessing.py

示例9: test_concurrent

# 需要导入模块: from pysparkling import Context [as 别名]
# 或者: from pysparkling.Context import map [as 别名]
def test_concurrent():
    """parallelize() + map() through a thread pool returns every result."""
    with futures.ThreadPoolExecutor(4) as pool:
        roots = Context(pool=pool).parallelize([1, 3, 4]).map(math.sqrt).collect()
        print(roots)
        assert 2 in roots
开发者ID:nicoheidtke,项目名称:pysparkling,代码行数:8,代码来源:test_multiprocessing.py


注:本文中的pysparkling.Context.map方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。