

Python pysparkling.Context Class Code Examples

This article collects typical usage examples of the pysparkling.Context class in Python. If you are wondering what the Context class does, how to use it, or what working examples look like, the curated examples below should help.


The following 15 code examples of the Context class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
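
The snippets below are excerpts from test files and assume "from pysparkling import Context" plus whatever other modules they reference (multiprocessing, dill, cloudpickle, tempfile, and so on). As a self-contained warm-up, here is a minimal sketch of the basic Context workflow that all of the examples build on:

from pysparkling import Context

# Build an RDD from a local collection, split into 2 partitions.
rdd = Context().parallelize([1, 2, 3, 4], 2)

# Transformations such as map() are lazy; collect() triggers execution.
print(rdd.map(lambda x: x * x).collect())  # [1, 4, 9, 16]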

Example 1: test_read_7z

def test_read_7z():
    # file was created with:
    # 7z a tests/data.7z tests/readme_example.py
    # (brew install p7zip)
    rdd = Context().textFile('tests/data.7z')
    print(rdd.collect())
    assert 'from pysparkling import Context' in rdd.collect()
Developer: nicoheidtke, Project: pysparkling, Lines of code: 7, Source file: test_textFile.py

Example 2: test_multiprocessing

def test_multiprocessing():
    p = multiprocessing.Pool(4)
    c = Context(pool=p, serializer=dill.dumps, deserializer=dill.loads)
    my_rdd = c.parallelize([1, 3, 4])
    r = my_rdd.map(lambda x: x*x).collect()
    print(r)
    assert 16 in r
Developer: gitter-badger, Project: pysparkling, Lines of code: 7, Source file: test_multiprocessing.py
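
A note on the serializer arguments used here and in the next example: Python's standard pickle cannot serialize lambdas, so shipping lambda-based tasks to a process pool requires a serializer such as dill or cloudpickle. A quick standard-library demonstration of the limitation (this is pickle behavior, not pysparkling-specific):

import pickle

try:
    pickle.dumps(lambda x: x * x)
except pickle.PicklingError as e:
    print('plain pickle cannot serialize a lambda:', e)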

Example 3: test_first_mp

def test_first_mp():
    p = multiprocessing.Pool(4)
    c = Context(pool=p, serializer=cloudpickle.dumps,
                deserializer=pickle.loads)
    my_rdd = c.parallelize([1, 2, 2, 4, 1, 3, 5, 9], 3)
    print(my_rdd.first())
    assert my_rdd.first() == 1
Developer: nicoheidtke, Project: pysparkling, Lines of code: 7, Source file: test_multiprocessing.py

Example 4: test_saveAsTextFile_zip

def test_saveAsTextFile_zip():
    tempFile = tempfile.NamedTemporaryFile(delete=True)
    tempFile.close()
    Context().parallelize(range(10)).saveAsTextFile(tempFile.name+'.zip')
    read_rdd = Context().textFile(tempFile.name+'.zip')
    print(read_rdd.collect())
    assert '5' in read_rdd.collect()
Developer: nicoheidtke, Project: pysparkling, Lines of code: 7, Source file: test_textFile.py

Example 5: test_mapPartitions

def test_mapPartitions():
    rdd = Context().parallelize([1, 2, 3, 4], 2)

    def f(iterator):
        yield sum(iterator)

    r = rdd.mapPartitions(f).collect()
    assert 3 in r and 7 in r
Developer: gitter-badger, Project: pysparkling, Lines of code: 8, Source file: test_rdd_unit.py

Example 6: test_lazy_execution

def test_lazy_execution():
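    # indent_line and INDENT_WAS_EXECUTED are module-level helpers in this
    # test file: indent_line prepends '--- ' to a line (see the assertion in
    # example 8) and sets the INDENT_WAS_EXECUTED flag as a side effect.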
    r = Context().textFile('tests/test_multiprocessing.py')
    r = r.map(indent_line)
    exec_before_collect = INDENT_WAS_EXECUTED
    # at this point, no map() or foreach() should have been executed
    r.collect()
    exec_after_collect = INDENT_WAS_EXECUTED
    assert not exec_before_collect and exec_after_collect
Developer: gitter-badger, Project: pysparkling, Lines of code: 8, Source file: test_multiprocessing.py

Example 7: test_filter

def test_filter():
    my_rdd = Context().parallelize(
        [1, 2, 2, 4, 1, 3, 5, 9],
        3,
    ).filter(lambda x: x % 2 == 0)
    print(my_rdd.collect())
    print(my_rdd.count())
    assert my_rdd.count() == 3
Developer: gitter-badger, Project: pysparkling, Lines of code: 8, Source file: test_rdd_unit.py

Example 8: test_lazy_execution_threadpool

def test_lazy_execution_threadpool():
    with futures.ThreadPoolExecutor(4) as p:
        r = Context(pool=p).textFile('tests/test_multiprocessing.py')
        r = r.map(indent_line)
        r = r.map(indent_line)
        r = r.collect()
        # ThreadPool is not lazy although it returns generators.
        print(r)
        assert '--- --- from pysparkling import Context' in r
Developer: gitter-badger, Project: pysparkling, Lines of code: 9, Source file: test_multiprocessing.py

Example 9: test_s3_textFile

def test_s3_textFile():
    myrdd = Context().textFile(
        's3n://aws-publicdatasets/common-crawl/crawl-data/'
        'CC-MAIN-2015-11/warc.paths.*'
    )
    assert (
        'common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424937481488.49/'
        'warc/CC-MAIN-20150226075801-00329-ip-10-28-5-156.ec2.'
        'internal.warc.gz' in myrdd.collect()
    )
Developer: svenkreiss, Project: pysparkling, Lines of code: 10, Source file: test_textFile.py

Example 10: test_hdfs_file_exists

def test_hdfs_file_exists():
    random.seed()

    fn1 = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
    fn2 = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))

    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn1)

    assert File(fn1).exists() and not File(fn2).exists()
Developer: svenkreiss, Project: pysparkling, Lines of code: 12, Source file: test_textFile.py

Example 11: test_processpool_distributed_cache

def test_processpool_distributed_cache():
    with futures.ProcessPoolExecutor(4) as p:
        r = Context(
            pool=p,
            serializer=cloudpickle.dumps,
            deserializer=pickle.loads,
        ).parallelize(range(3), 3)
        r = r.map(lambda _: time.sleep(0.1)).cache()
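        # The first collect() materializes every partition (three 0.1 s
        # sleeps, run in parallel) and fills the cache.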
        r.collect()

        time_start = time.time()
        print(r.collect())
        time_end = time.time()
        assert time_end - time_start < 0.3
Developer: nicoheidtke, Project: pysparkling, Lines of code: 14, Source file: test_multiprocessing.py

Example 12: test_hdfs_textFile_loop

def test_hdfs_textFile_loop():
    random.seed()

    fn = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
    print('HDFS test file: {0}'.format(fn))

    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    print(rdd.collect())
    print(read_rdd.collect())
    assert (
        rdd.count() == read_rdd.count() and
        all(r1 == r2 for r1, r2 in zip(rdd.collect(), read_rdd.collect()))
    )
Developer: svenkreiss, Project: pysparkling, Lines of code: 16, Source file: test_textFile.py

Example 13: test_hdfs_file_exists

def test_hdfs_file_exists():
    if not HDFS_TEST_PATH:
        raise SkipTest

    random.seed()

    fn1 = HDFS_TEST_PATH+'/pysparkling_test_{0}.txt'.format(
        int(random.random()*999999.0)
    )
    fn2 = HDFS_TEST_PATH+'/pysparkling_test_{0}.txt'.format(
        int(random.random()*999999.0)
    )

    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn1)

    assert File(fn1).exists() and not File(fn2).exists()
Developer: hvsarma, Project: pysparkling, Lines of code: 17, Source file: test_textFile.py

Example 14: test_gs_textFile_loop

def test_gs_textFile_loop():
    random.seed()

    fn = '{}/pysparkling_test_{:d}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0))

    rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)

    assert (
        rdd.count() == rdd_check.count() and
        all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
Developer: svenkreiss, Project: pysparkling, Lines of code: 14, Source file: test_textFile.py

Example 15: test_cache

def test_cache():
    my_rdd = Context().parallelize([1, 2, 3, 4], 2)
    my_rdd = my_rdd.map(lambda x: x*x).cache()
    print('no exec until here')
    print(my_rdd.first())
    print('executed map on first partition only')
    print(my_rdd.collect())
    print('now map() was executed on all partitions and should '
          'not be executed again')
    print(my_rdd.collect())
    assert len(my_rdd.collect()) == 4 and 16 in my_rdd.collect()
Developer: gitter-badger, Project: pysparkling, Lines of code: 11, Source file: test_rdd_unit.py


Note: The pysparkling.Context class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not republish without permission.