This article collects typical usage examples of the pysparkling.Context.collect method in Python. If you are wondering what Context.collect does, how to call it, or what it looks like in real code, the curated samples below may help. You can also explore further usage examples for the containing class, pysparkling.Context.
The following shows 15 code examples of the Context.collect method, sorted by popularity by default.
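Before the test-suite examples, a minimal sketch of the basic pattern may help (the input list and lambda here are illustrative only, not taken from the examples below): transformations such as map() are lazy, and collect() triggers evaluation and returns the RDD's elements as a plain Python list.

from pysparkling import Context

# parallelize() builds an RDD from a Python iterable; map() is lazy;
# collect() evaluates the pipeline and returns the results as a list.
rdd = Context().parallelize([1, 2, 3]).map(lambda x: x * 2)
print(rdd.collect())  # [2, 4, 6]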
Example 1: test_saveAsTextFile_zip
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import tempfile

def test_saveAsTextFile_zip():
    # create a unique temporary name, then close (and thereby delete)
    # the file so saveAsTextFile() can write to that path
    tempFile = tempfile.NamedTemporaryFile(delete=True)
    tempFile.close()
    Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.zip')
    read_rdd = Context().textFile(tempFile.name + '.zip')
    print(read_rdd.collect())
    assert '5' in read_rdd.collect()
Example 2: test_read_7z
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
def test_read_7z():
    # the archive was created with:
    #   7z a tests/data.7z tests/readme_example.py
    #   (brew install p7zip)
    rdd = Context().textFile('tests/data.7z')
    print(rdd.collect())
    assert 'from pysparkling import Context' in rdd.collect()
Example 3: test_lazy_execution
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
# indent_line() and the INDENT_WAS_EXECUTED flag it sets are defined at
# module level in the original test file
def test_lazy_execution():
    r = Context().textFile('tests/test_multiprocessing.py')
    r = r.map(indent_line)
    exec_before_collect = INDENT_WAS_EXECUTED
    # at this point, no map() or foreach() should have been executed
    r.collect()
    exec_after_collect = INDENT_WAS_EXECUTED
    assert not exec_before_collect and exec_after_collect
Example 4: test_cache
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
def test_cache():
    # this crashes in version 0.2.28
    lines = Context().textFile('tests/*textFil*.py')
    lines = lines.map(lambda l: '-' + l).cache()
    print(len(lines.collect()))
    lines = lines.map(lambda l: '+' + l)
    lines = lines.map(lambda l: '-' + l).cache()
    lines = lines.collect()
    print(lines)
    assert '-+-from pysparkling import Context' in lines
Example 5: test_cache
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
def test_cache():
    my_rdd = Context().parallelize([1, 2, 3, 4], 2)
    my_rdd = my_rdd.map(lambda x: x * x).cache()
    print('no exec until here')
    print(my_rdd.first())
    print('executed map on first partition only')
    print(my_rdd.collect())
    print('now map() was executed on all partitions and should '
          'not be executed again')
    print(my_rdd.collect())
    assert len(my_rdd.collect()) == 4 and 16 in my_rdd.collect()
Example 6: test_lazy_execution_threadpool
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
from concurrent import futures

def test_lazy_execution_threadpool():
    def indent_line(l):
        return '--- ' + l

    with futures.ThreadPoolExecutor(4) as p:
        r = Context(pool=p).textFile('tests/test_multiprocessing.py')
        r = r.map(indent_line).cache()
        r.collect()
        r = r.map(indent_line)
        r = r.collect()
        # ThreadPool is not lazy although it returns generators.
        print(r)
        assert '--- --- from pysparkling import Context' in r
Example 7: test_gs_textFile_loop
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import random

# GS_TEST_PATH is a module-level constant in the original test file
def test_gs_textFile_loop():
    random.seed()
    # cast to int: the {:d} format spec rejects floats
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0))
    rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (
        rdd.count() == rdd_check.count() and
        all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
Example 8: test_processpool_distributed_cache
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import pickle
import time
from concurrent import futures

import cloudpickle

def test_processpool_distributed_cache():
    with futures.ProcessPoolExecutor(4) as p:
        r = Context(
            pool=p,
            serializer=cloudpickle.dumps,
            deserializer=pickle.loads,
        ).parallelize(range(3), 3)
        r = r.map(lambda _: time.sleep(0.1)).cache()
        r.collect()
        time_start = time.time()
        print(r.collect())
        time_end = time.time()
        # cached results must be reused: the second collect() should not
        # re-run the three 0.1 s sleeps
        assert time_end - time_start < 0.3
Example 9: test_lazy_execution_processpool
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
from concurrent import futures

import dill

# indent_line() is defined at module level in the original test file
def test_lazy_execution_processpool():
    with futures.ProcessPoolExecutor(4) as p:
        r = Context(
            pool=p,
            serializer=dill.dumps,
            deserializer=dill.loads,
        ).textFile('tests/test_multiprocessing.py')
        r = r.map(indent_line).cache()
        r.collect()
        r = r.map(indent_line)
        r = r.collect()
        # ProcessPool is not lazy although it returns generators.
        print(r)
        assert '--- --- from pysparkling import Context' in r
Example 10: test_hdfs_textFile_loop
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import random

# HDFS_TEST_PATH is a module-level constant in the original test file
def test_hdfs_textFile_loop():
    random.seed()
    # cast to int: the {:d} format spec rejects floats
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
    print('HDFS test file: {0}'.format(fn))
    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    print(rdd.collect())
    print(read_rdd.collect())
    assert (
        rdd.count() == read_rdd.count() and
        all(r1 == r2 for r1, r2 in zip(rdd.collect(), read_rdd.collect()))
    )
Example 11: test_hdfs_textFile_loop
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import random

# HDFS_TEST_PATH and SkipTest are defined/imported at module level in
# the original test file
def test_hdfs_textFile_loop():
    if not HDFS_TEST_PATH:
        raise SkipTest
    random.seed()
    fn = HDFS_TEST_PATH + '/pysparkling_test_{0}.txt'.format(
        int(random.random() * 999999.0)
    )
    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    assert (
        rdd.count() == read_rdd.count() and
        all(r1 == r2 for r1, r2 in zip(rdd.collect(), read_rdd.collect()))
    )
Example 12: test_filter
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
def test_filter():
    my_rdd = Context().parallelize(
        [1, 2, 2, 4, 1, 3, 5, 9],
        3,
    ).filter(lambda x: x % 2 == 0)
    print(my_rdd.collect())
    print(my_rdd.count())
    assert my_rdd.count() == 3
Example 13: test_gs_textFile_loop
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import random

# OAUTH2_CLIENT_ID, GS_TEST_PATH and SkipTest are defined/imported at
# module level in the original test file
def test_gs_textFile_loop():
    if not OAUTH2_CLIENT_ID or not GS_TEST_PATH:
        raise SkipTest
    random.seed()
    # use consistent field numbering: mixing '{}' and '{0}' raises ValueError
    fn = '{0}/pysparkling_test_{1}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0)
    )
    rdd = Context().parallelize("Line {0}".format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (
        rdd.count() == rdd_check.count() and
        all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
Example 14: test_s3_textFile_loop
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
import random

# AWS_ACCESS_KEY_ID, S3_TEST_PATH and SkipTest are defined/imported at
# module level in the original test file
def test_s3_textFile_loop():
    if not AWS_ACCESS_KEY_ID or not S3_TEST_PATH:
        raise SkipTest
    random.seed()
    fn = S3_TEST_PATH + '/pysparkling_test_{0}.txt'.format(
        int(random.random() * 999999.0)
    )
    rdd = Context().parallelize("Line {0}".format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (
        rdd.count() == rdd_check.count() and
        all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
Example 15: test_s3_textFile
# Required import: from pysparkling import Context [as alias]
# Or: from pysparkling.Context import collect [as alias]
def test_s3_textFile():
    myrdd = Context().textFile(
        's3n://aws-publicdatasets/common-crawl/crawl-data/'
        'CC-MAIN-2015-11/warc.paths.*'
    )
    assert (
        'common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424937481488.49/'
        'warc/CC-MAIN-20150226075801-00329-ip-10-28-5-156.ec2.'
        'internal.warc.gz' in myrdd.collect()
    )