This article collects typical usage examples of the Dataset class from Python's moztelemetry.dataset module. If you are wondering what the Python Dataset class does, or how it is used in practice, the curated class code examples below should help.
The following presents 15 code examples of the Dataset class, sorted by popularity by default.
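All of the examples follow the same basic pattern: construct a Dataset, narrow it with where() (using exact values or predicate callables), then materialize the matching records with records(). As a minimal sketch of that flow, assuming a live SparkContext named sc and placeholder channel/date values:

    from moztelemetry.dataset import Dataset

    # Minimal sketch (assumed values): restrict the 'telemetry' source to one
    # document type, channel, and submission day, then fetch a 10% sample of
    # the matching pings as an RDD. 'sc' is assumed to be a live SparkContext.
    pings = (Dataset.from_source('telemetry')
             .where(docType='main',
                    appUpdateChannel='nightly',   # placeholder channel
                    submissionDate='20180101')    # placeholder date
             .records(sc, sample=0.1))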
Example 1: test_where_exact_match

def test_where_exact_match():
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    new_dataset = dataset.where(dim1='my-value')

    # where() returns a new Dataset; the original is left untouched.
    assert new_dataset is not dataset
    assert list(new_dataset.clauses.keys()) == ['dim1']

    condition = new_dataset.clauses['dim1']
    assert condition('my-value')

Example 2: test_select

def test_select():
    dataset1 = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', 'field2')
    dataset2 = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', field2='field2')
    dataset3 = Dataset('test-bucket', ['dim1', 'dim2']).select(field1='field1', field2='field2')

    assert dataset1.selection == {
        'field1': 'field1',
        'field2': 'field2',
    }
    assert dataset1.selection == dataset2.selection == dataset3.selection

    dataset4 = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', field2='f2', field3='f3')
    assert dataset4.selection == {
        'field1': 'field1',
        'field2': 'f2',
        'field3': 'f3',
    }

    dataset5 = dataset4.select('field4', field5='f5')
    assert dataset5.selection == {
        'field1': 'field1',
        'field2': 'f2',
        'field3': 'f3',
        'field4': 'field4',
        'field5': 'f5',
    }

Example 3: aggregate_metrics

def aggregate_metrics(sc, channels, submission_date, main_ping_fraction=1,
                      fennec_ping_fraction=1, num_reducers=10000):
    """Return the build-id and submission-date aggregates for a given submission date.

    :param sc: A SparkContext instance
    :param channels: Either the name of a channel or a list/tuple of names
    :param submission_date: The submission date for which the data will be aggregated
    :param main_ping_fraction: An approximate fraction of "main" ping submissions to consider
    :param fennec_ping_fraction: An approximate fraction of Fennec "saved_session" ping submissions to consider
    """
    if not isinstance(channels, (tuple, list)):
        channels = [channels]

    channels = set(channels)

    # Desktop pings ('main' docType) from the requested channels, excluding Fennec.
    pings = Dataset.from_source('telemetry') \
        .where(appUpdateChannel=lambda x: x in channels,
               submissionDate=submission_date,
               docType='main',
               sourceVersion='4',
               appName=lambda x: x != 'Fennec') \
        .records(sc, sample=main_ping_fraction)

    # Fennec (Android) pings, which use the 'saved_session' docType.
    fennec_pings = Dataset.from_source('telemetry') \
        .where(appUpdateChannel=lambda x: x in channels,
               submissionDate=submission_date,
               docType='saved_session',
               sourceVersion='4',
               appName='Fennec') \
        .records(sc, sample=fennec_ping_fraction)

    all_pings = pings.union(fennec_pings)
    return _aggregate_metrics(all_pings)

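A hypothetical invocation of the function above might look as follows; the channel list, date, and sampling fraction are placeholder values, and sc is again assumed to be a live SparkContext:

    # Hypothetical call (placeholder arguments): aggregate nightly and beta pings
    # submitted on 2018-01-01, sampling 10% of desktop main pings.
    aggregates = aggregate_metrics(sc, ['nightly', 'beta'], '20180101',
                                   main_ping_fraction=0.1)
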
Example 4: test_where

def test_where():
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    clause = lambda x: True
    new_dataset = dataset.where(dim1=clause)

    assert new_dataset is not dataset
    assert new_dataset.clauses == {'dim1': clause}

Example 5: test_scan_multiple_params

def test_scan_multiple_params():
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    new_dataset = dataset.where(dim1='myvalue')

    assert new_dataset is not dataset
    assert list(new_dataset.clauses.keys()) == ['dim1']

    condition = new_dataset.clauses['dim1']
    assert condition('myvalue')

Example 6: test_where_wrong_dimension

def test_where_wrong_dimension():
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    clause = lambda x: True

    with pytest.raises(Exception) as exc_info:
        new_dataset = dataset.where(dim3=clause)

    assert str(exc_info.value) == 'The dimension dim3 doesn\'t exist'

Example 7: test_scan_multiple_where_params

def test_scan_multiple_where_params(spark_context):
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir1/another-dir/key2'] = 'value2'

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store).where(dim1='dir1', dim2='subdir1')
    summaries = dataset.summaries(spark_context)

    expected_key = 'dir1/subdir1/key1'
    assert summaries == [{'key': expected_key, 'size': len(store.store[expected_key])}]

Example 8: test_where_dupe_dimension

def test_where_dupe_dimension():
    clause = lambda x: True
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/',
                      clauses={'dim1': clause})

    with pytest.raises(Exception) as exc_info:
        new_dataset = dataset.where(dim1=clause)

    assert str(exc_info.value) == 'There should be only one clause for dim1'

Example 9: test_records_limit_and_sample

def test_records_limit_and_sample(spark_context):
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    for i in range(1, 100 + 1):
        key = 'dir{}/subdir{}/key{}'.format(*[i] * 3)
        value = 'value{}'.format(i)
        store.store[key] = value

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store)
    # With limit=5, only 5 records are returned even though sample=0.9 matches more files.
    records = dataset.records(spark_context, decode=lambda x: x, limit=5, sample=0.9)

    assert records.count() == 5

Example 10: test_scan_with_prefix

def test_scan_with_prefix():
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    store.store['prefix1/dir1/subdir1/key1'] = 'value1'
    store.store['prefix2/dir2/another-dir/key2'] = 'value2'

    dataset = Dataset(bucket_name, ['dim1', 'dim2'],
                      clauses={'dim1': lambda x: x == 'dir1'}, store=store)
    with futures.ProcessPoolExecutor(1) as executor:
        folders = dataset._scan(['dim1', 'dim2'], ['prefix1/'], dataset.clauses, executor)
    assert list(folders) == ['prefix1/dir1/']

Example 11: test_records

def test_records(spark_context):
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store)
    records = dataset.records(spark_context, decode=lambda x: x)
    records = sorted(records.collect())

    assert records == [b'value1', b'value2']

Example 12: test_summaries_with_limit

def test_summaries_with_limit():
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store)
    summaries = list(dataset._summaries(1))

    assert len(summaries) == 1
    assert summaries[0]['key'] in store.store

Example 13: test_records_print_output

def test_records_print_output(spark_context, capsys):
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    for i in range(1, 100 + 1):
        key = 'dir{}/subdir{}/key{}'.format(*[i] * 3)
        value = 'value{}'.format(i)
        store.store[key] = value

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store)
    dataset.records(spark_context, decode=lambda x: x)

    out, err = capsys.readouterr()
    assert out.rstrip() == "fetching 0.00066MB in 100 files..."

Example 14: test_scan_no_clause

def test_scan_no_clause():
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    key = 'dir1/dir2/key1'
    value = 'value1'
    store.store[key] = value

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store)
    with futures.ProcessPoolExecutor(1) as executor:
        folders = dataset._scan(['dim1', 'subdir'], ['prefix'], {}, executor)
    assert list(folders) == ['prefix']

Example 15: test_records_summaries

def test_records_summaries(spark_context):
    bucket_name = 'test-bucket'
    store = InMemoryStore(bucket_name)
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'

    dataset = Dataset(bucket_name, ['dim1', 'dim2'], store=store, max_concurrency=1)
    records = dataset.records(spark_context, decode=lambda x: x,
                              summaries=[{'key': 'dir1/subdir1/key1', 'size': len('value1')}])
    records = records.collect()

    assert records == [b'value1']