This page collects typical usage examples of the Python method biom.Table.to_hdf5. If you are wondering what Table.to_hdf5 does, how to call it, or where to find examples of it in use, the curated code samples below may help. You can also explore further usage of its containing class, biom.Table.
Five code examples of Table.to_hdf5 are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
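Before the harvested examples, here is a minimal sketch of the typical write/read round trip (assuming biom-format 2.x with h5py installed; the file name example.biom and the "Example" generated-by string are placeholders, not taken from the examples below):

import numpy as np
from biom import Table, load_table
from biom.util import biom_open

# Build a small table: 2 observations x 3 samples (illustrative data)
data = np.asarray([[0, 0, 1], [1, 3, 42]])
table = Table(data, ['O1', 'O2'], ['S1', 'S2', 'S3'])

# to_hdf5 expects an open, writable HDF5 file handle; biom_open yields
# one when used in 'w' mode. The second argument is the "generated by"
# string recorded in the file's metadata.
with biom_open('example.biom', 'w') as f:
    table.to_hdf5(f, "Example")

# Read the file back to confirm the sample ids survived the round trip
obs = load_table('example.biom')
print(list(obs.ids()))  # ['S1', 'S2', 'S3']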
Example 1: test_execute_job_error
# Required import: from biom import Table [as alias]
# Or: from biom.Table import to_hdf5 [as alias]
def test_execute_job_error(self):
    # Create a prep template
    prep_info = {'SKB8.640193': {'col': 'val1'},
                 'SKD8.640184': {'col': 'val2'}}
    data = {'prep_info': dumps(prep_info),
            'study': 1,
            'data_type': '16S'}
    template = self.qclient.post(
        '/apitest/prep_template/', data=data)['prep']
    # Create a new validate job
    fd, biom_fp = mkstemp(suffix=".biom")
    close(fd)
    data = np.random.randint(100, size=(2, 2))
    table = Table(data, ['O1', 'O2'], ['S1', 'S2'])
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(f, "Test")
    data = {'command': dumps(['BIOM type', '2.1.4', 'Validate']),
            'parameters': dumps(
                {'files': dumps({'biom': [biom_fp]}),
                 'template': template,
                 'artifact_type': 'BIOM'}),
            'artifact_type': 'BIOM',
            'status': 'queued'}
    job_id = self.qclient.post(
        '/apitest/processing_job/', data=data)['job']
    plugin("https://localhost:21174", job_id, self.out_dir)
    obs = self._wait_job(job_id)
    self.assertEqual(obs, 'error')
Example 2: test_validate_prefix
# Required import: from biom import Table [as alias]
# Or: from biom.Table import to_hdf5 [as alias]
def test_validate_prefix(self):
    httpretty.register_uri(
        httpretty.POST,
        "https://test_server.com/qiita_db/jobs/job-id/step/")
    httpretty.register_uri(
        httpretty.GET,
        "https://test_server.com/qiita_db/prep_template/1/data",
        body='{"data": {"1.S1": {"orig_name": "S1"}, "1.S2": '
             '{"orig_name": "S2"}, "1.S3": {"orig_name": "S3"}}}')
    fd, biom_fp = mkstemp(suffix=".biom")
    close(fd)
    data = np.asarray([[0, 0, 1], [1, 3, 42]])
    table = Table(data, ['O1', 'O2'], ['S1', 'S2', 'S3'])
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(f, "Test")
    self._clean_up_files.append(biom_fp)
    self.parameters['files'] = '{"BIOM": ["%s"]}' % biom_fp
    obs_success, obs_ainfo, obs_error = validate(
        self.qclient, 'job-id', self.parameters, self.out_dir)
    exp_biom_fp = join(self.out_dir, basename(biom_fp))
    self._clean_up_files.append(exp_biom_fp)
    self.assertTrue(obs_success)
    self.assertEqual(obs_ainfo, [[None, 'BIOM', [exp_biom_fp, 'biom']]])
    self.assertEqual(obs_error, "")
    obs_t = load_table(exp_biom_fp)
    self.assertItemsEqual(obs_t.ids(), ["1.S1", "1.S2", "1.S3"])
Example 3: setUp
# Required import: from biom import Table [as alias]
# Or: from biom.Table import to_hdf5 [as alias]
def setUp(self):
    # Register the URIs for the QiitaClient
    httpretty.register_uri(
        httpretty.POST,
        "https://test_server.com/qiita_db/authenticate/",
        body='{"access_token": "token", "token_type": "Bearer", '
             '"expires_in": "3600"}')
    self.qclient = QiitaClient('https://test_server.com', 'client_id',
                               'client_secret')
    # Create a biom table
    fd, self.biom_fp = mkstemp(suffix=".biom")
    close(fd)
    data = np.asarray([[0, 0, 1], [1, 3, 42]])
    table = Table(data, ['O1', 'O2'], ['1.S1', '1.S2', '1.S3'])
    with biom_open(self.biom_fp, 'w') as f:
        table.to_hdf5(f, "Test")
    self.out_dir = mkdtemp()
    self.artifact_id = 4
    self.parameters = {'input_data': self.artifact_id}
    self._clean_up_files = [self.biom_fp, self.out_dir]
Example 4: _create_job_and_biom
# Required import: from biom import Table [as alias]
# Or: from biom.Table import to_hdf5 [as alias]
def _create_job_and_biom(self, sample_ids, template=None, analysis=None):
    # Create the BIOM table that needs to be validated
    fd, biom_fp = mkstemp(suffix=".biom")
    close(fd)
    data = np.random.randint(100, size=(2, len(sample_ids)))
    table = Table(data, ['O1', 'O2'], sample_ids)
    with biom_open(biom_fp, 'w') as f:
        table.to_hdf5(f, "Test")
    self._clean_up_files.append(biom_fp)
    # Create a new job
    parameters = {'template': template,
                  'files': dumps({'biom': [biom_fp]}),
                  'artifact_type': 'BIOM',
                  'analysis': analysis}
    data = {'command': dumps(['BIOM type', '2.1.4', 'Validate']),
            'parameters': dumps(parameters),
            'status': 'running'}
    res = self.qclient.post('/apitest/processing_job/', data=data)
    job_id = res['job']
    return biom_fp, job_id, parameters
Example 5: create_non_rarefied_biom_artifact
# Required import: from biom import Table [as alias]
# Or: from biom.Table import to_hdf5 [as alias]
def create_non_rarefied_biom_artifact(analysis, biom_data, rarefied_table):
    """Creates the initial non-rarefied BIOM artifact of the analysis

    Parameters
    ----------
    analysis : dict
        Dictionary with the analysis information
    biom_data : dict
        Dictionary with the biom file information
    rarefied_table : biom.Table
        The rarefied BIOM table

    Returns
    -------
    int
        The id of the new artifact
    """
    # The non-rarefied biom artifact is the initial biom table of the
    # analysis. This table does not currently exist anywhere, so we need to
    # actually create the BIOM file. To create this BIOM file we need:
    # (1) the samples and the artifacts they come from, and (2) whether the
    # samples were renamed or not. (1) is in the database, but we need to
    # infer (2) from the existing rarefied BIOM table. Fun, fun...
    with TRN:
        # Get the samples included in the BIOM table grouped by artifact id.
        # Note that the analysis contains a BIOM table per data type
        # included in it, and the table analysis_sample does not
        # differentiate between data types, so we need to check the data
        # type in the artifact table
        sql = """SELECT artifact_id, array_agg(sample_id)
                 FROM qiita.analysis_sample
                 JOIN qiita.artifact USING (artifact_id)
                 WHERE analysis_id = %s AND data_type_id = %s
                 GROUP BY artifact_id"""
        TRN.add(sql, [analysis['analysis_id'], biom_data['data_type_id']])
        samples_by_artifact = TRN.execute_fetchindex()

        # Create an empty BIOM table to be the new master table
        new_table = Table([], [], [])
        ids_map = {}
        for a_id, samples in samples_by_artifact:
            # Get the filepath of the BIOM table from the artifact
            artifact = Artifact(a_id)
            biom_fp = None
            for _, fp, fp_type in artifact.filepaths:
                if fp_type == 'biom':
                    biom_fp = fp
            # Note that we are sure that the biom table exists, so there is
            # no need to check whether biom_fp is undefined
            biom_table = load_table(biom_fp)
            samples = set(samples).intersection(biom_table.ids())
            biom_table.filter(samples, axis='sample', inplace=True)
            # We need to check if the table has samples left before merging
            if biom_table.shape[0] != 0 and biom_table.shape[1] != 0:
                new_table = new_table.merge(biom_table)
                ids_map.update({sid: "%d.%s" % (a_id, sid)
                                for sid in biom_table.ids()})

        # Check if we need to rename the sample ids in the biom table
        new_table_ids = set(new_table.ids())
        if not new_table_ids.issuperset(rarefied_table.ids()):
            # We need to rename the sample ids
            new_table.update_ids(ids_map, 'sample', True, True)

        sql = """INSERT INTO qiita.artifact
                    (generated_timestamp, data_type_id, visibility_id,
                     artifact_type_id, submitted_to_vamps)
                 VALUES (%s, %s, %s, %s, %s)
                 RETURNING artifact_id"""
        # Magic number 4 -> visibility sandbox
        # Magic number 7 -> biom artifact type
        TRN.add(sql, [analysis['timestamp'], biom_data['data_type_id'],
                      4, 7, False])
        artifact_id = TRN.execute_fetchlast()

        # Associate the artifact with the analysis
        sql = """INSERT INTO qiita.analysis_artifact
                    (analysis_id, artifact_id)
                 VALUES (%s, %s)"""
        TRN.add(sql, [analysis['analysis_id'], artifact_id])

        # Link the artifact with its file
        dd_id, mp = get_mountpoint('BIOM')[0]
        dir_fp = join(get_db_files_base_dir(), mp, str(artifact_id))
        if not exists(dir_fp):
            makedirs(dir_fp)
        new_table_fp = join(dir_fp, "biom_table.biom")
        with biom_open(new_table_fp, 'w') as f:
            new_table.to_hdf5(f, "Generated by Qiita")

        sql = """INSERT INTO qiita.filepath
                    (filepath, filepath_type_id, checksum,
                     checksum_algorithm_id, data_directory_id)
                 VALUES (%s, %s, %s, %s, %s)
                 RETURNING filepath_id"""
        # Magic number 7 -> filepath_type_id = 'biom'
        # Magic number 1 -> the checksum algorithm id
        TRN.add(sql, [basename(new_table_fp), 7,
                      compute_checksum(new_table_fp), 1, dd_id])
        fp_id = TRN.execute_fetchlast()

        sql = """INSERT INTO qiita.artifact_filepath
# ......... part of the code omitted here .........