本文整理汇总了Python中biom.table.Table.ids方法的典型用法代码示例。如果您正苦于以下问题:Python Table.ids方法的具体用法?Python Table.ids怎么用?Python Table.ids使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类biom.table.Table
的用法示例。
在下文中一共展示了Table.ids方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TopLevelTests
# 需要导入模块: from biom.table import Table [as 别名]
# 或者: from biom.table.Table import ids [as 别名]
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def setUp(self):
    """Create shared fixtures: two OTU tables, metadata, a tree, a temp file.

    Builds a 3-OTU x 6-sample count matrix and wraps it in two biom
    Tables — one with single-level taxonomy metadata, one with 4-level
    lineages — plus a parsed-mapping-file style metadata structure, a
    Newick tree string, and a temporary path for heatmap output.

    Fix: removed a dead expression statement (a sparse-dict literal of
    the same counts) that was evaluated and immediately discarded.
    """
    self.otu_table_values = array([[0, 0, 9, 5, 3, 1],
                                   [1, 5, 4, 0, 3, 2],
                                   [2, 3, 1, 1, 2, 5]])
    # Table with one taxonomy level per OTU.
    self.otu_table = Table(self.otu_table_values,
                           ['OTU1', 'OTU2', 'OTU3'],
                           ['Sample1', 'Sample2', 'Sample3',
                            'Sample4', 'Sample5', 'Sample6'],
                           [{"taxonomy": ['Bacteria']},
                            {"taxonomy": ['Archaea']},
                            {"taxonomy": ['Streptococcus']}],
                           [None, None, None, None, None, None])
    # Same counts, but with full 4-level lineages per OTU.
    self.otu_table_f = Table(self.otu_table_values,
                             ['OTU1', 'OTU2', 'OTU3'],
                             ['Sample1', 'Sample2', 'Sample3',
                              'Sample4', 'Sample5', 'Sample6'],
                             [{"taxonomy": ['1A', '1B', '1C', 'Bacteria']},
                              {"taxonomy":
                               ['2A', '2B', '2C', 'Archaea']},
                              {"taxonomy": ['3A', '3B', '3C',
                                            'Streptococcus']}],
                             [None, None, None, None, None, None])
    self.full_lineages = [['1A', '1B', '1C', 'Bacteria'],
                          ['2A', '2B', '2C', 'Archaea'],
                          ['3A', '3B', '3C', 'Streptococcus']]
    # Parsed-mapping-file shape: (rows, headers, comments).
    self.metadata = [[['Sample1', 'NA', 'A'],
                      ['Sample2', 'NA', 'B'],
                      ['Sample3', 'NA', 'A'],
                      ['Sample4', 'NA', 'B'],
                      ['Sample5', 'NA', 'A'],
                      ['Sample6', 'NA', 'B']],
                     ['SampleID', 'CAT1', 'CAT2'], []]
    self.tree_text = ["('OTU3',('OTU1','OTU2'))"]
    fh, self.tmp_heatmap_fpath = mkstemp(prefix='test_heatmap_',
                                         suffix='.pdf')
    close(fh)
def test_extract_metadata_column(self):
    """The requested category's values come back in sample order."""
    sample_ids = self.otu_table.ids()
    observed = extract_metadata_column(sample_ids, self.metadata,
                                       category='CAT2')
    expected = ['A', 'B', 'A', 'B', 'A', 'B']
    self.assertEqual(observed, expected)
def test_get_order_from_categories(self):
    """Sample indices should be clustered within each category."""
    labels = ['A', 'B', 'A', 'B', 'A', 'B']
    order = get_order_from_categories(self.otu_table, labels)
    # Reading labels in the returned order must give one contiguous run
    # per category; either category may come first.
    reordered = "".join(labels[idx] for idx in order)
    self.assertTrue(reordered in ("AAABBB", "BBBAAA"))
def test_get_order_from_tree(self):
    """Observation ordering follows the tip order of the supplied tree."""
    observation_ids = self.otu_table.ids(axis='observation')
    observed = get_order_from_tree(observation_ids, self.tree_text)
    # Tree text lists OTU3 first, then OTU1 and OTU2.
    expected = [2, 0, 1]
    assert_almost_equal(observed, expected)
def test_make_otu_labels(self):
    """Labels join the last n_levels of taxonomy with the OTU id."""
    # Single taxonomy level per OTU.
    lineages = [list(meta['taxonomy'])
                for _, _, meta in self.otu_table.iter(axis='observation')]
    observed = make_otu_labels(self.otu_table.ids(axis='observation'),
                               lineages, n_levels=1)
    self.assertEqual(observed,
                     ['Bacteria (OTU1)', 'Archaea (OTU2)',
                      'Streptococcus (OTU3)'])
    # Four-level lineages, trimmed to the last three levels.
    full_lineages = [list(meta['taxonomy'])
                     for _, _, meta
                     in self.otu_table_f.iter(axis='observation')]
    observed = make_otu_labels(self.otu_table_f.ids(axis='observation'),
                               full_lineages, n_levels=3)
    self.assertEqual(observed,
                     ['1B;1C;Bacteria (OTU1)',
                      '2B;2C;Archaea (OTU2)',
                      '3B;3C;Streptococcus (OTU3)'])
def test_names_to_indices(self):
    """A reordered name list maps back to its original positions."""
    shuffled = ['Sample4', 'Sample2', 'Sample3',
                'Sample6', 'Sample5', 'Sample1']
    indices = names_to_indices(self.otu_table.ids(), shuffled)
    assert_almost_equal(indices, [3, 1, 2, 5, 4, 0])
def test_get_log_transform(self):
obs = get_log_transform(self.otu_table)
data = [val for val in self.otu_table.iter_data(axis='observation')]
xform = asarray(data, dtype=float64)
for (i, val) in enumerate(obs.iter_data(axis='observation')):
#.........这里部分代码省略.........
示例2: FunctionTests
# 需要导入模块: from biom.table import Table [as 别名]
# 或者: from biom.table.Table import ids [as 别名]
class FunctionTests(TestCase):
def setUp(self):
    """Write two small biom tables (with and without OTU metadata) to disk.

    Creates a 4-taxon x 3-sample table, a metadata-bearing twin, two
    temp .biom files holding them, and a temp directory for rarefaction
    output; registers everything for cleanup in tearDown.
    """
    self.tmp_dir = get_qiime_temp_dir()
    self.otu_table_data = np.array([[2, 1, 0],
                                    [0, 5, 0],
                                    [0, 3, 0],
                                    [1, 2, 0]])
    self.sample_names = list('YXZ')
    self.taxon_names = list('bacd')
    self.otu_metadata = [{'domain': 'Archaea'},
                         {'domain': 'Bacteria'},
                         {'domain': 'Bacteria'},
                         {'domain': 'Bacteria'}]
    # Plain table and a twin carrying per-observation metadata.
    self.otu_table = Table(self.otu_table_data,
                           self.taxon_names,
                           self.sample_names)
    self.otu_table_meta = Table(self.otu_table_data,
                                self.taxon_names, self.sample_names,
                                observation_metadata=self.otu_metadata)
    # Reserve temp file paths, then persist both tables.
    fd, self.otu_table_fp = mkstemp(dir=self.tmp_dir,
                                    prefix='test_rarefaction',
                                    suffix='.biom')
    close(fd)
    fd, self.otu_table_meta_fp = mkstemp(dir=self.tmp_dir,
                                         prefix='test_rarefaction',
                                         suffix='.biom')
    close(fd)
    self.rare_dir = mkdtemp(dir=self.tmp_dir,
                            prefix='test_rarefaction_dir', suffix='')
    write_biom_table(self.otu_table, self.otu_table_fp)
    write_biom_table(self.otu_table_meta, self.otu_table_meta_fp)
    # tearDown removes everything listed here.
    self._paths_to_clean_up = [self.otu_table_fp, self.otu_table_meta_fp]
    self._dirs_to_clean_up = [self.rare_dir]
def tearDown(self):
""" cleanup temporary files """
map(remove, self._paths_to_clean_up)
for d in self._dirs_to_clean_up:
if os.path.exists(d):
rmtree(d)
def test_rarefy_to_list(self):
    """rarefy_to_list preserves ids and appends the full table when asked."""
    maker = RarefactionMaker(self.otu_table_fp, 0, 1, 1, 1)
    results = maker.rarefy_to_list(include_full=True)
    # With include_full=True the last entry is the unrarefied table.
    full_table = results[-1][2]
    self.assertItemsEqual(full_table.ids(), self.otu_table.ids())
    self.assertItemsEqual(full_table.ids(axis='observation'),
                          self.otu_table.ids(axis='observation'))
    self.assertEqual(full_table, self.otu_table)
    # Depth-1 rarefaction leaves one sequence per retained sample.
    sample_value_sum = [vals.sum()
                        for vals in results[1][2].iter_data(axis='sample')]
    npt.assert_almost_equal(sample_value_sum, [1.0, 1.0])
def test_rarefy_to_files(self):
    """rarefy_to_files writes a loadable biom file per depth/iteration."""
    maker = RarefactionMaker(self.otu_table_fp, 1, 2, 1, 1)
    maker.rarefy_to_files(self.rare_dir,
                          include_full=True,
                          include_lineages=False)
    fname = os.path.join(self.rare_dir, "rarefaction_1_0.biom")
    rarefied = load_table(fname)
    # The third sample had 0 seqs, so rarefaction drops it.
    self.assertItemsEqual(rarefied.ids(),
                          self.otu_table.ids()[:2])
def test_rarefy_to_files2(self):
    """Same as test_rarefy_to_files, but the input table carries OTU metadata."""
    maker = RarefactionMaker(self.otu_table_meta_fp, 1, 2, 1, 1)
    maker.rarefy_to_files(self.rare_dir,
                          include_full=True,
                          include_lineages=False)
    fname = os.path.join(self.rare_dir, "rarefaction_1_0.biom")
    rarefied = load_table(fname)
    # Sample with zero sequences is dropped here too.
    self.assertItemsEqual(rarefied.ids(),
                          self.otu_table.ids()[:2])
#.........这里部分代码省略.........
示例3: gibbs
# 需要导入模块: from biom.table import Table [as 别名]
# 或者: from biom.table.Table import ids [as 别名]
def gibbs(table_fp, mapping_fp, output_dir, loo, jobs, alpha1, alpha2, beta,
          source_rarefaction_depth, sink_rarefaction_depth,
          restarts, draws_per_restart, burnin, delay, cluster_start_delay,
          source_sink_column, source_column_value, sink_column_value,
          source_category_column):
    '''Gibbs sampler for Bayesian estimation of microbial sample sources.

    Loads the biom table at ``table_fp`` and the mapping file at
    ``mapping_fp``, synchronizes them, collapses source samples by
    ``source_category_column``, optionally rarefies sources and sinks,
    then runs the per-sample prediction (leave-one-out over sources when
    ``loo`` is true, otherwise over sinks), writing one result file per
    sample into ``output_dir``. When ``jobs`` > 1 the work is fanned out
    to an ipcluster.

    ``alpha1``, ``alpha2``, ``beta``, ``restarts``,
    ``draws_per_restart``, ``burnin`` and ``delay`` are passed through
    unchanged to the per-sample runner; see its documentation for their
    meaning. For details, see the project README file.
    '''
    # Create results directory. Click has already checked if it exists, and
    # failed if so.
    os.mkdir(output_dir)
    # Load the mapping file and biom table and remove samples which are not
    # shared.
    # NOTE(review): open(..., 'U') is deprecated and removed in newer
    # Pythons — confirm the supported interpreter version.
    o = open(mapping_fp, 'U')
    sample_metadata_lines = o.readlines()
    o.close()
    sample_metadata, biom_table = \
        _cli_sync_biom_and_sample_metadata(
            parse_mapping_file(sample_metadata_lines),
            load_table(table_fp))
    # If biom table has fractional counts, it can produce problems in indexing
    # later on.
    biom_table.transform(lambda data, id, metadata: np.ceil(data))
    # If biom table has sample metadata, there will be pickling errors when
    # submitting multiple jobs. We remove the metadata by making a copy of the
    # table without metadata.
    biom_table = Table(biom_table._data.toarray(),
                       biom_table.ids(axis='observation'),
                       biom_table.ids(axis='sample'))
    # Parse the mapping file and options to get the samples requested for
    # sources and sinks.
    source_samples, sink_samples = sinks_and_sources(
        sample_metadata, column_header=source_sink_column,
        source_value=source_column_value, sink_value=sink_column_value)
    # If we have no source samples neither normal operation or loo will work.
    # Will also likely get strange errors.
    if len(source_samples) == 0:
        raise ValueError('Mapping file or biom table passed contain no '
                         '`source` samples.')
    # Prepare the 'sources' matrix by collapsing the `source_samples` by their
    # metadata values.
    sources_envs, sources_data = collapse_sources(source_samples,
                                                  sample_metadata,
                                                  source_category_column,
                                                  biom_table, sort=True)
    # Rarefiy data if requested.
    sources_data, biom_table = \
        subsample_sources_sinks(sources_data, sink_samples, biom_table,
                                source_rarefaction_depth,
                                sink_rarefaction_depth)
    # Build function that require only a single parameter -- sample -- to
    # enable parallel processing if requested.
    if loo:
        f = partial(_cli_loo_runner, source_category=source_category_column,
                    alpha1=alpha1, alpha2=alpha2, beta=beta,
                    restarts=restarts, draws_per_restart=draws_per_restart,
                    burnin=burnin, delay=delay,
                    sample_metadata=sample_metadata,
                    sources_data=sources_data, sources_envs=sources_envs,
                    biom_table=biom_table, output_dir=output_dir)
        sample_iter = source_samples
    else:
        f = partial(_cli_sink_source_prediction_runner, alpha1=alpha1,
                    alpha2=alpha2, beta=beta, restarts=restarts,
                    draws_per_restart=draws_per_restart, burnin=burnin,
                    delay=delay, sources_data=sources_data,
                    biom_table=biom_table, output_dir=output_dir)
        sample_iter = sink_samples
    if jobs > 1:
        # Launch the ipcluster and wait for it to come up.
        subprocess.Popen('ipcluster start -n %s --quiet' % jobs, shell=True)
        time.sleep(cluster_start_delay)
        c = Client()
        c[:].map(f, sample_iter, block=True)
        # Shut the cluster down. Answer taken from SO:
        # http://stackoverflow.com/questions/30930157/stopping-ipcluster-engines-ipython-parallel
        c.shutdown(hub=True)
    else:
        # Serial fallback: run each sample in-process.
        for sample in sample_iter:
            f(sample)
    # Format results for output: one result file per sample was written
    # into output_dir; load each back and collate.
    samples = []
    samples_data = []
    for sample_fp in glob.glob(os.path.join(output_dir, '*')):
        samples.append(sample_fp.strip().split('/')[-1].split('.txt')[0])
        samples_data.append(np.loadtxt(sample_fp, delimiter='\t'))
    mp, mps = _cli_collate_results(samples, samples_data, sources_envs)
#.........这里部分代码省略.........