This article collects typical usage examples of the Python method base.populators.DatasetPopulator.get_history_dataset_content. If you have been wondering what DatasetPopulator.get_history_dataset_content does and how to call it, the hand-picked code examples here should help. You can also explore the containing class, base.populators.DatasetPopulator, for further usage examples.
Six code examples of DatasetPopulator.get_history_dataset_content are shown below, sorted by popularity.
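Before the examples, here is a minimal sketch of the typical call pattern. It is stitched together from the examples below and is not a verbatim Galaxy test: the method name test_fetch_content is made up for illustration, and it assumes a Galaxy API test case that exposes self.galaxy_interactor, as every example below does.
# A minimal sketch, assuming a Galaxy API test case providing self.galaxy_interactor.
# The method name is hypothetical; the call pattern mirrors the examples below.
from base.populators import DatasetPopulator

def test_fetch_content(self):
    dataset_populator = DatasetPopulator(self.galaxy_interactor)
    history_id = dataset_populator.new_history()
    dataset_populator.new_dataset(history_id, content="1 2 3")
    dataset_populator.wait_for_history(history_id, assert_ok=True)
    # Example 1 calls it with only the history_id right after running a tool;
    # the dataset_id=, hid= and dataset= keyword variants appear in
    # Examples 2, 3 and 5 respectively.
    content = dataset_populator.get_history_dataset_content(history_id)
    # Example 4 suggests pasted content comes back with a trailing newline.
    assert content == "1 2 3\n"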
Example 1: DockerizedJobsIntegrationTestCase
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
class DockerizedJobsIntegrationTestCase(integration_util.IntegrationTestCase, RunsEnvironmentJobs):

    framework_tool_and_types = True

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        cls.jobs_directory = tempfile.mkdtemp()
        config["jobs_directory"] = cls.jobs_directory
        config["job_config_file"] = DOCKERIZED_JOB_CONFIG_FILE
        # Disable tool dependency resolution.
        config["tool_dependency_dir"] = "none"
        config["enable_beta_mulled_containers"] = "true"

    def setUp(self):
        super(DockerizedJobsIntegrationTestCase, self).setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.history_id = self.dataset_populator.new_history()

    def test_explicit(self):
        self.dataset_populator.run_tool("mulled_example_explicit", {}, self.history_id)
        self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)
        output = self.dataset_populator.get_history_dataset_content(self.history_id)
        assert "0.7.15-r1140" in output

    def test_mulled_simple(self):
        self.dataset_populator.run_tool("mulled_example_simple", {}, self.history_id)
        self.dataset_populator.wait_for_history(self.history_id, assert_ok=True)
        output = self.dataset_populator.get_history_dataset_content(self.history_id)
        assert "0.7.15-r1140" in output

    def test_docker_job_environment(self):
        job_env = self._run_and_get_environment_properties("job_environment_default")
        euid = os.geteuid()
        egid = os.getgid()
        assert job_env.user_id == str(euid), job_env.user_id
        assert job_env.group_id == str(egid), job_env.group_id
        assert job_env.pwd.startswith(self.jobs_directory)
        assert job_env.pwd.endswith("/working")
        assert job_env.home.startswith(self.jobs_directory)
        assert job_env.home.endswith("/home")

    def test_docker_job_environment_legacy(self):
        job_env = self._run_and_get_environment_properties("job_environment_default_legacy")
        euid = os.geteuid()
        egid = os.getgid()
        assert job_env.user_id == str(euid), job_env.user_id
        assert job_env.group_id == str(egid), job_env.group_id
        assert job_env.pwd.startswith(self.jobs_directory)
        assert job_env.pwd.endswith("/working")
        # Should we change env_pass_through to just always include TMP and HOME for docker?
        # I'm not sure, if yes this would change.
        assert job_env.home == "/", job_env.home
Example 2: test_runs_on_mule
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
def test_runs_on_mule(self):
    tool_id = 'config_vars'
    expect_server_name = self.expected_server_name
    dataset_populator = DatasetPopulator(self.galaxy_interactor)
    history_id = dataset_populator.new_history()
    payload = dataset_populator.run_tool(
        tool_id=tool_id,
        inputs={'var': 'server_name'},
        history_id=history_id,
    )
    dataset_id = payload['outputs'][0]['id']
    dataset_populator.wait_for_dataset(history_id, dataset_id, assert_ok=True)
    output = dataset_populator.get_history_dataset_content(history_id, dataset_id=dataset_id).strip()
    assert output.startswith(expect_server_name), (
        "Job handler's server name '{output}' does not start with expected string '{expected}'".format(
            output=output,
            expected=expect_server_name,
        )
    )
Example 3: MaximumWorkflowJobsPerSchedulingIterationTestCase
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
class MaximumWorkflowJobsPerSchedulingIterationTestCase(integration_util.IntegrationTestCase):

    framework_tool_and_types = True

    def setUp(self):
        super(MaximumWorkflowJobsPerSchedulingIterationTestCase, self).setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        config["maximum_workflow_jobs_per_scheduling_iteration"] = 1

    def do_test(self):
        workflow_id = self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
- type: input_collection
- tool_id: collection_creates_pair
  state:
    input1:
      $link: 0
- tool_id: collection_paired_test
  state:
    f1:
      $link: 1#paired_output
- tool_id: cat_list
  state:
    input1:
      $link: 2#out1
""")
        with self.dataset_populator.test_history() as history_id:
            hdca1 = self.dataset_collection_populator.create_list_in_history(history_id, contents=["a\nb\nc\nd\n", "e\nf\ng\nh\n"]).json()
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            inputs = {
                '0': {"src": "hdca", "id": hdca1["id"]},
            }
            invocation_id = self.workflow_populator.invoke_workflow(history_id, workflow_id, inputs)
            self.workflow_populator.wait_for_workflow(history_id, workflow_id, invocation_id)
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            self.assertEqual("a\nc\nb\nd\ne\ng\nf\nh\n", self.dataset_populator.get_history_dataset_content(history_id, hid=0))
Example 4: HistoriesApiTestCase
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
#......... (part of this example's code is omitted) .........
            assert self._update(history_id, {str_key: False}).status_code == 400
        for bool_key in ['deleted', 'importable', 'published']:
            assert self._update(history_id, {bool_key: "a string"}).status_code == 400
        assert self._update(history_id, {"tags": "a simple string"}).status_code == 400
        assert self._update(history_id, {"tags": [True]}).status_code == 400

    def test_invalid_keys(self):
        invalid_history_id = "1234123412341234"
        assert self._get("histories/%s" % invalid_history_id).status_code == 400
        assert self._update(invalid_history_id, {"name": "new name"}).status_code == 400
        assert self._delete("histories/%s" % invalid_history_id).status_code == 400
        assert self._post("histories/deleted/%s/undelete" % invalid_history_id).status_code == 400

    def test_create_anonymous_fails(self):
        post_data = dict(name="CannotCreate")
        # Using lower-level _api_url will cause key to not be injected.
        histories_url = self._api_url("histories")
        create_response = post(url=histories_url, data=post_data)
        self._assert_status_code_is(create_response, 403)

    def test_import_export(self):
        history_name = "for_export"
        history_id = self.dataset_populator.new_history(name=history_name)
        self.dataset_populator.new_dataset(history_id, content="1 2 3")
        imported_history_id = self._reimport_history(history_id, history_name)
        contents_response = self._get("histories/%s/contents" % imported_history_id)
        self._assert_status_code_is(contents_response, 200)
        contents = contents_response.json()
        assert len(contents) == 1
        imported_content = self.dataset_populator.get_history_dataset_content(
            history_id=imported_history_id,
            dataset_id=contents[0]["id"]
        )
        assert imported_content == "1 2 3\n"

    def test_import_export_collection(self):
        from nose.plugins.skip import SkipTest
        raise SkipTest("Collection import/export not yet implemented")
        history_name = "for_export_with_collections"
        history_id = self.dataset_populator.new_history(name=history_name)
        self.dataset_collection_populator.create_list_in_history(history_id, contents=["Hello", "World"])
        imported_history_id = self._reimport_history(history_id, history_name)
        contents_response = self._get("histories/%s/contents" % imported_history_id)
        self._assert_status_code_is(contents_response, 200)
        contents = contents_response.json()
        assert len(contents) == 3

    def _reimport_history(self, history_id, history_name):
        # Ensure the history is ready to go...
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        # Export the history.
        download_path = self._export(history_id)
        # Create download for history
        full_download_url = "%s%s?key=%s" % (self.url, download_path, self.galaxy_interactor.api_key)
        download_response = get(full_download_url)
        self._assert_status_code_is(download_response, 200)
Example 5: ToolsUploadTestCase
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
#......... (part of this example's code is omitted) .........
        bam_path = TestDataResolver().get_filename("1.bam")
        with open(bam_path, "rb") as fh:
            details = self._upload_and_get_details(fh, file_type="auto")
        assert details["state"] == "ok"
        assert details["file_ext"] == "bam", details

    def test_fetch_metadata(self):
        table = ONE_TO_SIX_WITH_SPACES
        details = self._upload_and_get_details(table, api='fetch', dbkey="hg19", info="cool upload", tags=["name:data", "group:type:paired-end"])
        assert details.get("genome_build") == "hg19"
        assert details.get("misc_info") == "cool upload", details
        tags = details.get("tags")
        assert len(tags) == 2, details
        assert "group:type:paired-end" in tags
        assert "name:data" in tags

    def test_upload_multiple_files_1(self):
        with self.dataset_populator.test_history() as history_id:
            payload = self.dataset_populator.upload_payload(history_id, "Test123",
                dbkey="hg19",
                extra_inputs={
                    "files_1|url_paste": "SecondOutputContent",
                    "files_1|NAME": "SecondOutputName",
                    "files_1|file_type": "tabular",
                    "files_1|dbkey": "hg18",
                    "file_count": "2",
                }
            )
            run_response = self.dataset_populator.tools_post(payload)
            self.dataset_populator.wait_for_tool_run(history_id, run_response)
            datasets = run_response.json()["outputs"]
            assert len(datasets) == 2, datasets
            content = self.dataset_populator.get_history_dataset_content(history_id, dataset=datasets[0])
            assert content.strip() == "Test123"
            assert datasets[0]["file_ext"] == "txt"
            assert datasets[0]["genome_build"] == "hg19", datasets
            content = self.dataset_populator.get_history_dataset_content(history_id, dataset=datasets[1])
            assert content.strip() == "SecondOutputContent"
            assert datasets[1]["file_ext"] == "tabular"
            assert datasets[1]["genome_build"] == "hg18", datasets

    def test_upload_multiple_files_2(self):
        with self.dataset_populator.test_history() as history_id:
            payload = self.dataset_populator.upload_payload(history_id, "Test123",
                file_type="tabular",
                dbkey="hg19",
                extra_inputs={
                    "files_1|url_paste": "SecondOutputContent",
                    "files_1|NAME": "SecondOutputName",
                    "files_1|file_type": "txt",
                    "files_1|dbkey": "hg18",
                    "file_count": "2",
                }
            )
            run_response = self.dataset_populator.tools_post(payload)
            self.dataset_populator.wait_for_tool_run(history_id, run_response)
            datasets = run_response.json()["outputs"]
            assert len(datasets) == 2, datasets
            content = self.dataset_populator.get_history_dataset_content(history_id, dataset=datasets[0])
            assert content.strip() == "Test123"
            assert datasets[0]["file_ext"] == "tabular", datasets
            assert datasets[0]["genome_build"] == "hg19", datasets
Example 6: ToolsUploadTestCase
# Required import: from base.populators import DatasetPopulator [as alias]
# Or: from base.populators.DatasetPopulator import get_history_dataset_content [as alias]
#......... (part of this example's code is omitted) .........
"files_1|space_to_tab": "Yes",
"files_2|url_paste": "log\rcontent",
"files_2|type": "upload_dataset",
})
roadmaps_content = self._get_roadmaps_content(history_id, dataset)
assert roadmaps_content.strip() == "roadmaps\ncontent", roadmaps_content
def test_upload_dbkey(self):
with self.dataset_populator.test_history() as history_id:
payload = self.dataset_populator.upload_payload(history_id, "Test123", dbkey="hg19")
run_response = self.dataset_populator.tools_post(payload)
self.dataset_populator.wait_for_tool_run(history_id, run_response)
datasets = run_response.json()["outputs"]
assert datasets[0].get("genome_build") == "hg19", datasets[0]