本文整理汇总了Python中rq.get_current_job方法的典型用法代码示例。如果您正苦于以下问题:Python rq.get_current_job方法的具体用法?Python rq.get_current_job怎么用?Python rq.get_current_job使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 rq 的用法示例。
在下文中一共展示了rq.get_current_job方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_module
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def run_module(workspace, module):
    """Run a recon module inside an RQ worker and record its status/results.

    Args:
        workspace: name of the recon workspace to operate in.
        module: dotted path of the module to load and run (rebound to the
            loaded module object once resolution succeeds).

    Returns:
        dict with an optional 'error' entry (set when the run failed) and a
        'summary' entry holding the module's summary counts (None when the
        module never loaded).
    """
    results = {}
    tasks = None  # created inside try; guarded below so an early failure can't NameError
    try:
        # instantiate important objects
        job = get_current_job()
        recon = base.Recon(check=False, analytics=False, marketplace=False)
        recon.start(base.Mode.JOB, workspace=workspace)
        tasks = Tasks(recon)
        # update the task's status
        tasks.update_task(job.get_id(), status=job.get_status())
        # execute the task
        module = recon._loaded_modules.get(module)
        module.run()
    except Exception as e:
        results['error'] = {
            'type': str(type(e)),
            'message': str(e),
            'traceback': traceback.format_exc(),
        }
    # `module` may still be the original string (or None) if loading failed;
    # getattr keeps the captured error visible instead of masking it with
    # a fresh AttributeError here
    results['summary'] = getattr(module, '_summary_counts', None)
    # update the task's status and results (only possible if setup got far
    # enough to create the task registry; `tasks` implies `job` is bound)
    if tasks is not None:
        tasks.update_task(job.get_id(), status='finished', result=results)
    return results
示例2: long_runnig_task
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def long_runnig_task(task):
    """Simulate a long-running job, reporting progress in 1% steps.

    Args:
        task: model-like object with `duration` (seconds), plus writable
            `job_id`, `result`, `progress` attributes and a `save()` method.

    Returns:
        The final value of ``task.result`` ('FINISHED').
    """
    # hoisted: the original re-imported `time` on every loop iteration
    import time
    job = get_current_job()
    task.job_id = job.get_id()
    task.result = 'STARTED'
    # one sleep slice per percentage point of the total duration
    slice_seconds = task.duration * 1.0 / 100
    for pct in range(100):
        task.progress = pct
        task.save()
        # print() form works on both Python 2 and 3 (original used a
        # Python-2-only print statement)
        print(task.progress)
        time.sleep(slice_seconds)
    task.result = 'FINISHED'
    task.save()
    return task.result
示例3: scheduled_get_url_words
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def scheduled_get_url_words(url):
    """Fetch *url* and record the response length for a scheduled task.

    A single ScheduledTask row exists per scheduled job group; each run of
    that job creates one new ScheduledTaskInstance carrying the run's result.
    """
    current = get_current_job()
    task, _created = ScheduledTask.objects.get_or_create(
        job_id=current.get_id(),
        name=url,
    )
    body = requests.get(url).text
    response_len = len(body)
    ScheduledTaskInstance.objects.create(
        scheduled_task=task,
        result=response_len,
    )
    return response_len
示例4: test_sleep
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def test_sleep(n):
    """Used only for testing -- sleeps *n* seconds and tags the job metadata.

    Returns 0 on success; re-raises (after logging) on any failure.
    """
    logger = LMLogger.get_logger()
    logger.info("Starting test_sleep({}) in pid {}".format(n, os.getpid()))
    try:
        current = get_current_job()
        # stash sample metadata so callers can inspect the worker process
        current.meta['sample'] = 'test_sleep metadata'
        current.meta['pid'] = int(os.getpid())
        current.save_meta()
        time.sleep(n)
        logger.info("Completed test_sleep in pid {}".format(os.getpid()))
        return 0
    except Exception as e:
        logger.error("Error on test_sleep in pid {}: {}".format(os.getpid(), e))
        raise
示例5: process_document
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def process_document(path, options, meta):
    """Convert an office document into the requested formats and zip them.

    Args:
        path: filesystem path of the source document.
        options: dict with 'formats' (iterable of target-format keys into
            app.config["SUPPORTED_FORMATS"]) and, optionally, 'thumbnails'
            (a dict with a 'size' entry) when preview images are wanted.
        meta: file metadata dict; only 'mimetype' is read here.

    Returns:
        URL of the zip archive containing all converted outputs.
    """
    current_task = get_current_job()
    with Office(app.config["LIBREOFFICE_PATH"]) as office:  # acquire libreoffice lock
        with office.documentLoad(path) as original_document:  # open original document
            with TemporaryDirectory() as tmp_dir:  # create temp dir where output'll be stored
                for fmt in options["formats"]:  # iterate over requested formats
                    current_format = app.config["SUPPORTED_FORMATS"][fmt]
                    output_path = os.path.join(tmp_dir, current_format["path"])
                    original_document.saveAs(output_path, fmt=current_format["fmt"])
                if options.get("thumbnails", None):
                    # locate (or create) a PDF rendition to rasterize thumbnails from
                    is_created = False
                    if meta["mimetype"] == "application/pdf":
                        # source is already a PDF -- use it directly
                        pdf_path = path
                    elif "pdf" in options["formats"]:
                        # reuse the PDF produced in the conversion loop above
                        pdf_path = os.path.join(tmp_dir, "pdf")
                    else:
                        # no PDF anywhere -- render a throwaway one just for thumbnails
                        pdf_tmp_file = NamedTemporaryFile()
                        pdf_path = pdf_tmp_file.name
                        original_document.saveAs(pdf_tmp_file.name, fmt="pdf")
                        is_created = True
                    image = Image(filename=pdf_path,
                                  resolution=app.config["THUMBNAILS_DPI"])
                    if is_created:
                        # closing the NamedTemporaryFile also deletes the throwaway PDF
                        pdf_tmp_file.close()
                    thumbnails = make_thumbnails(image, tmp_dir, options["thumbnails"]["size"])
                # archive everything under the job id; tmp_dir vanishes on exit
                result_path, result_url = make_zip_archive(current_task.id, tmp_dir)
                # schedule deletion of the archive once its TTL expires
                remove_file.schedule(
                    datetime.timedelta(seconds=app.config["RESULT_FILE_TTL"]),
                    result_path
                )
    return result_url
示例6: unregister_all_dirty
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def unregister_all_dirty(self, decrement=1):
    """Unregister this TreeItem and all of its parent paths as dirty.

    Should be called from an RQ job procedure after the cache is updated;
    decrements the dirty counter for every related pootle path.
    """
    redis_conn = get_connection()
    current_job = get_current_job()
    for path in self.all_pootle_paths():
        if current_job:
            logger.debug(
                "UNREGISTER %s (-%s) where job_id=%s", path, decrement, current_job.id
            )
        else:
            logger.debug("UNREGISTER %s (-%s)", path, decrement)
        redis_conn.zincrby(KEY_DIRTY_TREEITEMS, 0 - decrement, path)
示例7: unregister_dirty
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def unregister_dirty(self, decrement=1):
    """Unregister this TreeItem as dirty.

    Should be called from an RQ job procedure after the cache is updated.
    """
    redis_conn = get_connection()
    current_job = get_current_job()
    if current_job:
        logger.debug(
            "UNREGISTER %s (-%s) where job_id=%s",
            self.cache_key, decrement, current_job.id,
        )
    else:
        logger.debug("UNREGISTER %s (-%s)", self.cache_key, decrement)
    redis_conn.zincrby(KEY_DIRTY_TREEITEMS, 0 - decrement, self.cache_key)
示例8: update_cache_job
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def update_cache_job(instance):
    """RQ job: refresh *instance*'s cache using the parameters stored for this job."""
    current = get_current_job()
    wrapper = JobWrapper(current.id, current.connection)
    keys, decrement = wrapper.get_job_params()
    # close unusable and obsolete connections before and after the job
    # Note: setting CONN_MAX_AGE parameter can have negative side-effects
    # CONN_MAX_AGE value should be lower than DB wait_timeout
    connection.close_if_unusable_or_obsolete()
    instance._update_cache_job(keys, decrement)
    connection.close_if_unusable_or_obsolete()
    wrapper.clear_job_params()
示例9: get_url_words
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def get_url_words(url):
    """Fetch *url*, persist a Task row for this job, and return the body length."""
    # This creates a Task instance to save the job instance and job result
    current = get_current_job()
    task = Task.objects.create(
        job_id=current.get_id(),
        name=url,
    )
    body = requests.get(url).text
    task.result = len(body)
    task.save()
    return task.result
示例10: set_task_progress
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def set_task_progress(progress):
    """Record *progress* (0-100) in the current RQ job's metadata.

    When progress reaches 100, the matching Task row is marked complete.
    No-op when called outside an RQ worker.
    """
    current = get_current_job()
    if not current:
        return
    current.meta["progress"] = progress
    current.save_meta()
    if progress < 100:
        return
    task = Task.query.filter_by(job_id=current.get_id()).first()
    if task:
        task.complete = True
        db.session.commit()
示例11: add
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def add(x, y):
    """Example job: create a Task record, mark it 100% done, and return x + y."""
    current = get_current_job()
    new_task = Task(
        job_id=current.id,
        name="Test Task",
        description="Trying things out",
    )
    db.session.add(new_task)
    db.session.commit()
    set_task_progress(100)
    return x + y
示例12: publish_repository
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def publish_repository(repository: Repository, username: str, access_token: str,
                       remote: Optional[str] = None, public: bool = False, id_token: str = None) -> None:
    """Publish *repository* to its remote from inside an RQ worker.

    Progress and failure details are surfaced to the UI via the current
    job's metadata through the nested feedback callback.
    """
    pid = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {pid}) Starting publish_repository({str(repository)})")

    def update_feedback(msg: str, has_failures: Optional[bool] = None, failure_detail: Optional[str] = None,
                        percent_complete: Optional[float] = None):
        """Method to update the job's metadata and provide feedback to the UI"""
        rq_job = get_current_job()
        if not rq_job:
            return
        if has_failures:
            rq_job.meta['has_failures'] = has_failures
        if failure_detail:
            rq_job.meta['failure_detail'] = failure_detail
        if percent_complete:
            rq_job.meta['percent_complete'] = percent_complete
        rq_job.meta['feedback'] = msg
        rq_job.save_meta()

    update_feedback("Publish task in queue")
    with repository.lock():
        if isinstance(repository, LabBook):
            workflow = LabbookWorkflow(repository)
        else:
            workflow = DatasetWorkflow(repository)  # type: ignore
        workflow.publish(username=username, access_token=access_token, remote=remote or "origin",
                         public=public, feedback_callback=update_feedback, id_token=id_token)
示例13: sync_repository
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def sync_repository(repository: Repository, username: str, override: MergeOverride,
                    remote: str = "origin", access_token: str = None,
                    pull_only: bool = False, id_token: str = None) -> int:
    """Sync *repository* with its remote inside an RQ worker.

    Returns the count reported by the workflow's sync; re-raises
    MergeConflict (after logging) so the scheduler can surface it.
    """
    pid = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {pid}) Starting sync_repository({str(repository)})")

    def update_feedback(msg: str, has_failures: Optional[bool] = None, failure_detail: Optional[str] = None,
                        percent_complete: Optional[float] = None):
        """Method to update the job's metadata and provide feedback to the UI"""
        rq_job = get_current_job()
        if not rq_job:
            return
        if has_failures:
            rq_job.meta['has_failures'] = has_failures
        if failure_detail:
            rq_job.meta['failure_detail'] = failure_detail
        if percent_complete:
            rq_job.meta['percent_complete'] = percent_complete
        rq_job.meta['feedback'] = msg
        rq_job.save_meta()

    try:
        update_feedback("Sync task in queue")
        with repository.lock():
            if isinstance(repository, LabBook):
                workflow = LabbookWorkflow(repository)
            else:
                workflow = DatasetWorkflow(repository)  # type: ignore
            cnt = workflow.sync(username=username, remote=remote, override=override,
                                feedback_callback=update_feedback, access_token=access_token,
                                id_token=id_token, pull_only=pull_only)
        logger.info(f"(Job {pid} Completed sync_repository with cnt={cnt}")
        return cnt
    except MergeConflict as me:
        logger.exception(f"(Job {pid}) Merge conflict: {me}")
        raise
示例14: import_labbook_from_remote
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def import_labbook_from_remote(remote_url: str, username: str, config_file: str = None) -> str:
    """Return the root directory of the newly imported Project

    Args:
        remote_url: Canonical world-facing URI, like "https://repo.domain/owner/project". This will be converted to the
            actual network location for our repository, like "https://username@repo.domain/owner/project.git/", as
            robustly as we can manage.
        username: username for currently logged in user
        config_file: a copy of the parsed config file

    Returns:
        Path to project root directory
    """
    pid = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {pid}) Starting import_labbook_from_remote({remote_url}, {username})")

    def update_meta(msg):
        """Append *msg* to the current job's feedback metadata (newline-separated)."""
        rq_job = get_current_job()
        if not rq_job:
            return
        if 'feedback' in rq_job.meta:
            rq_job.meta['feedback'] = rq_job.meta['feedback'] + f'\n{msg}'
        else:
            rq_job.meta['feedback'] = msg
        rq_job.save_meta()

    remote = RepoLocation(remote_url, username)
    update_meta(f"Importing Project from {remote.owner_repo!r}...")
    try:
        wf = LabbookWorkflow.import_from_remote(remote, username, config_file)
    except Exception as e:
        update_meta(f"Could not import Project from {remote.remote_location}.")
        logger.exception(f"(Job {pid}) Error on import_labbook_from_remote: {e}")
        raise
    update_meta(f"Imported Project {wf.labbook.name}!")
    return wf.labbook.root_dir
示例15: import_labboook_from_zip
# 需要导入模块: import rq [as 别名]
# 或者: from rq import get_current_job [as 别名]
def import_labboook_from_zip(archive_path: str, username: str, owner: str,
                             config_file: Optional[str] = None) -> str:
    """Method to import a labbook from a zip file

    Args:
        archive_path(str): Path to the uploaded zip
        username(str): Username
        owner(str): Owner username
        config_file(str): Optional path to a labmanager config file

    Returns:
        str: directory path of imported labbook
    """
    def update_meta(msg):
        """Replace the current job's feedback metadata with *msg*."""
        rq_job = get_current_job()
        if not rq_job:
            return
        rq_job.meta['feedback'] = msg
        rq_job.save_meta()

    pid = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {pid}) Starting import_labbook_from_zip(archive_path={archive_path},"
                f"username={username}, owner={owner}, config_file={config_file})")
    try:
        lb = ZipExporter.import_labbook(archive_path, username, owner,
                                        config_file=config_file,
                                        update_meta=update_meta)
        return lb.root_dir
    except Exception as e:
        logger.exception(f"(Job {pid}) Error on import_labbook_from_zip({archive_path}): {e}")
        raise
    finally:
        # always remove the uploaded archive, on success or failure
        if os.path.exists(archive_path):
            os.remove(archive_path)