This article collects typical usage examples of the Python method django.db.connection.close. If you have been wondering what exactly connection.close does or how to call it, the curated code examples below may help. You can also explore the containing module, django.db.connection, in more depth.
The following presents 15 code examples of connection.close, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
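Most of the examples below share one pattern: close the current (possibly inherited or idle) connection so that Django lazily opens a fresh one on the next query. Here is a minimal sketch of that pattern around multiprocessing; the worker function and its query are illustrative assumptions, not taken from any example below:

from multiprocessing import Pool

from django.db import connection

def _work(item):
    # Each forked worker lazily opens its own fresh connection on first query.
    with connection.cursor() as cursor:
        cursor.execute("SELECT %s", [item])
        return cursor.fetchone()[0]

def run_in_workers(items, processes=4):
    # Close the parent's connection so forked children do not inherit and
    # share its socket; Django reconnects automatically when needed.
    connection.close()
    with Pool(processes=processes) as pool:
        return pool.map(_work, items)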
Example 1: run
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def run(self, chunksize=2000, parallel=4):
self.validate()
if not self.replacers:
return
chunks = self.get_queryset_chunk_iterator(chunksize)
if parallel == 0:
for objs in chunks:
_run(self, objs)
else:
connection.close()
pool = Pool(processes=parallel)
futures = [pool.apply_async(_run, (self, objs))
for objs in chunks]
for future in futures:
future.get()
pool.close()
pool.join()
Example 2: run_case
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def run_case(base_url, case_id, task_name, task_id):
    # The literal "定时任务" ("scheduled task") is kept as-is since the code stores/displays it
    report_id = run_case_by_id(base_url, case_id, task_name, "定时任务", isTask=True)
    time.sleep(5)  # wait for the report to be written to the database
    reports = ReportInfo.objects.all().filter(report_id=report_id)
    tasks = TaskInfo.objects.filter(id=task_id)
    if len(tasks) > 0:
        task = tasks[0]
        if len(reports) == 0:
            # If the report is missing, treat the case as passed; nothing more to do
            task.fail_times = 0
            task.save()
        else:
            response_result = get_response_result(report_id)
            if response_result != True:
                task.fail_times += 1
                task.save()
                # Record the failure
                failRecord = TaskFailedRecord(task_id=task,
                                              report_id=reports[0].id,
                                              time=datetime.datetime.fromtimestamp(reports[0].test_time))
                failRecord.save()
                # Send a warning mail on every second consecutive failure
                if task.fail_times % 2 == 0 and task.fail_times != 0:
                    receivers = task.receiver_email.split(';')
                    for receiver in receivers:
                        send_warn_mail(task_name, receiver, reports[0].id)
    connection.close()  # avoid piling up too many MySQL connections
Example 3: get_running_tasks
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def get_running_tasks():
    global mutex
    with mutex:
        result = []
        tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
        now = datetime.datetime.now()
        for task in tasks:
            # Guard against the same task being picked up twice
            if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)) \
                    and (now - task.last_run_time > datetime.timedelta(seconds=5)):
                result.append(task)
                task.last_run_time = now
                task.save()
            # if datetime.datetime.now() - task.last_run_time > datetime.timedelta(seconds=task.interval_minute * 60 - 5):
            #     result.append(task)
        connection.close()
        if len(result) > 0:
            for i in result:
                print("Picked up task:", i.task_name)
        return result
Example 4: restart_running_task
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def restart_running_task():
    # Clear cached loop jobs from Redis
    cache.delete_pattern("qa_paltform_loop_jobs_*")
    # Clear the distributed lock in Redis; an occasionally broken lock would
    # leave tasks blocked in the executor's run_pending
    cache.delete_pattern('*qa_test_platform_get')
    # Clear the "thread already started" flag, which prevents launching a new
    # thread for every added task (that could cause duplicate execution)
    cache.delete_pattern('qa_test_platform_running_flag')
    print("Cleared job cache, distributed lock, and thread-started flag")
    start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
    start_task_timer.start()
    tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
    for count, task in enumerate(tasks):
        # Stagger start times 10 seconds apart
        task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10 * (count + 1))
        task.save()
    connection.close()  # avoid piling up too many MySQL connections
Example 5: transfer_avatars_to_s3
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def transfer_avatars_to_s3(processes: int) -> None:
def _transfer_avatar_to_s3(user: UserProfile) -> int:
avatar_path = user_avatar_path(user)
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + ".original"
try:
with open(file_path, 'rb') as f:
s3backend.upload_avatar_image(f, user, user)
logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
except FileNotFoundError:
pass
return 0
users = list(UserProfile.objects.all())
if processes == 1:
for user in users:
_transfer_avatar_to_s3(user)
else: # nocoverage
output = []
connection.close()
for (status, job) in run_parallel(_transfer_avatar_to_s3, users, processes):
output.append(job)
Example 6: transfer_message_files_to_s3
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def transfer_message_files_to_s3(processes: int) -> None:
def _transfer_message_files_to_s3(attachment: Attachment) -> int:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", attachment.path_id)
try:
with open(file_path, 'rb') as f:
guessed_type = guess_type(attachment.file_name)[0]
upload_image_to_s3(s3backend.uploads_bucket, attachment.path_id, guessed_type, attachment.owner, f.read())
logging.info("Uploaded message file in path %s", file_path)
except FileNotFoundError: # nocoverage
pass
return 0
attachments = list(Attachment.objects.all())
if processes == 1:
for attachment in attachments:
_transfer_message_files_to_s3(attachment)
else: # nocoverage
output = []
connection.close()
for status, job in run_parallel(_transfer_message_files_to_s3, attachments, processes):
output.append(job)
Example 7: render_path
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def render_path(self, *args, **kwargs):
# Retry after an InterfaceError
    max_attempts = 5
    last_exception = None
    for i in range(max_attempts):
        try:
            return super().render_path(*args, **kwargs)
        except InterfaceError as e:
            # Keep a reference: `e` is cleared when the except block exits,
            # so `raise e` after the loop would be a NameError
            last_exception = e
            self.logger.warning("Caught InterfaceError, closing connection "
                                "and trying again (attempt #%s)",
                                i + 1, exc_info=True)
            try:
                connection.close()
            except Exception:
                pass
    self.logger.error("Failed to render page after %s attempts. "
                      "Re-raising last exception...", max_attempts)
    raise last_exception
Example 8: _send_to_users
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
async def _send_to_users(self, qs_users: QuerySet, message_obj: ChannelMessage):
    """
    Send the message to the users returned by the specified Django query set.
    This async method is kept private and is driven from the sync public method.
    :param qs_users: Django query set returning User models. The pk field will be requested via values_list(..).
    :param message_obj: Message to send.
    :return:
    """
connected_user_ids = self.get_connected_users()
if not connected_user_ids:
return
    # A workaround for the "connection already closed" problem:
    # this code path appears to reuse the same "connection" object for a long
    # time, and the connection turns out to be broken after a long idle period.
connection.close()
layer = get_channel_layer() # type: RedisChannelLayer
msg = {'type': 'send_to_client', 'message': message_obj.to_dict()}
coros = list()
for user_id in qs_users.filter(pk__in=connected_user_ids).values_list('pk', flat=True):
send_to_user_coro = layer.group_send(self.user_id_to_group_name(user_id), msg)
coros.append(send_to_user_coro)
await asyncio.gather(*coros)
Example 9: execute_parallel_loader
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def execute_parallel_loader(loader_class, *loader_args):
"""
ProcessPoolExecutor uses the multiprocessing module. Multiprocessing forks processes,
causing connection objects to be copied across processes. The key goal when running
multiple Python processes is to prevent any database connections from being shared
    across processes. Depending on the specifics of the driver and OS, the issues that arise
here range from non-working connections to socket connections that are used by multiple
processes concurrently, leading to broken messaging (e.g., 'MySQL server has gone away').
To get around this, we force each process to open its own connection to the database by
closing the existing, copied connection as soon as we're within the new process. This works
because as long as there is no existing, open connection, Django is smart enough to initialize
a new connection the next time one is necessary.
"""
connection.close()
return execute_loader(loader_class, *loader_args)
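A hypothetical dispatch for this loader using concurrent.futures; the shape of the job list and all names besides execute_parallel_loader are assumptions for illustration:

from concurrent.futures import ProcessPoolExecutor

def run_loaders(loader_jobs, max_workers=4):
    # loader_jobs: iterable of (loader_class, loader_args) tuples (hypothetical)
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(execute_parallel_loader, cls, *args)
                   for cls, args in loader_jobs]
        # Each submitted job runs in its own process and opens its own connection
        return [future.result() for future in futures]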
Example 10: close_connection
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def close_connection():
"""Closes the connection if we are not in an atomic block.
The connection should never be closed if we are in an atomic block, as
happens when running tests as part of a django TestCase. Otherwise, closing
the connection is important to avoid a connection time out after long actions.
Django does not automatically refresh a connection which has been closed
due to idleness (this normally happens in the request start/finish part
of a webapp's lifecycle, which this process does not have), so we must
do it ourselves if the connection goes idle due to stuff taking a really
long time.
source: http://stackoverflow.com/a/39322632/865091
"""
from django.db import connection
if not connection.in_atomic_block:
connection.close()
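In practice a helper like this is called between steps of a long-running job; a sketch, where process_batches and handle_batch are hypothetical:

def process_batches(batches):
    for batch in batches:
        handle_batch(batch)  # hypothetical long-running unit of work
        # No-op inside an atomic block (e.g. in tests); otherwise drops the
        # idle connection so Django reconnects on the next query.
        close_connection()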
Example 11: _get_sqs_messages
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def _get_sqs_messages(self):
logger.info("-= SQS Poller run: BATCHSIZE=%s, POLLING_INTERVAL=%s =-" %
(settings.POLLER_CONFIG['batchsize'], settings.POLLER_CONFIG['interval']))
queue_reader = sqs_reader.SQSReader()
for m in queue_reader.get_message_batch():
logger.debug("Received: %s with ID:%s" % (m.get_body(), m.id))
if not self.parse_message(m.get_body()):
logger.error('MsgID:%s could not be written, will retry again.' % m.id)
else:
try:
gevent.spawn(queue_reader.delete_message, m)
gevent.sleep(0)
logger.debug('MsgID:%s deleted from queue' % m.id)
except Exception as exp:
logger.error("Error deleting msg from SQS: %s" % exp)
connection.close()
Example 12: _get_rabbitmq_messages
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def _get_rabbitmq_messages(self, i):
queue_reader = rabbitmq_reader.RabbitMQReader()
try:
message_frame, message_body = queue_reader.get_message()
if not message_frame:
raise StopIteration()
if not self.parse_message(message_body):
logger.error('MsgID:%s could not be written, will retry again.' % message_frame.delivery_tag)
else:
try:
gevent.spawn(queue_reader.delete_message, message_frame)
gevent.sleep(0)
except Exception as exp:
logger.error("Error deleting msg from RabbitMQ: %s" % exp)
except StopIteration:
pass
connection.close()
Example 13: email_uncaught_exception
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def email_uncaught_exception(func):
"""
Function decorator for send email with uncaught exceptions to admins.
Email is sent to ``settings.DBBACKUP_FAILURE_RECIPIENTS``
(``settings.ADMINS`` if not defined). The message contains a traceback
of error.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
            return func(*args, **kwargs)
except Exception:
logger = logging.getLogger('dbbackup')
exc_type, exc_value, tb = sys.exc_info()
tb_str = ''.join(traceback.format_tb(tb))
msg = '%s: %s\n%s' % (exc_type.__name__, exc_value, tb_str)
logger.error(msg)
raise
finally:
connection.close()
return wrapper
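Usage is the ordinary decorator pattern; a hypothetical backup entry point wrapped with it (perform_backup is an assumed helper):

@email_uncaught_exception
def backup_database():
    # Any uncaught exception here is logged, emailed, and re-raised,
    # and the database connection is closed in the finally block above.
    perform_backup()  # hypothetical helper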
Example 14: compress_file
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def compress_file(inputfile, filename):
"""
Compress input file using gzip and change its name.
:param inputfile: File to compress
:type inputfile: ``file`` like object
:param filename: File's name
:type filename: ``str``
:returns: Tuple with compressed file and new file's name
:rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
"""
outputfile = create_spooled_temporary_file()
new_filename = filename + '.gz'
zipfile = gzip.GzipFile(filename=filename, fileobj=outputfile, mode="wb")
try:
inputfile.seek(0)
copyfileobj(inputfile, zipfile, settings.TMP_FILE_READ_SIZE)
finally:
zipfile.close()
return outputfile, new_filename
Example 15: uncompress_file
# Required import: from django.db import connection [as alias]
# Or: from django.db.connection import close [as alias]
def uncompress_file(inputfile, filename):
"""
Uncompress this file using gzip and change its name.
    :param inputfile: File to uncompress
:type inputfile: ``file`` like object
:param filename: File's name
:type filename: ``str``
:returns: Tuple with file and new file's name
:rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
"""
zipfile = gzip.GzipFile(fileobj=inputfile, mode="rb")
try:
outputfile = create_spooled_temporary_file(fileobj=zipfile)
finally:
zipfile.close()
new_basename = os.path.basename(filename).replace('.gz', '')
return outputfile, new_basename
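A hypothetical round trip through the two helpers above ('dump.sql' is an assumed file name):

with open('dump.sql', 'rb') as f:
    gz_file, gz_name = compress_file(f, 'dump.sql')  # -> <spooled file>, 'dump.sql.gz'

gz_file.seek(0)  # rewind before handing the compressed stream back
plain_file, plain_name = uncompress_file(gz_file, gz_name)  # -> <spooled file>, 'dump.sql'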