本文整理汇总了Python中superdesk.lock.lock函数的典型用法代码示例。如果您正苦于以下问题:Python lock函数的具体用法?Python lock怎么用?Python lock使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了lock函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_scheduled_content
def create_scheduled_content(now=None):
    """Create archive items from templates whose schedule is due.

    Guarded by a distributed lock so only one worker runs the task at a
    time. Returns the list of created items, or ``None`` when the lock is
    already held or the task fails.

    :param now: reference time for schedule evaluation; defaults to utcnow()
    """
    lock_name = get_lock_id("Template", "Schedule")
    if not lock(lock_name, expire=130):
        logger.info('Task: {} is already running.'.format(lock_name))
        return
    try:
        if now is None:
            now = utcnow()
        created = []
        production = superdesk.get_resource_service(ARCHIVE)
        for template in get_scheduled_templates(now):
            set_template_timestamps(template, now)
            item = get_item_from_template(template)
            item[config.VERSION] = 1
            production.post([item])
            insert_into_versions(doc=item)
            try:
                # On-stage rules are best-effort: a failure must not stop
                # the remaining scheduled templates from being created.
                apply_onstage_rule(item, item.get(config.ID_FIELD))
            except Exception as ex:  # noqa
                logger.exception('Failed to apply on stage rule while scheduling template.')
            created.append(item)
        return created
    except Exception as e:
        logger.exception('Task: {} failed with error {}.'.format(lock_name, str(e)))
    finally:
        unlock(lock_name)
示例2: run
def run(self, page_size=None):
    """Import expired published items into the legal archive in pages.

    Each item is upserted into the legal archive; any matching publish
    queue entries are imported as well. Failures are logged per item so
    one bad record does not abort the whole run.

    :param page_size: batch size for expired-item pagination; defaults to
        ``self.default_page_size``.
    """
    logger.info("Import to Legal Archive")
    lock_name = get_lock_id("legal_archive", "import_to_legal_archive")
    page_size = int(page_size) if page_size else self.default_page_size
    if not lock(lock_name, "", expire=1800):
        return
    try:
        legal_archive_import = LegalArchiveImport()
        publish_queue = get_resource_service("publish_queue")
        for items in self.get_expired_items(page_size):
            for item in items:
                try:
                    legal_archive_import.upsert_into_legal_archive(item.get("item_id"))
                    req = ParsedRequest()
                    req.where = json.dumps({"item_id": item["item_id"]})
                    queue_items = list(publish_queue.get(req=req, lookup=None))
                    if queue_items:
                        try:
                            logger.info("Import to Legal Publish Queue")
                            legal_archive_import.process_queue_items(queue_items, True)
                        # Narrowed from a bare ``except:`` so SystemExit /
                        # KeyboardInterrupt are not swallowed.
                        except Exception:
                            logger.exception(
                                "Failed to import into legal publish queue "
                                "archive via command {}.".format(item.get("item_id"))
                            )
                except Exception:
                    logger.exception(
                        "Failed to import into legal " "archive via command {}.".format(item.get("item_id"))
                    )
    except Exception:
        logger.exception("Failed to import into legal archive.")
    finally:
        unlock(lock_name, "")
示例3: report
def report():
    """Check all saved_searches with subscribers, and publish reports"""
    if not lock(LOCK_NAME, expire=REPORT_SOFT_LIMIT + 10):
        return
    try:
        saved_searches = get_resource_service('saved_searches')
        tz = pytz.timezone(superdesk.app.config['DEFAULT_TIMEZONE'])
        now = datetime.now(tz=tz)
        for search in saved_searches.find({"subscribers": {"$exists": 1}}):
            # Both user and desk subscriptions are always processed; the
            # search document is persisted only when either reports a change.
            user_subs = search['subscribers'].get('user_subscriptions', [])
            desk_subs = search['subscribers'].get('desk_subscriptions', [])
            changed = process_subscribers(user_subs, search, now, False)
            changed = process_subscribers(desk_subs, search, now, True) or changed
            if changed:
                saved_searches.update(search['_id'], {'subscribers': search['subscribers']}, search)
    except Exception as e:
        logger.error("Can't report saved searched: {reason}".format(reason=e))
    finally:
        unlock(LOCK_NAME)
示例4: run
def run(self, expiry_days=None):
    """Remove content_api items older than the configured expiry window.

    :param expiry_days: optional override for the expiry window in days;
        falls back to the ``CONTENT_API_EXPIRY_DAYS`` setting, then the
        instance default. A value of 0 disables removal entirely.
    """
    if expiry_days:
        self.expiry_days = int(expiry_days)
    elif app.settings.get('CONTENT_API_EXPIRY_DAYS'):
        self.expiry_days = app.settings['CONTENT_API_EXPIRY_DAYS']

    if self.expiry_days == 0:
        # Fixed log message typo: "therefor" -> "therefore".
        logger.info('Expiry days is set to 0, therefore no items will be removed.')
        return

    now = utcnow()
    self.log_msg = 'Expiry Time: {}'.format(now)
    logger.info('{} Starting to remove expired content_api items.'.format(self.log_msg))

    lock_name = get_lock_id('content_api', 'remove_expired')
    if not lock(lock_name, expire=600):
        logger.info('{} Remove expired content_api items task is already running'.format(self.log_msg))
        return
    try:
        num_items_removed = self._remove_expired_items(now, self.expiry_days)
    finally:
        unlock(lock_name)

    if num_items_removed == 0:
        logger.info('{} Completed but no items were removed'.format(self.log_msg))
    else:
        logger.info('{} Completed removing {} expired content_api items'.format(self.log_msg, num_items_removed))
示例5: publish
def publish():
    """
    Fetches items from publish queue as per the configuration,
    calls the transmit function.
    """
    lock_name = get_lock_id("Transmit", "Articles")
    if not lock(lock_name, '', expire=1800):
        logger.info('Task: {} is already running.'.format(lock_name))
        return
    try:
        # Query any outstanding transmit requests
        items = list(get_queue_items())
        if len(items) > 0:
            transmit_items(items)
        # Query any outstanding retry attempts
        retry_items = list(get_queue_items(True))
        if len(retry_items) > 0:
            transmit_items(retry_items)
    # Narrowed from a bare ``except:`` — still best-effort, but no longer
    # swallows SystemExit / KeyboardInterrupt.
    except Exception:
        logger.exception('Task: {} failed.'.format(lock_name))
    finally:
        unlock(lock_name, '')
示例6: transmit_subscriber_items
def transmit_subscriber_items(self, queue_items, subscriber):
    """Transmit the given publish queue items for one subscriber.

    A per-subscriber lock ensures a single worker transmits to the
    subscriber at a time. Items whose state is no longer pending/retrying
    are skipped; failed transmissions are rescheduled for retry until
    ``MAX_TRANSMIT_RETRY_ATTEMPT`` is exhausted, then marked failed.
    """
    # Attempt to obtain a lock for transmissions to the subscriber
    lock_name = get_lock_id("Subscriber", "Transmit", subscriber)
    if not lock(lock_name, expire=610):
        return
    # Hoisted out of the loop: the service handle does not change per item.
    publish_queue_service = get_resource_service(PUBLISH_QUEUE)
    try:
        for queue_item in queue_items:
            log_msg = (
                "_id: {_id} item_id: {item_id} state: {state} "
                "item_version: {item_version} headline: {headline}".format(**queue_item)
            )
            try:
                # check the status of the queue item
                queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
                if queue_item.get("state") not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
                    logger.info(
                        "Transmit State is not pending/retrying for queue item: {}. It is in {}".format(
                            queue_item.get(config.ID_FIELD), queue_item.get("state")
                        )
                    )
                    continue
                # update the status of the item to in-progress
                queue_update = {"state": "in-progress", "transmit_started_at": utcnow()}
                publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
                logger.info("Transmitting queue item {}".format(log_msg))
                destination = queue_item["destination"]
                transmitter = superdesk.publish.registered_transmitters[destination.get("delivery_type")]
                transmitter.transmit(queue_item)
                logger.info("Transmitted queue item {}".format(log_msg))
            except Exception as e:
                logger.exception("Failed to transmit queue item {}".format(log_msg))
                max_retry_attempt = app.config.get("MAX_TRANSMIT_RETRY_ATTEMPT")
                retry_attempt_delay = app.config.get("TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES")
                try:
                    orig_item = publish_queue_service.find_one(req=None, _id=queue_item["_id"])
                    updates = {config.LAST_UPDATED: utcnow()}
                    # HTTP push client errors are permanent, so they skip the
                    # retry path and fail immediately.
                    if orig_item.get("retry_attempt", 0) < max_retry_attempt and not isinstance(
                        e, PublishHTTPPushClientError
                    ):
                        updates["retry_attempt"] = orig_item.get("retry_attempt", 0) + 1
                        updates["state"] = QueueState.RETRYING.value
                        updates["next_retry_attempt_at"] = utcnow() + timedelta(minutes=retry_attempt_delay)
                    else:
                        # all retry attempts exhausted marking the item as failed.
                        updates["state"] = QueueState.FAILED.value
                    publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
                # Narrowed from a bare ``except:``.
                except Exception:
                    logger.error("Failed to set the state for failed publish queue item {}.".format(queue_item["_id"]))
    finally:
        # Release the lock for the subscriber. The original released it
        # unconditionally outside any try/finally, leaking the lock when an
        # unexpected exception (e.g. a bad log_msg format key) escaped the
        # loop — the sibling function version already guards with finally.
        unlock(lock_name)
示例7: run
def run(self):
    """Import the legal publish queue, guarded by a distributed lock."""
    logger.info('Importing Legal Publish Queue')
    task_lock = get_lock_id('legal_archive', 'import_legal_publish_queue')
    if not lock(task_lock, '', expire=600):
        return
    try:
        LegalArchiveImport().import_legal_publish_queue()
    finally:
        unlock(task_lock, '')
示例8: run
def run(self, page_size=None):
    """Import the legal publish queue in pages, under a distributed lock.

    :param page_size: batch size; defaults to ``self.default_page_size``.
    """
    logger.info('Import to Legal Publish Queue')
    task_lock = get_lock_id('legal_archive', 'import_legal_publish_queue')
    batch_size = int(page_size) if page_size else self.default_page_size
    if not lock(task_lock, expire=310):
        return
    try:
        LegalArchiveImport().import_legal_publish_queue(page_size=batch_size)
    finally:
        unlock(task_lock)
示例9: update_provider
def update_provider(self, provider, rule_set=None, routing_scheme=None):
    """
    Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
    updates the provider.

    :param provider: Ingest Provider Details
    :type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
    :param rule_set: Translation Rule Set if one is associated with Ingest Provider.
    :type rule_set: dict :py:class:`apps.rules.rule_sets.RuleSetsResource`
    :param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
    :type routing_scheme: dict :py:class:`apps.rules.routing_rules.RoutingRuleSchemeResource`
    """
    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
    host_name = get_host_id(self)
    if not lock(lock_name, host_name, expire=1800):
        return
    try:
        # Instantiate a fresh service of the registered feeding service type.
        service_cls = registered_feeding_services[provider['feeding_service']].__class__
        feeding_service = service_cls()
        update = {LAST_UPDATED: utcnow()}
        for batch in feeding_service.update(provider):
            ingest_items(batch, provider, feeding_service, rule_set, routing_scheme)
            stats.incr('ingest.ingested_items', len(batch))
            if batch:
                update[LAST_ITEM_UPDATE] = utcnow()

        # Some Feeding Services update the collection and by this time the _etag might have been changed.
        # So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
        providers = superdesk.get_resource_service('ingest_providers')
        provider = providers.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
        providers.system_update(provider[superdesk.config.ID_FIELD], update, provider)

        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers', user_list=admins, name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))

        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))

        if LAST_ITEM_UPDATE in update:  # Only push a notification if there has been an update
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    finally:
        unlock(lock_name, host_name)
示例10: init_elastic
def init_elastic(self, app):
    """Init elastic index.

    It will create index and put mapping. It should run only once so locks are in place.
    Thus mongo must be already setup before running this.
    """
    with app.app_context():
        # Skip silently when another process already holds the lock.
        if not lock('elastic', expire=10):
            return
        try:
            self.elastic.init_index(app)
        finally:
            unlock('elastic')
示例11: transmit_subscriber_items
def transmit_subscriber_items(queue_items, subscriber):
    """Transmit the given publish queue items for one subscriber.

    A per-subscriber lock ensures a single worker transmits to the
    subscriber at a time. Items whose state is no longer pending/retrying
    are skipped; failed transmissions are rescheduled for retry until
    ``MAX_TRANSMIT_RETRY_ATTEMPT`` is exhausted, then marked failed.
    """
    lock_name = get_lock_id('Subscriber', 'Transmit', subscriber)
    publish_queue_service = get_resource_service(PUBLISH_QUEUE)
    if not lock(lock_name, expire=610):
        return
    try:
        for queue_item in queue_items:
            log_msg = '_id: {_id}  item_id: {item_id}  state: {state} ' \
                      'item_version: {item_version} headline: {headline}'.format(**queue_item)
            try:
                # check the status of the queue item
                queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
                if queue_item.get('state') not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
                    logger.info('Transmit State is not pending/retrying for queue item: {}. It is in {}'.
                                format(queue_item.get(config.ID_FIELD), queue_item.get('state')))
                    continue

                # update the status of the item to in-progress
                queue_update = {'state': 'in-progress', 'transmit_started_at': utcnow()}
                publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
                logger.info('Transmitting queue item {}'.format(log_msg))

                destination = queue_item['destination']
                transmitter = superdesk.publish.registered_transmitters[destination.get('delivery_type')]
                transmitter.transmit(queue_item)
                logger.info('Transmitted queue item {}'.format(log_msg))
            except Exception as e:
                logger.exception('Failed to transmit queue item {}'.format(log_msg))
                max_retry_attempt = app.config.get('MAX_TRANSMIT_RETRY_ATTEMPT')
                retry_attempt_delay = app.config.get('TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES')
                try:
                    orig_item = publish_queue_service.find_one(req=None, _id=queue_item['_id'])
                    updates = {config.LAST_UPDATED: utcnow()}
                    # HTTP push client errors are permanent, so they skip the
                    # retry path and fail immediately.
                    if orig_item.get('retry_attempt', 0) < max_retry_attempt and \
                            not isinstance(e, PublishHTTPPushClientError):
                        updates['retry_attempt'] = orig_item.get('retry_attempt', 0) + 1
                        updates['state'] = QueueState.RETRYING.value
                        updates['next_retry_attempt_at'] = utcnow() + timedelta(minutes=retry_attempt_delay)
                    else:
                        # all retry attempts exhausted marking the item as failed.
                        updates['state'] = QueueState.FAILED.value
                    publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
                # Narrowed from a bare ``except:`` so SystemExit /
                # KeyboardInterrupt are not swallowed.
                except Exception:
                    logger.error('Failed to set the state for failed publish queue item {}.'.format(queue_item['_id']))
    finally:
        unlock(lock_name)
示例12: update_provider
def update_provider(provider, rule_set=None, routing_scheme=None):
    """Fetch items from ingest provider, ingest them into Superdesk and update the provider.

    :param provider: Ingest Provider data
    :param rule_set: Translation Rule Set if one is associated with Ingest Provider.
    :param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
    """
    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
    if not lock(lock_name, expire=1810):
        return
    try:
        # Instantiate a fresh service of the registered feeding service type.
        service_cls = registered_feeding_services[provider['feeding_service']].__class__
        feeding_service = service_cls()
        update = {LAST_UPDATED: utcnow()}
        for batch in feeding_service.update(provider, update):
            ingest_items(batch, provider, feeding_service, rule_set, routing_scheme)
            if batch:
                # Track the newest versioncreated seen so far; fall back to
                # "now" when no item in the batch carries one.
                newest = max(
                    [item['versioncreated'] for item in batch if item.get('versioncreated')],
                    default=utcnow()
                )
                if not update.get(LAST_ITEM_UPDATE) or update[LAST_ITEM_UPDATE] < newest:
                    update[LAST_ITEM_UPDATE] = newest

        # Some Feeding Services update the collection and by this time the _etag might have been changed.
        # So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
        providers = superdesk.get_resource_service('ingest_providers')
        provider = providers.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
        providers.system_update(provider[superdesk.config.ID_FIELD], update, provider)

        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers', user_list=admins, name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))

        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))

        if LAST_ITEM_UPDATE in update:  # Only push a notification if there has been an update
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    except Exception as e:
        logger.error("Failed to ingest file: {error}".format(error=e))
        raise IngestFileError(3000, e, provider)
    finally:
        unlock(lock_name)
示例13: create
def create(self, docs, **kwargs):
    """Move an item (and optionally its package members) to a new desk/stage.

    Each item is protected by a short-lived per-item move lock; a held lock
    means another user is moving the same item. Returns the ids of all
    items that were moved.
    """
    item_guid = request.view_args['guid']
    moved_ids = []

    # set the lock_id it per item
    parent_lock = "item_move {}".format(item_guid)
    if not lock(parent_lock, expire=5):
        raise SuperdeskApiError.forbiddenError(message="Item is locked for move by another user.")
    try:
        # doc represents the target desk and stage
        target = docs[0]
        moved_item = self.move_content(item_guid, target)
        moved_ids.append(moved_item.get(config.ID_FIELD))

        if moved_item.get('type', None) == 'composite' and target.get('allPackageItems', False):
            child_lock = None
            try:
                for ref_id in (ref[RESIDREF] for group in moved_item.get(GROUPS, [])
                               for ref in group.get(REFS, []) if RESIDREF in ref):
                    child_lock = "item_move {}".format(ref_id)
                    if lock(child_lock, expire=5):
                        member = self.move_content(ref_id, target)
                        moved_ids.append(member.get(config.ID_FIELD))
                        unlock(child_lock, remove=True)
                    # Either way the lock is no longer ours to release.
                    child_lock = None
            finally:
                # Release a child lock left held by an exception mid-move.
                if child_lock:
                    unlock(child_lock, remove=True)
        return moved_ids
    finally:
        unlock(parent_lock, remove=True)
示例14: run
def run(self):
    """Fetches items from publish queue as per the configuration, calls the transmit function.
    """
    task_lock = get_lock_id('publish', 'enqueue_published')
    if not lock(task_lock, expire=310):
        logger.info('Enqueue Task: {} is already running.'.format(task_lock))
        return
    try:
        pending = get_published_items()
        if len(pending) > 0:
            enqueue_items(pending)
    finally:
        unlock(task_lock)
示例15: remove_expired
def remove_expired(self, provider):
    """Purge expired ingest data for the given provider.

    Raises ProviderError.expiredContentError when the cleanup fails.
    """
    gc_lock = 'ingest:gc'
    if not lock(gc_lock, expire=300):
        return
    try:
        remove_expired_data(provider)
        push_notification('ingest:cleaned')
    except Exception as err:
        logger.exception(err)
        raise ProviderError.expiredContentError(err, provider)
    finally:
        unlock(gc_lock)