This article collects typical usage examples of the get_collection function from the Python module pulp.server.db.connection. If you have been wondering what exactly get_collection does, how to use it, or what real usage looks like, the curated examples below should help.
The sections that follow present 15 code examples of get_collection, sorted by popularity by default.
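Before diving into the examples, here is a minimal sketch of the typical call pattern. It is a sketch only: it assumes a Pulp 2 server environment where pulp.server.db.connection.initialize() has already been run against a reachable MongoDB, and the 'repos' collection name is purely illustrative.

from pulp.server.db import connection

# One-time setup per process; in practice the Pulp server does this at startup.
connection.initialize()

# get_collection() returns a pymongo-style Collection object for the named
# MongoDB collection; passing create=True creates the collection if missing.
repos = connection.get_collection('repos')

# From here on it behaves like an ordinary pymongo collection.
for repo in repos.find({}, fields=['id', 'display_name']):
    print repo['id']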
Example 1: migrate

def migrate(*args, **kwargs):
    """
    For each repository with a yum distributor, clean up the old yum distributor's
    mess and re-publish the repository with the new distributor.
    """
    distributor_collection = get_collection('repo_distributors')
    yum_distributors = list(
        distributor_collection.find({'distributor_type_id': YUM_DISTRIBUTOR_ID}))

    repo_collection = get_collection('repos')
    repo_ids = list(set(d['repo_id'] for d in yum_distributors))
    repos = dict((r['id'], r) for r in repo_collection.find({'id': {'$in': repo_ids}}))

    for d in yum_distributors:
        repo = repos[d['repo_id']]
        config = d['config'] or {}

        # Skip distributors that have never published.
        if d['last_publish'] is None:
            continue

        _clear_working_dir(repo)
        _clear_old_publish_dirs(repo, config)
        _re_publish_repository(repo, d)

    _remove_legacy_publish_dirs()
Example 2: _create_or_update_type

def _create_or_update_type(type_def):
    """
    This method creates or updates a type definition in MongoDB.

    :param type_def: the type definition to update or create. If a type definition with
                     the same ID as an existing type exists, the type is updated;
                     otherwise it is created.
    :type  type_def: ContentType
    :return: This method will always return None
    :rtype:  None
    """
    # Make sure a collection exists for the type
    database = pulp_db.get_database()
    collection_name = unit_collection_name(type_def.id)
    if collection_name not in database.collection_names():
        pulp_db.get_collection(collection_name, create=True)

    # Add or update an entry in the types list
    content_type_collection = ContentType.get_collection()
    content_type = ContentType(
        type_def.id, type_def.display_name, type_def.description, type_def.unit_key,
        type_def.search_indexes, type_def.referenced_types)
    # no longer rely on _id = id
    existing_type = content_type_collection.find_one({'id': type_def.id}, fields=[])
    if existing_type is not None:
        content_type._id = existing_type['_id']
    # XXX this still causes a potential race condition when 2 users are updating the same type
    content_type_collection.save(content_type, safe=True)
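The XXX comment above flags a find-then-save race between concurrent updaters. One hedged alternative, sketched below, would let MongoDB do the match-and-write atomically with an upsert; the field names mirror the ContentType constructor arguments, and whether the model serializes this way is an assumption, not something the example shows.

# Hypothetical variant: match on the logical type id and insert-or-update
# in a single atomic operation instead of find_one() followed by save().
content_type_collection.update(
    {'id': type_def.id},
    {'$set': {'display_name': type_def.display_name,
              'description': type_def.description,
              'unit_key': type_def.unit_key,
              'search_indexes': type_def.search_indexes,
              'referenced_types': type_def.referenced_types}},
    upsert=True, safe=True)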
Example 3: migrate

def migrate(*args, **kwargs):
    schedule_collection = connection.get_collection('scheduled_calls')
    importer_collection = connection.get_collection('repo_importers')
    distributor_collection = connection.get_collection('repo_distributors')

    # map() is eager under Python 2, so every schedule document is converted
    # and saved here before the scheduled syncs and publishes are moved.
    map(functools.partial(convert_schedule, schedule_collection.save), schedule_collection.find())
    move_scheduled_syncs(importer_collection, schedule_collection)
    move_scheduled_publishes(distributor_collection, schedule_collection)
Example 4: repositories_with_yum_importers

def repositories_with_yum_importers():
    repo_importer_collection = get_collection('repo_importers')
    repo_yum_importers = repo_importer_collection.find({'importer_type_id': _TYPE_YUM_IMPORTER},
                                                       fields=['repo_id'])
    yum_repo_ids = [i['repo_id'] for i in repo_yum_importers]

    repo_collection = get_collection('repos')
    yum_repos = repo_collection.find({'repo_id': {'$in': yum_repo_ids}},
                                     fields=['repo_id', 'scratchpad'])
    return list(yum_repos)
Example 5: tearDown

def tearDown(self):
    super(Migration0004Tests, self).tearDown()

    # Delete any sample data added for the test
    types_db.clean()
    RepoContentUnit.get_collection().remove()
    get_collection('repo_importers').remove()
    model.Repository.drop_collection()
Example 6: _migrate_rpmlike_units

def _migrate_rpmlike_units(unit_type):
    """
    This function performs the migration on RPMs, DRPMs, and SRPMs. These all have the same
    schema when it comes to checksumtype, so they can be treated the same way.

    :param unit_type: The unit_type_id, as found in pulp_rpm.common.ids.
    :type  unit_type: basestring
    """
    repos = connection.get_collection('repos')
    repo_content_units = connection.get_collection('repo_content_units')
    unit_collection = connection.get_collection('units_%s' % unit_type)

    for unit in unit_collection.find():
        try:
            sanitized_type = verification.sanitize_checksum_type(unit['checksumtype'])
            if sanitized_type != unit['checksumtype']:
                # Let's see if we can get away with changing its checksumtype to the sanitized
                # value. If this works, we won't have to do anything else.
                unit_collection.update({'_id': unit['_id']},
                                       {'$set': {'checksumtype': sanitized_type}})
        except errors.DuplicateKeyError:
            # Looks like there is already an identical unit with the sanitized checksum type.
            # This means we need to remove the current unit, but first we will need to change
            # any references to this unit to point to the other.
            conflicting_unit = unit_collection.find_one(
                {'name': unit['name'], 'epoch': unit['epoch'], 'version': unit['version'],
                 'release': unit['release'], 'arch': unit['arch'], 'checksum': unit['checksum'],
                 'checksumtype': sanitized_type})
            for rcu in repo_content_units.find({'unit_type_id': unit_type,
                                                'unit_id': unit['_id']}):
                # Now we must either switch the rcu from pointing to unit to pointing to
                # conflicting_unit, or delete the rcu if there is already one in the same repo.
                try:
                    msg = _('Updating %(repo_id)s to contain %(type)s %(conflicting)s instead '
                            'of %(old_id)s.')
                    msg = msg % {'repo_id': rcu['repo_id'], 'type': unit_type,
                                 'conflicting': conflicting_unit['_id'], 'old_id': unit['_id']}
                    _logger.debug(msg)
                    repo_content_units.update({'_id': rcu['_id']},
                                              {'$set': {'unit_id': conflicting_unit['_id']}})
                except errors.DuplicateKeyError:
                    # We will delete this RepoContentUnit since the sha1 RPM is already in the
                    # repository.
                    msg = _('Removing %(type)s %(old_id)s from repo %(repo_id)s since it '
                            'conflicts with %(conflicting)s.')
                    msg = msg % {'repo_id': rcu['repo_id'], 'type': unit_type,
                                 'conflicting': conflicting_unit['_id'], 'old_id': unit['_id']}
                    _logger.debug(msg)
                    repo_content_units.remove({'_id': rcu['_id']})

                    # In this case, we now need to decrement the repository's
                    # "content_unit_counts" for this unit_type by one, since we removed a unit
                    # from a repository.
                    repos.update(
                        {'id': rcu['repo_id']},
                        {'$inc': {'content_unit_counts.%s' % unit_type: -1}})

            # Now that we have removed or altered all references to the "sha" Unit, we need to
            # remove it since it is a duplicate.
            unit_collection.remove({'_id': unit['_id']})
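A hedged sketch of how this helper would presumably be driven: once per RPM-like type id. The literal ids 'rpm', 'srpm', and 'drpm' are assumptions based on pulp_rpm.common.ids naming conventions, not something shown in the example.

def migrate(*args, **kwargs):
    # Each RPM-like content type shares the checksumtype schema, so the same
    # cleanup runs for all three collections (units_rpm, units_srpm, units_drpm).
    for unit_type in ('rpm', 'srpm', 'drpm'):
        _migrate_rpmlike_units(unit_type)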
Example 7: create_user

def create_user(login, password=None, name=None, roles=None):
    """
    Creates a new Pulp user and adds it to the specified roles.

    @param login: login name / unique identifier for the user
    @type  login: str
    @param password: password for login credentials
    @type  password: str
    @param name: user's full name
    @type  name: str
    @param roles: list of roles user will belong to
    @type  roles: list

    @raise DuplicateResource: if there is already a user with the requested login
    @raise InvalidValue: if any of the fields are unacceptable
    """
    existing_user = get_collection('users').find_one({'login': login})
    if existing_user is not None:
        raise DuplicateResource(login)

    invalid_values = []
    if login is None or _USER_LOGIN_REGEX.match(login) is None:
        invalid_values.append('login')
    if invalid_type(name, basestring):
        invalid_values.append('name')
    if invalid_type(roles, list):
        invalid_values.append('roles')
    if invalid_values:
        raise InvalidValue(invalid_values)

    # Use the login for the name of the user if one was not specified
    name = name or login
    roles = roles or None

    # Creation
    create_me = model.User(login=login, name=name, roles=roles)
    create_me.set_password(password)
    create_me.save()

    # Grant permissions
    permission_manager = factory.permission_manager()
    permission_manager.grant_automatic_permissions_for_user(create_me.login)

    # Retrieve the user to return the SON object
    created = get_collection('users').find_one({'login': login})
    created.pop('password')
    return created
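A hedged usage sketch; the role name 'super-users' follows Pulp's conventional built-in admin role, but treat it and the other values as illustrative assumptions.

# Create a user and inspect the stored document. DuplicateResource is raised
# if a user with this login already exists; InvalidValue if a field is bad.
user_doc = create_user('deploy-bot', password='s3cret', name='Deploy Bot',
                       roles=['super-users'])
print user_doc['login'], user_doc['roles']  # the password field is stripped from the result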
Example 8: _remove_prestodelta_repo_units

def _remove_prestodelta_repo_units():
    """
    Remove all prestodelta repo_content_units since they should not have been created
    to begin with.
    """
    metadata_collection = get_collection('units_yum_repo_metadata_file')
    repo_units_collection = get_collection('repo_content_units')

    for presto_unit in metadata_collection.find({'data_type': 'prestodelta'}):
        # remove any repo units that reference it; the unit itself will
        # be removed by the orphan cleanup at some point in the future
        repo_units_collection.remove({'unit_id': presto_unit['_id']})
Example 9: migrate

def migrate(*args, **kwargs):
    """
    For each repository with a yum distributor, clean up the old yum distributor's
    mess and re-publish the repository with the new distributor.
    """
    if not api._is_initialized():
        api.initialize()

    distributor_collection = get_collection('repo_distributors')
    yum_distributors = list(
        distributor_collection.find({'distributor_type_id': YUM_DISTRIBUTOR_ID}))

    repo_ids = list(set(d['repo_id'] for d in yum_distributors))
    repo_objs = model.Repository.objects(repo_id__in=repo_ids)
    repos = dict((repo_obj.repo_id, repo_obj.to_transfer_repo()) for repo_obj in repo_objs)

    for d in yum_distributors:
        repo = repos[d['repo_id']]
        config = d['config'] or {}

        if d.get('last_publish') is None:
            continue

        _clear_working_dir(repo)
        _clear_old_publish_dirs(repo, config)
        _re_publish_repository(repo, d)

    _remove_legacy_publish_dirs()
Example 10: __init__

def __init__(self):
    """
    Call super with collection and fields.
    """
    key_fields = ("data_type", "repo_id")
    collection = connection.get_collection("units_yum_repo_metadata_file")
    super(YumMetadataFile, self).__init__(collection, key_fields)
Example 11: migrate

def migrate(*args, **kwargs):
    """
    Add last_updated and last_override_config to the importer collection.
    """
    updated_key = 'last_updated'
    config_key = 'last_override_config'
    collection = get_collection('repo_importers')

    for importer in collection.find():
        modified = False

        if config_key not in importer:
            importer[config_key] = {}
            modified = True

        # If the key doesn't exist, or does exist but has no value, set it based on the
        # last sync time, if possible. Otherwise, set it to now.
        if not importer.get(updated_key, None):
            try:
                importer[updated_key] = isodate.parse_datetime(importer['last_sync'])
            # The attribute doesn't exist, or parsing failed. It's safe to set a newer timestamp.
            except:  # noqa: E722
                importer[updated_key] = datetime.datetime.now(tz=isodate.UTC)

            modified = True

        if modified:
            collection.save(importer)
Example 12: migrate

def migrate(*args, **kwargs):
    """
    Migrate existing yum importers to use the new configuration key names.

    This migration includes the consolidation of verify_checksum and verify_size into a
    single config value. For simplicity, the value for verify_checksum is used as the new
    setting and verify_size is discarded.

    The newest flag in the old config was redundant; num_old_packages serves the
    same purpose. The newest flag is discarded.

    The purge_orphaned flag was a carry-over from v1 and has no effect. It's documented in
    the old yum importer, but I'm not sure it was actually used. This migration will attempt
    to delete it anyway, just in case.
    """
    repo_importers = get_collection('repo_importers')

    rename_query = {'$rename': {
        'config.feed_url': 'config.feed',
        'config.ssl_verify': 'config.ssl_validation',
        'config.proxy_url': 'config.proxy_host',
        'config.proxy_user': 'config.proxy_username',
        'config.proxy_pass': 'config.proxy_password',
        'config.num_threads': 'config.max_downloads',
        'config.verify_checksum': 'config.validate',  # see docstring above
        'config.remove_old': 'config.remove_missing',
        'config.num_old_packages': 'config.retain_old_count',
    }}
    repo_importers.update({'importer_type_id': 'yum_importer'}, rename_query,
                          safe=True, multi=True)

    remove_query = {'$unset': {'config.newest': 1,
                               'config.verify_size': 1,
                               'config.purge_orphaned': 1}}
    repo_importers.update({'importer_type_id': 'yum_importer'}, remove_query,
                          safe=True, multi=True)
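To make the $rename/$unset semantics concrete, here is an illustrative before/after for a single importer document; the values are invented for the sketch.

# Before (illustrative):
#   {'importer_type_id': 'yum_importer',
#    'config': {'feed_url': 'http://example.com/repo/',
#               'num_threads': 5, 'verify_size': True, 'newest': False}}
#
# After: renamed keys keep their values; $unset keys disappear entirely.
#   {'importer_type_id': 'yum_importer',
#    'config': {'feed': 'http://example.com/repo/',
#               'max_downloads': 5}}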
Example 13: migrate

def migrate(*args, **kwargs):
    """
    Migrate existing errata to have the key "from" instead of "from_str"
    """
    errata_collection = get_collection('units_erratum')
    rename_query = {'$rename': {'from_str': 'from'}}
    errata_collection.update({}, rename_query, multi=True)
Example 14: _migrate_task_status

def _migrate_task_status():
    """
    Find all task_status documents in an incomplete state and set the state to canceled.
    """
    task_status = connection.get_collection('task_status')
    task_status.update({'state': {'$in': CALL_INCOMPLETE_STATES}},
                       {'$set': {'state': CALL_CANCELED_STATE}}, multi=True)
Example 15: _update_indexes

def _update_indexes(type_def, unique):
    collection_name = unit_collection_name(type_def.id)
    collection = connection.get_collection(collection_name, create=False)

    if unique:
        index_list = [type_def.unit_key]  # treat the key as a compound key
    else:
        index_list = type_def.search_indexes

    if index_list is None:
        return

    for index in index_list:
        if isinstance(index, (list, tuple)):
            msg = "Ensuring index [%s] (unique: %s) on type definition [%s]"
            msg = msg % (", ".join(index), unique, type_def.id)
            _logger.debug(msg)
            mongo_index = _create_index_keypair(index)
        else:
            msg = "Ensuring index [%s] (unique: %s) on type definition [%s]"
            msg = msg % (index, unique, type_def.id)
            _logger.debug(msg)
            mongo_index = index

        index_name = collection.ensure_index(mongo_index, unique=unique)

        if index_name is not None:
            _logger.debug("Index [%s] created on type definition [%s]" % (index_name, type_def.id))
        else:
            _logger.debug("Index already existed on type definition [%s]" % type_def.id)
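The helper _create_index_keypair is not shown in this example; judging from how it is used, it presumably converts a list of field names into the (field, direction) pairs that pymongo expects for a compound index. A hedged sketch of that shape, assuming pymongo 2.x and an RPM-style unit key:

import pymongo

# Assumed input: a compound unit key such as
#   ['name', 'epoch', 'version', 'release', 'arch', 'checksumtype']
# Assumed result passed to collection.ensure_index():
mongo_index = [('name', pymongo.ASCENDING),
               ('epoch', pymongo.ASCENDING),
               ('version', pymongo.ASCENDING),
               ('release', pymongo.ASCENDING),
               ('arch', pymongo.ASCENDING),
               ('checksumtype', pymongo.ASCENDING)]

# In pymongo 2.x, ensure_index() returns the new index's name when it creates
# it and None when it believes the index already exists, which is what the
# logging branch above relies on.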