This article collects typical usage examples of the Python class core.model.DataSource. If you are wondering what DataSource is for or how to use it, the curated examples below may help.
The following 15 code examples of the DataSource class are presented, ordered by popularity by default.
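All of the examples share one basic pattern: DataSource.lookup resolves a well-known source name to its database row. As a minimal sketch of that pattern (assuming a SQLAlchemy session _db such as the one supplied by this project's test fixtures):

from core.model import DataSource

def get_overdrive_source(_db):
    # DataSource.lookup returns the row for a well-known data source;
    # names like DataSource.OVERDRIVE are plain string constants.
    return DataSource.lookup(_db, DataSource.OVERDRIVE)
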
Example 1: test_items_that_need_coverage
def test_items_that_need_coverage(self):
    source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
    other_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)

    # An item that hasn't been covered by the provider yet.
    cr = self._coverage_record(self._edition(), other_source)

    # An item that has already been covered by the reaper operation.
    reaper_cr = self._coverage_record(
        self._edition(), source, operation=CoverageRecord.REAP_OPERATION
    )

    # An item that has been covered by the reaper operation, but has
    # had its license repurchased.
    relicensed_edition, relicensed_licensepool = self._edition(with_license_pool=True)
    relicensed_coverage_record = self._coverage_record(
        relicensed_edition, source, operation=CoverageRecord.REAP_OPERATION
    )
    relicensed_licensepool.update_availability(1, 0, 0, 0)

    items = self.provider.items_that_need_coverage().all()

    # The provider ignores anything that has been reaped and doesn't
    # have licenses.
    assert reaper_cr.identifier not in items

    # But it picks up anything that hasn't been covered at all, and
    # anything that's been licensed anew even if it's already been reaped.
    eq_(2, len(items))
    assert relicensed_licensepool.identifier in items
    assert cr.identifier in items

    # The Wrangler Reaper coverage record is removed from the db
    # when it's committed.
    assert relicensed_coverage_record in relicensed_licensepool.identifier.coverage_records
    self._db.commit()
    assert relicensed_coverage_record not in relicensed_licensepool.identifier.coverage_records

Example 2: __init__
def __init__(self, _db, overdrive=None, threem=None, axis=None):
    self._db = _db
    self.overdrive = overdrive
    self.threem = threem
    self.axis = axis
    self.apis = [x for x in (overdrive, threem, axis) if x]
    self.log = logging.getLogger("Circulation API")

    # When we get our view of a patron's loans and holds, we need
    # to include loans from all licensed data sources. We do not
    # need to include loans from open-access sources because we
    # are the authorities on those.
    data_sources_for_sync = []
    if self.overdrive:
        data_sources_for_sync.append(
            DataSource.lookup(_db, DataSource.OVERDRIVE)
        )
    if self.threem:
        data_sources_for_sync.append(
            DataSource.lookup(_db, DataSource.THREEM)
        )
    if self.axis:
        data_sources_for_sync.append(
            DataSource.lookup(_db, DataSource.AXIS_360)
        )

    self.identifier_type_to_data_source_name = dict(
        (ds.primary_identifier_type, ds.name)
        for ds in data_sources_for_sync
    )
    self.data_source_ids_for_sync = [
        x.id for x in data_sources_for_sync
    ]

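As a hedged illustration (not part of the original source), the two structures built at the end of this constructor might be used to filter a patron's loans down to the ones that should be synced; the attribute chain loan.license_pool.data_source_id is an assumption about the surrounding ORM:

def loans_to_sync(circulation_api, loans):
    # Keep only loans whose LicensePool belongs to one of the licensed
    # data sources gathered in data_source_ids_for_sync; open-access
    # loans are deliberately excluded, since we are the authority there.
    return [
        loan for loan in loans
        if loan.license_pool.data_source_id
        in circulation_api.data_source_ids_for_sync
    ]
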
Example 3: test_items_that_need_coverage
def test_items_that_need_coverage(self):
    source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
    other_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)

    # An item that hasn't been covered by the provider yet.
    cr = self._coverage_record(self._edition(), other_source)

    # An item that has already been covered by the reaper operation.
    reaper_cr = self._coverage_record(
        self._edition(), source, operation=CoverageRecord.REAP_OPERATION
    )

    # An item that has been covered by the reaper operation, but has
    # had its license repurchased.
    relicensed, relicensed_lp = self._edition(with_license_pool=True)
    self._coverage_record(
        relicensed, source, operation=CoverageRecord.REAP_OPERATION
    )
    relicensed_lp.update_availability(1, 0, 0, 0)

    with temp_config() as config:
        config[Configuration.INTEGRATIONS][Configuration.METADATA_WRANGLER_INTEGRATION] = {
            Configuration.URL : "http://url.gov"
        }
        provider = MetadataWranglerCoverageProvider(self._db)
        items = provider.items_that_need_coverage.all()

    # The provider ignores anything that has been reaped and doesn't
    # have licenses.
    assert reaper_cr.identifier not in items

    # But it picks up anything that hasn't been covered at all, and
    # anything that's been licensed anew even if it's already been reaped.
    eq_(2, len(items))
    assert relicensed_lp.identifier in items
    assert cr.identifier in items

    # The Wrangler Reaper coverage record is removed from the db
    # when it's committed.
    self._db.commit()
    eq_([], relicensed_lp.identifier.coverage_records)

Example 4: test_run_once
def test_run_once(self):
    # Set up authentication and Metadata Wrangler details.
    lp = self._licensepool(
        None, data_source_name=DataSource.BIBLIOTHECA,
        collection=self.collection
    )
    lp.identifier.type = Identifier.BIBLIOTHECA_ID
    isbn = Identifier.parse_urn(self._db, u'urn:isbn:9781594632556')[0]
    lp.identifier.equivalent_to(
        DataSource.lookup(self._db, DataSource.BIBLIOTHECA), isbn, 1
    )
    eq_([], lp.identifier.links)
    eq_([], lp.identifier.measurements)

    # Queue some data to be found.
    responses = (
        'metadata_updates_response.opds',
        'metadata_updates_empty_response.opds',
    )
    for filename in responses:
        data = sample_data(filename, 'opds')
        self.lookup.queue_response(
            200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
        )

    timestamp = self.ts
    new_timestamp = self.monitor.run_once(timestamp)

    # We have a new value to use for the Monitor's timestamp -- the
    # earliest date seen in the last OPDS feed that contained
    # any entries.
    eq_(datetime.datetime(2016, 9, 20, 19, 37, 2), new_timestamp.finish)
    eq_("Editions processed: 1", new_timestamp.achievements)

    # Normally run_once() doesn't update the monitor's timestamp,
    # but this implementation does, so that work isn't redone if
    # run_once() crashes or the monitor is killed.
    eq_(new_timestamp.finish, self.monitor.timestamp().finish)

    # The original Identifier has information from the
    # mock Metadata Wrangler.
    mw_source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
    eq_(3, len(lp.identifier.links))
    [quality] = lp.identifier.measurements
    eq_(mw_source, quality.data_source)

    # Check the URLs we processed.
    url1, url2 = [x[0] for x in self.lookup.requests]

    # The first URL processed was the default one for the
    # MetadataWranglerOPDSLookup.
    eq_(self.lookup.get_collection_url(self.lookup.UPDATES_ENDPOINT), url1)

    # The second URL processed was whatever we saw in the 'next' link.
    eq_("http://next-link/", url2)

Example 5: test_load_cover_link
def test_load_cover_link(self):
    # Create a directory import script with an empty mock filesystem.
    script = MockDirectoryImportScript(self._db, {})
    identifier = self._identifier(Identifier.GUTENBERG_ID, "2345")
    gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
    mirror = MockS3Uploader()
    args = (identifier, gutenberg, "covers", mirror)

    # There is nothing on the mock filesystem, so in this case
    # load_cover_link returns None.
    eq_(None, script.load_cover_link(*args))

    # But we tried.
    eq_(
        ('2345', 'covers', Representation.COMMON_IMAGE_EXTENSIONS,
         'cover image'),
        script._locate_file_args
    )

    # Try another script that has a populated mock filesystem.
    mock_filesystem = {
        'covers' : (
            'acover.jpeg', Representation.JPEG_MEDIA_TYPE, "I'm an image."
        )
    }
    script = MockDirectoryImportScript(self._db, mock_filesystem)
    link = script.load_cover_link(*args)
    eq_(Hyperlink.IMAGE, link.rel)
    assert link.href.endswith(
        '/test.cover.bucket/Gutenberg/Gutenberg+ID/2345/2345.jpg'
    )
    eq_(Representation.JPEG_MEDIA_TYPE, link.media_type)
    eq_("I'm an image.", link.content)

Example 6: __init__
def __init__(self, _db, data_directory):
    self._db = _db
    self.collection = Collection.by_protocol(
        self._db, ExternalIntegration.GUTENBERG
    ).one()
    self.source = DataSource.lookup(self._db, DataSource.GUTENBERG)
    self.data_directory = data_directory
    self.catalog_path = os.path.join(self.data_directory, self.FILENAME)
    self.log = logging.getLogger("Gutenberg API")

Example 7: __init__
def __init__(self, _db, input_identifier_types=None, metadata_lookup=None,
             cutoff_time=None, operation=None):
    self._db = _db
    if not input_identifier_types:
        input_identifier_types = [
            Identifier.OVERDRIVE_ID,
            Identifier.THREEM_ID,
            Identifier.GUTENBERG_ID,
            Identifier.AXIS_360_ID,
        ]
    self.output_source = DataSource.lookup(
        self._db, DataSource.METADATA_WRANGLER
    )
    if not metadata_lookup:
        metadata_lookup = SimplifiedOPDSLookup.from_config()
    self.lookup = metadata_lookup
    if not operation:
        operation = CoverageRecord.SYNC_OPERATION
    self.operation = operation
    super(MetadataWranglerCoverageProvider, self).__init__(
        self.service_name,
        input_identifier_types,
        self.output_source,
        workset_size=20,
        cutoff_time=cutoff_time,
        operation=self.operation,
    )

Example 8: __init__
def __init__(self, _db, lookup=None, input_identifier_types=None,
             operation=None, **kwargs):
    if not input_identifier_types:
        input_identifier_types = [
            Identifier.OVERDRIVE_ID,
            Identifier.THREEM_ID,
            Identifier.GUTENBERG_ID,
            Identifier.AXIS_360_ID,
        ]
    output_source = DataSource.lookup(
        _db, DataSource.METADATA_WRANGLER
    )
    super(MetadataWranglerCoverageProvider, self).__init__(
        lookup=lookup or SimplifiedOPDSLookup.from_config(),
        service_name=self.SERVICE_NAME,
        input_identifier_types=input_identifier_types,
        output_source=output_source,
        operation=operation or self.OPERATION,
        **kwargs
    )
    if not self.lookup.authenticated:
        self.log.warn(
            "Authentication for the Library Simplified Metadata Wrangler "
            "is not set up. You can still use the metadata wrangler, but "
            "it will not know which collection you're asking about."
        )

Example 9: lookup_equivalent_isbns
def lookup_equivalent_isbns(self, identifier):
    """Finds NoveList data for all ISBNs equivalent to an identifier.

    :return: Metadata object or None
    """
    lookup_metadata = []
    license_sources = DataSource.license_sources_for(self._db, identifier)

    # Look up strong ISBN equivalents.
    for license_source in license_sources:
        lookup_metadata += [
            self.lookup(eq.output)
            for eq in identifier.equivalencies
            if (eq.data_source == license_source and eq.strength == 1
                and eq.output.type == Identifier.ISBN)
        ]

    if not lookup_metadata:
        self.log.error(
            "Identifiers without an ISBN equivalent can't "
            "be looked up with NoveList: %r", identifier
        )
        return None

    # Remove None values.
    lookup_metadata = [metadata for metadata in lookup_metadata if metadata]
    if not lookup_metadata:
        return None

    best_metadata = self.choose_best_metadata(lookup_metadata, identifier)
    if not best_metadata:
        return None
    metadata, confidence = best_metadata
    if round(confidence, 2) < 0.5:
        self.log.warn(self.NO_ISBN_EQUIVALENCY, identifier)
        return None
    return metadata

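A hedged usage sketch: the method returns a Metadata object, or None when no strong ISBN equivalent exists or the best match falls below the 0.5 confidence threshold enforced above. The novelist_api and identifier objects are assumed to come from the surrounding application:

metadata = novelist_api.lookup_equivalent_isbns(identifier)
if metadata:
    # Hypothetical consumer: apply the NoveList data to a local record.
    print(metadata.title)
else:
    # No usable ISBN equivalency was found.
    pass
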
Example 10: metadata_needed_for
def metadata_needed_for(self, collection_details):
    """Returns identifiers in the collection that could benefit from
    distributor metadata on the circulation manager.
    """
    client = authenticated_client_from_request(self._db)
    if isinstance(client, ProblemDetail):
        return client
    collection = collection_from_details(
        self._db, client, collection_details
    )

    resolver = IdentifierResolutionCoverageProvider
    unresolved_identifiers = collection.unresolved_catalog(
        self._db, resolver.DATA_SOURCE_NAME, resolver.OPERATION
    )

    # Omit identifiers that currently have metadata pending for
    # the IntegrationClientCoverImageCoverageProvider.
    data_source = DataSource.lookup(
        self._db, collection.name, autocreate=True
    )
    is_awaiting_metadata = self._db.query(
        CoverageRecord.id, CoverageRecord.identifier_id
    ).filter(
        CoverageRecord.data_source_id == data_source.id,
        CoverageRecord.status == CoverageRecord.REGISTERED,
        CoverageRecord.operation == IntegrationClientCoverImageCoverageProvider.OPERATION,
    ).subquery()

    unresolved_identifiers = unresolved_identifiers.outerjoin(
        is_awaiting_metadata,
        Identifier.id == is_awaiting_metadata.c.identifier_id
    ).filter(is_awaiting_metadata.c.id == None)

    # Add a message for each unresolved identifier.
    pagination = load_pagination_from_request(default_size=25)
    feed_identifiers = pagination.apply(unresolved_identifiers).all()
    messages = list()
    for identifier in feed_identifiers:
        messages.append(OPDSMessage(
            identifier.urn, HTTP_ACCEPTED, "Metadata needed."
        ))

    title = "%s Metadata Requests for %s" % (collection.protocol, client.url)
    metadata_request_url = self.collection_feed_url(
        'metadata_needed_for', collection
    )
    request_feed = AcquisitionFeed(
        self._db, title, metadata_request_url, [], VerboseAnnotator,
        precomposed_entries=messages
    )
    self.add_pagination_links_to_feed(
        pagination, unresolved_identifiers, request_feed,
        'metadata_needed_for', collection
    )
    return feed_response(request_feed)

Example 11: test_handle_import_messages
def test_handle_import_messages(self):
    data_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)
    provider = OPDSImportCoverageProvider("name", [], data_source)

    message = StatusMessage(201, "try again later")
    message2 = StatusMessage(404, "we're doomed")
    message3 = StatusMessage(200, "everything's fine")

    identifier = self._identifier()
    identifier2 = self._identifier()
    identifier3 = self._identifier()

    messages_by_id = {
        identifier.urn: message,
        identifier2.urn: message2,
        identifier3.urn: message3,
    }
    [f1, f2] = sorted(
        list(provider.handle_import_messages(messages_by_id)),
        key=lambda x: x.exception
    )
    eq_(identifier, f1.obj)
    eq_("201: try again later", f1.exception)
    eq_(True, f1.transient)

    eq_(identifier2, f2.obj)
    eq_("404: we're doomed", f2.exception)
    eq_(False, f2.transient)

Example 12: _provider
def _provider(self, presentation_ready_on_success=True):
    """Create a generic MockOPDSImportCoverageProvider for testing purposes."""
    source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
    return MockOPDSImportCoverageProvider(
        "mock provider", [], source,
        presentation_ready_on_success=presentation_ready_on_success
    )

Example 13: test_items_that_need_coverage_respects_cutoff
def test_items_that_need_coverage_respects_cutoff(self):
    """Verify that this coverage provider respects the cutoff_time
    argument.
    """
    source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
    edition = self._edition()
    cr = self._coverage_record(edition, source, operation='sync')

    # We have a coverage record already, so this book doesn't show
    # up in items_that_need_coverage.
    items = self.provider.items_that_need_coverage().all()
    eq_([], items)

    # But if we send a cutoff_time that's later than the time
    # associated with the coverage record...
    one_hour_from_now = (
        datetime.datetime.utcnow() + datetime.timedelta(seconds=3600)
    )
    provider_with_cutoff = self.create_provider(
        cutoff_time=one_hour_from_now
    )

    # ...the book starts showing up in items_that_need_coverage.
    eq_([edition.primary_identifier],
        provider_with_cutoff.items_that_need_coverage().all())

Example 14: test_finalize_edition
def test_finalize_edition(self):
    provider_no_presentation_ready = self._provider(
        presentation_ready_on_success=False
    )
    provider_presentation_ready = self._provider(
        presentation_ready_on_success=True
    )
    identifier = self._identifier()
    source = DataSource.lookup(self._db, DataSource.GUTENBERG)

    # Here's an Edition with no LicensePool.
    edition, is_new = Edition.for_foreign_id(
        self._db, source, identifier.type, identifier.identifier
    )
    edition.title = self._str

    # This will effectively do nothing.
    provider_no_presentation_ready.finalize_edition(edition)

    # No Works have been created.
    eq_(0, self._db.query(Work).count())

    # But if there's also a LicensePool...
    pool, is_new = LicensePool.for_foreign_id(
        self._db, source, identifier.type, identifier.identifier
    )

    # ...finalize_edition() will create a Work.
    provider_no_presentation_ready.finalize_edition(edition)
    work = pool.work
    eq_(work, edition.work)
    eq_(False, work.presentation_ready)

    # If the provider is configured to do so, finalize_edition()
    # will also set the Work as presentation-ready.
    provider_presentation_ready.finalize_edition(edition)
    eq_(True, work.presentation_ready)

Example 15: generate_mock_api
def generate_mock_api(self):
    """Prep an empty NoveList result."""
    source = DataSource.lookup(self._db, DataSource.OVERDRIVE)
    metadata = Metadata(source)
    mock_api = MockNoveListAPI(self._db)
    mock_api.setup(metadata)
    return mock_api