This article collects typical usage examples of the Python class flexget.utils.imdb.ImdbParser, showing what the class is for and how it is used in real code.
The following presents 9 code examples of the ImdbParser class, sorted by popularity by default.
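Before the examples, here is a minimal self-contained sketch of typical ImdbParser usage. It is only a sketch and assumes a working FlexGet installation with network access; the title id is the one used in Example 5 below, and the attributes printed are the ones the examples exercise.
from flexget.utils.imdb import ImdbParser

parser = ImdbParser()
# parse() accepts an IMDb title id or a full title URL, as the examples below show
parser.parse('tt0114814')

# After parse() the parsed data is available as plain attributes
print(parser.name, parser.year)      # title and release year
print(parser.score, parser.votes)    # rating and vote count
print(sorted(parser.genres))         # lower-case genre names
print(parser.directors)              # dict of {imdb person id: name}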
Example 1: test_no_year
def test_no_year(self):
# Make sure parser doesn't crash for movies with no year
parser = ImdbParser()
parser.parse('tt3303790')
assert parser.name == 'Master of None'
# There is no year
assert not parser.year
Example 2: test_no_plot
def test_no_plot(self):
# Make sure parser doesn't crash for movies with no plot
parser = ImdbParser()
parser.parse('tt0245062')
assert parser.name == 'The Magnet'
# There is no plot
assert not parser.plot_outline
Example 3: test_plot_with_links
def test_plot_with_links(self):
"""Make sure plot doesn't terminate at the first link. GitHub #756"""
parser = ImdbParser()
parser.parse('tt2503944')
assert parser.plot_outline == ("Chef Adam Jones (Bradley Cooper) had it all - and lost it. A two-star Michelin "
"rockstar with the bad habits to match, the former enfant terrible of the Paris "
"restaurant scene did everything different every time out, and only ever cared "
"about the thrill of creating explosions of taste. To land his own kitchen and "
"that third elusive Michelin star though, he'll need the best of the best on "
"his side, including the beautiful Helene (Sienna Miller).")
Example 4: _parse_new_movie
def _parse_new_movie(self, imdb_url, session):
"""
Get Movie object by parsing imdb page and save movie into the database.
:param imdb_url: IMDB url
:param session: Session to be used
:return: Newly added Movie
"""
parser = ImdbParser()
parser.parse(imdb_url)
# store to database
movie = Movie()
movie.photo = parser.photo
movie.title = parser.name
movie.original_title = parser.original_name
movie.score = parser.score
movie.votes = parser.votes
movie.meta_score = parser.meta_score
movie.year = parser.year
movie.mpaa_rating = parser.mpaa_rating
movie.plot_outline = parser.plot_outline
movie.url = imdb_url
for name in parser.genres:
genre = session.query(Genre).filter(Genre.name == name).first()
if not genre:
genre = Genre(name)
movie.genres.append(genre) # pylint:disable=E1101
for index, name in enumerate(parser.languages):
language = session.query(Language).filter(Language.name == name).first()
if not language:
language = Language(name)
movie.languages.append(MovieLanguage(language, prominence=index))
for imdb_id, name in parser.actors.items():
actor = session.query(Actor).filter(Actor.imdb_id == imdb_id).first()
if not actor:
actor = Actor(imdb_id, name)
movie.actors.append(actor) # pylint:disable=E1101
for imdb_id, name in parser.directors.items():
director = session.query(Director).filter(Director.imdb_id == imdb_id).first()
if not director:
director = Director(imdb_id, name)
movie.directors.append(director) # pylint:disable=E1101
for imdb_id, name in parser.writers.items():
writer = session.query(Writer).filter(Writer.imdb_id == imdb_id).first()
if not writer:
writer = Writer(imdb_id, name)
movie.writers.append(writer) # pylint:disable=E1101
# so that we can track how long since we've updated the info later
movie.updated = datetime.now()
session.add(movie)
return movie
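For orientation, a hedged sketch of a call site for a helper like this one. Everything here is assumed for illustration: queue_plugin stands in for an instance of whatever plugin class defines _parse_new_movie, and Session is FlexGet's SQLAlchemy session factory from flexget.manager.
from flexget.manager import Session

session = Session()
try:
    # queue_plugin is a hypothetical instance of the class that defines _parse_new_movie
    movie = queue_plugin._parse_new_movie('http://www.imdb.com/title/tt0114814/', session)
    session.commit()
    print(movie.title, movie.year)
finally:
    session.close()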
Example 5: test_parsed_data
def test_parsed_data(self):
parser = ImdbParser()
parser.parse('tt0114814')
assert parser.actors == {
'nm0000592': 'Pete Postlethwaite',
'nm0261452': 'Christine Estabrook',
'nm0000751': 'Suzy Amis',
'nm0000286': 'Stephen Baldwin',
'nm0000445': 'Dan Hedaya',
'nm0800339': 'Phillipe Simon',
'nm0002064': 'Giancarlo Esposito',
'nm0001590': 'Chazz Palminteri',
'nm0000321': 'Gabriel Byrne',
'nm0790436': 'Jack Shearer',
'nm0000228': 'Kevin Spacey',
'nm0001629': 'Kevin Pollak',
'nm0107808': 'Carl Bressler',
'nm0001125': 'Benicio Del Toro',
'nm0000860': 'Paul Bartel'
}, 'Actors not parsed correctly'
assert parser.directors == {'nm0001741': 'Bryan Singer'}, 'Directors not parsed correctly'
assert len(set(parser.genres).intersection([u'crime', u'drama', u'mystery', u'thriller'])) == \
len([u'crime', u'drama', u'mystery', u'thriller']), 'Genres not parsed correctly'
assert parser.imdb_id == 'tt0114814', 'ID not parsed correctly'
assert len(set(parser.languages).intersection(
['english', 'hungarian', 'spanish', 'french'])) == 4, 'Languages not parsed correctly'
assert parser.mpaa_rating == 'R', 'Rating not parsed correctly'
assert parser.name == 'The Usual Suspects', 'Name not parsed correctly'
assert (parser.photo ==
'https://images-na.ssl-images-amazon.com/images/M/[email protected]@.'
'_V1_UX182_CR0,0,182,268_AL_.jpg'
), 'Photo not parsed correctly'
assert parser.plot_outline == (
'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
'but then the influence of a legendary mastermind criminal called Keyser S\xf6ze is felt. It becomes '
'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is Keyser S\xf6ze?'
), 'Plot outline not parsed correctly'
assert 8.0 < parser.score < 9.0, 'Score not parsed correctly'
assert parser.url == 'http://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
assert 400000 < parser.votes < 1000000, 'Votes not parsed correctly'
assert parser.year == 1995, 'Year not parsed correctly'
Example 6: upgrade
def upgrade(ver, session):
if ver == 0:
# Translate old qualities into new quality requirements
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
# Webdl quality no longer has dash
new_qual = row['quality'].replace('web-dl', 'webdl')
if new_qual.lower() != 'any':
# Old behavior was to get specified quality or greater, approximate that with new system
new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': new_qual}))
ver = 1
if ver == 1:
# Bad upgrade left some qualities as 'ANY+'
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
if row['quality'].lower() == 'any+':
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': 'ANY'}))
ver = 2
if ver == 2:
from flexget.utils.imdb import ImdbParser
# Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
movie_table = table_schema('movie_queue', session)
queue_base_table = table_schema('queue', session)
query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
query = query.where(movie_table.c.id == queue_base_table.c.id)
for row in session.execute(query):
if row['imdb_id'] and (not row['title'] or row['title'] == 'None' or '\n' in row['title']):
log.info('Fixing movie_queue title for %s' % row['imdb_id'])
parser = ImdbParser()
parser.parse(row['imdb_id'])
if parser.name:
session.execute(update(queue_base_table, queue_base_table.c.id == row['id'],
{'title': parser.name}))
ver = 3
if ver == 3:
# Add the queue_name column to the movie_queue table and set its initial value to 'default'
table_add_column('movie_queue', 'queue_name', Unicode, session, default='default')
ver = 4
return ver
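For context, this kind of migration function is registered against the plugin's schema through FlexGet's db_schema module. The sketch below reflects that pattern, but treat the decorator usage, plugin key, and version number as assumptions inferred from the code above rather than the plugin's actual source.
from flexget import db_schema

# Assumed wiring: tie the plugin's declarative base to schema version 4
Base = db_schema.versioned_base('movie_queue', 4)

@db_schema.upgrade('movie_queue')
def upgrade(ver, session):
    # ver is the schema version currently stored in the database; the function
    # migrates step by step and returns the new version, as the example above does
    ...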
Example 7: upgrade
def upgrade(ver, session):
if ver == 0:
# Translate old qualities into new quality requirements
movie_table = table_schema("movie_queue", session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
# Webdl quality no longer has dash
new_qual = row["quality"].replace("web-dl", "webdl")
if new_qual.lower() != "any":
# Old behavior was to get specified quality or greater, approximate that with new system
new_qual = " ".join(qual + "+" for qual in new_qual.split(" "))
session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": new_qual}))
ver = 1
if ver == 1:
# Bad upgrade left some qualities as 'ANY+'
movie_table = table_schema("movie_queue", session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
if row["quality"].lower() == "any+":
session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": "ANY"}))
ver = 2
if ver == 2:
from flexget.utils.imdb import ImdbParser
# Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
movie_table = table_schema("movie_queue", session)
queue_base_table = table_schema("queue", session)
query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
query = query.where(movie_table.c.id == queue_base_table.c.id)
for row in session.execute(query):
if row["imdb_id"] and (not row["title"] or row["title"] == "None" or "\n" in row["title"]):
log.info("Fixing movie_queue title for %s" % row["imdb_id"])
parser = ImdbParser()
parser.parse(row["imdb_id"])
if parser.name:
session.execute(
update(queue_base_table, queue_base_table.c.id == row["id"], {"title": parser.name})
)
ver = 3
return ver
Example 8: lookup
#......... part of the code is omitted here .........
session.add(result)
log.verbose('Found %s' % (entry['imdb_url']))
else:
log_once('Imdb lookup failed for %s' % entry['title'], log)
# store FAIL for this title
result = SearchResult(entry['title'])
result.fails = True
session.add(result)
raise PluginError('Title `%s` lookup failed' % entry['title'])
# check if this imdb page has been parsed & cached
movie = session.query(Movie).\
options(joinedload_all(Movie.genres, Movie.languages,
Movie.actors, Movie.directors)).\
filter(Movie.url == entry['imdb_url']).first()
refresh_interval = 2
if movie:
if movie.year:
age = (datetime.now().year - movie.year)
refresh_interval += age * 5
log.debug('cached movie `%s` age %i refresh interval %i days' % (movie.title, age, refresh_interval))
if not movie or movie.updated is None or \
movie.updated < datetime.now() - timedelta(days=refresh_interval):
# Remove the old movie, we'll store another one later.
session.query(Movie).filter(Movie.url == entry['imdb_url']).delete()
# search and store to cache
if 'title' in entry:
log.verbose('Parsing imdb for `%s`' % entry['title'])
else:
log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
try:
take_a_break = True
imdb = ImdbParser()
imdb.parse(entry['imdb_url'])
# store to database
movie = Movie()
movie.photo = imdb.photo
movie.title = imdb.name
movie.score = imdb.score
movie.votes = imdb.votes
movie.year = imdb.year
movie.mpaa_rating = imdb.mpaa_rating
movie.plot_outline = imdb.plot_outline
movie.url = entry['imdb_url']
for name in imdb.genres:
genre = session.query(Genre).\
filter(Genre.name == name).first()
if not genre:
genre = Genre(name)
movie.genres.append(genre) # pylint:disable=E1101
for name in imdb.languages:
language = session.query(Language).\
filter(Language.name == name).first()
if not language:
language = Language(name)
movie.languages.append(language) # pylint:disable=E1101
for imdb_id, name in imdb.actors.iteritems():
actor = session.query(Actor).\
filter(Actor.imdb_id == imdb_id).first()
if not actor:
actor = Actor(imdb_id, name)
movie.actors.append(actor) # pylint:disable=E1101
for imdb_id, name in imdb.directors.iteritems():
director = session.query(Director).\
filter(Director.imdb_id == imdb_id).first()
if not director:
director = Director(imdb_id, name)
movie.directors.append(director) # pylint:disable=E1101
# so that we can track how long since we've updated the info later
movie.updated = datetime.now()
session.add(movie)
except UnicodeDecodeError:
log.error('Unable to determine encoding for %s. Installing chardet library may help.' % entry['imdb_url'])
# store cache so this will not be tried again
movie = Movie()
movie.url = entry['imdb_url']
session.add(movie)
raise PluginError('UnicodeDecodeError')
except ValueError, e:
# TODO: might be a little too broad catch, what was this for anyway? ;P
if manager.options.debug:
log.exception(e)
raise PluginError('Invalid parameter: %s' % entry['imdb_url'], log)
for att in ['title', 'score', 'votes', 'year', 'genres', 'languages', 'actors', 'directors', 'mpaa_rating']:
log.trace('movie.%s: %s' % (att, getattr(movie, att)))
# store to entry
entry.update_using_map(self.field_map, movie)
# give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
if (take_a_break and
not manager.options.debug and
not manager.unit_test):
import time
time.sleep(3)
Example 9: add_new_downloaditem_pre
#......... part of the code is omitted here .........
try:
match = tvdbapi[series_name]
logger.debug("Show found")
instance.tvdbid_id = int(match['id'])
if match['imdb_id'] is not None:
logger.debug("also found imdbid %s from thetvdb" % match['imdb_id'])
instance.imdbid_id = int(match['imdb_id'].lstrip("tt"))
except tvdb_shownotfound:
logger.exception("Error finding show on thetvdb %s" % series_name)
except Exception as e:
logger.exception("Error finding : %s via thetvdb.com due to %s" % (series_name, e.message))
else:
logger.exception("Unable to parse series info")
else:
#must be a movie!
if instance.imdbid_id is None:
logger.debug("Looks like we are working with a Movie")
#Let's try to find the movie details
parser = instance.metaparser()
movie_title = parser.details['title']
if 'year' in parser.details:
movie_year = parser.details['year']
else:
movie_year = None
imdbs = ImdbSearch()
results = imdbs.best_match(movie_title, movie_year)
if results and results['match'] > 0.70:
movieObj = ImdbParser()
movieObj.parse(results['url'])
logger.debug("Found imdb movie id %s" % movieObj.imdb_id)
instance.imdbid_id = int(movieObj.imdb_id.lstrip("tt"))
else:
logger.debug("Didnt find a good enough match on imdb")
#Now that we have sorted out both the imdb id and the tvdb id, tie it all together
#If we have a tvdbid, do we need to add it to the db, or does it already exist or is it ignored?
if instance.tvdbid_id is not None and instance.tvdbid_id != "":
#Does it already exist?
try:
if instance.tvdbid:
#Do we need to update it
curTime = timezone.now()
hours = 0
if instance.tvdbid.updated is None:
hours = 50
else:
diff = curTime - instance.tvdbid.updated
hours = diff.total_seconds() / 60 / 60
if hours > 24:
try:
instance.tvdbid.update_from_tvdb()
instance.tvdbid.save()
except Exception as e:
logger.exception("Error updating TVDB info %s" % e.message)