本文整理汇总了Python中flexget.utils.imdb.ImdbParser.parse方法的典型用法代码示例。如果您正苦于以下问题:Python ImdbParser.parse方法的具体用法?Python ImdbParser.parse怎么用?Python ImdbParser.parse使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类flexget.utils.imdb.ImdbParser
的用法示例。
在下文中一共展示了ImdbParser.parse方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_no_year
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def test_no_year(self):
    """Parser must tolerate titles that carry no release year."""
    imdb = ImdbParser()
    imdb.parse('tt3303790')
    assert imdb.name == 'Master of None'
    # This title has no year on its imdb page; attribute must stay falsy.
    assert not imdb.year
示例2: test_no_plot
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def test_no_plot(self):
    """Parser must tolerate titles that carry no plot outline."""
    imdb = ImdbParser()
    imdb.parse('tt0245062')
    assert imdb.name == 'The Magnet'
    # This title has no plot outline; attribute must stay falsy.
    assert not imdb.plot_outline
示例3: test_plot_with_links
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def test_plot_with_links(self):
    """Make sure plot doesn't terminate at the first link. GitHub #756"""
    imdb = ImdbParser()
    imdb.parse('tt2503944')
    expected_plot = ("Chef Adam Jones (Bradley Cooper) had it all - and lost it. A two-star Michelin "
                     "rockstar with the bad habits to match, the former enfant terrible of the Paris "
                     "restaurant scene did everything different every time out, and only ever cared "
                     "about the thrill of creating explosions of taste. To land his own kitchen and "
                     "that third elusive Michelin star though, he'll need the best of the best on "
                     "his side, including the beautiful Helene (Sienna Miller).")
    assert imdb.plot_outline == expected_plot
示例4: _parse_new_movie
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def _parse_new_movie(self, imdb_url, session):
    """
    Get Movie object by parsing imdb page and save movie into the database.

    :param imdb_url: IMDB url
    :param session: SQLAlchemy session to be used
    :return: Newly added Movie (attached to *session*, not yet committed)
    """

    def _get_or_create(model, criterion, build):
        # Return the first existing row of *model* matching *criterion*,
        # or construct a fresh (not yet persisted) instance via build().
        found = session.query(model).filter(criterion).first()
        return found if found is not None else build()

    parser = ImdbParser()
    parser.parse(imdb_url)
    # Copy the flat scalar fields straight off the parser.
    movie = Movie()
    movie.photo = parser.photo
    movie.title = parser.name
    movie.original_title = parser.original_name
    movie.score = parser.score
    movie.votes = parser.votes
    movie.meta_score = parser.meta_score
    movie.year = parser.year
    movie.mpaa_rating = parser.mpaa_rating
    movie.plot_outline = parser.plot_outline
    movie.url = imdb_url
    # Relations: reuse existing rows where possible to avoid duplicates.
    # Lambdas bind loop variables as defaults to avoid late-binding surprises.
    for name in parser.genres:
        genre = _get_or_create(Genre, Genre.name == name, lambda n=name: Genre(n))
        movie.genres.append(genre)  # pylint:disable=E1101
    for index, name in enumerate(parser.languages):
        language = _get_or_create(Language, Language.name == name, lambda n=name: Language(n))
        # prominence preserves the order languages appear in on the imdb page
        movie.languages.append(MovieLanguage(language, prominence=index))
    for imdb_id, name in parser.actors.items():
        actor = _get_or_create(Actor, Actor.imdb_id == imdb_id,
                               lambda i=imdb_id, n=name: Actor(i, n))
        movie.actors.append(actor)  # pylint:disable=E1101
    for imdb_id, name in parser.directors.items():
        director = _get_or_create(Director, Director.imdb_id == imdb_id,
                                  lambda i=imdb_id, n=name: Director(i, n))
        movie.directors.append(director)  # pylint:disable=E1101
    for imdb_id, name in parser.writers.items():
        writer = _get_or_create(Writer, Writer.imdb_id == imdb_id,
                                lambda i=imdb_id, n=name: Writer(i, n))
        movie.writers.append(writer)  # pylint:disable=E1101
    # so that we can track how long since we've updated the info later
    movie.updated = datetime.now()
    session.add(movie)
    return movie
示例5: test_parsed_data
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def test_parsed_data(self):
    """Spot-check every field parsed for 'The Usual Suspects' (tt0114814)."""
    imdb = ImdbParser()
    imdb.parse('tt0114814')
    expected_actors = {
        'nm0000592': 'Pete Postlethwaite',
        'nm0261452': 'Christine Estabrook',
        'nm0000751': 'Suzy Amis',
        'nm0000286': 'Stephen Baldwin',
        'nm0000445': 'Dan Hedaya',
        'nm0800339': 'Phillipe Simon',
        'nm0002064': 'Giancarlo Esposito',
        'nm0001590': 'Chazz Palminteri',
        'nm0000321': 'Gabriel Byrne',
        'nm0790436': 'Jack Shearer',
        'nm0000228': 'Kevin Spacey',
        'nm0001629': 'Kevin Pollak',
        'nm0107808': 'Carl Bressler',
        'nm0001125': 'Benicio Del Toro',
        'nm0000860': 'Paul Bartel',
    }
    assert imdb.actors == expected_actors, 'Actors not parsed correctly'
    assert imdb.directors == {'nm0001741': 'Bryan Singer'}, 'Directors not parsed correctly'
    expected_genres = [u'crime', u'drama', u'mystery', u'thriller']
    assert len(set(imdb.genres).intersection(expected_genres)) == len(expected_genres), \
        'Genres not parsed correctly'
    assert imdb.imdb_id == 'tt0114814', 'ID not parsed correctly'
    expected_languages = ['english', 'hungarian', 'spanish', 'french']
    assert len(set(imdb.languages).intersection(expected_languages)) == 4, \
        'Languages not parsed correctly'
    assert imdb.mpaa_rating == 'R', 'Rating not parsed correctly'
    assert imdb.name == 'The Usual Suspects', 'Name not parsed correctly'
    # NOTE(review): the poster filename below looks mangled ("[email protected]"),
    # likely by the page this example was scraped from -- confirm against live data.
    expected_photo = ('https://images-na.ssl-images-amazon.com/images/M/[email protected]@.'
                      '_V1_UX182_CR0,0,182,268_AL_.jpg')
    assert imdb.photo == expected_photo, 'Photo not parsed correctly'
    expected_plot = (
        'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
        'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
        'but then the influence of a legendary mastermind criminal called Keyser S\xf6ze is felt. It becomes '
        'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
        'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is Keyser S\xf6ze?'
    )
    assert imdb.plot_outline == expected_plot, 'Plot outline not parsed correctly'
    assert 8.0 < imdb.score < 9.0, 'Score not parsed correctly'
    assert imdb.url == 'http://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
    assert 400000 < imdb.votes < 1000000, 'Votes not parsed correctly'
    assert imdb.year == 1995, 'Year not parsed correctly'
示例6: upgrade
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def upgrade(ver, session):
    """Step the movie_queue schema from revision *ver* forward; return the new revision."""
    if ver == 0:
        # Translate old qualities into new quality requirements
        movies = table_schema('movie_queue', session)
        for record in session.execute(select([movies.c.id, movies.c.quality])):
            # Webdl quality no longer has dash
            quality_req = record['quality'].replace('web-dl', 'webdl')
            if quality_req.lower() != 'any':
                # Old behavior was to get specified quality or greater, approximate that with new system
                quality_req = ' '.join(component + '+' for component in quality_req.split(' '))
            session.execute(update(movies, movies.c.id == record['id'],
                                   {'quality': quality_req}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movies = table_schema('movie_queue', session)
        for record in session.execute(select([movies.c.id, movies.c.quality])):
            if record['quality'].lower() == 'any+':
                session.execute(update(movies, movies.c.id == record['id'],
                                       {'quality': 'ANY'}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser
        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        movies = table_schema('movie_queue', session)
        queue = table_schema('queue', session)
        joined = select([movies.c.id, movies.c.imdb_id, queue.c.title]).where(
            movies.c.id == queue.c.id)
        for record in session.execute(joined):
            broken = not record['title'] or record['title'] == 'None' or '\n' in record['title']
            if record['imdb_id'] and broken:
                log.info('Fixing movie_queue title for %s' % record['imdb_id'])
                imdb = ImdbParser()
                imdb.parse(record['imdb_id'])
                if imdb.name:
                    session.execute(update(queue, queue.c.id == record['id'],
                                           {'title': imdb.name}))
        ver = 3
    if ver == 3:
        # adding queue_name column to movie_queue table and setting initial value to default)
        table_add_column('movie_queue', 'queue_name', Unicode, session, default='default')
        ver = 4
    return ver
示例7: upgrade
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def upgrade(ver, session):
    """Apply movie_queue schema migrations starting at revision *ver*; return the final revision."""
    if ver == 0:
        # Translate old qualities into new quality requirements
        queue_table = table_schema("movie_queue", session)
        for entry in session.execute(select([queue_table.c.id, queue_table.c.quality])):
            # Webdl quality no longer has dash
            requirement = entry["quality"].replace("web-dl", "webdl")
            if requirement.lower() != "any":
                # Old behavior was to get specified quality or greater, approximate that with new system
                requirement = " ".join(q + "+" for q in requirement.split(" "))
            session.execute(update(queue_table, queue_table.c.id == entry["id"], {"quality": requirement}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        queue_table = table_schema("movie_queue", session)
        for entry in session.execute(select([queue_table.c.id, queue_table.c.quality])):
            if entry["quality"].lower() == "any+":
                session.execute(update(queue_table, queue_table.c.id == entry["id"], {"quality": "ANY"}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser
        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        queue_table = table_schema("movie_queue", session)
        base_table = table_schema("queue", session)
        joined = select([queue_table.c.id, queue_table.c.imdb_id, base_table.c.title]).where(
            queue_table.c.id == base_table.c.id)
        for entry in session.execute(joined):
            corrupted = not entry["title"] or entry["title"] == "None" or "\n" in entry["title"]
            if entry["imdb_id"] and corrupted:
                log.info("Fixing movie_queue title for %s" % entry["imdb_id"])
                imdb = ImdbParser()
                imdb.parse(entry["imdb_id"])
                if imdb.name:
                    session.execute(
                        update(base_table, base_table.c.id == entry["id"], {"title": imdb.name})
                    )
        ver = 3
    return ver
示例8: lookup
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
#......... portion of the code omitted in this excerpt .........
# NOTE(review): this is the visible tail of a lookup() method whose opening
# (including the branch condition above the first line here) is not shown;
# indentation below is reconstructed and should be verified against the full file.
                session.add(result)
                log.verbose('Found %s' % (entry['imdb_url']))
            else:
                # Search failed: cache the failure so the same title is not retried.
                log_once('Imdb lookup failed for %s' % entry['title'], log)
                # store FAIL for this title
                result = SearchResult(entry['title'])
                result.fails = True
                session.add(result)
                raise PluginError('Title `%s` lookup failed' % entry['title'])
            # check if this imdb page has been parsed & cached
            movie = session.query(Movie).\
                options(joinedload_all(Movie.genres, Movie.languages,
                                       Movie.actors, Movie.directors)).\
                filter(Movie.url == entry['imdb_url']).first()
            # Older movies change less often, so the cache refresh interval
            # grows by 5 days per year of age on top of the 2-day base.
            refresh_interval = 2
            if movie:
                if movie.year:
                    age = (datetime.now().year - movie.year)
                    refresh_interval += age * 5
                    log.debug('cached movie `%s` age %i refresh interval %i days' % (movie.title, age, refresh_interval))
            # Re-parse when there is no cache entry, or the cached one is stale.
            if not movie or movie.updated is None or \
                    movie.updated < datetime.now() - timedelta(days=refresh_interval):
                # Remove the old movie, we'll store another one later.
                session.query(Movie).filter(Movie.url == entry['imdb_url']).delete()
                # search and store to cache
                if 'title' in entry:
                    log.verbose('Parsing imdb for `%s`' % entry['title'])
                else:
                    log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
                try:
                    # Flag that a real network fetch happened so we sleep later.
                    take_a_break = True
                    imdb = ImdbParser()
                    imdb.parse(entry['imdb_url'])
                    # store to database
                    movie = Movie()
                    movie.photo = imdb.photo
                    movie.title = imdb.name
                    movie.score = imdb.score
                    movie.votes = imdb.votes
                    movie.year = imdb.year
                    movie.mpaa_rating = imdb.mpaa_rating
                    movie.plot_outline = imdb.plot_outline
                    movie.url = entry['imdb_url']
                    # Reuse existing relation rows where present to avoid duplicates.
                    for name in imdb.genres:
                        genre = session.query(Genre).\
                            filter(Genre.name == name).first()
                        if not genre:
                            genre = Genre(name)
                        movie.genres.append(genre)  # pylint:disable=E1101
                    for name in imdb.languages:
                        language = session.query(Language).\
                            filter(Language.name == name).first()
                        if not language:
                            language = Language(name)
                        movie.languages.append(language)  # pylint:disable=E1101
                    # NOTE(review): iteritems() is Python 2 only -- this module predates Python 3.
                    for imdb_id, name in imdb.actors.iteritems():
                        actor = session.query(Actor).\
                            filter(Actor.imdb_id == imdb_id).first()
                        if not actor:
                            actor = Actor(imdb_id, name)
                        movie.actors.append(actor)  # pylint:disable=E1101
                    for imdb_id, name in imdb.directors.iteritems():
                        director = session.query(Director).\
                            filter(Director.imdb_id == imdb_id).first()
                        if not director:
                            director = Director(imdb_id, name)
                        movie.directors.append(director)  # pylint:disable=E1101
                    # so that we can track how long since we've updated the info later
                    movie.updated = datetime.now()
                    session.add(movie)
                except UnicodeDecodeError:
                    log.error('Unable to determine encoding for %s. Installing chardet library may help.' % entry['imdb_url'])
                    # store cache so this will not be tried again
                    movie = Movie()
                    movie.url = entry['imdb_url']
                    session.add(movie)
                    raise PluginError('UnicodeDecodeError')
                # NOTE(review): Python 2 except syntax; would be `except ValueError as e` on Python 3.
                except ValueError, e:
                    # TODO: might be a little too broad catch, what was this for anyway? ;P
                    if manager.options.debug:
                        log.exception(e)
                    raise PluginError('Invalid parameter: %s' % entry['imdb_url'], log)
            for att in ['title', 'score', 'votes', 'year', 'genres', 'languages', 'actors', 'directors', 'mpaa_rating']:
                log.trace('movie.%s: %s' % (att, getattr(movie, att)))
            # store to entry
            entry.update_using_map(self.field_map, movie)
            # give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
            if (take_a_break and
                    not manager.options.debug and
                    not manager.unit_test):
                import time
                time.sleep(3)
示例9: add_new_downloaditem_pre
# 需要导入模块: from flexget.utils.imdb import ImdbParser [as 别名]
# 或者: from flexget.utils.imdb.ImdbParser import parse [as 别名]
def add_new_downloaditem_pre(sender, instance, **kwargs):
    """Pre-save hook for DownloadItem: dedupe against existing records and fill
    in defaults (status, section, title, localpath) before first save.

    NOTE(review): the tail of this function is omitted in this excerpt and the
    original indentation was lost; the nesting below is reconstructed and should
    be verified against the full file.
    """
    # Only run for brand-new rows (no primary key assigned yet).
    if instance.id is None:
        from lazy_common import metaparser
        logger.debug("Adding a new download %s" % instance.ftppath)
        instance.ftppath = instance.ftppath.strip()
        # Check if it exists already..
        try:
            existing_obj = DownloadItem.objects.get(ftppath=instance.ftppath)
            if existing_obj:
                logger.info("Found existing record %s" % instance.ftppath)
                if existing_obj.status == DownloadItem.COMPLETE:
                    # its complete... maybe delete it so we can re-add if its older then 2 weeks?
                    curTime = timezone.now()
                    hours = 0
                    if existing_obj.dateadded is None:
                        # No timestamp recorded: treat as old enough to replace (300 > 288).
                        hours = 300
                    else:
                        diff = curTime - existing_obj.dateadded
                        hours = diff.total_seconds() / 60 / 60
                    # 288 hours == 12 days; older completed items are purged and re-added.
                    if hours > 288:
                        existing_obj.delete()
                    else:
                        # NOTE(review): 'AlradyExists' is a misspelled exception class
                        # defined elsewhere in the project; the name must stay as-is.
                        raise AlradyExists()
                else:
                    # lets update it with the new downloaded eps
                    # NOTE(review): iteritems() is Python 2 only.
                    if instance.onlyget is not None:
                        for get_season, get_eps in instance.onlyget.iteritems():
                            for get_ep in get_eps:
                                existing_obj.add_download(get_season, get_ep)
                        existing_obj.reset()
                        existing_obj.save()
                        raise AlradyExists_Updated(existing_obj)
                    raise AlradyExists_Updated(existing_obj)
        except ObjectDoesNotExist:
            # No duplicate found: proceed with the new instance.
            pass
        # Set default status as download queue
        if instance.status is None:
            instance.status = 1
        # Get section and title from the ftp path (e.g. /SECTION/.../TITLE).
        if instance.section is None:
            split = instance.ftppath.split("/")
            try:
                section = split[1]
                title = split[-1]
            # NOTE(review): bare except hides the real error; narrow to IndexError if confirmed safe.
            except:
                raise Exception("Unable to determine section from path %s" % instance.ftppath)
            if section:
                instance.section = section
            else:
                raise Exception("Unable to determine section from path %s" % instance.ftppath)
            if title:
                instance.title = title
            else:
                raise Exception("Unable to determine title from path %s" % instance.ftppath)
        # Figure out the local path
        # NOTE(review): 'section' is only bound when instance.section was None above;
        # if section was preset and localpath is None this will raise NameError -- confirm.
        if instance.localpath is None:
            if section == "XVID" or section == "HD":
                path = settings.MOVIE_PATH_TEMP
            elif section == "TVHD" or section == "TV":
                path = settings.TV_PATH_TEMP
            elif section == "REQUESTS":
                path = settings.REQUESTS_PATH_TEMP
            else:
                raise Exception("Unable to find section path in config: %s" % section)
            instance.localpath = os.path.join(path, instance.title)
        instance.parse_title()
        parser = instance.metaparser()
        title = None
        if 'title' in parser.details:
            title = parser.details['title']
        if 'series' in parser.details:
            # Series take precedence over the plain title when both are present.
            title = TVShow.clean_title(parser.details['series'])
        # Ok now we know its a valid downloaditem lets add it to the db
        tvdbapi = Tvdb()
        type = instance.get_type()
        from lazy_common import metaparser
        # must be a tvshow
#......... remainder of the code omitted in this excerpt .........