This article collects typical code examples showing how the Python method newspaper.Source.category_urls is used. If you are wondering what Source.category_urls does, how to call it, or what it looks like in practice, the curated examples below may help. You can also look further into usage examples for the containing class, newspaper.Source.
Five code examples of the Source.category_urls method are shown below, sorted by popularity by default.
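Before the test-suite examples below, here is a minimal, hedged sketch of the most common way to reach category_urls() in ordinary application code: newspaper.build() constructs a Source, downloads and parses the site, and category_urls() then returns the category links that were discovered. The target URL is purely illustrative, and the output depends on the live site at the time of the call.

import newspaper

# build() downloads and parses the site's homepage, then extracts
# categories, feeds, and article URLs into a Source object.
paper = newspaper.build('http://cnn.com', memoize_articles=False)

# category_urls() returns the list of category URLs found for the source.
for category_url in paper.category_urls():
    print(category_url)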
Example 1: test_cache_categories
# Required import: from newspaper import Source [as alias]
# Or: from newspaper.Source import category_urls [as alias]
def test_cache_categories(self):
    """
    Builds two identical source objects in a row and examines the speed
    of both builds.
    """
    s = Source("http://yahoo.com")
    s.download()
    s.parse()
    s.set_categories()
    saved_urls = s.category_urls()
    s.categories = []  # reset and try again with caching
    s.set_categories()
    assert sorted(s.category_urls()) == sorted(saved_urls)
Example 2: test_cache_categories
# Required import: from newspaper import Source [as alias]
# Or: from newspaper.Source import category_urls [as alias]
def test_cache_categories(self):
    """Builds two identical source objects in a row and examines the speed
    of both builds.
    """
    url = 'http://uk.yahoo.com'
    # mock_response_with() is a helper from newspaper's own test suite that
    # serves a saved copy of the page instead of hitting the live site.
    mock_response_with(url, 'yahoo_main_site')
    s = Source(url)
    s.download()
    s.parse()
    s.set_categories()
    saved_urls = s.category_urls()
    s.categories = []
    s.set_categories()
    assert sorted(s.category_urls()) == sorted(saved_urls)
Example 3: test_cache_categories
# Required import: from newspaper import Source [as alias]
# Or: from newspaper.Source import category_urls [as alias]
def test_cache_categories(self):
    """Builds two identical source objects in a row and examines the speed
    of both builds.
    """
    url = 'http://uk.yahoo.com'
    # mock_resource_with() is a helper from newspaper's own test suite that
    # loads a saved HTML fixture for the page.
    html = mock_resource_with('yahoo_main_site', 'html')
    s = Source(url)
    s.download()
    s.parse()
    s.set_categories()
    saved_urls = s.category_urls()
    s.categories = []
    s.set_categories()
    self.assertCountEqual(saved_urls, s.category_urls())
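Examples 2 and 3 depend on mock_response_with() and mock_resource_with(), which exist only inside newspaper's own test suite. Outside that suite, one way to get the same deterministic behaviour is to stub the HTTP layer yourself. The sketch below is an assumption-laden illustration: it assumes the third-party responses package is installed and that newspaper downloads pages through requests (which responses can intercept); the fixture HTML and URL are placeholders.

import re

import responses
from newspaper import Source

# Stand-in fixture; a real test would load a saved copy of the site's HTML.
FIXTURE_HTML = '<html><body><a href="http://cnn.com/WORLD">World</a></body></html>'

@responses.activate
def test_cache_categories_with_stubbed_download():
    # Serve the fixture for any cnn.com URL instead of hitting the live site.
    responses.add(responses.GET, re.compile(r'.*cnn\.com.*'),
                  body=FIXTURE_HTML, status=200, content_type='text/html')

    s = Source('http://cnn.com')
    s.download()
    s.parse()
    s.set_categories()

    saved_urls = s.category_urls()
    s.categories = []   # clear, then rebuild to exercise the category cache
    s.set_categories()
    assert sorted(s.category_urls()) == sorted(saved_urls)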
Example 4: test_source_build
# Required import: from newspaper import Source [as alias]
# Or: from newspaper.Source import category_urls [as alias]
def test_source_build(self):
    """
    Builds a source object, validates it has no errors, and prints out
    all valid categories and feed urls.
    """
    DESC = """CNN.com delivers the latest breaking news and information on the latest top stories, weather, business, entertainment, politics, and more. For in-depth coverage, CNN.com provides special reports, video, audio, photo galleries, and interactive guides."""
    BRAND = "cnn"
    config = Configuration()  # Configuration also comes from the newspaper package
    config.verbose = False
    s = Source("http://cnn.com", config=config)
    s.clean_memo_cache()
    s.build()
    assert s.brand == BRAND
    assert s.description == DESC
    # For this test case and a few more, you cannot really assert that two
    # values equal each other because some values are ever-changing.
    # Instead, this just prints a few things so a quick glance at the output
    # shows whether it looks OK.
    print("\t\tWe have %d articles currently!" % s.size())
    print()
    print("\t\t%s categories are: %s" % (s.url, str(s.category_urls())))
Example 5: test_source_build
# Required import: from newspaper import Source [as alias]
# Or: from newspaper.Source import category_urls [as alias]
def test_source_build(self):
    """
    Builds a source object, validates it has no errors, and prints out
    all valid categories and feed urls.
    """
    DESC = ('CNN.com International delivers breaking news from across '
            'the globe and information on the latest top stories, '
            'business, sports and entertainment headlines. Follow the '
            'news as it happens through: special reports, videos, '
            'audio, photo galleries plus interactive maps and timelines.')
    CATEGORY_URLS = [
        u'http://cnn.com/ASIA', u'http://connecttheworld.blogs.cnn.com',
        u'http://cnn.com/HLN', u'http://cnn.com/MIDDLEEAST',
        u'http://cnn.com', u'http://ireport.cnn.com',
        u'http://cnn.com/video', u'http://transcripts.cnn.com',
        u'http://cnn.com/espanol',
        u'http://partners.cnn.com', u'http://www.cnn.com',
        u'http://cnn.com/US', u'http://cnn.com/EUROPE',
        u'http://cnn.com/TRAVEL', u'http://cnn.com/cnni',
        u'http://cnn.com/SPORT', u'http://cnn.com/mostpopular',
        u'http://arabic.cnn.com', u'http://cnn.com/WORLD',
        u'http://cnn.com/LATINAMERICA', u'http://us.cnn.com',
        u'http://travel.cnn.com', u'http://mexico.cnn.com',
        u'http://cnn.com/SHOWBIZ', u'http://edition.cnn.com',
        u'http://amanpour.blogs.cnn.com', u'http://money.cnn.com',
        u'http://cnn.com/tools/index.html', u'http://cnnespanol.cnn.com',
        u'http://cnn.com/CNNI', u'http://business.blogs.cnn.com',
        u'http://cnn.com/AFRICA', u'http://cnn.com/TECH',
        u'http://cnn.com/BUSINESS']
    FEEDS = [u'http://rss.cnn.com/rss/edition.rss']
    BRAND = 'cnn'
    s = Source('http://cnn.com', verbose=False, memoize_articles=False)
    url_re = re.compile(r".*cnn\.com")
    # mock_response_with() is a helper from newspaper's own test suite; here it
    # serves a saved copy of CNN's main page for any URL matching url_re.
    mock_response_with(url_re, 'cnn_main_site')
    s.clean_memo_cache()
    s.build()
    assert s.brand == BRAND
    assert s.description == DESC
    assert s.size() == 241
    assert s.category_urls() == CATEGORY_URLS
    # TODO: A lot of the feed extraction is NOT being tested because feeds
    # are primarily extracted from the HTML of category URLs. We lose this
    # effect by just mocking CNN's main page HTML. Warning: tedious fix.
    assert s.feed_urls() == FEEDS