当前位置: 首页>>代码示例>>Python>>正文


Python TestApp.build方法代码示例

本文整理汇总了Python中util.TestApp.build方法的典型用法代码示例。如果您正苦于以下问题:Python TestApp.build方法的具体用法?Python TestApp.build怎么用?Python TestApp.build使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在util.TestApp的用法示例。


在下文中一共展示了TestApp.build方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: setup_module

# 需要导入模块: from util import TestApp [as 别名]
# 或者: from util.TestApp import build [as 别名]
def setup_module():
    """Build the Sphinx test application once and record the baseline doctree.

    Populates the module-level ``app``, ``original`` and ``original_uids``
    globals that the versioning tests in this module rely on.  The
    ``doctree-resolved`` hook stores each resolved doctree so we can fetch
    the original ``versioning/original`` document after the build.
    """
    global app, original, original_uids
    app = TestApp()
    # The environment needs a back-reference to the app for versioning code.
    app.builder.env.app = app
    app.connect('doctree-resolved', on_doctree_resolved)
    app.build()
    original = doctrees['versioning/original']
    # Assign UIDs to every paragraph node and remember them for comparison.
    uids = []
    for node in add_uids(original, is_paragraph):
        uids.append(node.uid)
    original_uids = uids
开发者ID:zsiddiqui2,项目名称:sphinxGit,代码行数:10,代码来源:test_versioning.py

示例2: setup_module

# 需要导入模块: from util import TestApp [as 别名]
# 或者: from util.TestApp import build [as 别名]
def setup_module():
    """One-time module setup: build the "versioning" test root.

    Fills in the module globals ``app``, ``original`` and ``original_uids``.
    The ``doctree-resolved`` callback captures resolved doctrees into the
    module-level ``doctrees`` mapping, from which the pristine ``original``
    document is taken as the comparison baseline.
    """
    global app, original, original_uids
    app = TestApp(testroot="versioning")
    # Versioning code looks up the app through the build environment.
    app.builder.env.app = app
    app.connect("doctree-resolved", on_doctree_resolved)
    app.build()
    original = doctrees["original"]
    # Tag every paragraph with a UID and keep the list for later diffs.
    collected = []
    for paragraph_node in add_uids(original, is_paragraph):
        collected.append(paragraph_node.uid)
    original_uids = collected
开发者ID:Lyoness,项目名称:sphinx,代码行数:10,代码来源:test_versioning.py

示例3: test_feed

# 需要导入模块: from util import TestApp [as 别名]
# 或者: from util.TestApp import build [as 别名]
def test_feed():
    """End-to-end check of the RSS feed produced by the sphinx-feed extension.

    This is a nose generator test: each ``yield`` emits one assertion that
    nose runs and reports individually, so the order of yields is part of
    the test's behavior.  The project is built twice -- first from a clean
    environment, then incrementally with a stale file -- to verify the feed
    survives partial rebuilds.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (``feed_warnfile``, ``FEED_WARNINGS``, ``assert_equals``,
    ``exists``, ``feedparser``, ``BeautifulSoup``).  ``unicode`` marks this
    as Python 2 code.
    """
    # First build: clean environment, full build of every file.
    app = TestApp(buildername='html', warning=feed_warnfile, cleanenv=True)  
    app.build(force_all=True, filenames=[]) #build_all misses the crucial finish signal
    # Normalize path separators so expected warnings compare on Windows too.
    feed_warnings = feed_warnfile.getvalue().replace(os.sep, '/')
    feed_warnings_exp = FEED_WARNINGS % {'root': app.srcdir}
    yield assert_equals, feed_warnings, feed_warnings_exp
    rss_path = os.path.join(app.outdir, 'rss.xml')
    yield exists, rss_path
    
    # Base URL used by the feed for absolute entry links (Python 2 unicode).
    base_path = unicode("file:/" + app.outdir)
    
    # see http://www.feedparser.org/
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0 #feedparser well-formedness detection. We want this.
    entries = f.entries
    # Entries must be newest-first; check date, title, link and guid of each.
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"
    
    yield assert_equals, entries[0].link, base_path + '/B_latest.html'
    yield assert_equals, entries[0].guid, base_path + '/B_latest.html'
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[1].link, base_path + '/A_older.html'
    yield assert_equals, entries[1].guid, base_path + '/A_older.html'
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0,)
    yield assert_equals, entries[2].title, "The oldest blog post"
    yield assert_equals, entries[2].link, base_path + '/C_most_aged.html'
    yield assert_equals, entries[2].guid, base_path + '/C_most_aged.html'
    #Now we do it all again to make sure that things work when handling stale files
    # Second build: incremental (force_all=False), touching only one file.
    app2 = TestApp(buildername='html', warning=feed_warnfile)  
    app2.build(force_all=False, filenames=['most_aged'])
    f = feedparser.parse(rss_path)
    yield assert_equals, f.bozo, 0 #feedparser well-formedness detection. We want this.
    entries = f.entries
    # Entry order and metadata must be unchanged after the partial rebuild.
    yield assert_equals, entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0)
    yield assert_equals, entries[0].title, "The latest blog post"
    yield assert_equals, entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0)
    yield assert_equals, entries[1].title, "An older blog post"
    yield assert_equals, entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0)
    yield assert_equals, entries[2].title, "The oldest blog post"
    
    #Tests for relative URIs. note that these tests only work because there is
    # no xml:base - otherwise feedparser will supposedly fix them up for us - 
    # http://www.feedparser.org/docs/resolving-relative-links.html
    links = BeautifulSoup(entries[0].description).findAll('a')
    # These links will look like:
    #[<a class="headerlink" href="#the-latest-blog-post" title="Permalink to this headline">¶</a>, <a class="reference internal" href="older.html"><em>a relative link</em></a>, <a class="reference external" href="http://google.com/">an absolute link</a>]
    # pop() walks the link list from the end: absolute, internal, permalink.
    yield assert_equals, links.pop()['href'], "http://google.com/"
    yield assert_equals, links.pop()['href'], base_path + '/A_older.html'
    yield assert_equals, links.pop()['href'], entries[0].link + '#the-latest-blog-post'
    
    # Tear down both test applications (removes temporary build dirs).
    app.cleanup()
    app2.cleanup()
开发者ID:whardier,项目名称:sphinx-feed,代码行数:55,代码来源:test_build.py

示例4: test_feed_by_parsing_it

# 需要导入模块: from util import TestApp [as 别名]
# 或者: from util.TestApp import build [as 别名]
 def test_feed_by_parsing_it(self):
     """Unittest-style end-to-end check of the generated RSS feed.

     Builds the project twice -- clean/full, then incremental with a stale
     file -- parses ``rss.xml`` with feedparser after each build, and
     asserts entry order, dates, titles, links and guids.  Also verifies
     the "latest posts" list rendered into ``index.html``.

     NOTE(review): this is a method of a test class not visible in this
     excerpt; it reads ``self.feed_warnfile`` and ``self.FEED_WARNINGS``
     from that class, and ``unicode`` marks it as Python 2 code.
     """
     feed_warnfile = self.feed_warnfile
     # First build: clean environment, full build of every file.
     app = TestApp(buildername='html', warning=feed_warnfile, cleanenv=True)  
     app.build(force_all=True, filenames=[]) #build_all misses the crucial finish signal
     # Normalize path separators so expected warnings compare on Windows too.
     feed_warnings = feed_warnfile.getvalue().replace(os.sep, '/')
     feed_warnings_exp = self.FEED_WARNINGS % {'root': app.srcdir}
     self.assertEqual(feed_warnings, feed_warnings_exp)
     rss_path = os.path.join(app.outdir, 'rss.xml')
     self.assertTrue(exists(rss_path))
 
     # Base URL used by the feed for absolute entry links (Python 2 unicode).
     base_path = unicode("file:/" + app.outdir)
 
     # see http://www.feedparser.org/
     f = feedparser.parse(rss_path)
     #feedparser well-formedness detection. We want this.
     self.assertEqual(f.bozo, 0 )
     self.assertEqual(f.feed['title'], 'Sphinx Syndicate Test Title')
     entries = f.entries
     # Entries must be newest-first; check date, title, link and guid of each.
     self.assertEqual(entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0))
     self.assertEqual(entries[0].title, "The latest blog post")
 
     self.assertEqual(entries[0].link, base_path + '/B_latest.html')
     self.assertEqual(entries[0].guid, base_path + '/B_latest.html')
     self.assertEqual(entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0))
     self.assertEqual(entries[1].title, "An older blog post")
     self.assertEqual(entries[1].link, base_path + '/A_older.html')
     self.assertEqual(entries[1].guid, base_path + '/A_older.html')
     self.assertEqual(entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0,))
     self.assertEqual(entries[2].title, "The oldest blog post")
     self.assertEqual(entries[2].link, base_path + '/C_most_aged.html')
     self.assertEqual(entries[2].guid, base_path + '/C_most_aged.html')
     #Now we do it all again to make sure that things work when handling stale files
     # Second build: incremental (force_all=False), touching only one file.
     app2 = TestApp(buildername='html', warning=feed_warnfile)  
     app2.build(force_all=False, filenames=['most_aged'])
     f = feedparser.parse(rss_path)
     self.assertEqual(f.bozo, 0)
     entries = f.entries
     # Entry order and metadata must be unchanged after the partial rebuild.
     self.assertEqual(entries[0].updated_parsed[0:6], (2001, 8, 11, 13, 0, 0))
     self.assertEqual(entries[0].title, "The latest blog post")
     self.assertEqual(entries[1].updated_parsed[0:6], (2001, 8, 11, 9, 0, 0))
     self.assertEqual(entries[1].title, "An older blog post")
     self.assertEqual(entries[2].updated_parsed[0:6], (1979, 1, 1, 0, 0, 0))
     self.assertEqual(entries[2].title, "The oldest blog post")
 
     #Tests for relative URIs. note that these tests only work because there is
     # no xml:base - otherwise feedparser will supposedly fix them up for us - 
     # http://www.feedparser.org/docs/resolving-relative-links.html
     links = BeautifulSoup(entries[0].description).findAll('a')
     # These links will look like:
     #[<a class="headerlink" href="#the-latest-blog-post" title="Permalink to this headline">¶</a>, <a class="reference internal" href="older.html"><em>a relative link</em></a>, <a class="reference external" href="http://google.com/">an absolute link</a>]
     # pop() walks the link list from the end: absolute, internal, permalink.
     self.assertEqual(links.pop()['href'], "http://google.com/")
     self.assertEqual(links.pop()['href'], base_path + '/A_older.html')
     self.assertEqual(links.pop()['href'], entries[0].link + '#the-latest-blog-post')
 
     # The index page must list the posts newest-first in the latest-wrapper.
     index_path  = os.path.join(app.outdir, 'index.html')
     soup = BeautifulSoup(open(index_path).read())
     latest_tree = soup.find('div', 'latest-wrapper')
     latest_items = latest_tree.findAll('li')
     actual_links = [entry.contents[0]['href'] for entry in latest_items]
     ideal_links = [
         u'B_latest.html',
         u'A_older.html',
         u'C_most_aged.html',
     ]
     
     self.assertListEqual(actual_links, ideal_links)
     
     # Tear down both test applications (removes temporary build dirs).
     app.cleanup()
     app2.cleanup()
开发者ID:abeaumont,项目名称:sphinx-extensions,代码行数:71,代码来源:test_feed.py


注:本文中的util.TestApp.build方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。