本文整理汇总了C#中Page.RunFilters方法的典型用法代码示例。如果您正苦于以下问题:C# Page.RunFilters方法的具体用法?C# Page.RunFilters怎么用?C# Page.RunFilters使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Page
的用法示例。
在下文中一共展示了Page.RunFilters方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Crawl
/// <summary>
/// Crawls the site starting with <paramref name="SeedUrl"/> and invokes the
/// callback with the populated index when done.
/// </summary>
/// <param name="SeedUrl">Absolute URL of the page the crawl starts from.</param>
/// <param name="CrawlDoneCallBack">
/// Async callback invoked with the index once crawling finishes; may be null.
/// </param>
public async Task Crawl(String SeedUrl, FinishedCrawlAsync CrawlDoneCallBack)
{
    // 1) Fetch the HTML of the seed page.
    String html = await CrawlPage(SeedUrl);

    // 2) Parse URLs out of the seed page; skip link discovery entirely
    //    when UrlDiscovery is disabled in configuration.
    if (MvcIndexer.Configuration.UrlDiscovery)
    {
        List<String> urls = LinkParser.ParseLinks(html, SeedUrl);
        foreach (String url in urls)
            index.AddLink(url);
    }
    index.AddLinks(Indexable.GetIndexable());

    #region add the seed page
    Page p = new Page(SeedUrl, html);
    await p.RunFilters(MvcIndexer.Configuration.Filters);
    p.StripHtml();
    index.AddLink(new Link()
    {
        Crawled = true,
        Page = p
    });
    #endregion

    // 3) Cycle through uncrawled links until the index produces no new
    //    links for several consecutive passes.
    IEnumerable<Link> links = index.GetUncrawledLinks();
    Int32 blankcounter = 0;
    while (blankcounter < 5)
    {
        foreach (Link link in links)
        {
            await CrawlPageAsync(link.Url);
        }
        links = index.GetUncrawledLinks();
        if (links.Any())
        {
            // Fresh links appeared — reset the blank-pass counter and keep crawling.
            blankcounter = 0;
        }
        else
        {
            // BUG FIX: the original incremented the counter (and slept) when links
            // WERE found, which aborted an active crawl after 5 batches and
            // busy-spun forever once the queue emptied. Count *blank* passes
            // instead, pausing to give the index a chance to repopulate.
            blankcounter++;
            await Task.Delay(10000); // non-blocking pause; Thread.Sleep would block a pool thread in an async method
        }
    }

    // 4) Notify the caller that the crawl is complete, handing over the index.
    if (CrawlDoneCallBack != null)
        await CrawlDoneCallBack(index);
}