本文整理汇总了C#中DotNetWikiBot.Page.ParsePageXml方法的典型用法代码示例。如果您正苦于以下问题:C# Page.ParsePageXml方法的具体用法?C# Page.ParsePageXml怎么用?C# Page.ParsePageXml使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 DotNetWikiBot.Page 的用法示例。
在下文中一共展示了Page.ParsePageXml方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: LoadWithMetadata
/// <summary>Loads texts and metadata (revision ID, timestamp, last comment,
/// last contributor, minor edit mark) for pages in this PageList.
/// Non-existent pages will be automatically removed from the PageList.
/// Please, don't use this function when going to edit big amount of pages on
/// popular public wikis, as it compromises edit conflict detection. In that case,
/// each page's text should be loaded individually right before its processing
/// and saving.</summary>
/// <exception cref="WikiBotException">Thrown when this PageList is empty.</exception>
public void LoadWithMetadata()
{
	if (IsEmpty())
		throw new WikiBotException(Bot.Msg("The PageList is empty. Nothing to load."));
	Console.WriteLine(Bot.Msg("Loading {0} pages..."), pages.Count);
	// Fetch all pages in one batch via Special:Export (current revisions only,
	// one URL-encoded title per line as the export form expects).
	string res = site.indexPath + "?title=Special:Export&action=submit";
	string postData = "curonly=True&pages=";
	foreach (Page page in pages)
		postData += HttpUtility.UrlEncode(page.title) + "\r\n";
	string src = site.PostDataAndGetResult(res, postData);
	// Primary parsing path: stream through the export XML with XmlReader.
	// "using" guarantees disposal even if ParsePageXml throws.
	PageList pl = new PageList(site);
	using (XmlReader reader = XmlReader.Create(new StringReader(src))) {
		while (reader.ReadToFollowing("page")) {
			Page p = new Page(site);
			p.ParsePageXml(reader.ReadOuterXml());
			pl.Add(p);
		}
	}
	// BUGFIX: the success test must inspect the freshly parsed list (pl.pages),
	// not this PageList's own "pages" field. The original checked
	// "pages.Count > 0", which is always true here (IsEmpty() was verified
	// above), so the XPath fallback below was unreachable dead code.
	if (pl.pages.Count > 0) {
		Clear();
		pages = pl.pages;
		return;
	}
	else { // FALLBACK, use alternative parsing way, XPath
		Console.WriteLine(
			Bot.Msg("XML parsing failed, switching to alternative parser..."));
		src = Bot.RemoveXMLRootAttributes(src);
		StringReader strReader = new StringReader(src);
		XPathDocument doc = new XPathDocument(strReader);
		strReader.Close();
		XPathNavigator nav = doc.CreateNavigator();
		foreach (Page page in pages) {
			if (page.title.Contains("'")) { // There's no good way to escape "'" in XPath
				page.LoadWithMetadata();    // so load this single page individually
				continue;
			}
			string query = "//page[title='" + page.title + "']/";
			try {
				page.text =
					nav.SelectSingleNode(query + "revision/text").InnerXml;
			}
			catch (System.NullReferenceException) {
				continue;   // page not present in the export — skip it
			}
			page.text = HttpUtility.HtmlDecode(page.text);
			page.pageId = nav.SelectSingleNode(query + "id").InnerXml;
			// Registered editors have username/id; anonymous editors only an IP.
			try {
				page.lastUser = nav.SelectSingleNode(query +
					"revision/contributor/username").InnerXml;
				page.lastUserId = nav.SelectSingleNode(query +
					"revision/contributor/id").InnerXml;
			}
			catch (System.NullReferenceException) {
				page.lastUser = nav.SelectSingleNode(query +
					"revision/contributor/ip").InnerXml;
			}
			page.lastUser = HttpUtility.HtmlDecode(page.lastUser);
			page.revision = nav.SelectSingleNode(query + "revision/id").InnerXml;
			// The <minor/> element is present only on minor edits.
			page.lastMinorEdit = nav.SelectSingleNode(query +
				"revision/minor") != null;
			// The edit comment is optional — absence is not an error.
			try {
				page.comment = nav.SelectSingleNode(query + "revision/comment").InnerXml;
				page.comment = HttpUtility.HtmlDecode(page.comment);
			}
			catch (System.NullReferenceException) {;}
			page.timestamp =
				nav.SelectSingleNode(query + "revision/timestamp").ValueAsDateTime;
		}
		if (string.IsNullOrEmpty(pages[0].text)) { // FALLBACK 2, load pages one-by-one
			foreach (Page page in pages)
				page.LoadWithMetadata();
		}
	}
}
示例2: FillAndLoadFromXmlDump
/// <summary>Gets page titles and page text from local XML dump.
/// This function consumes much resources.</summary>
/// <param name="filePathName">The path to and name of the XML dump file as string.</param>
public void FillAndLoadFromXmlDump(string filePathName)
{
	Console.WriteLine(Bot.Msg("Loading pages from XML dump..."));
	// "using" guarantees the reader (and the underlying file handle) is
	// released even if ParsePageXml throws mid-dump. The original only
	// called Close() after the loop, leaking the handle on exception.
	using (XmlReader reader = XmlReader.Create(filePathName)) {
		// ReadToFollowing skips ahead to each <page> element; ReadOuterXml
		// hands the whole element subtree to the page parser and advances
		// the reader past it.
		while (reader.ReadToFollowing("page")) {
			Page p = new Page(site);
			p.ParsePageXml(reader.ReadOuterXml());
			pages.Add(p);
		}
	}
	Console.WriteLine(Bot.Msg("XML dump loaded successfully."));
}