This article collects typical usage examples of the C# method TidyNet.Lexer.UngetToken. If you are wondering what Lexer.UngetToken does, how to call it, or what it looks like in real code, the examples selected here may help. You may also want to look at other usage examples for the containing class, TidyNet.Lexer.
Two code examples of Lexer.UngetToken are shown below.
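Both examples rely on the same one-token look-ahead pattern: GetToken reads the next token from the input, and UngetToken pushes the most recently read token back so that the next GetToken call returns it again. The sketch below is not TidyNet code; it is a minimal, hypothetical lexer (the Token, MiniLexer, and Demo types are invented purely for illustration) showing how such a single-slot push-back is commonly implemented.

using System;
using System.Collections.Generic;

// Hypothetical stand-in types -- NOT the TidyNet API -- used only to
// illustrate the GetToken/UngetToken push-back pattern.
class Token
{
    public string Text;
    public Token(string text) { Text = text; }
}

class MiniLexer
{
    private readonly Queue<Token> _input;
    private Token _pushedBack;   // single-slot push-back buffer
    private Token _lastToken;    // most recently returned token

    public MiniLexer(IEnumerable<string> words)
    {
        _input = new Queue<Token>();
        foreach (var w in words) _input.Enqueue(new Token(w));
    }

    // Returns the pushed-back token if there is one, otherwise the next input token.
    public Token GetToken()
    {
        if (_pushedBack != null)
        {
            _lastToken = _pushedBack;
            _pushedBack = null;
            return _lastToken;
        }
        _lastToken = _input.Count > 0 ? _input.Dequeue() : null;
        return _lastToken;
    }

    // Makes the most recently read token available to the next GetToken call.
    public void UngetToken()
    {
        _pushedBack = _lastToken;
    }
}

class Demo
{
    static void Main()
    {
        var lexer = new MiniLexer(new[] { "<head>", "text", "</head>" });

        Token t = lexer.GetToken();               // "<head>"
        t = lexer.GetToken();                     // "text" -- not wanted here,
        lexer.UngetToken();                       // so push it back for another parser
        Console.WriteLine(lexer.GetToken().Text); // prints "text" again
    }
}

A real HTML lexer pushes back full parse-tree nodes rather than strings, but the buffering idea is the same.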
Example 1: Parse
public virtual void Parse(Lexer lexer, Node head, short mode)
{
    Node node;
    int HasTitle = 0;
    int HasBase = 0;
    TagTable tt = lexer.Options.tt;

    while (true)
    {
        node = lexer.GetToken(Lexer.IgnoreWhitespace);
        if (node == null)
        {
            break;
        }
        if (node.Tag == head.Tag && node.Type == Node.EndTag)
        {
            head.Closed = true;
            break;
        }
        if (node.Type == Node.TextNode)
        {
            lexer.UngetToken();
            break;
        }
        /* deal with comments etc. */
        if (Node.InsertMisc(head, node))
        {
            continue;
        }
        if (node.Type == Node.DocTypeTag)
        {
            Node.InsertDocType(lexer, head, node);
            continue;
        }
        /* discard unknown tags */
        if (node.Tag == null)
        {
            Report.Warning(lexer, head, node, Report.DISCARDING_UNEXPECTED);
            continue;
        }
        if (!((node.Tag.Model & ContentModel.Head) != 0))
        {
            lexer.UngetToken();
            break;
        }
        if (node.Type == Node.StartTag || node.Type == Node.StartEndTag)
        {
            if (node.Tag == tt.TagTitle)
            {
                ++HasTitle;
                if (HasTitle > 1)
                {
                    Report.Warning(lexer, head, node, Report.TOO_MANY_ELEMENTS);
                }
            }
            else if (node.Tag == tt.TagBase)
            {
                ++HasBase;
                if (HasBase > 1)
                {
                    Report.Warning(lexer, head, node, Report.TOO_MANY_ELEMENTS);
                }
            }
            else if (node.Tag == tt.TagNoscript)
            {
                Report.Warning(lexer, head, node, Report.TAG_NOT_ALLOWED_IN);
            }
            Node.InsertNodeAtEnd(head, node);
            TidyNet.ParserImpl.parseTag(lexer, node, Lexer.IgnoreWhitespace);
            continue;
        }
        /* discard unexpected text nodes and end tags */
        Report.Warning(lexer, head, node, Report.DISCARDING_UNEXPECTED);
    }

    if (HasTitle == 0)
    {
        Report.Warning(lexer, head, null, Report.MISSING_TITLE_ELEMENT);
        Node.InsertNodeAtEnd(head, lexer.InferredTag("title"));
    }
}
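In this head-parsing routine, UngetToken is called at two points: when a text node is encountered, and when a tag whose content model does not include ContentModel.Head is encountered. In both cases the token is pushed back and the loop exits with a break, so the enclosing parser re-reads exactly the same token and can handle it outside the head element.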
Example 2: parseDocument
/*
    HTML is the top level element
*/
public static Node parseDocument(Lexer lexer)
{
    Node node, document, html;
    Node doctype = null;
    TagTable tt = lexer.Options.tt;

    document = lexer.NewNode();
    document.Type = Node.RootNode;

    while (true)
    {
        node = lexer.GetToken(Lexer.IgnoreWhitespace);
        if (node == null)
        {
            break;
        }
        /* deal with comments etc. */
        if (Node.InsertMisc(document, node))
        {
            continue;
        }
        if (node.Type == Node.DocTypeTag)
        {
            if (doctype == null)
            {
                Node.InsertNodeAtEnd(document, node);
                doctype = node;
            }
            else
            {
                Report.Warning(lexer, document, node, Report.DISCARDING_UNEXPECTED);
            }
            continue;
        }
        if (node.Type == Node.EndTag)
        {
            Report.Warning(lexer, document, node, Report.DISCARDING_UNEXPECTED); //TODO?
            continue;
        }
        if (node.Type != Node.StartTag || node.Tag != tt.TagHtml)
        {
            lexer.UngetToken();
            html = lexer.InferredTag("html");
        }
        else
        {
            html = node;
        }
        Node.InsertNodeAtEnd(document, html);
        ParseHTML.Parse(lexer, html, (short) 0); // TODO?
        break;
    }

    return document;
}
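Here UngetToken supports inferring a missing html element: when the first meaningful token is not an html start tag, the token is pushed back, an html node is inferred via lexer.InferredTag("html") and appended to the document, and the subsequent call to ParseHTML.Parse re-reads the pushed-back token as content of the inferred element.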