本文整理汇总了Java中org.htmlparser.NodeFilter类的典型用法代码示例。如果您正苦于以下问题:Java NodeFilter类的具体用法?Java NodeFilter怎么用?Java NodeFilter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
NodeFilter类属于org.htmlparser包,在下文中一共展示了NodeFilter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: parserUrl
import org.htmlparser.NodeFilter; //导入依赖的package包/类
@Override
public NodeList parserUrl(Parser parser) {
    // Matches nodes whose raw tag text starts with "a href=", i.e. anchor
    // tags whose href is the FIRST attribute. NOTE(review): anchors written
    // as <a class=... href=...> would be missed -- confirm acceptable.
    NodeFilter hrefNodeFilter = new NodeFilter() {
        @Override
        public boolean accept(Node node) {
            // Return the condition directly instead of if/else with literals.
            return node.getText().startsWith("a href=");
        }
    };
    try {
        return parser.extractAllNodesThatMatch(hrefNodeFilter);
    } catch (ParserException e) {
        // Best-effort: report and fall through so callers can null-check.
        e.printStackTrace();
    }
    return null;
}
示例2: parseMessage
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Parses the body of an HTTP message as HTML and returns a parsed
 * representation (the full node tree).
 * See <a href="http://htmlparser.sourceforge.net/">the HTML Parser project</a> for details.
 * @param url the url that the message resulted from (not used by the parse itself)
 * @param message the Message to parse
 * @return a NodeList containing the various Nodes making up the page, or
 *         {@code null} if the content is absent, empty, not text/html, or
 *         fails to parse
 */
public Object parseMessage(HttpUrl url, Message message) {
    String contentType = message.getHeader("Content-Type");
    // Only attempt to parse HTML responses (e.g. "text/html; charset=...").
    if (contentType == null || !contentType.matches("text/html.*")) {
        return null;
    }
    byte[] content = message.getContent();
    if (content == null || content.length == 0) {
        return null;
    }
    // NOTE(review): new String(content) decodes with the platform default
    // charset and ignores any charset in the Content-Type header -- confirm
    // this is acceptable for non-ASCII pages.
    Parser parser = Parser.createParser(new String(content), null);
    try {
        // Accept-everything filter: collects every node in the document.
        NodeList nodelist = parser.extractAllNodesThatMatch(new NodeFilter() {
            public boolean accept(Node node) {
                return true;
            }
        });
        return nodelist;
    } catch (ParserException pe) {
        _logger.severe(pe.toString());
        return null;
    }
}
示例3: getGangliaAttribute
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Fetches the Ganglia page for the given cluster and returns the metric
 * names listed in the {@code <select id="metrics-picker">} element.
 *
 * @param clusterName cluster name substituted into the metric URL template
 * @return the option texts of every metrics-picker entry, in page order
 */
public List<String> getGangliaAttribute(String clusterName)
throws ParserException, MalformedURLException, IOException {
    String pageUrl = gangliaMetricUrl.replaceAll(clusterPattern, clusterName);
    Parser pageParser = new Parser(new URL(pageUrl).openConnection());
    // Match the <select> element carrying id="metrics-picker".
    NodeFilter pickerFilter = new AndFilter(new TagNameFilter("select"),
            new HasAttributeFilter("id", "metrics-picker"));
    List<String> metrics = new ArrayList<String>();
    SimpleNodeIterator selects =
            pageParser.extractAllNodesThatMatch(pickerFilter).elements();
    while (selects.hasMoreNodes()) {
        // Each child of the <select> is an <option> naming one metric.
        SimpleNodeIterator options = selects.nextNode().getChildren().elements();
        while (options.hasMoreNodes()) {
            metrics.add(((OptionTag) options.nextNode()).getOptionText());
        }
    }
    return metrics;
}
示例4: main
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Ad-hoc driver: fetches a hard-coded Ganglia cluster page and prints the
 * metric names found in its metrics-picker dropdown.
 */
public static void main(String[] args) throws Exception {
    URL page = new URL("http://10.8.75.3/ganglia/?r=hour&cs=&ce=&s=by+name&c=Zookeeper_Cluster&tab=m&vn=&hide-hf=false");
    Parser pageParser = new Parser(page.openConnection());
    // Locate the <select id="metrics-picker"> element.
    NodeFilter pickerFilter = new AndFilter(new TagNameFilter("select"),
            new HasAttributeFilter("id", "metrics-picker"));
    SimpleNodeIterator selects =
            pageParser.extractAllNodesThatMatch(pickerFilter).elements();
    while (selects.hasMoreNodes()) {
        // Print every <option>'s text, one metric name per line.
        SimpleNodeIterator options = selects.nextNode().getChildren().elements();
        while (options.hasMoreNodes()) {
            System.out.println(((OptionTag) options.nextNode()).getOptionText());
        }
    }
}
示例5: parsePageInfo
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/***
 * Parses the total page count for a residential-complex listing page.
 * The count is read from the element with class "pagenumber", whose text
 * looks like "current/total"; the part after '/' is the total.
 *
 * @param url listing page URL
 * @return the total number of pages, or 0 if it cannot be determined
 * @throws IOException
 * @throws ParserException
 */
private int parsePageInfo(final String url) throws IOException, ParserException {
    Parser parser = new Parser(CommonHttpURLConnection.getURLConnection(url));
    NodeFilter nodeFilter = new HasAttributeFilter("class", "pagenumber");
    NodeList nodeList = parser.extractAllNodesThatMatch(nodeFilter);
    for (Node node : nodeList.toNodeArray()) {
        if (!(node instanceof Div)) {
            continue;
        }
        // NOTE(review): elementAt(1) assumes a fixed child layout of the
        // pagenumber div -- confirm against the live page structure.
        for (Node innerNode : node.getChildren().elementAt(1).getChildren().toNodeArray()) {
            if (!(innerNode instanceof TextNode)) {
                continue;
            }
            String pageStr = innerNode.toPlainTextString();
            if (!pageStr.contains("/")) {
                continue;
            }
            pageStr = pageStr.substring(pageStr.indexOf("/") + 1);
            try {
                return Integer.parseInt(pageStr);
            } catch (NumberFormatException ignored) {
                // Not a "current/total" marker after all; keep scanning.
            }
        }
    }
    return 0;
}
示例6: parseSpan
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/***
 * Decodes the obfuscated price inside the given subtree: every
 * {@code <span class="...">} maps to a digit/character via
 * MappingSet.NUMBER_MAPPING; the pieces are concatenated and all
 * whitespace is removed.
 *
 * @param node subtree containing the price spans
 * @return the decoded price string
 */
private String parseSpan(Node node) {
    NodeList spans = new NodeList();
    node.collectInto(spans, new TagNameFilter("span"));
    StringBuilder price = new StringBuilder();
    for (Node candidate : spans.toNodeArray()) {
        if (!(candidate instanceof Span)) {
            continue;
        }
        // The span's class attribute is the lookup key for the digit.
        String key = ((Span) candidate).getAttribute("class");
        price.append(MappingSet.NUMBER_MAPPING.get(key));
    }
    return CharMatcher.WHITESPACE.removeFrom(price.toString());
}
示例7: run
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/***
 * Crawls the most recent pre-sale permit records from the given listing
 * page and stores any permits not seen before.
 * @param url listing page whose table carries class "sale1"
 * @throws InterruptedException
 * @throws IOException
 * @throws ParserException
 */
public void run(String url) throws InterruptedException, IOException, ParserException {
    URLConnection urlConnection = CommonHttpURLConnection.getURLConnection(url);
    Parser parser = new Parser(urlConnection);
    // The permit table is the element with class "sale1".
    NodeFilter nodeFilter = new HasAttributeFilter("class", "sale1");
    NodeList nodeList = parser.extractAllNodesThatMatch(nodeFilter);
    if (nodeList.toNodeArray().length > 0) {
        Node[] sellCreditNodeArray = nodeList.elementAt(0).getChildren().toNodeArray();
        // Children before index 2 are skipped -- presumably header rows.
        for (int i = 2; i < sellCreditNodeArray.length; i++) {
            if (sellCreditNodeArray[i] instanceof TableRow) {
                SellCreditInfo sellCreditInfo = parseSellParser(sellCreditNodeArray[i]);
                log.info("get sell credit info:{}", sellCreditInfo);
                // Has this pre-sale permit already been crawled?
                HouseInfo houseInfo = dataOP.getHouseInfoByDepartmentNameAndSellCredit(sellCreditInfo);
                if(houseInfo != null){
                    log.info("already parsing sell credit:{}",sellCreditInfo);
                    // NOTE(review): break (not continue) stops at the first
                    // already-seen permit -- relies on rows being newest-first.
                    break;
                }
                dataOP.insertSellCreditInfo(sellCreditInfo);
                // NOTE(review): the first data row (i==2) is inserted above but
                // its house details are skipped -- confirm this is intentional.
                if(i==2) continue;
                parseHouseInfo(sellCreditInfo);
            }
        }
    }
}
示例8: parseSpan
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Decodes the obfuscated price inside the given subtree: every
 * {@code <span class="...">} maps to a character via
 * MappingSet.NUMBER_MAPPING, concatenated in document order.
 * (Unlike the whitespace-stripping variant, the result is returned as-is.)
 *
 * @param node subtree containing the price spans
 * @return the concatenated decoded string
 */
private String parseSpan(Node node) {
    NodeList spans = new NodeList();
    node.collectInto(spans, new TagNameFilter("span"));
    StringBuilder decoded = new StringBuilder();
    for (Node candidate : spans.toNodeArray()) {
        if (!(candidate instanceof Span)) {
            continue;
        }
        // The span's class attribute is the lookup key for the character.
        String key = ((Span) candidate).getAttribute("class");
        decoded.append(MappingSet.NUMBER_MAPPING.get(key));
    }
    return decoded.toString();
}
示例9: parseDailyBriefInfo
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Scrapes the daily deal summary table (element id "myCont5") from the
 * tmsf.com front page, persists each row, and returns the parsed rows.
 *
 * @return parsed daily brief rows; an empty list when the table is absent
 * @throws IOException
 * @throws ParserException
 */
public List<DailyBriefInfo> parseDailyBriefInfo() throws IOException, ParserException {
    Parser parser = new Parser(CommonHttpURLConnection.getURLConnection("http://www.tmsf.com/index.jsp"));
    NodeFilter nodeFilter = new HasAttributeFilter("id", "myCont5");
    NodeList nodeList = parser.extractAllNodesThatMatch(nodeFilter);
    if (nodeList.toNodeArray().length == 0) {
        // Typed empty list instead of the raw-typed Collections.EMPTY_LIST.
        return Collections.emptyList();
    }
    List<DailyBriefInfo> dailyBriefInfoList = new ArrayList<>();
    // Hours since 1970-01-01 00:00:00 UTC.
    int parseHour = (int) (Clock.systemUTC().millis() / (1000 * 3600));
    // Days since 1970-01-01 00:00:00 UTC (the cast in the original was a no-op).
    int parseDay = parseHour / 24;
    // NOTE(review): the elementAt chains below assume a fixed page layout
    // (rows at odd indices 5..13, cells at 1/3/5/7) -- verify against the
    // live page before changing.
    NodeList infoNodeList = nodeList.elementAt(0).getChildren().elementAt(1)
    .getChildren().elementAt(1).getChildren();
    for (int i = 5; i <= 13; i = i + 2) {
        DailyBriefInfo dailyBriefInfo = new DailyBriefInfo(CharMatcher.WHITESPACE.trimFrom(infoNodeList.elementAt(i).getChildren().elementAt(1).toPlainTextString()),
        Integer.parseInt(CharMatcher.WHITESPACE.trimFrom(infoNodeList.elementAt(i).getChildren().elementAt(3).toPlainTextString())),
        Integer.parseInt(CharMatcher.WHITESPACE.trimFrom(infoNodeList.elementAt(i).getChildren().elementAt(5).toPlainTextString())),
        Integer.parseInt(CharMatcher.WHITESPACE.trimFrom(infoNodeList.elementAt(i).getChildren().elementAt(7).toPlainTextString())),
        parseDay,parseHour);
        dailyBriefInfoList.add(dailyBriefInfo);
        dataOP.insertBriefDealInfo(dailyBriefInfo);
        ESOP.writeToES("log/daily_brief_info_es", JSONObject.toJSONString(dailyBriefInfo));
    }
    return dailyBriefInfoList;
}
示例10: parsePageInfo
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Parses the page count for the current building's listing.
 * The count is read from a {@code <span>} inside the element with class
 * "spagenext", whose text contains "current/total 总数 ...".
 *
 * @param url listing page URL
 * @param departmentInfo building the listing belongs to (used for logging only)
 * @return the total page count, or 0 when it cannot be determined
 * @throws ParserException
 * @throws IOException
 */
public int parsePageInfo(String url, DepartmentInfo departmentInfo) throws ParserException, IOException {
    Parser parser = new Parser(CommonHttpURLConnection.getURLConnection(url));
    int page = 0;
    // Locate the pagination container.
    NodeFilter nodeFilter = new HasAttributeFilter("class", "spagenext");
    NodeList nodeList = parser.extractAllNodesThatMatch(nodeFilter);
    if (nodeList.size() == 0) {
        return page;
    }
    for (Node pageNode : nodeList.elementAt(0).getChildren().toNodeArray()) {
        if (pageNode instanceof Span) {
            try {
                // Text shaped like "x/y 总数 ..."; take the part between '/'
                // and "总数" as the total page count.
                String tmp = pageNode.toPlainTextString();
                page = Integer.parseInt(tmp.substring(tmp.indexOf("/") + 1, tmp.indexOf("总数") - 1).trim());
                break;
            } catch (Exception ignored) {
                // Span without the expected "x/y 总数" shape (substring or
                // parse fails); try the next span.
            }
        }
    }
    log.info("get total page [{}] for department:[{}]", page, departmentInfo.toString());
    return page;
}
示例11: readTextAndLinkAndTitle
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Reads plain text, link URLs and titles from an HTML page and prints each
 * non-blank value on its own line.
 * @param result the page content
 * @throws Exception
 */
public static void readTextAndLinkAndTitle(String result) throws Exception {
    Parser htmlParser = Parser.createParser(result, "utf8");
    // Match text nodes, <a> tags and <title> tags in a single pass.
    OrFilter combined = new OrFilter();
    combined.setPredicates(new NodeFilter[] {
            new NodeClassFilter(TextNode.class),
            new NodeClassFilter(LinkTag.class),
            new NodeClassFilter(TitleTag.class) });
    NodeList matched = htmlParser.parse(combined);
    String line = "";
    for (Node node : matched.toNodeArray()) {
        if (node instanceof TextNode) {
            line = ((TextNode) node).getText();
        } else if (node instanceof LinkTag) {
            line = ((LinkTag) node).getLink();
        } else if (node instanceof TitleTag) {
            line = ((TitleTag) node).getTitle();
        }
        // Skip values that are empty after trimming.
        if (isTrimEmpty(line))
            continue;
        System.out.println(line);
    }
}
示例12: extractTextByTextNode
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Extracts readable text from an HTML document: collects every TextNode and
 * keeps those judged informative by isInformativeStricter.
 *
 * @param content raw HTML; may be null
 * @return extracted text blocks (one list element per paragraph); empty list
 *         when content is null or parsing fails
 */
public static List<String> extractTextByTextNode(String content){
    List<String> doc=new ArrayList<String>();//each element is one paragraph
    if (content == null) {
        return doc;
    }
    try{
        Parser parser = Parser.createParser(content, "utf8");
        NodeFilter textFilter = new NodeClassFilter(TextNode.class);
        NodeList nodelist=parser.extractAllNodesThatMatch(textFilter);
        // Accumulates per-parent weights across calls to isInformativeStricter.
        HashMap<String,Integer> parentWeight=new HashMap<String,Integer>();
        for (int i = 0; i < nodelist.size(); i++) {
            Node textnode = (Node) nodelist.elementAt(i);
            // NOTE(review): non-empty nodes are debug-logged here and again
            // below when accepted -- looks like leftover debugging output.
            if(textnode.toPlainTextString().trim().length()>0)
            log.debug(i+": "+" content: "+textnode.toPlainTextString());
            if(isInformativeStricter(textnode,parentWeight)){
                log.debug(i+": "+" content: "+textnode.toPlainTextString());
                doc.add(textnode.toPlainTextString());
            }
        }
    }catch(Exception e){
        e.printStackTrace();
        log.error("Text extractor has encountered a problem!! "+e.getMessage());
    }
    return doc;
}
示例13: filterSelectNode
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Parses the response body (as ISO-8859-1) and returns every node whose raw
 * tag text begins with "select".
 *
 * @param responseBody HTML to scan
 * @return the matching nodes
 * @throws ParserException on parse failure
 */
private NodeList filterSelectNode(String responseBody) throws ParserException {
    Parser parser = Parser.createParser(responseBody, HTTP.ISO_8859_1);
    return parser.extractAllNodesThatMatch(new NodeFilter() {
        @Override
        public boolean accept(Node node) {
            // Condition returned directly; equivalent to the if/else form.
            return node.getText().startsWith("select");
        }
    });
}
示例14: filterTable
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Parses the response body (as ISO-8859-1) and returns every node whose raw
 * tag text begins with TABLE or H3, compared case-insensitively.
 *
 * @param responseBody HTML to scan
 * @return the matching nodes
 * @throws ParserException on parse failure
 */
private NodeList filterTable(String responseBody) throws ParserException {
    Parser parser = Parser.createParser(responseBody, HTTP.ISO_8859_1);
    return parser.extractAllNodesThatMatch(new NodeFilter() {
        @Override
        public boolean accept(Node node) {
            // Upper-case once instead of per comparison.
            String tagText = node.getText().toUpperCase();
            return tagText.startsWith("TABLE") || tagText.startsWith("H3");
        }
    });
}
示例15: extracLinks
import org.htmlparser.NodeFilter; //导入依赖的package包/类
/**
 * Collects the outgoing links of a page: the href of every {@code <a>} tag
 * plus the src of every {@code <frame>} tag.
 * @param url page to fetch and scan
 * @return the set of discovered link URLs (possibly empty on parse failure)
 */
public static Set<String> extracLinks(String url) {
    Set<String> links = new HashSet<String>();
    try {
        Parser parser = new Parser(url);
        parser.setEncoding("utf-8");
        // Filter for <frame> tags, used to pull the src attribute's link.
        // NOTE(review): matches only when src is the first attribute
        // ("frame src=..."); other attribute orders are missed -- confirm.
        @SuppressWarnings("serial")
        NodeFilter frameFilter = new NodeFilter() {
            public boolean accept(Node node) {
                if (node.getText().startsWith("frame src=")) {
                    return true;
                } else {
                    return false;
                }
            }
        };
        // OrFilter combining <a> tags with the <frame> filter above.
        OrFilter linkFilter = new OrFilter(new NodeClassFilter(LinkTag.class), frameFilter);
        // All tags passing either filter.
        NodeList list = parser.extractAllNodesThatMatch(linkFilter);
        for (int i = 0; i < list.size(); i++) {
            Node tag = list.elementAt(i);
            if (tag instanceof LinkTag) {
                // <a> tag: take its resolved link.
                LinkTag link = (LinkTag) tag;
                String linkUrl = link.getLink();
                links.add(linkUrl);
            } else {
                // Extract the src link from a frame, e.g. <frame src="test.html"/>.
                // NOTE(review): the index math below (start at src=, skip 5
                // chars, stop at space-or-'>' minus 1) assumes a
                // double-quoted value; unquoted or single-quoted src values
                // would be truncated incorrectly -- verify inputs.
                String frame = tag.getText();
                int start = frame.indexOf("src=");
                frame = frame.substring(start);
                int end = frame.indexOf(" ");
                if (end == -1) {
                    end = frame.indexOf(">");
                }
                String frameUrl = frame.substring(5, end - 1);
                links.add(frameUrl);
            }
        }
    } catch (ParserException e) {
        logger.error("", e);
    }
    return links;
}