This article collects typical usage examples of the Java method org.apache.hadoop.util.StringUtils.unEscapeString. If you are wondering what StringUtils.unEscapeString does, how to call it, or what it looks like in real code, the curated samples below may help. You can also read further about the enclosing class, org.apache.hadoop.util.StringUtils.
Five code examples of StringUtils.unEscapeString are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
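Before the examples, a minimal standalone sketch may help: unEscapeString reverses StringUtils.escapeString, which protects commas (Hadoop's default list separator) with a backslash. The sample path below is made up for illustration.

import org.apache.hadoop.util.StringUtils;

public class UnEscapeSketch {
  public static void main(String[] args) {
    // An illustrative path that happens to contain a comma.
    String raw = "/data/2020,backup/part-00000";
    // escapeString protects the comma so the value can live inside a
    // comma-separated configuration entry; unEscapeString restores it.
    String escaped = StringUtils.escapeString(raw);    // "/data/2020\,backup/part-00000"
    String restored = StringUtils.unEscapeString(escaped);
    System.out.println(escaped);
    System.out.println(restored.equals(raw));          // true
  }
}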
Example 1: getInputPaths
import org.apache.hadoop.util.StringUtils; // import the package/class the method depends on
public static Path[] getInputPaths(JobContext context) {
  String dirs = context.getConfiguration().get(INPUT_DIR, "");
  LOG.info("dirs=" + dirs);
  // The configured value is a comma-separated list; split it and unescape
  // each entry, since commas inside a path are stored escaped.
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
Example 2: getInputPaths
import org.apache.hadoop.util.StringUtils; // import the package/class the method depends on
/**
 * Get the list of input {@link Path}s for the map-reduce job.
 *
 * @param conf The configuration of the job
 * @return the list of input {@link Path}s for the map-reduce job.
 */
public static Path[] getInputPaths(JobConf conf) {
  String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
      FileInputFormat.INPUT_DIR, "");
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
Example 3: getInputPaths
import org.apache.hadoop.util.StringUtils; // import the package/class the method depends on
/**
 * Get the list of input {@link Path}s for the map-reduce job.
 *
 * @param context The job
 * @return the list of input {@link Path}s for the map-reduce job.
 */
public static Path[] getInputPaths(JobContext context) {
  String dirs = context.getConfiguration().get(INPUT_DIR, "");
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
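The three getInputPaths variants above all follow the same pattern: the configured value is a comma-separated list in which commas inside an individual path were escaped when the paths were set (much like FileInputFormat.setInputPaths does). A self-contained sketch of that round trip, using hypothetical directory names:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;

public class InputPathsRoundTrip {
  public static void main(String[] args) {
    // Hypothetical input directories, one of which contains a comma.
    String[] inputDirs = { "/user/alice/logs", "/user/alice/2020,archive" };

    // Writing side: escape each path, then join with commas into a single
    // configuration-style value.
    StringBuilder joined = new StringBuilder();
    for (int i = 0; i < inputDirs.length; i++) {
      if (i > 0) {
        joined.append(StringUtils.COMMA_STR);
      }
      joined.append(StringUtils.escapeString(inputDirs[i]));
    }

    // Reading side: the same split + unEscapeString sequence used above.
    String[] list = StringUtils.split(joined.toString());
    Path[] result = new Path[list.length];
    for (int i = 0; i < list.length; i++) {
      result[i] = new Path(StringUtils.unEscapeString(list[i]));
    }
    for (Path p : result) {
      System.out.println(p);   // prints the two original directories
    }
  }
}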
Example 4: listStatus0
import org.apache.hadoop.util.StringUtils; // import the package/class the method depends on
protected List<FileStatus> listStatus0(Configuration conf) throws IOException {
  List<FileStatus> result = new ArrayList<FileStatus>();
  Path[] dirs = new Path[1];
  dirs[0] = new Path(StringUtils.unEscapeString(conf.get(INPUT_DIR, "")));
  if (dirs.length == 0) {
    throw new IOException("No input paths specified in job");
  }
  // get tokens for all the required FileSystems..
  // TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs,
  //     job.getConfiguration());
  // Whether we need to look recursively into the directory structure
  boolean recursive = conf.getBoolean(INPUT_DIR_RECURSIVE, false);
  List<IOException> errors = new ArrayList<IOException>();
  // creates a MultiPathFilter with the hiddenFileFilter and the
  // user provided one (if any).
  List<PathFilter> filters = new ArrayList<PathFilter>();
  PathFilter inputFilter = new MultiPathFilter(filters);
  for (int i = 0; i < dirs.length; ++i) {
    Path p = dirs[i];
    FileSystem fs = p.getFileSystem(conf);
    FileStatus[] matches = fs.globStatus(p, inputFilter);
    if (matches == null) {
      errors.add(new IOException("Input path does not exist: " + p));
    } else if (matches.length == 0) {
      errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
    } else {
      for (FileStatus globStat : matches) {
        if (globStat.isDirectory()) {
          RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(globStat.getPath());
          while (iter.hasNext()) {
            LocatedFileStatus stat = iter.next();
            if (inputFilter.accept(stat.getPath())) {
              if (recursive && stat.isDirectory()) {
                addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
              } else {
                result.add(stat);
              }
            }
          }
        } else {
          result.add(globStat);
        }
      }
    }
  }
  if (!errors.isEmpty()) {
    throw new InvalidInputException(errors);
  }
  return result;
}
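The listStatus0 method above unescapes the configured value before handing it to FileSystem.globStatus. A minimal sketch of that unescape-then-glob step, assuming a hypothetical local file:/// pattern:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;

public class GlobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The stored value may contain escaped characters, so unescape it first.
    Path p = new Path(StringUtils.unEscapeString("file:///tmp/data/*.txt"));
    FileSystem fs = p.getFileSystem(conf);
    // null when the path does not exist; an empty array when the pattern
    // matches no files (the two error cases distinguished above).
    FileStatus[] matches = fs.globStatus(p);
    if (matches != null) {
      for (FileStatus s : matches) {
        System.out.println(s.getPath());
      }
    }
  }
}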
Example 5: unescape
import org.apache.hadoop.util.StringUtils; // import the package/class the method depends on
private static String unescape(String string) {
  // Delegates to the multi-character overload; charsToEscape is a field
  // defined elsewhere in the enclosing class.
  return StringUtils.unEscapeString(string, StringUtils.ESCAPE_CHAR,
      charsToEscape);
}