

Java HarFileSystem.getHarHash Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.HarFileSystem.getHarHash. If you are wondering what HarFileSystem.getHarHash does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of org.apache.hadoop.fs.HarFileSystem, the class that defines the method.


Six code examples of the HarFileSystem.getHarHash method are shown below, sorted by popularity by default.
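To ground the examples, here is a minimal standalone sketch of a call to the method. The class name HarHashDemo and the archive-relative path are hypothetical, and the sketch assumes getHarHash is publicly accessible, which the snippets below suggest, since they call it statically from the HadoopArchives tool. getHarHash maps a path inside a Hadoop Archive (HAR) to an int, and the archiving job uses that int as the map-output key so index entries are bucketed by hash:

import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.Path;

public class HarHashDemo {
  public static void main(String[] args) {
    // Hypothetical archive-relative path; in the examples below it comes
    // from the input record (mstat.pathname, value.path, ...).
    Path relPath = new Path("user/data/part-00000");
    // Entries with equal hashes land in the same reducer and therefore
    // in the same slice of the HAR index.
    int hash = HarFileSystem.getHarHash(relPath);
    System.out.println("har hash for " + relPath + " = " + hash);
  }
}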

Example 1: map

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
public void map(LongWritable key, Text value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  String line = value.toString();
  MapStat mstat = new MapStat(line);
  Path relPath = new Path(mstat.pathname);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  if (mstat.isDir) { 
    towrite = relPath.toString() + " " + "dir none " + 0 + " " + 0 + " ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: mstat.children) {
      sbuff.append(child + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileStatus srcStatus = srcFs.getFileStatus(srcPath);
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = relPath.toString() + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 34, Source: HadoopArchives.java
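Note: this variant (from the CDH 5.4.3 tree) parses each input line into a MapStat, hashes the archive-relative path, and emits a plain-text index record keyed by that hash: for directories the record is "<path> dir none 0 0 <children...>"; for files it is "<path> file <partname> <startPos> <length>", written after the file's bytes have been appended to the shared part file.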

Example 2: map

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
public void map(LongWritable key, HarEntry value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  Path relPath = new Path(value.path);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileStatus srcStatus = srcFs.getFileStatus(srcPath);
  String propStr = encodeProperties(srcStatus);
  if (value.isDir()) { 
    towrite = encodeName(relPath.toString())
              + " dir " + propStr + " 0 0 ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: value.children) {
      sbuff.append(encodeName(child) + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = encodeName(relPath.toString())
              + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " " + propStr + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: HadoopArchives.java
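Note: this newer variant is the same pipeline as Example 1, but it escapes the entry name with encodeName and appends a propStr produced by encodeProperties — presumably the same modification time/access time/permission/owner/group fields that Example 4 encodes inline — so that spaces in names or metadata cannot corrupt the space-delimited index line.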

Example 3: map

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
public void map(LongWritable key, Text value, OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  reporter.setStatus("Passing file " + value + " to archive.");
  reporter.progress();

  HarStatus harStatus = new HarStatus(value.toString());
  int hash = HarFileSystem.getHarHash(harStatus.getName());
  out.collect(new IntWritable(hash), value);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 10, Source: HadoopArchives.java
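Note: unlike the other mappers, this one (from the hadoop-EAR fork) copies no data at all. It receives lines that are already valid HarStatus index records, recomputes the hash from the stored entry name, and forwards the line unchanged — apparently so that an existing archive's index can be re-bucketed by hash.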

Example 4: map

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
public void map(LongWritable key, HarEntry value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  Path relPath = new Path(value.path);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileStatus srcStatus = srcFs.getFileStatus(srcPath);
  String propStr = URLEncoder.encode(
                      srcStatus.getModificationTime() + " "
                    + srcStatus.getAccessTime() + " "
                    + srcStatus.getPermission().toShort() + " "
                    + URLEncoder.encode(srcStatus.getOwner(), "UTF-8") + " "
                    + URLEncoder.encode(srcStatus.getGroup(), "UTF-8"),
                   "UTF-8");
  if (value.isDir()) { 
    towrite = URLEncoder.encode(relPath.toString(),"UTF-8")  
              + " dir " + propStr + " 0 0 ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: value.children) {
      sbuff.append(URLEncoder.encode(child,"UTF-8") + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = URLEncoder.encode(relPath.toString(),"UTF-8")
              + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " " + propStr + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 41, Source: HadoopArchives.java
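Note: functionally this matches Example 2, but this older fork has no encodeName/encodeProperties helpers yet, so it URL-encodes the path, owner, group, and the combined property string inline with URLEncoder.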

Example 5: map

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
public void map(LongWritable key, Text value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  String line = value.toString();
  MapStat mstat = new MapStat(line);
  Path srcPath = new Path(mstat.pathname);
  String towrite = null;
  Path relPath = makeRelative(srcPath);
  int hash = HarFileSystem.getHarHash(relPath);
  long startPos = partStream.getPos();
  if (mstat.isDir) { 
    towrite = relPath.toString() + " " + "dir none " + 0 + " " + 0 + " ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: mstat.children) {
      sbuff.append(child + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileStatus srcStatus = srcFs.getFileStatus(srcPath);
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = relPath.toString() + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: thisisvoa, Project: hadoop-0.20, Lines of code: 34, Source: HadoopArchives.java
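Note: in this oldest variant shown (hadoop-0.20), the input line carries the source path itself, so the mapper derives the archive-relative path with makeRelative before hashing; Examples 1, 2 and 4 instead receive the relative path directly and resolve the source via realPath.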

Example 6: calculateHarHash

import org.apache.hadoop.fs.HarFileSystem; // import the package/class the method depends on
protected Integer calculateHarHash() {
    return HarFileSystem.getHarHash(new Path(path.replace("%2F", "/")));
}
 
Developer ID: trenner, Project: ahar, Lines of code: 4, Source: IndexEntry.java
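Note: the ahar project stores index entry paths with "/" escaped as %2F; calculateHarHash reverses that escaping before calling getHarHash, so the recomputed hash presumably matches the one the archiving job originally used as the bucket key.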


Note: The org.apache.hadoop.fs.HarFileSystem.getHarHash method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and redistribution and use should follow each project's license. Do not republish without permission.