

Java HarFileSystem Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.HarFileSystem. If you are wondering what exactly HarFileSystem does, how to use it, or where to find real-world examples of it, the curated class code examples below should help.


The HarFileSystem class belongs to the org.apache.hadoop.fs package. Ten code examples of the class are presented below, sorted by popularity by default.
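Before diving into the examples, a minimal orientation sketch may help: HarFileSystem exposes a Hadoop archive (.har) as a read-only FileSystem under the har:// scheme, so archived files are read with the ordinary FileSystem API. The archive path below is hypothetical; any existing .har archive on the cluster's default file system would do.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HarReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // har:/// resolves the archive against the default file system; Hadoop's
    // configuration maps the har scheme to HarFileSystem.
    Path archived = new Path("har:///user/zoo/foo.har/dir/file.txt");
    FileSystem harFs = archived.getFileSystem(conf);  // a HarFileSystem
    FileStatus status = harFs.getFileStatus(archived);
    System.out.println("archived length: " + status.getLen());
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(harFs.open(archived)))) {
      System.out.println(in.readLine());
    }
  }
}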

Example 1: configure

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
/**
 * Configure the reducer: open the _index and _masterindex files for writing
 */
public void configure(JobConf conf) {
  this.conf = conf;
  tmpOutputDir = FileOutputFormat.getWorkOutputPath(this.conf);
  masterIndex = new Path(tmpOutputDir, HarFileSystem.MASTER_INDEX_NAME);
  index = new Path(tmpOutputDir, HarFileSystem.INDEX_NAME);
  try {
    fs = masterIndex.getFileSystem(conf);
    if (fs.exists(masterIndex)) {
      fs.delete(masterIndex, false);
    }
    if (fs.exists(index)) {
      fs.delete(index, false);
    }
    indexStream = fs.create(index);
    outStream = fs.create(masterIndex);
    String version = VERSION + " \n";
    outStream.write(version.getBytes());
    
  } catch(IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 26, Source file: HadoopArchives.java

Example 2: configure

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void configure(JobConf conf) {
  this.conf = conf;
  tmpOutputDir = FileOutputFormat.getWorkOutputPath(this.conf);
  masterIndex = new Path(tmpOutputDir, "_masterindex");
  index = new Path(tmpOutputDir, "_index");
  try {
    fs = masterIndex.getFileSystem(conf);
    if (fs.exists(masterIndex)) {
      fs.delete(masterIndex, false);
    }
    if (fs.exists(index)) {
      fs.delete(index, false);
    }
    indexStream = fs.create(index);
    outStream = fs.create(masterIndex);
    String version = HarFileSystem.VERSION + " \n";
    outStream.write(version.getBytes());
    
  } catch(IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: thisisvoa, Project: hadoop-0.20, Lines: 23, Source file: HadoopArchives.java

Example 3: createEmptyHarArchive

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
/**
 * Create an empty Har archive in the FileSystem fs at the Path p.
 * 
 * @param fs the file system to create the Har archive in
 * @param p the path to create the Har archive at
 * @throws IOException in the event of error
 */
private static void createEmptyHarArchive(FileSystem fs, Path p)
    throws IOException {
  fs.mkdirs(p);
  OutputStream out = fs.create(new Path(p, "_masterindex"));
  out.write(Integer.toString(HarFileSystem.VERSION).getBytes());
  out.close();
  fs.create(new Path(p, "_index")).close();
}
 
Developer ID: naver, Project: hadoop, Lines: 16, Source file: TestHarFileSystemWithHA.java
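A possible usage sketch of this helper on the local file system (the /tmp path is hypothetical; the private helper body is copied in verbatim so the sketch is self-contained). It leaves behind exactly the two metadata files HarFileSystem expects at an archive root: a version-stamped _masterindex and an (here empty) _index.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HarFileSystem;
import org.apache.hadoop.fs.Path;

public class EmptyHarDemo {
  // Copied from the example above (TestHarFileSystemWithHA).
  private static void createEmptyHarArchive(FileSystem fs, Path p)
      throws IOException {
    fs.mkdirs(p);
    OutputStream out = fs.create(new Path(p, "_masterindex"));
    out.write(Integer.toString(HarFileSystem.VERSION).getBytes());
    out.close();
    fs.create(new Path(p, "_index")).close();
  }

  public static void main(String[] args) throws Exception {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    Path archive = new Path("/tmp/empty-demo.har");  // hypothetical path
    createEmptyHarArchive(localFs, archive);
    System.out.println(localFs.exists(new Path(archive, "_masterindex")));
    System.out.println(localFs.exists(new Path(archive, "_index")));
  }
}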

Example 4: map

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void map(LongWritable key, HarEntry value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  Path relPath = new Path(value.path);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileStatus srcStatus = srcFs.getFileStatus(srcPath);
  String propStr = encodeProperties(srcStatus);
  if (value.isDir()) { 
    towrite = encodeName(relPath.toString())
              + " dir " + propStr + " 0 0 ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: value.children) {
      sbuff.append(encodeName(child) + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = encodeName(relPath.toString())
              + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " " + propStr + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source file: HadoopArchives.java

Example 5: testCopyToLocal

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();

  // make path to copy the file to:
  final String tmpDir
    = System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));
  
  // Create fresh HarFs:
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());
    
    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");
    // copy the Har file to a local file system:
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
    FileStatus straus = localFs.getFileStatus(targetPath);
    // the file should contain just 1 character:
    assertEquals(1, straus.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);      
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source file: TestHadoopArchives.java

Example 6: map

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void map(LongWritable key, Text value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  String line  = value.toString();
  MapStat mstat = new MapStat(line);
  Path relPath = new Path(mstat.pathname);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  if (mstat.isDir) { 
    towrite = relPath.toString() + " " + "dir none " + 0 + " " + 0 + " ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: mstat.children) {
      sbuff.append(child + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileStatus srcStatus = srcFs.getFileStatus(srcPath);
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = relPath.toString() + " file " + partname + " " + startPos
    + " " + srcStatus.getLen() + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 34, Source file: HadoopArchives.java

Example 7: map

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void map(LongWritable key, Text value, OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  reporter.setStatus("Passing file " + value + " to archive.");
  reporter.progress();

  HarStatus harStatus = new HarStatus(value.toString());
  int hash = HarFileSystem.getHarHash(harStatus.getName());
  out.collect(new IntWritable(hash), value);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 10, Source file: HadoopArchives.java
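The hash computed in these map functions decides which partition, and therefore which contiguous region of the final _index, each entry lands in. Note that the getHarHash overload varies across the forks shown here (a Path in Examples 4 and 9, a String in this one). The sketch below is a plain non-negative string hash in the same spirit, not the library method itself.

public class HarHashSketch {
  static int harHash(String name) {
    // Clearing the sign bit keeps the value non-negative, so it can be
    // mapped onto a reducer/partition index with a simple modulo.
    return name.hashCode() & 0x7fffffff;
  }

  public static void main(String[] args) {
    int numPartitions = 4;                    // hypothetical reducer count
    String entry = "/user/zoo/dir/file.txt";  // hypothetical archive entry
    System.out.println(harHash(entry) % numPartitions);
  }
}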

Example 8: getParityBlocks

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
/**
 * Gets the parity blocks corresponding to a file: returns the parity
 * blocks themselves in the DFS case, and the part-file blocks that
 * contain the parity blocks in the HAR FS case.
 */
private static BlockLocation[] getParityBlocks(final Path filePath,
                                        final long blockSize,
                                        final long numStripes,
                                        final RaidInfo raidInfo) 
  throws IOException {
  FileSystem parityFS = raidInfo.parityPair.getFileSystem();
  
  // get parity file metadata
  FileStatus parityFileStatus = raidInfo.parityPair.getFileStatus(); 
  long parityFileLength = parityFileStatus.getLen();

  if (parityFileLength != numStripes * raidInfo.parityBlocksPerStripe *
      blockSize) {
    throw new IOException("expected parity file of length " + 
                          (numStripes * raidInfo.parityBlocksPerStripe *
                           blockSize) +
                          " but got parity file of length " + 
                          parityFileLength);
  }

  BlockLocation[] parityBlocks = 
    parityFS.getFileBlockLocations(parityFileStatus, 0L, parityFileLength);
  
  if (parityFS instanceof DistributedFileSystem ||
      parityFS instanceof DistributedRaidFileSystem) {
    long parityBlockSize = parityFileStatus.getBlockSize();
    if (parityBlockSize != blockSize) {
      throw new IOException("file block size is " + blockSize + 
                            " but parity file block size is " + 
                            parityBlockSize);
    }
  } else if (parityFS instanceof HarFileSystem) {
    LOG.debug("HAR FS found");
  } else {
    LOG.warn("parity file system is not of a supported type");
  }
  
  return parityBlocks;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 46, Source file: RaidUtils.java

Example 9: map

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void map(LongWritable key, HarEntry value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  Path relPath = new Path(value.path);
  int hash = HarFileSystem.getHarHash(relPath);
  String towrite = null;
  Path srcPath = realPath(relPath, rootPath);
  long startPos = partStream.getPos();
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileStatus srcStatus = srcFs.getFileStatus(srcPath);
  String propStr = URLEncoder.encode(
                      srcStatus.getModificationTime() + " "
                    + srcStatus.getAccessTime() + " "
                    + srcStatus.getPermission().toShort() + " "
                    + URLEncoder.encode(srcStatus.getOwner(), "UTF-8") + " "
                    + URLEncoder.encode(srcStatus.getGroup(), "UTF-8"),
                   "UTF-8");
  if (value.isDir()) { 
    towrite = URLEncoder.encode(relPath.toString(),"UTF-8")  
              + " dir " + propStr + " 0 0 ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: value.children) {
      sbuff.append(URLEncoder.encode(child,"UTF-8") + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = URLEncoder.encode(relPath.toString(),"UTF-8")
              + " file " + partname + " " + startPos
              + " " + srcStatus.getLen() + " " + propStr + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: iVCE, Project: RDFS, Lines: 41, Source file: HadoopArchives.java
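The double URL-encoding above is worth unpacking: owner and group are encoded once so that embedded spaces survive, then the whole five-field property string is encoded again before being written into the index line. A small self-contained sketch with hypothetical values:

import java.net.URLDecoder;
import java.net.URLEncoder;

public class PropStrSketch {
  public static void main(String[] args) throws Exception {
    long mtime = 1700000000000L, atime = 1700000000000L;
    short perm = 0644;                              // rw-r--r-- permissions
    String owner = "data team", group = "hadoop";   // hypothetical values
    String propStr = URLEncoder.encode(
        mtime + " " + atime + " " + perm + " "
        + URLEncoder.encode(owner, "UTF-8") + " "
        + URLEncoder.encode(group, "UTF-8"), "UTF-8");
    System.out.println(propStr);
    // One decode recovers five space-separated fields; owner and group stay
    // encoded ("data+team"), so splitting the line on spaces remains safe.
    System.out.println(URLDecoder.decode(propStr, "UTF-8"));
  }
}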

Example 10: map

import org.apache.hadoop.fs.HarFileSystem; // import the required package/class
public void map(LongWritable key, Text value,
    OutputCollector<IntWritable, Text> out,
    Reporter reporter) throws IOException {
  String line  = value.toString();
  MapStat mstat = new MapStat(line);
  Path srcPath = new Path(mstat.pathname);
  String towrite = null;
  Path relPath = makeRelative(srcPath);
  int hash = HarFileSystem.getHarHash(relPath);
  long startPos = partStream.getPos();
  if (mstat.isDir) { 
    towrite = relPath.toString() + " " + "dir none " + 0 + " " + 0 + " ";
    StringBuffer sbuff = new StringBuffer();
    sbuff.append(towrite);
    for (String child: mstat.children) {
      sbuff.append(child + " ");
    }
    towrite = sbuff.toString();
    //reading directories is also progress
    reporter.progress();
  }
  else {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileStatus srcStatus = srcFs.getFileStatus(srcPath);
    FSDataInputStream input = srcFs.open(srcStatus.getPath());
    reporter.setStatus("Copying file " + srcStatus.getPath() + 
        " to archive.");
    copyData(srcStatus.getPath(), input, partStream, reporter);
    towrite = relPath.toString() + " file " + partname + " " + startPos
    + " " + srcStatus.getLen() + " ";
  }
  out.collect(new IntWritable(hash), new Text(towrite));
}
 
Developer ID: thisisvoa, Project: hadoop-0.20, Lines: 34, Source file: HadoopArchives.java


Note: the org.apache.hadoop.fs.HarFileSystem class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright over the source; consult each project's License before distributing or using the code, and do not republish without permission.