

Java FileSystem.append Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.append. If you are wondering what FileSystem.append does, how to call it, or what real code using it looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileSystem.


The 15 code examples of the FileSystem.append method shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
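
Before diving into the examples, here is a minimal, self-contained sketch of the basic pattern: FileSystem.append only opens files that already exist, so callers typically create the file first (or fall back to create()). This sketch is illustrative and not taken from the projects below; it assumes a file system where append is enabled (e.g. HDFS), and the path name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path p = new Path("/tmp/append-demo.txt"); // hypothetical path
    FileSystem fs = p.getFileSystem(conf);
    if (!fs.exists(p)) {
      // append() throws FileNotFoundException for missing files (see Example 3),
      // so create the file on first use
      try (FSDataOutputStream out = fs.create(p)) {
        out.writeBytes("first line\n");
      }
    }
    // open the existing file for append and add more data
    try (FSDataOutputStream out = fs.append(p)) {
      out.writeBytes("appended line\n");
    }
  }
}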

Example 1: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 22, Source file: HDFSSequenceFile.java

Example 2: writeAndAppend

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void writeAndAppend(FileSystem fs, Path p,
    int lengthForCreate, int lengthForAppend) throws IOException {
  // Creating a file with 4096 blockSize to write multiple blocks
  FSDataOutputStream stream = fs.create(
      p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
  try {
    AppendTestUtil.write(stream, 0, lengthForCreate);
    stream.close();
    
    stream = fs.append(p);
    AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
    stream.close();
  } finally {
    IOUtils.closeStream(stream);
  }
  
  int totalLength = lengthForCreate + lengthForAppend; 
  assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source file: TestFileAppendRestart.java

Example 3: testFileNotFound

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * A FileNotFoundException is expected when appending to a non-existing file.
 * 
 * @throws FileNotFoundException as the expected result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: TestFileAppend.java

Example 4: appendWithTwoFs

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void appendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2)
    throws IOException {
  FSDataOutputStream stm = fs1.create(p);
  try {
    AppendTestUtil.write(stm, 0, SEGMENT_LENGTH);
  } finally {
    stm.close();
  }
  
  stm = fs2.append(p);
  try {
    AppendTestUtil.write(stm, SEGMENT_LENGTH, SEGMENT_LENGTH);
  } finally {
    stm.close();
  }    
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestAppendDifferentChecksum.java

Example 5: append

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for(int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch(RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
        Thread.sleep(1000);
      }
      else
        throw re;
    }
  }
  throw new IOException("Cannot append to " + p);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestReadWhileWriting.java

Example 6: checkAppend

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test whether the file system supports append and return the answer.
 * @param fs the target file system
 */
private boolean checkAppend(FileSystem fs) {
  boolean canAppend = true;

  try {
    fs.append(basePath);
  } catch (IOException ex) {
    if (ex.getMessage().equals("Not supported")) {
      canAppend = false;
    }
  }

  return canAppend;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 18, Source file: RollingFileSystemSink.java
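
A probe like checkAppend above might then be used when deciding how to open a destination file. The following is a hypothetical sketch, not code from hadoop-oss; the openForWrite helper and its fallback policy are assumptions.

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on

// Hypothetical caller: append when the file system supports it and the
// file already exists; otherwise fall back to create() with overwrite.
private FSDataOutputStream openForWrite(FileSystem fs, Path p) throws IOException {
  if (checkAppend(fs) && fs.exists(p)) {
    return fs.append(p);       // keep the existing contents
  }
  return fs.create(p, true);   // start a fresh file
}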

Example 7: Writer

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
                     bufferSizeOption.getValue();
    short replication = replicationOption == null ?
                        fs.getDefaultReplication(p) :
                        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
                     blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())){
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
 
Developer ID: jiangxiluning, Project: kafka-connect-hdfs, Lines of code: 60, Source file: WALFile.java

Example 8: doOpen

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 37, Source file: HDFSDataStream.java

Example 9: doOpen

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs)
    throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 40, Source file: HDFSDataStream.java

Example 10: testAppendTwice

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {

    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    {
      // create a new file with a full block.
      FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    //1st append does not add any data so that the last block remains full
    //and the last block in INodeFileUnderConstruction is a BlockInfo
    //but not BlockInfoUnderConstruction. 
    fs2.append(p);
    
    //2nd append should get AlreadyBeingCreatedException
    fs1.append(p);
    Assert.fail();
  } catch(RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestFileAppend.java

Example 11: testAlgoSwitchRandomized

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test which randomly alternates between appending with
 * CRC32 and with CRC32C, crossing several block boundaries.
 * Then, checks that all of the data can be read back correctly.
 */
@Test(timeout=RANDOM_TEST_RUNTIME*2)
public void testAlgoSwitchRandomized() throws IOException {
  FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
  FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);

  Path p = new Path("/testAlgoSwitchRandomized");
  long seed = Time.now();
  System.out.println("seed: " + seed);
  Random r = new Random(seed);
  
  // Create empty to start
  IOUtils.closeStream(fsWithCrc32.create(p));
  
  long st = Time.now();
  int len = 0;
  while (Time.now() - st < RANDOM_TEST_RUNTIME) {
    int thisLen = r.nextInt(500);
    FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
    FSDataOutputStream stm = fs.append(p);
    try {
      AppendTestUtil.write(stm, len, thisLen);
    } finally {
      stm.close();
    }
    len += thisLen;
  }
  
  AppendTestUtil.check(fsWithCrc32, p, len);
  AppendTestUtil.check(fsWithCrc32C, p, len);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: TestAppendDifferentChecksum.java

Example 12: recoverFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void recoverFile(final FileSystem fs) throws Exception {
  LOG.info("Recovering File Lease");

  // set the soft limit to be 1 second so that the
  // namenode triggers lease recovery upon append request
  cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);

  // Trying recovery
  int tries = 60;
  boolean recovered = false;
  FSDataOutputStream out = null;
  while (!recovered && tries-- > 0) {
    try {
      out = fs.append(file1);
      LOG.info("Successfully opened for append");
      recovered = true;
    } catch (IOException e) {
      LOG.info("Failed open for append, waiting on lease recovery");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ex) {
        // ignore it and try again
      }
    }
  }
  if (out != null) {
    out.close();
  }
  if (!recovered) {
    fail("Recovery should take < 1 min");
  }
  LOG.info("Past out lease recovery");
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source file: TestFileAppend4.java

Example 13: testEarlierVersionEditLog

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 58, Source file: TestPersistBlocks.java

Example 14: open

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem)hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Developer ID: moueimei, Project: flume-release-1.7.0, Lines of code: 44, Source file: HDFSCompressedDataStream.java

Example 15: testDecommissionWithOpenfile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test(timeout=120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");
  
  //At most 4 nodes will be decommissioned
  startCluster(1, 7, conf);
      
  FileSystem fileSys = cluster.getFileSystem(0);
  FSNamesystem ns = cluster.getNamesystem(0);
  
  String openFile = "/testDecommissionWithOpenfile.dat";
         
  writeFile(fileSys, new Path(openFile), (short)3);   
  // make sure the file was open for write
  FSDataOutputStream fdos =  fileSys.append(new Path(openFile)); 
  
  LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(0), openFile, 0, fileSize);
            
  DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
  DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
  
  ArrayList<String> nodes = new ArrayList<String>();
  ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();

  DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    DatanodeInfo found = datanodeInfo;
    for (DatanodeInfo dif: dnInfos4LastBlock) {
      if (datanodeInfo.equals(dif)) {
       found = null;
      }
    }
    if (found != null) {
      nodes.add(found.getXferAddr());
      dnInfos.add(dm.getDatanode(found));
    }
  }
  //decommission one of the 3 nodes which have the last block
  nodes.add(dnInfos4LastBlock[0].getXferAddr());
  dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
  
  writeConfigFile(excludeFile, nodes);
  refreshNodes(ns, conf);  
  for (DatanodeInfo dn : dnInfos) {
    waitNodeState(dn, AdminStates.DECOMMISSIONED);
  }           

  fdos.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestDecommission.java


Note: The org.apache.hadoop.fs.FileSystem.append examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or reusing the code, and do not republish without permission.