

Java FSDataOutputStream Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FSDataOutputStream, gathered from open-source projects. If you are unsure what FSDataOutputStream is for, or how and where to use it, the curated class examples below should help.


The FSDataOutputStream class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, ordered by popularity.
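Before the examples, here is a minimal sketch of the typical FSDataOutputStream lifecycle: obtain the stream from FileSystem.create(), write, optionally hflush() so concurrent readers can see the data, and close. The class name and path below are illustrative assumptions, not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);           // default filesystem from the loaded configuration
    Path path = new Path("/tmp/fsdos-example.txt"); // hypothetical path

    // create() returns an FSDataOutputStream; try-with-resources closes it.
    try (FSDataOutputStream out = fs.create(path, true /* overwrite */)) {
      out.writeBytes("hello, hdfs\n");
      out.hflush();                                 // make the bytes visible to concurrent readers
      System.out.println("position after write: " + out.getPos());
    }
  }
}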

Example 1: testUnfinishedBlockRead

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Test that writes to an incomplete block are available to a reader
 */
@Test (timeout = 30000)
public void testUnfinishedBlockRead()
  throws IOException {
  // create a new file in the root, write data, do not close
  Path file1 = new Path("/unfinished-block");
  FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);

  // write partial block and sync
  int partialBlockSize = blockSize / 2;
  writeFileAndSync(stm, partialBlockSize);

  // Make sure a client can read it before it is closed
  checkCanRead(fileSystem, file1, partialBlockSize);

  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestFileConcurrentReader.java
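writeFileAndSync and checkCanRead are helpers local to this test. As a hedged sketch of what the sync step relies on (class and method names below are assumed): hflush() pushes buffered bytes into the datanode pipeline so a concurrent reader can see them before the block is complete, while hsync() additionally forces them to disk.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class PartialBlockWriter {
  // Hypothetical helper mirroring writeFileAndSync from the test above.
  static FSDataOutputStream writePartialAndSync(FileSystem fs, Path file, byte[] data)
      throws IOException {
    FSDataOutputStream out = fs.create(file, (short) 1); // replication = 1
    out.write(data);
    out.hflush(); // flush to the pipeline: readers can now see the partial block
    return out;   // caller closes later, as the test does
  }
}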

Example 2: createFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
private static byte[] createFile(FileSystem fs, Path name, long length, 
    short replication, long blocksize) throws IOException {
  final FSDataOutputStream out = fs.create(name, false, 4096,
      replication, blocksize);
  try {
    // ran (Random), md5 (MessageDigest), and buffer (byte[]) are static
    // fields defined elsewhere in the test class.
    for (long n = length; n > 0; ) {
      ran.nextBytes(buffer);
      final int w = n < buffer.length ? (int) n : buffer.length;
      out.write(buffer, 0, w);
      md5.update(buffer, 0, w);
      n -= w;
    }
  } finally {
    IOUtils.closeStream(out);
  }
  return md5.digest();
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestFiHftp.java
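The method returns the MD5 digest of everything it wrote, which a caller can verify by reading the file back. A hedged companion sketch (helper and class names assumed):

import java.security.MessageDigest;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class Md5Check {
  // Hypothetical helper: recompute the digest by streaming the file back.
  static byte[] md5Of(FileSystem fs, Path name) throws Exception {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (FSDataInputStream in = fs.open(name)) {
      byte[] buf = new byte[4096];
      for (int r; (r = in.read(buf)) != -1; ) {
        md.update(buf, 0, r);
      }
    }
    return md.digest();
  }
}

MessageDigest.isEqual(expected, Md5Check.md5Of(fs, name)) would then confirm the write round-tripped intact.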

Example 3: save

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Save a matrix partition to a file.
 *
 * @param output the output stream to write to
 * @param partitionMeta metadata describing the partition; may be null
 * @throws IOException if writing fails
 */
public void save(DataOutputStream output, ModelPartitionMeta partitionMeta) throws IOException {
  FSDataOutputStream dataOutputStream = new FSDataOutputStream(output, null,
      partitionMeta != null ? partitionMeta.getOffset() : 0);
  dataOutputStream.writeInt(rows.size());
  long offset;
  for (Map.Entry<Integer, ServerRow> entry : rows.entrySet()) {
    offset = dataOutputStream.getPos();
    dataOutputStream.writeInt(entry.getKey());
    ServerRow row = entry.getValue();
    row.writeTo(dataOutputStream);
    if (partitionMeta != null) {
      partitionMeta.setRowMeta(new RowOffset(entry.getKey(), offset));
    }
  }
}
 
Developer: Tencent, Project: angel, Lines: 23, Source: ServerPartition.java

Example 4: copyPartitions

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
private void copyPartitions(Path mapOutputPath, Path indexPath)
  throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte[] buffer = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long)buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestMerge.java
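The index bookkeeping here is a general FSDataOutputStream pattern: capture getPos() before writing a segment, then derive the segment length from the position delta. A minimal sketch with assumed names:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FSDataOutputStream;

class SegmentIndexer {
  // Each entry is {startOffset, length} for one written segment.
  final List<long[]> index = new ArrayList<>();

  void writeSegment(FSDataOutputStream out, byte[] segment) throws IOException {
    long start = out.getPos();                              // position before the payload
    out.write(segment);                                     // the payload itself
    index.add(new long[] { start, out.getPos() - start });  // record (offset, length)
  }
}

Examples 3 and 9 rely on the same getPos() bookkeeping to make their records addressable later.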

Example 5: run

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Override
public void run() {
  System.out.println("Workload starting ");
  for (int i = 0; i < numberOfFiles; i++) {
    Path filename = new Path(id + "." + i);
    try {
      System.out.println("Workload processing file " + filename);
      FSDataOutputStream stm = createFile(fs, filename, replication);
      DFSOutputStream dfstream = (DFSOutputStream) stm.getWrappedStream();
      dfstream.setArtificialSlowdown(1000);
      writeFile(stm, myseed);
      stm.close();
      checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
    } catch (Throwable e) {
      System.out.println("Workload exception " + e);
      assertTrue(e.toString(), false);
    }

    // increment the stamp to indicate that another file is done.
    synchronized (this) {
      stamp++;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestDatanodeDeath.java

Example 6: makeRenamePending

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
public void makeRenamePending(FileFolder dst) throws IOException {

  // Propose (but don't do) the rename.
  Path home = fs.getHomeDirectory();
  String relativeHomeDir = getRelativePath(home.toString());
  NativeAzureFileSystem.FolderRenamePending pending =
      new NativeAzureFileSystem.FolderRenamePending(
          relativeHomeDir + "/" + this.getName(),
          relativeHomeDir + "/" + dst.getName(), null,
          (NativeAzureFileSystem) fs);

  // Get the rename pending file contents.
  String renameDescription = pending.makeRenamePendingFileContents();

  // Create a rename-pending file and write rename information to it.
  final String renamePendingStr = this.getName() + "-RenamePending.json";
  Path renamePendingFile = new Path(renamePendingStr);
  FSDataOutputStream out = fs.create(renamePendingFile, true);
  assertTrue(out != null);
  writeString(out, renameDescription);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: NativeAzureFileSystemBaseTest.java

Example 7: testRewritingClusterIdToPB

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  FSDataOutputStream s = null;
  try {
    s = fs.create(filePath);
    s.writeUTF(UUID.randomUUID().toString());
  } finally {
    if (s != null) {
      s.close();
    }
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(1, master.getServerManager().getOnlineServersList().size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestClusterId.java

Example 8: create

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
FSDataOutputStream create(PathData item, boolean lazyPersist)
    throws IOException {
  try {
    if (lazyPersist) {
      EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
      return create(item.path,
                    FsPermission.getFileDefault().applyUMask(
                        FsPermission.getUMask(getConf())),
                    createFlags,
                    getConf().getInt("io.file.buffer.size", 4096),
                    lazyPersist ? 1 : getDefaultReplication(item.path),
                    getDefaultBlockSize(),
                    null,
                    null);
    } else {
      return create(item.path, true);
    }
  } finally { // might have been created but stream was interrupted
    deleteOnExit(item.path);
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: CommandWithDestination.java

Example 9: writeTrailer

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
    throws IOException {
  long offset = stream.getPos();

  // Write EOF Entry
  ProcedureWALEntry.newBuilder()
    .setType(ProcedureWALEntry.Type.EOF)
    .build().writeDelimitedTo(stream);

  // Write Tracker
  tracker.writeTo(stream);

  stream.write(TRAILER_VERSION);
  StreamUtils.writeLong(stream, TRAILER_MAGIC);
  StreamUtils.writeLong(stream, offset);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: ProcedureWALFormat.java

Example 10: createInternal

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 26, Source: ViewFs.java

Example 11: append

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for (int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch (RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i + ", p=" + p, re);
        Thread.sleep(1000);
      } else {
        throw re;
      }
    }
  }
  throw new IOException("Cannot append to " + p);
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestReadWhileWriting.java

Example 12: wrapIfNecessary

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" job configuration
 * variable.
 *
 * @param conf the job configuration
 * @param out the stream to wrap
 * @return the wrapped stream if encrypted spill is enabled, otherwise out unchanged
 * @throws IOException if the stream cannot be wrapped
 */
public static FSDataOutputStream wrapIfNecessary(Configuration conf,
    FSDataOutputStream out) throws IOException {
  if (isEncryptedSpillEnabled(conf)) {
    out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
    byte[] iv = createIV(conf);
    out.write(iv);
    if (LOG.isDebugEnabled()) {
      LOG.debug("IV written to Stream ["
          + Base64.encodeBase64URLSafeString(iv) + "]");
    }
    return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
        getBufferSize(conf), getEncryptionKey(), iv);
  } else {
    return out;
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: CryptoUtils.java
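A hedged usage sketch follows; the spill path and helper name are assumptions, and CryptoUtils is the MapReduce class shown above (from Hadoop's mapreduce client module):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.CryptoUtils;

class SpillWriterSketch {
  // Hypothetical helper: the returned stream is transparently encrypted
  // iff encrypted intermediate data is enabled in conf.
  static FSDataOutputStream openSpill(Configuration conf, Path spillPath) throws Exception {
    FSDataOutputStream raw = FileSystem.getLocal(conf).create(spillPath, true);
    return CryptoUtils.wrapIfNecessary(conf, raw); // writes position + IV header when enabled
  }
}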

Example 13: writeSnapshotInfo

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Write the snapshot description into the working directory of a snapshot
 * @param snapshot description of the snapshot being taken
 * @param workingDir working directory of the snapshot
 * @param fs {@link FileSystem} on which the snapshot should be taken
 * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
 *           failure
 */
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
    throws IOException {
  FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
    HConstants.DATA_FILE_UMASK_KEY);
  Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
  try {
    FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
    try {
      snapshot.writeTo(out);
    } finally {
      out.close();
    }
  } catch (IOException e) {
    // if we get an exception, try to remove the snapshot info
    if (!fs.delete(snapshotInfo, false)) {
      String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: SnapshotDescriptionUtils.java

Example 14: testCreatedFileIsImmediatelyVisible

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
  describe("verify that a newly created file exists as soon as open returns");
  Path path = path("testCreatedFileIsImmediatelyVisible");
  FSDataOutputStream out = null;
  try {
    out = getFileSystem().create(path,
                                 false,
                                 4096,
                                 (short) 1,
                                 1024);
    if (!getFileSystem().exists(path)) {

      if (isSupported(IS_BLOBSTORE)) {
        // object store: downgrade to a skip so that the failure is visible
        // in test results
        skip("Filesystem is an object store and newly created files are not immediately visible");
      }
      assertPathExists("expected path to be visible before anything written",
                       path);
    }
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: AbstractContractCreateTest.java

Example 15: methodsToTest

import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Parameters(name = "method: {0}")
public static Object[] methodsToTest() {
  List<Method> methods = FluentIterable
      .of(FSDataOutputStream.class.getDeclaredMethods())
      .filter(new Predicate<Method>() {
        @Override
        public boolean apply(Method input) {
          if (Modifier.isStatic(input.getModifiers())) {
            return false;
          }
          if (!Modifier.isPublic(input.getModifiers())) {
            return false;
          }
          return Arrays.asList(input.getExceptionTypes()).contains(IOException.class);
        }
      }).toList();

  return methods.toArray();
}
 
Developer: dremio, Project: dremio-oss, Lines: 20, Source: TestFSDataOutputStreamWrapper.java


Note: The org.apache.hadoop.fs.FSDataOutputStream examples in this article were collected from open-source projects hosted on GitHub and similar platforms. Copyright of the code snippets remains with their original authors; consult each project's license before redistributing or reusing them.