

Java FSDataOutputStream.writeBytes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.writeBytes. If you have been wondering what exactly FSDataOutputStream.writeBytes does, how to call it, or where to find real-world examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


Fifteen code examples of the FSDataOutputStream.writeBytes method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
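Before the examples, one behavioral note: like java.io.DataOutputStream.writeBytes (which FSDataOutputStream inherits from), this method writes only the low-order byte of each character in the string, so it is safe for ASCII text but silently corrupts multi-byte characters. A minimal usage sketch follows; the path and contents here are made up for illustration and are not taken from any of the projects below:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteBytesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // default filesystem from the configuration
    Path file = new Path("/tmp/writebytes-demo.txt"); // hypothetical path

    // try-with-resources closes the stream even if a write fails
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeBytes("ascii only\n"); // one byte per char; the high byte is dropped
      out.write("non-ASCII text\n".getBytes(StandardCharsets.UTF_8)); // explicit encoding for arbitrary text
      out.hflush(); // make the data visible to new readers before close
    }
  }
}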

Example 1: testFileCloseStatus

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test(timeout=60000)
public void testFileCloseStatus() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // create a new file.
    Path file = new Path("/simpleFlush.dat");
    FSDataOutputStream output = fs.create(file);
    // write to file
    output.writeBytes("Some test data");
    output.flush();
    assertFalse("File status should be open", fs.isFileClosed(file));
    output.close();
    assertTrue("File status should be closed", fs.isFileClosed(file));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestDistributedFileSystem.java

Example 2: createWordsFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private static void createWordsFile(Path inpFile, Configuration conf)
    throws IOException {
  final FileSystem fs = inpFile.getFileSystem(conf);
  if (fs.exists(inpFile)) {
    return;
  }
  FSDataOutputStream out = fs.create(inpFile);
  try {
    // 1024 lines x 4 unique words per line --- repeated 5 times => 5 * 4K = 20K words
    int REPLICAS=5, NUMLINES=1024, NUMWORDSPERLINE=4;
    final String WORD = "zymurgy"; // 7 bytes + 4 id bytes
    final Formatter fmt = new Formatter(new StringBuilder());
    for (int i = 0; i < REPLICAS; i++) {
      for (int j = 1; j <= NUMLINES*NUMWORDSPERLINE; j+=NUMWORDSPERLINE) {
        ((StringBuilder)fmt.out()).setLength(0);
        for (int k = 0; k < NUMWORDSPERLINE; ++k) {
          fmt.format("%s%04d ", WORD, j + k);
        }
        ((StringBuilder)fmt.out()).append("\n");
        out.writeBytes(fmt.toString());
      }
    }
  } finally {
    out.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestJobCounters.java

Example 3: testInvalidateOverReplicatedBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test that an over-replicated block gets invalidated when decreasing the
 * replication factor for a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestOverReplicatedBlocks.java

Example 4: writeConfigFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeConfigFile(Path name, List<String> nodes) 
  throws IOException {
  // delete if it already exists
  if (localFileSys.exists(name)) {
    localFileSys.delete(name, true);
  }

  FSDataOutputStream stm = localFileSys.create(name);
  
  if (nodes != null) {
    for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
      String node = it.next();
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestDecommission.java
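As a side note, the iterator loop and unguarded stream above can be written more tightly with an enhanced for loop and try-with-resources. A sketch of the same logic, assuming the localFileSys field from the surrounding test class:

private void writeConfigFile(Path name, List<String> nodes) throws IOException {
  // delete if it already exists
  if (localFileSys.exists(name)) {
    localFileSys.delete(name, true);
  }
  // the stream is closed even if writeBytes throws
  try (FSDataOutputStream stm = localFileSys.create(name)) {
    if (nodes != null) {
      for (String node : nodes) {
        stm.writeBytes(node + "\n");
      }
    }
  }
}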

Example 5: addMockStoreFiles

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
    throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    FSDataOutputStream dos = fs.create(storeFile, true);
    dos.writeBytes("Some data: " + i);
    dos.close();
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestCatalogJanitor.java

Example 6: writeConfigFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeConfigFile(Path name, ArrayList<String> nodes) 
  throws IOException {
  // delete if it already exists
  if (localFileSys.exists(name)) {
    localFileSys.delete(name, true);
  }

  FSDataOutputStream stm = localFileSys.create(name);
  
  if (nodes != null) {
    for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
      String node = it.next();
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestGetConf.java

Example 7: call

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
        HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
    fs.mkdirs(tmpDir);
    HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
    final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    out.flush();
    return out;
  } catch(RemoteException e) {
    if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
      return null;
    } else {
      throw e;
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: HBaseFsck.java

Example 8: createLogFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Create two simple log files under the workspace directory.
 *
 * @return the path of the directory containing the log files
 * @throws IOException if the files cannot be created
 */

private Path createLogFile() throws IOException {

  FileContext files = FileContext.getLocalFSFileContext();

  Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());

  files.delete(ws, true);
  Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
  files.mkdir(workSpacePath, null, true);

  LOG.info("create logfile.log");
  Path logfile1 = new Path(workSpacePath, "logfile.log");

  FSDataOutputStream os = files.create(logfile1,
      EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);

  os.flush();
  os.close();
  LOG.info("create logfile1.log");

  Path logfile2 = new Path(workSpacePath, "logfile1.log");

  os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);

  os.flush();
  os.close();

  return workSpacePath;
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestLogalyzer.java

Example 9: main

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
  String uri = "hdfs://localhost:9000/";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(uri), conf);

  FSDataOutputStream out = fs.create(new Path(uri + "hello.txt"));
  out.writeBytes("Hello, World!");
  IOUtils.closeStream(out);
}
 
Developer: aadishgoel2013, Project: Hadoop-Codes, Lines: 12, Source: writing.java
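Note that if create or writeBytes throws here, the stream is never closed. A try-with-resources variant of the same write (same hypothetical URI; FileSystem.newInstance is used so that closing it does not affect a cached shared instance):

Configuration conf = new Configuration();
try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://localhost:9000/"), conf);
     FSDataOutputStream out = fs.create(new Path("/hello.txt"))) {
  out.writeBytes("Hello, World!"); // ASCII-safe; use write(bytes) with a charset for other text
}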

Example 10: testLeaseAfterRenameAndRecreate

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts.  This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test
public void testLeaseAfterRenameAndRecreate() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path path1 = new Path("/test-file");
    final String contents1 = "contents1";
    final Path path2 = new Path("/test-file-new-location");
    final String contents2 = "contents2";

    // open a file to get a lease
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out1 = fs.create(path1);
    out1.writeBytes(contents1);
    Assert.assertTrue(hasLease(cluster, path1));
    Assert.assertEquals(1, leaseCount(cluster));

    DistributedFileSystem fs2 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    fs2.rename(path1, path2);

    FSDataOutputStream out2 = fs2.create(path1);
    out2.writeBytes(contents2);
    out2.close();

    // The first file should still be open and valid
    Assert.assertTrue(hasLease(cluster, path2));
    out1.close();

    // Contents should be as expected
    DistributedFileSystem fs3 = (DistributedFileSystem)
        FileSystem.newInstance(fs.getUri(), fs.getConf());
    Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
    Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: TestLease.java

Example 11: createTempFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private Path createTempFile(String filename, String contents)
    throws IOException {
  Path path = new Path(TEST_ROOT_DIR, filename);
  FSDataOutputStream os = localFs.create(path);
  os.writeBytes(contents);
  os.close();
  localFs.setPermission(path, new FsPermission("700"));
  return path;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestSpeculativeExecution.java

Example 12: createTempFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private Path createTempFile(String filename, String contents)
    throws IOException {
  Path path = new Path(TEST_ROOT_DIR, filename);
  Configuration conf = new Configuration();
  FSDataOutputStream os = FileSystem.getLocal(conf).create(path);
  os.writeBytes(contents);
  os.close();
  return path;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestJobClientGetJob.java

Example 13: testAddBlockUC

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test adding new blocks without closing the corresponding file.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  
  FSDataOutputStream out = null;
  try {
    // append files without closing the streams
    out = fs.append(file1);
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream()).hsync(
        EnumSet.of(SyncFlag.UPDATE_LENGTH));
    
    // restart NN
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION,
        fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestAddBlock.java
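Example 13 depends on hsync with SyncFlag.UPDATE_LENGTH: without it, the NameNode would not record the appended length before the restart. A sketch of the flush/sync ladder, assuming an already-open HDFS output stream out and the same imports as the test above:

out.writeBytes("appended-content");

// hflush: pushes data to all datanodes in the pipeline and makes it
// visible to new readers, but does not guarantee it is on disk
out.hflush();

// hsync: additionally forces the data to disk on each datanode
out.hsync();

// hsync with UPDATE_LENGTH: also updates the file length recorded at the
// NameNode, which is why the restarted NameNode sees the appended bytes
((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));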

Example 14: testDataNodeRedirect

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void testDataNodeRedirect(Path path) throws IOException {
  // Create the file
  if (hdfs.exists(path)) {
    hdfs.delete(path, true);
  }
  FSDataOutputStream out = hdfs.create(path, (short) 1);
  out.writeBytes("0123456789");
  out.close();

  // Get the path's block location so we can determine
  // if we were redirected to the right DN.
  BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10);
  String xferAddr = locations[0].getNames()[0];

  // Connect to the NN to get redirected
  URL u = hftpFs.getNamenodeURL(
      "/data" + ServletUtil.encodePath(path.toUri().getPath()),
      "ugi=userx,groupy");
  HttpURLConnection conn = (HttpURLConnection) u.openConnection();
  HttpURLConnection.setFollowRedirects(true);
  conn.connect();
  conn.getInputStream();

  boolean checked = false;
  // Find the datanode that has the block according to locations
  // and check that the URL was redirected to this DN's info port
  for (DataNode node : cluster.getDataNodes()) {
    DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(node,
        blockPoolId);
    if (dnR.getXferAddr().equals(xferAddr)) {
      checked = true;
      assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
    }
  }
  assertTrue("The test never checked that location of "
      + "the block and hftp desitnation are the same", checked);
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestHftpFileSystem.java

Example 15: testGetPos

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Tests getPos() functionality.
 */
@Test
public void testGetPos() throws IOException {
  final Path testFile = new Path("/testfile+1");
  // Write a test file.
  FSDataOutputStream out = hdfs.create(testFile, true);
  out.writeBytes("0123456789");
  out.close();

  FSDataInputStream in = hftpFs.open(testFile);

  // Test read().
  for (int i = 0; i < 5; ++i) {
    assertEquals(i, in.getPos());
    in.read();
  }

  // Test read(b, off, len).
  assertEquals(5, in.getPos());
  byte[] buffer = new byte[10];
  assertEquals(2, in.read(buffer, 0, 2));
  assertEquals(7, in.getPos());

  // Test read(b).
  int bytesRead = in.read(buffer);
  assertEquals(7 + bytesRead, in.getPos());

  // Test EOF.
  for (int i = 0; i < 100; ++i) {
    in.read();
  }
  assertEquals(10, in.getPos());
  in.close();
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestHftpFileSystem.java


Note: The org.apache.hadoop.fs.FSDataOutputStream.writeBytes method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright in the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.