

Java EditLogFileOutputStream Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream. If you have been wondering what exactly EditLogFileOutputStream does, how to use it, or where to find working examples, the curated code examples below should help.


The EditLogFileOutputStream class belongs to the org.apache.hadoop.hdfs.server.namenode package. Fifteen code examples of the class are shown below, sorted by popularity by default.
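
Before the individual examples, the following sketch summarizes the lifecycle that recurs throughout this page: construct the stream on an edits file, call create() to write the header and open the segment, write edits, then setReadyToFlush() and flush() before closing. This is a minimal illustrative sketch rather than code from any of the projects below; the helper name editLogLifecycleSketch is hypothetical, and every call mirrors a usage shown in Examples 3, 5, and 12 through 15, including the (File, null) constructor used by the hadoop-EAR examples.

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
import java.io.File;
import java.io.IOException;

// Minimal lifecycle sketch (hypothetical helper, not from a real project).
public static void editLogLifecycleSketch(File edits) throws IOException {
  // Tests commonly skip fsync so mini clusters run faster (see Examples 5-11).
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // The (File, null) constructor mirrors Examples 3, 12 and 13.
  EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
  out.create(); // write the edit log header and open the segment
  try {
    // ... write FSEditLogOp records here, as in Example 14 ...
    out.setReadyToFlush(); // swap the double buffer so buffered edits can be synced
    out.flush();           // sync the buffered edits to disk
  } finally {
    out.close();
  }
}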

Example 1: createEmptyInProgressEditLog

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
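// Creates an empty in-progress edits file for the next transaction id in the
// cluster's shared edits directory, optionally writing the edit log header.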
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestHAStateTransitions.java

Example 2: testOneOperation

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
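// Writes a single synced operation and verifies that the log reader delivers
// exactly one notification carrying txId 1000.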
@Test
public void testOneOperation() throws Exception {
  File editsDir = createEditsDir();
  DummyServerCore core = new DummyServerCore();
  EditLogFileOutputStream out = initEdits(editsDir);
  ServerLogReaderPreTransactional logReader = new ServerLogReaderPreTransactional(core,
      Util.stringAsURI(editsDir.getAbsolutePath()));
  core.logReader = logReader;
  Thread coreThread, logReaderThread;
  
  coreThread = new Thread(core);
  logReaderThread = new Thread(logReader);
  
  logReaderThread.start();
  coreThread.start();
  writeOperation(out, 1000, true);
  Thread.sleep(500);
  core.shutdown();
  logReaderThread.join();
  coreThread.join();
  
  Assert.assertEquals(1, core.notifications.size());
  Assert.assertEquals(1000, core.notifications.poll().txId);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 25, Source: TestPreTransactionalServerLogReader.java

Example 3: testGetInputStreamNoValidationNoCheckLastTxId

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
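// Verifies that a BookKeeper journal stream and a file-based edit log stream
// containing the same 100 transactions read back as equivalent, without
// validating or checking the last txId.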
@Test
public void testGetInputStreamNoValidationNoCheckLastTxId() throws Exception {
  setupTest("test-get-input-stream-no-validation-no-check-last-txid");
  File tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "test-get-input-stream-with-validation");
  try {
    EditLogOutputStream bkeos = bkjm.startLogSegment(1);
    EditLogOutputStream elfos =
        new EditLogFileOutputStream(tempEditsFile, null);
    elfos.create();
    FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
    EditLogInputStream bkeis =
        getJournalInputStreamDontCheckLastTxId(bkjm, 1);
    EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
    Map<String, EditLogInputStream> streamByName =
        ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
    FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
  } finally {
    if (!tempEditsFile.delete()) {
      LOG.warn("Unable to delete edits file: " +
          tempEditsFile.getAbsolutePath());
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 25, Source: TestBookKeeperJournalManager.java

Example 4: createEmptyInProgressEditLog

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(out);
    out.close();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 19, Source: TestHAStateTransitions.java

Example 5: setUp

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  // set some dummy Kerberos properties so that a misconfiguration can be tested
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{"all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
}
 
Developer: streamsets, Project: datacollector, Lines: 21, Source: HdfsDestinationPipelineRunIT.java

Example 6: beforeClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@BeforeClass
public static void beforeClass() throws Exception {
  // set some dummy Kerberos properties so that a misconfiguration can be tested
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{ "all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  PipelineOperationsStandaloneIT.beforeClass(getPipelineJson());
}
 
Developer: streamsets, Project: datacollector, Lines: 20, Source: HdfsDestinationPipelineOperationsIT.java

Example 7: setUpClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@BeforeClass
public static void setUpClass() throws Exception {
  // set some dummy Kerberos properties so that a misconfiguration can be tested
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  fooUgi = UserGroupInformation.createUserForTesting("foo", new String[]{ "all"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  FileSystem.closeAll();
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
}
 
Developer: streamsets, Project: datacollector, Lines: 27, Source: BaseHdfsTargetIT.java

Example 8: setUpBeforeClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
  assertTrue(minidfsDir.mkdirs());
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dir = new Path(miniDFS.getURI()+"/dir");
  FileSystem fs = miniDFS.getFileSystem();
  fs.mkdirs(dir);
  writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
  dummyEtc = new File(minidfsDir, "dummy-etc");
  assertTrue(dummyEtc.mkdirs());
  Configuration dummyConf = new Configuration(false);
  for (String file : new String[]{"core", "hdfs", "mapred", "yarn"}) {
    File siteXml = new File(dummyEtc, file + "-site.xml");
    FileOutputStream out = new FileOutputStream(siteXml);
    dummyConf.writeXml(out);
    out.close();
  }
  resourcesDir = minidfsDir.getAbsolutePath();
  hadoopConfDir = dummyEtc.getName();
  System.setProperty("sdc.resources.dir", resourcesDir);;
}
 
Developer: streamsets, Project: datacollector, Lines: 27, Source: ClusterHDFSSourceIT.java

Example 9: setUp

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@Before
public void setUp() throws Exception {
  admin = UserGroupInformation.createUserForTesting(
      System.getProperty("user.name"), new String[] { "supergroup" });
  admin.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
      Configuration conf = new HdfsConfiguration();
      conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
      conf.set(DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
          MockSentryAuthorizationProvider.class.getName());
      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
      EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      miniDFS = new MiniDFSCluster.Builder(conf).build();
      return null;
    }
  });
}
 
Developer: apache, Project: incubator-sentry, Lines: 20, Source: TestSentryAuthorizationProvider.java

Example 10: setupTest

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);
  
  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestCachingStrategy.java

Example 11: startMiniDFSCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
/**
 * Start a MiniDFSCluster.
 * Only one can be created at a time.
 * @param servers how many DNs to start.
 * @param hosts hostnames for the DNs to run on.
 * @return the MiniDFSCluster that was created.
 * @throws Exception
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);


  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
    true, null, null, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: HBaseTestingUtility.java

Example 12: initEdits

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
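// Creates fresh "edits" and "fstime" files (resolved via getFileWithCurrent)
// and returns an edit log output stream opened on the edits file.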
public static EditLogFileOutputStream initEdits(File editsDir) throws IOException {
  File edits = getFileWithCurrent(editsDir, "edits");
  File fstime = getFileWithCurrent(editsDir, "fstime");

  if (!edits.createNewFile())
    throw new IOException("Failed to create edits file");
  EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
  out.create();
  if (!fstime.createNewFile())
    throw new IOException("Failed to create fstime file");
  
  return out;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 14, Source: TestPreTransactionalServerLogReader.java

Example 13: beginRoll

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
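// Simulates the start of an edit log roll: closes the current output stream
// and opens a new stream on "edits.new".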
private EditLogFileOutputStream beginRoll(File editsDir,
    EditLogFileOutputStream editsOutput)
        throws IOException {
  File editsNew = getFileWithCurrent(editsDir, "edits.new");

  editsOutput.close();
  if (!editsNew.createNewFile())
    throw new IOException("Failed to create edits.new file");
  EditLogFileOutputStream out = new EditLogFileOutputStream(editsNew, null);
  out.create();
  Assert.assertTrue(editsNew.exists());

  return out;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 15, Source: TestPreTransactionalServerLogReader.java

Example 14: writeOperation

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
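// Writes a single AddOp with the given transaction id, flushing whenever the
// txId is a multiple of 10 or forceSync is set.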
private void writeOperation(EditLogFileOutputStream out,
    long txId, boolean forceSync) throws IOException {
  FSEditLogOp.AddOp op = FSEditLogOp.AddOp.getUniqueInstance();
  op.setTransactionId(txId);
  op.set(INodeId.GRANDFATHER_INODE_ID, "/a/b", (short)3, 100L, 100L, 100L, new BlockInfo[0],
      PermissionStatus.createImmutable("x", "y", FsPermission.getDefault()),
      "x", "y");
  out.write(op);
  LOG.info("Wrote operation " + txId);
  if (txId % 10 == 0 || forceSync) {
    out.setReadyToFlush();
    out.flush();
    LOG.info("Flushed operation " + txId);
  }  
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 16, Source: TestPreTransactionalServerLogReader.java

Example 15: testMultipleOperations

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; // import the required package/class
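// Writes 1000 unsynced operations, flushes once at the end, and verifies that
// every transaction is observed in order.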
@Test
public void testMultipleOperations() throws Exception {
  File editsDir = createEditsDir();
  DummyServerCore core = new DummyServerCore();
  EditLogFileOutputStream out = initEdits(editsDir);
  ServerLogReaderPreTransactional logReader = new ServerLogReaderPreTransactional(core,
      Util.stringAsURI(editsDir.getAbsolutePath()));
  core.logReader = logReader;
  Thread coreThread, logReaderThread;
  long txCount = 1000;
  
  coreThread = new Thread(core);
  logReaderThread = new Thread(logReader);
  
  logReaderThread.start();
  coreThread.start();
  for (long txId = 0; txId < txCount; txId++) {
    writeOperation(out, txId, false);
  }
  
  // flush
  out.setReadyToFlush();
  out.flush();
  
  Thread.sleep(500);
  core.shutdown();
  logReaderThread.join();
  coreThread.join();
  
  Assert.assertEquals(txCount, core.notifications.size());
  for (long txId = 0; txId < txCount; txId++)
    Assert.assertEquals(txId, core.notifications.poll().txId);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 34, Source: TestPreTransactionalServerLogReader.java


Note: The org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and any distribution or use must comply with the corresponding project's license. Do not reproduce without permission.