

Java EditLogFileOutputStream.setShouldSkipFsyncForTesting Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream.setShouldSkipFsyncForTesting. If you have been wondering what exactly EditLogFileOutputStream.setShouldSkipFsyncForTesting does, how to call it, or how it is used in practice, the curated code examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream.


A total of 14 code examples of EditLogFileOutputStream.setShouldSkipFsyncForTesting are shown below, sorted by popularity by default.
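The method itself is a static test hook on HDFS's EditLogFileOutputStream: passing true makes edit-log writes skip the fsync system call, which significantly speeds up MiniDFSCluster-based tests at the cost of durability guarantees that a throwaway test cluster does not need. Because the flag is global static state, it must be set before the cluster starts, which is why every example below calls it from a @Before or @BeforeClass method. The following is a minimal, self-contained sketch of that common pattern; the test class name and target directory are illustrative and not taken from any of the listed projects.

import java.io.File;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniDfsSkipFsyncSketchIT { // illustrative class name

  private static MiniDFSCluster miniDFS;

  @BeforeClass
  public static void setUpClass() throws Exception {
    // Skip fsync on edit-log writes: a test-only speedup, safe because
    // the mini cluster's data is disposable.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Illustrative build directory for the cluster's data.
    File minidfsDir = new File("target/minidfs-sketch").getAbsoluteFile();
    Assert.assertTrue(minidfsDir.exists() || minidfsDir.mkdirs());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());

    miniDFS = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    miniDFS.waitClusterUp();
  }

  @AfterClass
  public static void tearDownClass() {
    if (miniDFS != null) {
      miniDFS.shutdown();
    }
  }

  @Test
  public void canWriteToMiniCluster() throws Exception {
    FileSystem fs = miniDFS.getFileSystem();
    Path path = new Path("/sketch.txt");
    fs.create(path).close();
    Assert.assertTrue(fs.exists(path));
  }
}

Shutting the cluster down in @AfterClass matters in larger suites, since a leaked cluster keeps ports and data directories in use; the fsync flag itself normally does not need to be reset, as each forked test JVM starts with fresh static state.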

Example 1: setUp

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{"all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 21, Source: HdfsDestinationPipelineRunIT.java

Example 2: beforeClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void beforeClass() throws Exception {
  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{ "all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  PipelineOperationsStandaloneIT.beforeClass(getPipelineJson());
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 20, Source: HdfsDestinationPipelineOperationsIT.java

Example 3: setUpClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setUpClass() throws Exception {
  //setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
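  // Set the minidfs dir to mode 0704: owner read/write/execute, group none, others read-only.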
  Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  fooUgi = UserGroupInformation.createUserForTesting("foo", new String[]{ "all"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  FileSystem.closeAll();
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 27, Source: BaseHdfsTargetIT.java

Example 4: setUpBeforeClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
  assertTrue(minidfsDir.mkdirs());
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dir = new Path(miniDFS.getURI()+"/dir");
  FileSystem fs = miniDFS.getFileSystem();
  fs.mkdirs(dir);
  writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
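  // Write a dummy Hadoop conf dir containing minimal (empty-config) *-site.xml files.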
  dummyEtc = new File(minidfsDir, "dummy-etc");
  assertTrue(dummyEtc.mkdirs());
  Configuration dummyConf = new Configuration(false);
  for (String file : new String[]{"core", "hdfs", "mapred", "yarn"}) {
    File siteXml = new File(dummyEtc, file + "-site.xml");
    FileOutputStream out = new FileOutputStream(siteXml);
    dummyConf.writeXml(out);
    out.close();
  }
  resourcesDir = minidfsDir.getAbsolutePath();
  hadoopConfDir = dummyEtc.getName();
  System.setProperty("sdc.resources.dir", resourcesDir);;
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 27, Source: ClusterHDFSSourceIT.java

Example 5: setupTest

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.setCacheManipulator(tracker);
  
  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source: TestCachingStrategy.java

Example 6: startMiniDFSCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
/**
 * Start a minidfscluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param hosts Hostnames for the DNs to run on.
 * @throws Exception
 * @see #shutdownMiniDFSCluster()
 * @return The mini dfs cluster created.
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);


  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
    true, null, null, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 36, Source: HBaseTestingUtility.java

Example 7: startMiniDFSCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
/**
 * Start a minidfscluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param hosts Hostnames for the DNs to run on.
 * @throws Exception
 * @see #shutdownMiniDFSCluster()
 * @return The mini dfs cluster created.
 */
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);


  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
    true, null, null, hosts, null);

  // Set this just-started cluster as our filesystem.
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;

  return this.dfsCluster;
}
 
Developer ID: grokcoder, Project: pbase, Lines of code: 37, Source: HBaseTestingUtility.java

Example 8: beforeClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void beforeClass() throws Exception {
  //setup kafka to read from
  KafkaTestUtil.startZookeeper();
  KafkaTestUtil.startKafkaBrokers(1);
  KafkaTestUtil.createTopic(TOPIC, 1, 1);
  producer = KafkaTestUtil.createProducer(KafkaTestUtil.getMetadataBrokerURI(), true);
  produceRecords(RECORDS_PRODUCED);

  // setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File("target/minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  UserGroupInformation.createUserForTesting("foo", new String[]{"all", "supergroup"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();

  //setup Cluster and start pipeline
  YarnConfiguration entries = new YarnConfiguration();
  //TODO: Investigate why this is required for test to pass. Is yarn messing with the miniDFS cluster configuration?
  entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  entries.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  ClusterUtil.setupCluster(TEST_NAME, getPipelineJson(), entries);
  serverURI = ClusterUtil.getServerURI();
  miniSDC = ClusterUtil.getMiniSDC();
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 35, Source: KafkaToHDFSIT.java

Example 9: setUpClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setUpClass() throws Exception {
  // Conf dir
  new File(confDir).mkdirs();

  //setting some dummy kerberos settings to be able to test a mis-setting
  System.setProperty("java.security.krb5.realm", "foo");
  System.setProperty("java.security.krb5.kdc", "localhost:0");

  File minidfsDir = new File(baseDir, "minidfs").getAbsoluteFile();
  if (!minidfsDir.exists()) {
    Assert.assertTrue(minidfsDir.mkdirs());
  }
  Set<PosixFilePermission> set = new HashSet<>();
  set.add(PosixFilePermission.OWNER_EXECUTE);
  set.add(PosixFilePermission.OWNER_READ);
  set.add(PosixFilePermission.OWNER_WRITE);
  set.add(PosixFilePermission.OTHERS_READ);
  java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
  Configuration conf = new HdfsConfiguration();
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
  conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
  conf.set("dfs.namenode.acls.enabled", "true");
  UserGroupInformation fooUgi = UserGroupInformation.createUserForTesting("foo", new String[]{"all"});
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  FileSystem.closeAll();
  miniDFS = new MiniDFSCluster.Builder(conf).build();
  miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short)0777));
  fs = miniDFS.getFileSystem();
  writeConfiguration(miniDFS.getConfiguration(0), confDir + "core-site.xml");
  writeConfiguration(miniDFS.getConfiguration(0), confDir + "hdfs-site.xml");
}
 
Developer ID: streamsets, Project: datacollector, Lines of code: 34, Source: HdfsMetadataExecutorIT.java

Example 10: startMiniDFSCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
public MiniDFSCluster startMiniDFSCluster(int servers, final  String racks[], String hosts[])
    throws Exception {
  createDirsAndSetProperties();
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Error level to skip some warnings specific to the minicluster. See HBASE-4709
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
      setLevel(org.apache.log4j.Level.ERROR);
  org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
      setLevel(org.apache.log4j.Level.ERROR);

  TraceUtil.initTracer(conf);

  this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

  // Set this just-started cluster as our filesystem.
  setFs();

  // Wait for the cluster to be totally up
  this.dfsCluster.waitClusterUp();

  //reset the test directory for test file system
  dataTestDirOnTestFS = null;
  String dataTestDir = getDataTestDir().toString();
  conf.set(HConstants.HBASE_DIR, dataTestDir);
  LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

  return this.dfsCluster;
}
 
Developer ID: apache, Project: hbase, Lines of code: 31, Source: HBaseTestingUtility.java

Example 11: setupTest

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setupTest() {
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

  // Track calls to posix_fadvise.
  NativeIO.POSIX.cacheTracker = tracker;
  
  // Normally, we wait for a few megabytes of data to be read or written 
  // before dropping the cache.  This is to avoid an excessive number of
  // JNI calls to the posix_fadvise function.  However, for the purpose
  // of this test, we want to use small files and see all fadvise calls
  // happen.
  BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
  BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
 
Developer ID: chendave, Project: hadoop-TCP, Lines of code: 16, Source: TestCachingStrategy.java

Example 12: setupDFSCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setupDFSCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());
  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}
 
Developer ID: apache, Project: tez, Lines of code: 13, Source: TestSecureShuffle.java

Example 13: setupClass

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setupClass() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  dfsCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = dfsCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());

  setupTezCluster();
}
 
Developer ID: apache, Project: tez, Lines of code: 14, Source: TestAnalyzer.java

Example 14: setupCluster

import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //import the package/class on which the method depends
@BeforeClass
public static void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
  miniDFSCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  fs = miniDFSCluster.getFileSystem();
  conf.set("fs.defaultFS", fs.getUri().toString());

  setupTezCluster();
}
 
Developer ID: apache, Project: tez, Lines of code: 14, Source: TestHistoryParser.java


Note: The org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream.setShouldSkipFsyncForTesting method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.