

Java FSImageTestUtil.findLatestImageFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.findLatestImageFile. If you are wondering how FSImageTestUtil.findLatestImageFile is used in practice, or looking for concrete examples of calling it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.


The following presents 13 code examples of FSImageTestUtil.findLatestImageFile, ordered by popularity.
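
Across all of the examples below, findLatestImageFile is invoked with the same pattern: the test obtains the NameNode's FSImage via FSImageTestUtil.getFSImage, takes its first storage directory, and asks findLatestImageFile for the most recent fsimage file in that directory (the examples treat a null return as "no fsimage found"). The condensed sketch below is distilled from those examples; it assumes a running MiniDFSCluster named cluster, and the helper name latestFsimage is purely illustrative.

import java.io.File;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;

/** Condensed sketch: locate the newest fsimage written by a MiniDFSCluster's NameNode. */
static File latestFsimage(MiniDFSCluster cluster) {
  // Use the NameNode's first configured storage directory, as the examples below do.
  return FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(cluster.getNameNode())
          .getStorage().getStorageDir(0));
}

The tests then assert that the result is non-null ("Didn't generate or can't find fsimage") before handing the file to the offline image viewer.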

Example 1: testOfflineImageViewer

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test if the OfflineImageViewerPB can correctly parse a fsimage containing
 * snapshots
 */
@Test
public void testOfflineImageViewer() throws Exception {
  runTestSnapshot(1);
  
  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
  v.visit(new RandomAccessFile(originalFsimage, "r"));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 20, Source: TestSnapshot.java

Example 2: testOfflineImageViewer

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test if the OfflineImageViewer can correctly parse a fsimage containing
 * snapshots
 */
@Test
public void testOfflineImageViewer() throws Exception {
  runTestSnapshot(1);
  
  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
  v.visit(new RandomAccessFile(originalFsimage, "r"));
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 20, Source: TestSnapshot.java

Example 3: testOfflineImageViewer

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test if the OfflineImageViewerPB can correctly parse a fsimage containing
 * snapshots
 */
@Test
public void testOfflineImageViewer() throws Exception {
  runTestSnapshot(1);
  
  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  PrintStream o = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
  v.visit(new RandomAccessFile(originalFsimage, "r"));
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestSnapshot.java

Example 4: testOfflineImageViewerOnEncryptionZones

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test running the OfflineImageViewer on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testOfflineImageViewerOnEncryptionZones() throws Exception {
  final int len = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  fs.saveNamespace();

  File originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
      .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
  if (originalFsimage == null) {
    throw new RuntimeException("Didn't generate or can't find fsimage");
  }

  // Run the XML OIV processor
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  PrintStream pw = new PrintStream(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
  v.visit(new RandomAccessFile(originalFsimage, "r"));
  final String xml = output.toString();
  SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
  parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestEncryptionZones.java

Example 5: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Create a populated namespace for later testing. Save its contents to a data
 * structure and store its fsimage location. We only want to generate the
 * fsimage file once and use it for multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    // Create a name space with XAttributes
    Path dir = new Path("/dir1");
    hdfs.mkdirs(dir);
    hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
    hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    List<XAttr> attributes = new ArrayList<XAttr>();
    attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));

    attr1JSon = JsonUtil.toJsonString(attributes, null);

    attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));

    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 43, Source: TestOfflineImageViewerForXAttr.java

Example 6: testOfflineImageViewerOnEncryptionZones

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test running the OfflineImageViewer on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testOfflineImageViewerOnEncryptionZones() throws Exception {
  final int len = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  fs.saveNamespace();

  File originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
      .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
  if (originalFsimage == null) {
    throw new RuntimeException("Didn't generate or can't find fsimage");
  }

  // Run the XML OIV processor
  StringWriter output = new StringWriter();
  PrintWriter pw = new PrintWriter(output);
  PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
  v.visit(new RandomAccessFile(originalFsimage, "r"));
  final String xml = output.getBuffer().toString();
  SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
  parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 31, Source: TestEncryptionZones.java

Example 7: testOfflineImageViewer

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Test if the OfflineImageViewer can correctly parse a fsimage containing
 * snapshots
 */
@Test
public void testOfflineImageViewer() throws Throwable {
  runTestSnapshot(SNAPSHOT_ITERATION_NUMBER);
  
  // retrieve the fsimage. Note that we already save namespace to fsimage at
  // the end of each iteration of runTestSnapshot.
  File originalFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(
      cluster.getNameNode()).getStorage().getStorageDir(0));
  assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
  
  String ROOT = System.getProperty("test.build.data", "build/test/data");
  File testFile = new File(ROOT, "/image");
  String xmlImage = ROOT + "/image_xml";
  boolean success = false;
  
  try {
    DFSTestUtil.copyFile(originalFsimage, testFile);
    XmlImageVisitor v = new XmlImageVisitor(xmlImage, true);
    OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v,
        true);
    oiv.go();
    success = true;
  } finally {
    if (testFile.exists()) {
      testFile.delete();
    }
    // delete the xml file if the parsing is successful
    if (success) {
      File xmlImageFile = new File(xmlImage);
      if (xmlImageFile.exists()) {
        xmlImageFile.delete();
      }
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 41, Source: TestSnapshot.java

Example 8: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    //Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 81, Source: TestOfflineImageViewer.java

Example 9: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Create a populated namespace for later testing. Save its contents to a
 * data structure and store its fsimage location. We only want to generate
 * the fsimage file once and use it for multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path parentDir = new Path("/parentDir");
    Path childDir1 = new Path(parentDir, "childDir1");
    Path childDir2 = new Path(parentDir, "childDir2");
    Path dirForLinks = new Path("/dirForLinks");
    hdfs.mkdirs(parentDir);
    hdfs.mkdirs(childDir1);
    hdfs.mkdirs(childDir2);
    hdfs.mkdirs(dirForLinks);
    hdfs.setQuota(parentDir, 10, 1024*1024*1024);

    Path file1OnParentDir = new Path(parentDir, "file1");
    try (FSDataOutputStream o = hdfs.create(file1OnParentDir)) {
      o.write("123".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file2"))) {
      o.write("1234".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(childDir1, "file3"))) {
      o.write("123".getBytes());
    }
    try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file4"))) {
      o.write("123".getBytes());
    }
    Path link1 = new Path("/link1");
    Path link2 = new Path("/dirForLinks/linkfordir1");
    hdfs.createSymlink(new Path("/parentDir/file4"), link1, true);
    summaryFromDFS = hdfs.getContentSummary(parentDir);
    emptyDirSummaryFromDFS = hdfs.getContentSummary(childDir2);
    fileSummaryFromDFS = hdfs.getContentSummary(file1OnParentDir);
    symLinkSummaryFromDFS = hdfs.getContentSummary(link1);
    hdfs.createSymlink(childDir1, link2, true);
    symLinkSummaryForDirContainsFromDFS = hdfs.getContentSummary(new Path(
        "/dirForLinks"));
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();
    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 63, Source: TestOfflineImageViewerForContentSummary.java

Example 10: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
/**
 * Create a populated namespace for later testing. Save its contents to a
 * data structure and store its fsimage location.
 * We only want to generate the fsimage file once and use it for
 * multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace with ACLs
    Path dir = new Path("/dirWithNoAcl");
    hdfs.mkdirs(dir);
    writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));

    dir = new Path("/dirWithDefaultAcl");
    hdfs.mkdirs(dir);
    hdfs.setAcl(dir, Lists.newArrayList(
        aclEntry(DEFAULT, USER, ALL),
        aclEntry(DEFAULT, USER, "foo", ALL),
        aclEntry(DEFAULT, GROUP, READ_EXECUTE),
        aclEntry(DEFAULT, OTHER, NONE)));
    writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));

    Path file = new Path("/noAcl");
    FSDataOutputStream o = hdfs.create(file);
    o.write(23);
    o.close();
    writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

    file = new Path("/withAcl");
    o = hdfs.create(file);
    o.write(23);
    o.close();
    hdfs.setAcl(file, Lists.newArrayList(
        aclEntry(ACCESS, USER, READ_WRITE),
        aclEntry(ACCESS, USER, "foo", READ),
        aclEntry(ACCESS, GROUP, READ),
        aclEntry(ACCESS, OTHER, NONE)));
    writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

    file = new Path("/withSeveralAcls");
    o = hdfs.create(file);
    o.write(23);
    o.close();
    hdfs.setAcl(file, Lists.newArrayList(
        aclEntry(ACCESS, USER, READ_WRITE),
        aclEntry(ACCESS, USER, "foo", READ_WRITE),
        aclEntry(ACCESS, USER, "bar", READ),
        aclEntry(ACCESS, GROUP, READ),
        aclEntry(ACCESS, GROUP, "group", READ),
        aclEntry(ACCESS, OTHER, NONE)));
    writtenAcls.put(file.toString(), hdfs.getAclStatus(file));

    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 77, Source: TestOfflineImageViewerForAcl.java

Example 11: testFileSize

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
private void testFileSize(int numBytes) throws IOException,
    UnresolvedLinkException, SnapshotAccessControlException {
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  File orgFsimage = null;
  Path file = new Path("/eczone/striped");
  FSDataOutputStream out = fs.create(file, true);
  byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
  out.write(bytes);
  out.close();

  // Write results to the fsimage file
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  fs.saveNamespace();

  // Determine location of fsimage file
  orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
      .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
  if (orgFsimage == null) {
    throw new RuntimeException("Didn't generate or can't find fsimage");
  }
  FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
  String fileStatus = loader.getFileStatus("/eczone/striped");
  long expectedFileSize = bytes.length;

  // Verify space consumed present in BlockInfoStriped
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
  assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
  long actualFileSize = 0;
  for (BlockInfo blockInfo : fileNode.getBlocks()) {
    assertTrue("Didn't find block striped information",
        blockInfo instanceof BlockInfoStriped);
    actualFileSize += blockInfo.getNumBytes();
  }

  assertEquals("Wrongly computed file size contains striped blocks",
      expectedFileSize, actualFileSize);

  // Verify space consumed present in filestatus
  String EXPECTED_FILE_SIZE = "\"length\":"
      + String.valueOf(expectedFileSize);
  assertTrue(
      "Wrongly computed file size contains striped blocks, file status:"
          + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE,
      fileStatus.contains(EXPECTED_FILE_SIZE));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 47, Source: TestOfflineImageViewerWithStripedBlocks.java

Example 12: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    cluster.waitActive();
    FileSystem hdfs = cluster.getFileSystem();
    
    int filesize = 256;
    
    // Create a reasonable namespace 
    for(int i = 0; i < NUM_DIRS; i++)  {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for(int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(new byte[ filesize++ ]);
        o.close();
        
        writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = 
        hdfs.addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    // Write results to the fsimage file
    cluster.getNameNodeRpc()
        .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    
    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(
        FSImageTestUtil.getFSImage(
        cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if(cluster != null)
      cluster.shutdown();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 57, Source: TestOfflineImageViewer.java

Example 13: createOriginalFSImage

import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; // import the package/class that this method depends on
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 63, Source: TestOfflineImageViewer.java


Note: the org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.findLatestImageFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; the source code copyright remains with those authors, and distribution and use must follow each project's license. Do not reproduce without permission.