

Java TestHdfsHelper Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.test.TestHdfsHelper. If you are wondering what TestHdfsHelper is for, how to use it, or where to find working examples, the curated snippets below should help.


The TestHdfsHelper class belongs to the org.apache.hadoop.test package. Thirteen code examples of the class are shown below, sorted by popularity by default.
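
A quick orientation before the examples: TestHdfsHelper is part of Hadoop's HttpFS test harness. Together with the @TestHdfs annotation it starts an embedded HDFS mini-cluster for the duration of a test method and exposes that cluster through static accessors such as TestHdfsHelper.getHdfsConf() and TestHdfsHelper.getHdfsTestDir(). The skeleton below is a minimal sketch distilled from the examples on this page; the class name SkeletonTest is invented for illustration, and the base-class wiring that makes the annotations take effect (an HTestCase-style parent with JUnit rules) is assumed rather than shown.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.junit.Test;

public class SkeletonTest {   // hypothetical class name

  @Test
  @TestDir   // provides a scratch directory on local disk
  @TestHdfs  // boots a mini HDFS cluster for this test method
  public void talksToMiniHdfs() throws Exception {
    // Configuration pointing at the mini cluster started by @TestHdfs
    Configuration conf = TestHdfsHelper.getHdfsConf();
    FileSystem fs = FileSystem.get(conf);
    // Per-test HDFS working directory managed by the helper
    fs.mkdirs(new Path(TestHdfsHelper.getHdfsTestDir(), "scratch"));
  }
}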

Example 1: testGlobFilter

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
  createHttpFSServer(false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
                    MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestHttpFSServer.java

Example 2: testOpenOffsetLength

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
  createHttpFSServer(false);

  byte[] array = new byte[]{0, 1, 2, 3};
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os = fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
                    MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  InputStream is = conn.getInputStream();
  Assert.assertEquals(1, is.read());
  Assert.assertEquals(2, is.read());
  Assert.assertEquals(-1, is.read());
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestHttpFSServer.java

Example 3: testGlobFilter

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
  createHttpFSServer(false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat
      .format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}
 
Developer: hopshadoop, Project: hops, Lines: 22, Source: TestHttpFSServer.java

Example 4: testOpenOffsetLength

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
  createHttpFSServer(false);

  byte[] array = new byte[]{0, 1, 2, 3};
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os = fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat
      .format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",
          user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  InputStream is = conn.getInputStream();
  Assert.assertEquals(1, is.read());
  Assert.assertEquals(2, is.read());
  Assert.assertEquals(-1, is.read());
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: TestHttpFSServer.java

Example 5: testPerms

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
/**
 * Validate that files are created with 755 permissions when no
 * 'permissions' attribute is specified, and when 'permissions'
 * is specified, that value is honored.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPerms() throws Exception {
  createHttpFSServer(false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/perm"));

  createWithHttp("/perm/none", null);
  String statusJson = getStatus("/perm/none", "GETFILESTATUS");
  Assert.assertTrue("755".equals(getPerms(statusJson)));

  createWithHttp("/perm/p-777", "777");
  statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
  Assert.assertTrue("777".equals(getPerms(statusJson)));

  createWithHttp("/perm/p-654", "654");
  statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
  Assert.assertTrue("654".equals(getPerms(statusJson)));

  createWithHttp("/perm/p-321", "321");
  statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
  Assert.assertTrue("321".equals(getPerms(statusJson)));
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestHttpFSServer.java
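
The createWithHttp and getStatus/getPerms helpers used in Example 5 are defined elsewhere in TestHttpFSServer.java and are not reproduced on this page. As a rough, hedged reconstruction of createWithHttp based on the WebHDFS REST API (a PUT with op=CREATE and an optional permission query parameter), it might look like the following; the exact parameter handling in the original may differ. It relies on the same HadoopUsersConfTestHelper and TestJettyHelper helpers as the examples above.

import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;

// Hedged sketch: create a file over HTTP, optionally with explicit permissions.
private void createWithHttp(String filename, String perms) throws Exception {
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  String pathOps;
  if (perms == null) {
    pathOps = MessageFormat.format(
        "/webhdfs/v1{0}?user.name={1}&op=CREATE", filename, user);
  } else {
    pathOps = MessageFormat.format(
        "/webhdfs/v1{0}?user.name={1}&permission={2}&op=CREATE",
        filename, user, perms);
  }
  URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.addRequestProperty("Content-Type", "application/octet-stream");
  conn.setRequestMethod("PUT");
  conn.connect();
  Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
}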

Example 6: fileSystemExecutorNoNameNode

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*")
@TestDir
@TestHdfs
public void fileSystemExecutorNoNameNode() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String services = StringUtils.join(",",
    Arrays.asList(InstrumentationService.class.getName(),
                  SchedulerService.class.getName(),
                  FileSystemAccessService.class.getName()));
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
  createHadoopConf(hadoopConf);

  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  FileSystemAccess fsAccess = server.get(FileSystemAccess.class);

  Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
  hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
  fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
    @Override
    public Void execute(FileSystem fs) throws IOException {
      return null;
    }
  });
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestFileSystemAccessService.java
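
For contrast with the failure path that Examples 6 and 7 exercise (error H06, no NameNode configured), the happy path of FileSystemAccess.execute hands the executor a live FileSystem bound to the configured NameNode and returns the executor's result to the caller. A brief hedged sketch, continuing from the fsAccess variable above and assuming fs.defaultFS is left intact:

// Hedged sketch of the success case: the executor runs against a working
// FileSystem and its return value is passed back to the caller.
Configuration validConf = fsAccess.getFileSystemConfiguration();
Boolean rootExists = fsAccess.execute("u", validConf,
    new FileSystemAccess.FileSystemExecutor<Boolean>() {
      @Override
      public Boolean execute(FileSystem fs) throws IOException {
        return fs.exists(new Path("/"));
      }
    });
Assert.assertTrue(rootExists);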

Example 7: fileSystemExecutorNoNameNode

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Test
@TestException(exception = FileSystemAccessException.class,
    msgRegExp = "H06.*")
@TestDir
@TestHdfs
public void fileSystemExecutorNoNameNode() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  String services = StringUtils.join(",", Arrays
          .asList(InstrumentationService.class.getName(),
              SchedulerService.class.getName(),
              FileSystemAccessService.class.getName()));
  Configuration hadoopConf = new Configuration(false);
  hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      TestHdfsHelper.getHdfsConf()
          .get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
  createHadoopConf(hadoopConf);

  Configuration conf = new Configuration(false);
  conf.set("server.services", services);
  Server server = new Server("server", dir, dir, dir, dir, conf);
  server.init();
  FileSystemAccess fsAccess = server.get(FileSystemAccess.class);

  Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
  hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
  fsAccess.execute("u", hdfsConf,
      new FileSystemAccess.FileSystemExecutor<Void>() {
        @Override
        public Void execute(FileSystem fs) throws IOException {
          return null;
        }
      });
}
 
Developer: hopshadoop, Project: hops, Lines: 34, Source: TestFileSystemAccessService.java

Example 8: getProxiedFSTestDir

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Override
protected Path getProxiedFSTestDir() {
  return TestHdfsHelper.getHdfsTestDir();
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: TestHttpFSWithHttpFSFileSystem.java

Example 9: getProxiedFSURI

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Override
protected String getProxiedFSURI() {
  return TestHdfsHelper.getHdfsConf().get(
    CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: TestHttpFSWithHttpFSFileSystem.java

Example 10: getProxiedFSConf

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
@Override
protected Configuration getProxiedFSConf() {
  return TestHdfsHelper.getHdfsConf();
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: TestHttpFSWithHttpFSFileSystem.java
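
Examples 8, 9, and 10 are three small overrides from the same test class, TestHttpFSWithHttpFSFileSystem.java. Read together, they feed the mini cluster managed by TestHdfsHelper into a parent suite that runs identical file-system assertions against the proxied HttpFS endpoint. A condensed sketch (the parent class name BaseTestHttpFSWith is assumed from Hadoop's test layout):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.TestHdfsHelper;

public class TestHttpFSWithHttpFSFileSystem extends BaseTestHttpFSWith {

  @Override
  protected Path getProxiedFSTestDir() {
    return TestHdfsHelper.getHdfsTestDir();       // per-test HDFS directory
  }

  @Override
  protected String getProxiedFSURI() {            // e.g. hdfs://localhost:port
    return TestHdfsHelper.getHdfsConf().get(
        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  }

  @Override
  protected Configuration getProxiedFSConf() {    // full mini-cluster config
    return TestHdfsHelper.getHdfsConf();
  }
}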

Example 11: testXAttrs

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
/**
 * Validate XAttr get/set/remove calls.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
  final String name1 = "user.a1";
  final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
  final String name2 = "user.a2";
  final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
  final String dir = "/xattrTest";
  final String path = dir + "/file";
  
  createHttpFSServer(false);
  
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  
  createWithHttp(path, null);
  String statusJson = getStatus(path, "GETXATTRS");
  Map<String, byte[]> xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
  
  // Set two xattrs
  putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
  putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(2, xAttrs.size());
  Assert.assertArrayEquals(value1, xAttrs.get(name1));
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  
  // Remove one xattr
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(1, xAttrs.size());
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  
  // Remove another xattr, then there is no xattr
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: TestHttpFSServer.java
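
The putCmd and setXAttrParam helpers in Example 11 are defined elsewhere in TestHttpFSServer.java. Based on the WebHDFS SETXATTR parameters (xattr.name, a hex-encoded xattr.value, and a flag), a hedged reconstruction of setXAttrParam might be:

// Hedged sketch: build the query-string fragment for a SETXATTR call.
// WebHDFS expects the value hex-encoded with a leading "0x"; the exact
// encoding and flag handling in the original helper may differ.
private String setXAttrParam(String name, byte[] value) {
  StringBuilder hex = new StringBuilder("0x");
  for (byte b : value) {
    hex.append(String.format("%02x", b));
  }
  return "xattr.name=" + name + "&xattr.value=" + hex + "&flag=CREATE";
}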

Example 12: testDirAcls

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
 * <ol>
 *   <li>Initial status with no ACLs</li>
 *   <li>The addition of a default ACL</li>
 *   <li>The removal of default ACLs</li>
 * </ol>
 *
 * @throws Exception
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/aclDirTest";
  String statusJson;
  List<String> aclEntries;

  createHttpFSServer(false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));

  /* getfilestatus and liststatus don't have 'aclBit' in their reply */
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));

  /* No ACLs, either */
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(0, aclEntries.size());

  /* Give it a default ACL and verify */
  putCmd(dir, "SETACL", defSpec1);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(5, aclEntries.size());
  /* 4 Entries are default:(user|group|mask|other):perm */
  Assert.assertTrue(aclEntries.contains(defUser1));

  /* Remove the default ACL and re-verify */
  putCmd(dir, "REMOVEDEFAULTACL", null);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(0, aclEntries.size());
}
 
Developer: naver, Project: hadoop, Lines: 55, Source: TestHttpFSServer.java
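
The getAclEntries helper in Example 12 parses the JSON returned by GETACLSTATUS, which WebHDFS documents as {"AclStatus":{"entries":[...],...}}. A hedged sketch using the json-simple parser (commonly on the HttpFS test classpath; the original implementation may differ):

import java.util.ArrayList;
import java.util.List;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

// Hedged sketch: pull the ACL entry strings out of a GETACLSTATUS response,
// e.g. {"AclStatus":{"entries":["default:user:glarch:r-x", ...]}}
private List<String> getAclEntries(String statusJson) throws Exception {
  List<String> entries = new ArrayList<String>();
  JSONObject json = (JSONObject) new JSONParser().parse(statusJson);
  JSONObject aclStatus = (JSONObject) json.get("AclStatus");
  JSONArray jsonEntries = (JSONArray) aclStatus.get("entries");
  if (jsonEntries != null) {
    for (Object entry : jsonEntries) {
      entries.add(entry.toString());
    }
  }
  return entries;
}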

Example 13: createHttpFSServer

import org.apache.hadoop.test.TestHdfsHelper; // import the dependency package/class
private void createHttpFSServer() throws Exception {
  File homeDir = TestDirHelper.getTestDir();
  Assert.assertTrue(new File(homeDir, "conf").mkdir());
  Assert.assertTrue(new File(homeDir, "log").mkdir());
  Assert.assertTrue(new File(homeDir, "temp").mkdir());
  HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());

  File secretFile = new File(new File(homeDir, "conf"), "secret");
  Writer w = new FileWriter(secretFile);
  w.write("secret");
  w.close();

  //HDFS configuration
  File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
  hadoopConfDir.mkdirs();
  String fsDefaultName = TestHdfsHelper.getHdfsConf()
    .get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
  File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
  OutputStream os = new FileOutputStream(hdfsSite);
  conf.writeXml(os);
  os.close();

  conf = new Configuration(false);
  conf.set("httpfs.proxyuser.client.hosts", "*");
  conf.set("httpfs.proxyuser.client.groups", "*");

  conf.set("httpfs.authentication.type", "kerberos");

  conf.set("httpfs.authentication.signature.secret.file",
           secretFile.getAbsolutePath());
  File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
  os = new FileOutputStream(httpfsSite);
  conf.writeXml(os);
  os.close();

  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  URL url = cl.getResource("webapp");
  WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
  Server server = TestJettyHelper.getJettyServer();
  server.addHandler(context);
  server.start();
  HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 46, Source: TestHttpFSWithKerberos.java


Note: The org.apache.hadoop.test.TestHdfsHelper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.