

Java NamenodeWebHdfsMethods Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods. If you are wondering what NamenodeWebHdfsMethods is for, how to use it, or what working code looks like, the selected examples below may help.


The NamenodeWebHdfsMethods class belongs to the org.apache.hadoop.hdfs.server.namenode.web.resources package. Twelve code examples of the class are shown below, ordered by popularity by default.
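Before the individual examples, here is a minimal sketch (written for illustration only, not taken from any single project below) of the two ways the class appears in the snippets: its package is registered as a Jersey resource package on the NameNode's HttpServer2 so that the WebHDFS REST API is served, and its static accessors report where the current WebHDFS request came from. The helper method names here are hypothetical; see Examples 1 through 4 for the real code in context.

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.http.HttpServer2;

// Hypothetical helper: register the WebHDFS REST resources on an existing HttpServer2.
private void registerWebHdfs(HttpServer2 httpServer) {
  final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; // the WebHDFS URL prefix
  httpServer.addJerseyResourcePackage(
      NamenodeWebHdfsMethods.class.getPackage().getName()
          + ";" + Param.class.getPackage().getName(),
      pathSpec);
}

// Hypothetical helper: where did the current request come from?
// The accessor returns null when the call did not arrive via WebHDFS
// (Examples 3 and 4 show the same check inside the NameNode).
private String webHdfsClientAddress() {
  return NamenodeWebHdfsMethods.getRemoteAddress();
}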

Example 1: initWebHdfs

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: NameNodeHttpServer.java

Example 2: initWebHdfs

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
private void initWebHdfs(Configuration conf) throws IOException {
  // set user pattern based on configuration file
  UserParam.setUserPattern(conf.get(
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  // add authentication filter for webhdfs
  final String className = conf.get(
      DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
      DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
  final String name = className;

  final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
  Map<String, String> params = getAuthFilterParams(conf);
  HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
      params, new String[] { pathSpec });
  HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
      + ")");

  // add webhdfs packages
  httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
      .getPackage().getName() + ";" + Param.class.getPackage().getName(),
      pathSpec);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 25, Source file: NameNodeHttpServer.java

Example 3: getRemoteIp

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
private static InetAddress getRemoteIp() {
  InetAddress ip = Server.getRemoteIp();
  if (ip != null) {
    return ip;
  }
  return NamenodeWebHdfsMethods.getRemoteIp();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: FSNamesystem.java

Example 4: getClientMachine

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
private static String getClientMachine() {
  String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
  if (clientMachine == null) { //not a web client
    clientMachine = Server.getRemoteAddress();
  }
  if (clientMachine == null) { //not a RPC client
    clientMachine = "";
  }
  return clientMachine;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: NameNodeRpcServer.java

Example 5: testNamenodeRestart

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: TestWebHDFS.java

Example 6: testDelegationTokenWebHdfsApi

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestDelegationToken.java

Example 7: testNamenodeRestart

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 8, Source file: TestWebHDFS.java

Example 8: testDelegationTokenWebHdfsApi

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
  final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 28, Source file: TestDelegationToken.java

Example 9: setLogLevel

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
static private void setLogLevel() {
  ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 9, Source file: TestWebHdfsWithMultipleNameNodes.java

Example 10: setLogLevel

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
static private void setLogLevel() {
  ((Log4JLogger) LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);

  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.OFF);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger()
      .setLevel(Level.OFF);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 10, Source file: TestWebHdfsWithMultipleNameNodes.java

Example 11: testNamenodeRestart

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
/**
 * Test client retry with namenode restarting.
 */
@Test(timeout = 900000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 10, Source file: TestWebHDFS.java

Example 12: testDelegationTokenWebHdfsApi

import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; //import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger) NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://" +
      config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation
      .createUserForTesting("JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs =
      ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
            @Override
            public WebHdfsFileSystem run() throws Exception {
              return (WebHdfsFileSystem) FileSystem.get(new URI(uri), config);
            }
          });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] =
        webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] =
        webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 31, Source file: TestDelegationToken.java


Note: The org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; the source code copyright remains with those authors, and any redistribution or use must follow the corresponding project's license. Please do not reproduce this article without permission.