

Java SWebHdfsFileSystem Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.web.SWebHdfsFileSystem. If you are wondering what SWebHdfsFileSystem does or how to use it, the curated class examples below should help.


The SWebHdfsFileSystem class belongs to the org.apache.hadoop.hdfs.web package. Twelve code examples are shown below, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Java code samples.
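To ground the examples that follow, here is a minimal, self-contained sketch of obtaining an SWebHdfsFileSystem through the standard FileSystem factory. The hostname and port are hypothetical, and a trust store covering the NameNode's TLS certificate is assumed to be configured:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SWebHdfsQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "swebhdfs" is the SSL (HTTPS) variant of the "webhdfs" scheme;
    // FileSystem.get resolves it to SWebHdfsFileSystem.
    URI uri = URI.create("swebhdfs://namenode.example.com:9871"); // hypothetical host/port
    try (FileSystem fs = FileSystem.get(uri, conf)) {
      for (FileStatus st : fs.listStatus(new Path("/"))) {
        System.out.println(st.getPath());
      }
    }
  }
}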

Example 1: init

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
/**
 * Initializes the service.
 *
 * @throws ServiceException thrown if the service could not be initialized.
 */
@Override
protected void init() throws ServiceException {

  long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
  long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
  long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
  tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
              ? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
  secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
                                                   maxLifetime,
                                                   renewInterval, HOUR);
  try {
    secretManager.startThreads();
  } catch (IOException ex) {
    throw new ServiceException(ServiceException.ERROR.S12,
                               DelegationTokenManager.class.getSimpleName(),
                               ex.toString(), ex);
  }
}
 
Contributor: Seagate, Project: hadoop-on-lustre2, Lines: 25, Source: DelegationTokenManagerService.java
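Whether the SWebHdfs or the WebHdfs token kind is selected above depends entirely on the server's SSL flag. Example 7 below toggles that flag in a test via a system property; the relevant line, for quick reference:

System.setProperty(HttpFSServerWebApp.NAME + ServerWebApp.SSL_ENABLED, "true");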

Example 2: getHaNnWebHdfsAddresses

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
/**
 * Returns the HA NameNode HTTP or HTTPS addresses from the configuration,
 * keyed by nameservice ID and then by NameNode ID.
 *
 * @return map of nameservice ID to a map of NameNode ID to
 *         InetSocketAddress
 */
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
    Configuration conf, String scheme) {
  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("Unsupported scheme: " + scheme);
  }
}
 
Contributor: naver, Project: hadoop, Lines: 19, Source: DFSUtil.java
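A hypothetical invocation of the method above, assuming an HA-enabled Configuration (the nameservice and NameNode IDs it returns come from dfs.nameservices and dfs.ha.namenodes.* settings not shown here; assumes imports of java.net.InetSocketAddress, java.util.Map, org.apache.hadoop.hdfs.DFSUtil):

Configuration conf = new Configuration();
Map<String, Map<String, InetSocketAddress>> byNameservice =
    DFSUtil.getHaNnWebHdfsAddresses(conf, SWebHdfsFileSystem.SCHEME);
for (Map.Entry<String, Map<String, InetSocketAddress>> ns
    : byNameservice.entrySet()) {
  System.out.println("nameservice " + ns.getKey() + " -> " + ns.getValue());
}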

Example 3: generateDelegationToken

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
 
Contributor: naver, Project: hadoop, Lines: 15, Source: NamenodeWebHdfsMethods.java
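For reference, the two kinds chosen between here are the Text constants "WEBHDFS delegation" (WebHdfsFileSystem.TOKEN_KIND) and "SWEBHDFS delegation" (SWebHdfsFileSystem.TOKEN_KIND) in the Hadoop versions these examples were taken from; verify against your own version before relying on the literal strings.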

Example 4: testSWebHdfsCustomDefaultPorts

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
@Test
public void testSWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("swebhdfs://localhost");
  SWebHdfsFileSystem fs = (SWebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(456, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:456", fs.getCanonicalServiceName());
}
 
Contributor: naver, Project: hadoop, Lines: 10, Source: TestHttpFSPorts.java

Example 5: testSwebHdfsCustomUriPortWithCustomDefaultPorts

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
@Test
public void testSwebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("swebhdfs://localhost:789");
  SWebHdfsFileSystem fs = (SWebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(456, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
 
Contributor: naver, Project: hadoop, Lines: 10, Source: TestHttpFSPorts.java
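The custom default port (456) asserted in the two tests above is presumably established in a setUp method that the excerpt omits; a plausible sketch, using the standard DFSConfigKeys port keys (assumes a conf field and org.junit.Before):

@Before
public void setUp() {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);   // webhdfs default port
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);  // swebhdfs default port
}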

Example 6: createSWebHdfsFileSystem

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
/**
 * Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
 *
 * @param conf configuration
 * @return new SWebHdfsFileSystem
 */
private static SWebHdfsFileSystem createSWebHdfsFileSystem(
    Configuration conf) {
  SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
  fs.setConf(conf);
  return fs;
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 13, Source: SWebHdfs.java
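A design note on the factory above: it only wires in the Configuration. In the Hadoop source this private helper is invoked by the SWebHdfs AbstractFileSystem wrapper, which is presumably responsible for calling initialize() on the returned instance; setConf alone is not enough to use the filesystem directly.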

Example 7: testManagementOperationsSWebHdfsFileSystem

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
@Test
@TestDir
public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
  try {
    System.setProperty(HttpFSServerWebApp.NAME +
        ServerWebApp.SSL_ENABLED, "true");
    testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
  } finally {
    System.getProperties().remove(HttpFSServerWebApp.NAME +
        ServerWebApp.SSL_ENABLED);
  }
}
 
Contributor: Seagate, Project: hadoop-on-lustre2, Lines: 13, Source: TestHttpFSKerberosAuthenticationHandler.java

Example 8: generateDelegationToken

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
 
Contributor: Seagate, Project: hadoop-on-lustre2, Lines: 12, Source: NamenodeWebHdfsMethods.java

Example 9: getFileSystemClass

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
@Override
protected Class getFileSystemClass() {
  return SWebHdfsFileSystem.class;
}
 
Contributor: naver, Project: hadoop, Lines: 5, Source: TestHttpFSFWithSWebhdfsFileSystem.java

Example 10: getKind

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
@Override
public Text getKind() {
  return SWebHdfsFileSystem.TOKEN_KIND;
}
 
Contributor: naver, Project: hadoop, Lines: 5, Source: DelegationTokenIdentifier.java

Example 11: put

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
private Response put(
    final InputStream in,
    final String nnId,
    final String fullpath,
    final PutOpParam op,
    final PermissionParam permission,
    final OverwriteParam overwrite,
    final BufferSizeParam bufferSize,
    final ReplicationParam replication,
    final BlockSizeParam blockSize
    ) throws IOException, URISyntaxException {
  final DataNode datanode = (DataNode)context.getAttribute("datanode");

  switch(op.getValue()) {
  case CREATE:
  {
    final Configuration conf = new Configuration(datanode.getConf());
    conf.set(FsPermission.UMASK_LABEL, "000");

    final int b = bufferSize.getValue(conf);
    DFSClient dfsclient = newDfsClient(nnId, conf);
    FSDataOutputStream out = null;
    try {
      out = dfsclient.createWrappedOutputStream(dfsclient.create(
          fullpath, permission.getFsPermission(), 
          overwrite.getValue() ?
              EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
              EnumSet.of(CreateFlag.CREATE),
          replication.getValue(conf), blockSize.getValue(conf), null,
          b, null), null);
      IOUtils.copyBytes(in, out, b);
      out.close();
      out = null;
      dfsclient.close();
      dfsclient = null;
    } finally {
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfsclient);
    }
    final String scheme = "http".equals(request.getScheme())
        ? WebHdfsFileSystem.SCHEME : SWebHdfsFileSystem.SCHEME;
    final URI uri = new URI(scheme, nnId, fullpath, null, null);
    return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 49, Source: DatanodeWebHdfsMethods.java
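For context, the CREATE branch above is the DataNode half of WebHDFS's two-step file creation: the client first PUTs to the NameNode, receives a 307 redirect, then PUTs the data to the DataNode URL from the Location header. A hedged client-side sketch of that flow (host, port, and path are illustrative only; assumes imports of java.net.URL, java.net.HttpURLConnection, java.nio.charset.StandardCharsets):

URL nn = new URL(
    "https://namenode.example.com:9871/webhdfs/v1/tmp/demo.txt?op=CREATE&overwrite=true");
HttpURLConnection c1 = (HttpURLConnection) nn.openConnection();
c1.setRequestMethod("PUT");
c1.setInstanceFollowRedirects(false);  // capture the 307 instead of following it
String dnLocation = c1.getHeaderField("Location");
c1.disconnect();

HttpURLConnection c2 = (HttpURLConnection) new URL(dnLocation).openConnection();
c2.setRequestMethod("PUT");
c2.setDoOutput(true);
c2.getOutputStream().write("hello".getBytes(StandardCharsets.UTF_8));
System.out.println(c2.getResponseCode());  // 201 Created on success
c2.disconnect();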

Example 12: put

import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; // import the class shown in this example
private Response put(
    final InputStream in,
    final String nnId,
    final String fullpath,
    final PutOpParam op,
    final PermissionParam permission,
    final OverwriteParam overwrite,
    final BufferSizeParam bufferSize,
    final ReplicationParam replication,
    final BlockSizeParam blockSize
    ) throws IOException, URISyntaxException {
  final DataNode datanode = (DataNode)context.getAttribute("datanode");

  switch(op.getValue()) {
  case CREATE:
  {
    final Configuration conf = new Configuration(datanode.getConf());
    conf.set(FsPermission.UMASK_LABEL, "000");

    final int b = bufferSize.getValue(conf);
    DFSClient dfsclient = newDfsClient(nnId, conf);
    FSDataOutputStream out = null;
    try {
      out = new FSDataOutputStream(dfsclient.create(
          fullpath, permission.getFsPermission(), 
          overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
              : EnumSet.of(CreateFlag.CREATE),
          replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
      IOUtils.copyBytes(in, out, b);
      out.close();
      out = null;
      dfsclient.close();
      dfsclient = null;
    } finally {
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfsclient);
    }
    final String scheme = "http".equals(request.getScheme())
        ? WebHdfsFileSystem.SCHEME : SWebHdfsFileSystem.SCHEME;
    final URI uri = new URI(scheme, nnId, fullpath, null, null);
    return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Contributor: Seagate, Project: hadoop-on-lustre2, Lines: 47, Source: DatanodeWebHdfsMethods.java


Note: The org.apache.hadoop.hdfs.web.SWebHdfsFileSystem class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and distribution and use should follow each project's License.