当前位置: 首页>>代码示例>>Java>>正文


Java Param.toSortedString方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.web.resources.Param.toSortedString方法的典型用法代码示例。如果您正苦于以下问题:Java Param.toSortedString方法的具体用法?Java Param.toSortedString怎么用?Java Param.toSortedString使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.web.resources.Param的用法示例。


在下文中一共展示了Param.toSortedString方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: toUrl

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Builds the namenode HTTP URL for a WebHDFS operation.
 *
 * @param op the HTTP operation (GET/PUT/... variant)
 * @param fspath the filesystem path; {@code null} means the root path
 * @param parameters additional query parameters, sorted before appending
 * @return the fully assembled namenode URL
 * @throws IOException if the URL cannot be constructed
 */
URL toUrl(final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  // Resolve the URI path: fall back to "/" when no path was supplied.
  final String uriPath;
  if (fspath == null) {
    uriPath = PATH_PREFIX + "/";
  } else {
    uriPath = PATH_PREFIX + makeQualified(fspath).toUri().getPath();
  }
  // Assemble the query string: operation, then user, then sorted parameters.
  final StringBuilder q = new StringBuilder(op.toQueryString());
  q.append('&').append(new UserParam(ugi));
  q.append(Param.toSortedString("&", parameters));
  final String queryStr = q.toString();
  // Token-management operations authenticate via Kerberos directly, so a
  // delegation token must not be appended for them.
  final boolean isTokenOp = op.equals(PutOpParam.Op.RENEWDELEGATIONTOKEN)
      || op.equals(GetOpParam.Op.GETDELEGATIONTOKEN);
  final URL result = isTokenOp
      ? getNamenodeURL(uriPath, queryStr)
      : getNamenodeURL(uriPath, addDt2Query(queryStr));
  if (LOG.isTraceEnabled()) {
    LOG.trace("url=" + result);
  }
  return result;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:23,代码来源:WebHdfsFileSystem.java

示例2: testWebHdfsOffsetAndLength

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Verifies that a raw WebHDFS OPEN request honors the offset and length
 * query parameters: the response must contain exactly LENGTH bytes
 * starting at byte OFFSET of the stored file.
 */
@Test
public void testWebHdfsOffsetAndLength() throws Exception{
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int OFFSET = 42;
  final int LENGTH = 512;
  final String PATH = "/foo";
  final byte[] CONTENTS = new byte[1024];
  RANDOM.nextBytes(CONTENTS);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final WebHdfsFileSystem fs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
    // Write the random payload through the WebHDFS filesystem.
    try (OutputStream out = fs.create(new Path(PATH))) {
      out.write(CONTENTS);
    }
    // Issue a raw OPEN request with explicit offset/length parameters.
    final InetSocketAddress httpAddr = cluster.getNameNode().getHttpAddress();
    final String file = WebHdfsFileSystem.PATH_PREFIX + PATH + "?op=OPEN"
        + Param.toSortedString("&", new OffsetParam((long) OFFSET),
            new LengthParam((long) LENGTH));
    final URL url = new URL("http", httpAddr.getHostString(),
        httpAddr.getPort(), file);
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setInstanceFollowRedirects(true);
    // The datanode must serve exactly LENGTH bytes.
    Assert.assertEquals(LENGTH, conn.getContentLength());
    final byte[] expected = new byte[LENGTH];
    System.arraycopy(CONTENTS, OFFSET, expected, 0, LENGTH);
    final byte[] actual = new byte[LENGTH];
    IOUtils.readFully(conn.getInputStream(), actual);
    Assert.assertArrayEquals(expected, actual);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:37,代码来源:TestWebHDFS.java

示例3: testWebHdfsOffsetAndLength

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Regression test: an OPEN request carrying offset and length query
 * parameters must return exactly the requested slice of the file.
 */
@Test
public void testWebHdfsOffsetAndLength() throws Exception{
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int OFFSET = 42;
  final int LENGTH = 512;
  final String PATH = "/foo";
  final byte[] CONTENTS = new byte[1024];
  RANDOM.nextBytes(CONTENTS);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final WebHdfsFileSystem fs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    // Store the random payload in HDFS.
    try (OutputStream out = fs.create(new Path(PATH))) {
      out.write(CONTENTS);
    }
    // Build and issue the raw OPEN URL with offset/length parameters.
    final InetSocketAddress http = cluster.getNameNode().getHttpAddress();
    final URL openUrl = new URL("http", http.getHostString(), http.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + PATH + "?op=OPEN"
            + Param.toSortedString("&", new OffsetParam((long) OFFSET),
                new LengthParam((long) LENGTH)));
    final HttpURLConnection connection =
        (HttpURLConnection) openUrl.openConnection();
    connection.setInstanceFollowRedirects(true);
    Assert.assertEquals(LENGTH, connection.getContentLength());
    // Compare the served bytes against the expected slice of CONTENTS.
    final byte[] want = new byte[LENGTH];
    System.arraycopy(CONTENTS, OFFSET, want, 0, LENGTH);
    final byte[] got = new byte[LENGTH];
    IOUtils.readFully(connection.getInputStream(), got);
    Assert.assertArrayEquals(want, got);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:37,代码来源:TestWebHDFS.java

示例4: toUrl

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Builds the namenode URL for the given operation, path, and parameters.
 *
 * @param op the HTTP operation to encode in the query string
 * @param fspath the target path; {@code null} is treated as the root
 * @param parameters extra query parameters, sorted before appending
 * @return the namenode URL for this request
 * @throws IOException if URL construction fails
 */
URL toUrl(final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  // Use the raw (still percent-encoded) URI path; root when no path given.
  final String uriPath = (fspath == null)
      ? PATH_PREFIX + "/"
      : PATH_PREFIX + makeQualified(fspath).toUri().getRawPath();
  // Query = operation + authentication parameters + caller parameters.
  final String queryStr = op.toQueryString()
      + Param.toSortedString("&", getAuthParameters(op))
      + Param.toSortedString("&", parameters);
  final URL namenodeUrl = getNamenodeURL(uriPath, queryStr);
  if (LOG.isTraceEnabled()) {
    LOG.trace("url=" + namenodeUrl);
  }
  return namenodeUrl;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:15,代码来源:WebHdfsFileSystem.java

示例5: redirectURI

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Computes the datanode URI the client should be redirected to for the
 * given operation, including the appropriate authentication query.
 *
 * @return the redirect URI on the chosen datanode
 * @throws URISyntaxException if the assembled URI is malformed
 * @throws IOException if no suitable datanode can be chosen or token
 *         generation fails
 */
private URI redirectURI(final NameNode namenode,
    final UserGroupInformation ugi, final DelegationParam delegation,
    final UserParam username, final DoAsParam doAsUser,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize,
    final Param<?, ?>... parameters) throws URISyntaxException, IOException {
  final Configuration conf =
      (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
  // Pick the datanode that will actually serve this operation.
  final DatanodeInfo target = chooseDatanode(namenode, path, op, openOffset,
      blocksize, conf);

  // Build the authentication fragment of the query string.
  final String authQuery;
  if (!UserGroupInformation.isSecurityEnabled()) {
    // Security is off: identify the caller by user/doAs parameters only.
    authQuery = Param.toSortedString("&", doAsUser, username);
  } else if (delegation.getValue() != null) {
    // The client already holds a delegation token; forward it as-is.
    authQuery = "&" + delegation;
  } else {
    // Security is on and no token was supplied: mint one for this user.
    final Token<? extends TokenIdentifier> issued = generateDelegationToken(
        namenode, ugi, request.getUserPrincipal().getName());
    authQuery = "&" + new DelegationParam(issued.encodeToUrlString());
  }

  final String fullQuery = op.toQueryString() + authQuery
      + "&" + new NamenodeRpcAddressParam(namenode)
      + Param.toSortedString("&", parameters);
  final URI redirect = new URI("http", null, target.getHostName(),
      target.getInfoPort(), WebHdfsFileSystem.PATH_PREFIX + path,
      fullQuery, null);
  if (LOG.isTraceEnabled()) {
    LOG.trace("redirectURI=" + redirect);
  }
  return redirect;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:36,代码来源:NamenodeWebHdfsMethods.java

示例6: toUrl

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Assembles the namenode URL for an HTTP operation against a path.
 *
 * @param op the WebHDFS operation
 * @param fspath the path to operate on; {@code null} means "/"
 * @param parameters extra query parameters, appended in sorted order
 * @return the complete namenode URL
 * @throws IOException on URL construction failure
 */
URL toUrl(final HttpOpParam.Op op, final Path fspath,
    final Param<?, ?>... parameters) throws IOException {
  // Default to the filesystem root when no path is supplied.
  final String uriPath;
  if (fspath == null) {
    uriPath = PATH_PREFIX + "/";
  } else {
    uriPath = PATH_PREFIX + makeQualified(fspath).toUri().getPath();
  }
  // Compose the query: operation, then auth params, then caller params.
  final StringBuilder query = new StringBuilder(op.toQueryString());
  query.append(Param.toSortedString("&", getAuthParameters(op)));
  query.append(Param.toSortedString("&", parameters));
  final URL namenodeUrl = getNamenodeURL(uriPath, query.toString());
  if (LOG.isTraceEnabled()) {
    LOG.trace("url=" + namenodeUrl);
  }
  return namenodeUrl;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:15,代码来源:WebHdfsFileSystem.java

示例7: redirectURI

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Builds the URI on the chosen datanode that the client is redirected to,
 * attaching the correct authentication query for the security mode.
 *
 * @return the datanode redirect URI
 * @throws URISyntaxException if the resulting URI is malformed
 * @throws IOException if datanode selection or token generation fails
 */
private URI redirectURI(final NameNode namenode,
    final UserGroupInformation ugi, final DelegationParam delegation,
    final UserParam username, final DoAsParam doAsUser, final String path,
    final HttpOpParam.Op op, final long openOffset, final long blocksize,
    final Param<?, ?>... parameters) throws URISyntaxException, IOException {
  final Configuration conf =
      (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
  // Select the datanode that will serve this request.
  final DatanodeInfo datanode = chooseDatanode(namenode, path, op,
      openOffset, blocksize, conf);

  // Determine how the request authenticates on the datanode side.
  final String authPart;
  if (!UserGroupInformation.isSecurityEnabled()) {
    // No security: carry the effective/real user as plain parameters.
    authPart = Param.toSortedString("&", doAsUser, username);
  } else if (delegation.getValue() != null) {
    // Reuse the delegation token the client already presented.
    authPart = "&" + delegation;
  } else {
    // Issue a fresh delegation token for the authenticated principal.
    final Token<? extends TokenIdentifier> issued =
        generateDelegationToken(namenode, ugi,
            request.getUserPrincipal().getName());
    authPart = "&" + new DelegationParam(issued.encodeToUrlString());
  }

  final String query = op.toQueryString() + authPart + "&" +
      new NamenodeRpcAddressParam(namenode) +
      Param.toSortedString("&", parameters);
  final URI uri = new URI("http", null, datanode.getHostName(),
      datanode.getInfoPort(), WebHdfsFileSystem.PATH_PREFIX + path,
      query, null);
  if (LOG.isTraceEnabled()) {
    LOG.trace("redirectURI=" + uri);
  }
  return uri;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:38,代码来源:NamenodeWebHdfsMethods.java

示例8: redirectURI

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Returns the URI on a chosen datanode to which the client is redirected,
 * with the authentication query appropriate to the security mode.
 *
 * @return the datanode redirect URI
 * @throws URISyntaxException if the assembled URI is invalid
 * @throws IOException if datanode selection or token creation fails
 */
private URI redirectURI(final NameNode namenode,
    final UserGroupInformation ugi, final DelegationParam delegation,
    final UserParam username, final DoAsParam doAsUser,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize,
    final Param<?, ?>... parameters) throws URISyntaxException, IOException {
  // Choose the datanode that will serve this operation.
  final DatanodeInfo serving = chooseDatanode(namenode, path, op, openOffset,
      blocksize);

  // Authentication fragment of the query string.
  final String authFragment;
  if (!UserGroupInformation.isSecurityEnabled()) {
    // Security disabled: identify the caller via user/doAs parameters.
    authFragment = Param.toSortedString("&", doAsUser, username);
  } else if (delegation.getValue() != null) {
    // Forward the delegation token the client supplied.
    authFragment = "&" + delegation;
  } else {
    // No token supplied under security: generate one for this principal.
    final Token<? extends TokenIdentifier> minted = generateDelegationToken(
        namenode, ugi, request.getUserPrincipal().getName());
    authFragment = "&" + new DelegationParam(minted.encodeToUrlString());
  }

  final String fullQuery = op.toQueryString() + authFragment
      + Param.toSortedString("&", parameters);
  final URI redirect = new URI("http", null, serving.getHostName(),
      serving.getInfoPort(), WebHdfsFileSystem.PATH_PREFIX + path,
      fullQuery, null);
  if (LOG.isTraceEnabled()) {
    LOG.trace("redirectURI=" + redirect);
  }
  return redirect;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:34,代码来源:NamenodeWebHdfsMethods.java

示例9: testUGICacheSecure

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Verifies DataNodeUGIProvider's UGI cache under (faked) Kerberos security:
 * the same delegation token must yield the same cached UGI, different
 * tokens must yield different UGIs, and cache expiration must force new
 * UGI instances to be created.
 */
@Test
public void testUGICacheSecure() throws Exception {
  // fake turning on security so api thinks it should use tokens
  SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
  UserGroupInformation.setConfiguration(conf);

  UserGroupInformation ugi = UserGroupInformation
      .createRemoteUser("test-user");
  ugi.setAuthenticationMethod(KERBEROS);
  ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
  UserGroupInformation.setLoginUser(ugi);

  List<Token<DelegationTokenIdentifier>> tokens = Lists.newArrayList();
  getWebHdfsFileSystem(ugi, conf, tokens);

  // Two OPEN request URIs that differ only in the delegation token.
  String uri1 = WebHdfsFileSystem.PATH_PREFIX
      + PATH
      + "?op=OPEN"
      + Param.toSortedString("&", new NamenodeAddressParam("127.0.0.1:1010"),
          new OffsetParam((long) OFFSET), new LengthParam((long) LENGTH),
          new DelegationParam(tokens.get(0).encodeToUrlString()));

  String uri2 = WebHdfsFileSystem.PATH_PREFIX
      + PATH
      + "?op=OPEN"
      + Param.toSortedString("&", new NamenodeAddressParam("127.0.0.1:1010"),
          new OffsetParam((long) OFFSET), new LengthParam((long) LENGTH),
          new DelegationParam(tokens.get(1).encodeToUrlString()));

  DataNodeUGIProvider ugiProvider1 = new DataNodeUGIProvider(
      new ParameterParser(new QueryStringDecoder(URI.create(uri1)), conf));
  UserGroupInformation ugi11 = ugiProvider1.ugi();
  UserGroupInformation ugi12 = ugiProvider1.ugi();

  Assert.assertEquals(
      "With UGI cache, two UGIs returned by the same token should be same",
      ugi11, ugi12);

  // Renamed from url21/url22: these hold UGIs, not URLs.
  DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
      new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
  UserGroupInformation ugi21 = ugiProvider2.ugi();
  UserGroupInformation ugi22 = ugiProvider2.ugi();

  Assert.assertEquals(
      "With UGI cache, two UGIs returned by the same token should be same",
      ugi21, ugi22);

  Assert.assertNotEquals(
      "With UGI cache, two UGIs for the different token should not be same",
      ugi11, ugi22);

  // After eviction, each provider must construct a fresh UGI instance.
  awaitCacheEmptyDueToExpiration();
  ugi12 = ugiProvider1.ugi();
  ugi22 = ugiProvider2.ugi();

  String msg = "With cache eviction, two UGIs returned" +
  " by the same token should not be same";
  Assert.assertNotEquals(msg, ugi11, ugi12);
  Assert.assertNotEquals(msg, ugi21, ugi22);

  Assert.assertNotEquals(
      "With UGI cache, two UGIs for the different token should not be same",
      ugi11, ugi22);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:65,代码来源:TestDataNodeUGIProvider.java

示例10: testUGICacheInSecure

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Verifies DataNodeUGIProvider's UGI cache without security: the same
 * user must map to the same cached UGI, different users to different
 * UGIs, and cache expiration must force new UGI instances.
 */
@Test
public void testUGICacheInSecure() throws Exception {
  // Two OPEN request URIs that differ only in the user parameter.
  String uri1 = WebHdfsFileSystem.PATH_PREFIX
      + PATH
      + "?op=OPEN"
      + Param.toSortedString("&", new OffsetParam((long) OFFSET),
          new LengthParam((long) LENGTH), new UserParam("root"));

  String uri2 = WebHdfsFileSystem.PATH_PREFIX
      + PATH
      + "?op=OPEN"
      + Param.toSortedString("&", new OffsetParam((long) OFFSET),
          new LengthParam((long) LENGTH), new UserParam("hdfs"));

  DataNodeUGIProvider ugiProvider1 = new DataNodeUGIProvider(
      new ParameterParser(new QueryStringDecoder(URI.create(uri1)), conf));
  UserGroupInformation ugi11 = ugiProvider1.ugi();
  UserGroupInformation ugi12 = ugiProvider1.ugi();

  Assert.assertEquals(
      "With UGI cache, two UGIs for the same user should be same", ugi11,
      ugi12);

  // Renamed from url21/url22: these hold UGIs, not URLs.
  DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
      new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
  UserGroupInformation ugi21 = ugiProvider2.ugi();
  UserGroupInformation ugi22 = ugiProvider2.ugi();

  Assert.assertEquals(
      "With UGI cache, two UGIs for the same user should be same", ugi21,
      ugi22);

  Assert.assertNotEquals(
      "With UGI cache, two UGIs for the different user should not be same",
      ugi11, ugi22);

  // After eviction, each provider must construct a fresh UGI instance.
  awaitCacheEmptyDueToExpiration();
  ugi12 = ugiProvider1.ugi();
  ugi22 = ugiProvider2.ugi();

  String msg = "With cache eviction, two UGIs returned by" +
  " the same user should not be same";
  Assert.assertNotEquals(msg, ugi11, ugi12);
  Assert.assertNotEquals(msg, ugi21, ugi22);

  Assert.assertNotEquals(
      "With UGI cache, two UGIs for the different user should not be same",
      ugi11, ugi22);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:50,代码来源:TestDataNodeUGIProvider.java

示例11: redirectURI

import org.apache.hadoop.hdfs.web.resources.Param; //导入方法依赖的package包/类
/**
 * Builds the redirect URI on a chosen datanode, preserving the request's
 * scheme (http or https) and the matching datanode info port.
 *
 * @return the datanode redirect URI
 * @throws URISyntaxException if the assembled URI is malformed
 * @throws IOException if no datanode can be found or token creation fails
 */
private URI redirectURI(final NameNode namenode,
    final UserGroupInformation ugi, final DelegationParam delegation,
    final UserParam username, final DoAsParam doAsUser,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize,
    final Param<?, ?>... parameters) throws URISyntaxException, IOException {
  // Datanode selection can fail on a malformed topology; surface that as
  // an IOException with a actionable message.
  final DatanodeInfo datanode;
  try {
    datanode = chooseDatanode(namenode, path, op, openOffset, blocksize);
  } catch (InvalidTopologyException ite) {
    throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
  }

  // Authentication fragment of the query string.
  final String authPart;
  if (!UserGroupInformation.isSecurityEnabled()) {
    // Security disabled: identify the caller via user/doAs parameters.
    authPart = Param.toSortedString("&", doAsUser, username);
  } else if (delegation.getValue() != null) {
    // Forward the delegation token supplied by the client.
    authPart = "&" + delegation;
  } else {
    // Security enabled but no token supplied: mint one for this principal.
    final Token<? extends TokenIdentifier> minted = generateDelegationToken(
        namenode, ugi, request.getUserPrincipal().getName());
    authPart = "&" + new DelegationParam(minted.encodeToUrlString());
  }

  final String query = op.toQueryString() + authPart
      + "&" + new NamenodeAddressParam(namenode)
      + Param.toSortedString("&", parameters);

  // Mirror the incoming scheme and pick the matching datanode port.
  final String scheme = request.getScheme();
  final int port = "http".equals(scheme)
      ? datanode.getInfoPort()
      : datanode.getInfoSecurePort();
  final URI redirect = new URI(scheme, null, datanode.getHostName(), port,
      WebHdfsFileSystem.PATH_PREFIX + path, query, null);

  if (LOG.isTraceEnabled()) {
    LOG.trace("redirectURI=" + redirect);
  }
  return redirect;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:43,代码来源:NamenodeWebHdfsMethods.java


注:本文中的org.apache.hadoop.hdfs.web.resources.Param.toSortedString方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。