

Java HttpOpParam Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.web.resources.HttpOpParam. If you are unsure what HttpOpParam does, or how and where to use it, the curated code examples below should help.


The HttpOpParam class belongs to the org.apache.hadoop.hdfs.web.resources package. The following 15 code examples show how the class is used in real projects, sorted by popularity by default.
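
Before the examples, here is a minimal sketch of how an HttpOpParam.Op typically drives a WebHDFS request: the Op bundles the HTTP method, the op=... query string, and the expected status code for one operation. The host, port, and path below are placeholders; the URL layout simply follows the /webhdfs/v1 REST convention, and toQueryString()/getType()/getExpectedHttpResponseCode() are the same Op methods the examples below rely on.

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;

public class HttpOpParamSketch {
  public static void main(String[] args) throws Exception {
    final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;

    // Illustrative URL; namenode:50070 and /tmp are placeholders.
    final URL url = new URL("http://namenode:50070/webhdfs/v1/tmp?"
        + op.toQueryString());

    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(op.getType().toString()); // GET/PUT/POST/DELETE
    if (conn.getResponseCode() != op.getExpectedHttpResponseCode()) {
      System.err.println("unexpected response: " + conn.getResponseCode());
    }
    conn.disconnect();
  }
}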

Example 1: init

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final String nnId,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnId == null) {
    throw new IllegalArgumentException(NamenodeAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);
  
  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        deserializeToken(delegation.getValue(), nnId);
    ugi.addToken(token);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 24, Source: DatanodeWebHdfsMethods.java
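
The deserializeToken helper called above is not shown in this snippet. Based on the equivalent inline logic in Example 9 below, a plausible reconstruction looks like the following; note that in the original class the second argument is a NameNode identifier string, so taking an InetSocketAddress here is an assumption made to keep the sketch self-contained.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;

// Hypothetical reconstruction of deserializeToken, mirroring Example 9.
final class TokenHelper {
  static Token<DelegationTokenIdentifier> deserializeToken(
      final String delegation, final InetSocketAddress nnAddr)
      throws IOException {
    final Token<DelegationTokenIdentifier> token = new Token<>();
    token.decodeFromUrlString(delegation);        // parse URL-safe encoding
    SecurityUtil.setTokenService(token, nnAddr);  // bind the token to the NN
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    return token;
  }
}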

Example 2: validateResponse

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
    final HttpURLConnection conn, boolean unwrapException) throws IOException {
  final int code = conn.getResponseCode();
  if (code != op.getExpectedHttpResponseCode()) {
    final Map<?, ?> m;
    try {
      m = jsonParse(conn, true);
    } catch(Exception e) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage(), e);
    }

    if (m == null) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage());
    } else if (m.get(RemoteException.class.getSimpleName()) == null) {
      return m;
    }

    final RemoteException re = JsonUtil.toRemoteException(m);
    throw unwrapException? toIOException(re): re;
  }
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: WebHdfsFileSystem.java
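
The essence of validateResponse is comparing the actual status code against op.getExpectedHttpResponseCode() and surfacing the server-side error. A minimal standalone variant of the same pattern — not the Hadoop implementation, just the same idea — looks like this:

import java.io.IOException;
import java.net.HttpURLConnection;

// Simplified stand-in for the validateResponse pattern shown above:
// fail fast with the server's message when the status code is unexpected.
final class ResponseCheck {
  static void expect(final HttpURLConnection conn, final int expected)
      throws IOException {
    final int code = conn.getResponseCode();
    if (code != expected) {
      throw new IOException("Unexpected HTTP response: code=" + code
          + " != " + expected + ", message=" + conn.getResponseMessage());
    }
  }
}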

Example 3: getAuthParameters

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
  List<Param<?,?>> authParams = Lists.newArrayList();    
  // Skip adding delegation token for token operations because these
  // operations require authentication.
  Token<?> token = null;
  if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
    token = getDelegationToken();
  }
  if (token != null) {
    authParams.add(new DelegationParam(token.encodeToUrlString()));
  } else {
    UserGroupInformation userUgi = ugi;
    UserGroupInformation realUgi = userUgi.getRealUser();
    if (realUgi != null) { // proxy user
      authParams.add(new DoAsParam(userUgi.getShortUserName()));
      userUgi = realUgi;
    }
    authParams.add(new UserParam(userUgi.getShortUserName()));
  }
  return authParams.toArray(new Param<?,?>[0]);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: WebHdfsFileSystem.java
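
The returned parameters are typically concatenated onto the op's query string. The sketch below mirrors that construction using Param.toSortedString, which the examples above also use; the user name is a placeholder, and the hard-coded UserParam stands in for what getAuthParameters computes on an insecure cluster.

import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.UserParam;

public class AuthQuerySketch {
  public static void main(String[] args) {
    final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
    // Stand-in for getAuthParameters(op) without Kerberos:
    final Param<?, ?>[] authParams = { new UserParam("alice") };

    // Each pair is emitted prefixed with the separator.
    final String query =
        op.toQueryString() + Param.toSortedString("&", authParams);
    System.out.println("/webhdfs/v1/tmp?" + query);
    // prints: /webhdfs/v1/tmp?op=LISTSTATUS&user.name=alice
  }
}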

Example 4: twoStepWrite

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
/**
 * Two-step create/append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request, with data, to the URL from the
 * Location header.
 *
 * The two-step create/append exists to prevent clients from sending data
 * before the redirect. This issue is addressed by the "Expect: 100-continue"
 * header in HTTP/1.1; see RFC 2616, Section 8.2.3. Unfortunately, some
 * software libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP
 * client) do not correctly implement "Expect: 100-continue", so the
 * two-step create/append is a temporary workaround for those bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;
  
  //Step 2) Submit another HTTP request, with data, to the URL from the Location header.
  conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
  connect();
  return conn;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 29, Source: WebHdfsFileSystem.java
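
connect(false) is not shown above; its essential effect is issuing the request with automatic redirect handling disabled, so the 307 Location header can be read before any file data is sent. A hedged sketch of that step (the helper name and signature are assumptions):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

// Hypothetical stand-in for the connect(false) step of twoStepWrite():
// capture the redirect target without following it.
final class RedirectProbe {
  static String locationOf(final URL url, final String method)
      throws IOException {
    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(method);          // e.g. "PUT" for CREATE
    conn.setInstanceFollowRedirects(false); // keep the 307 visible
    conn.connect();
    try {
      return conn.getHeaderField("Location"); // datanode URL for step 2
    } finally {
      conn.disconnect();
    }
  }
}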

Example 5: getResponse

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
void getResponse(boolean getJsonAndDisconnect) throws IOException {
  try {
    connect();
    final int code = conn.getResponseCode();
    if (!redirected && op.getRedirect()
        && code != op.getExpectedHttpResponseCode()) {
      final String redirect = conn.getHeaderField("Location");
      json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
          conn, false);
      disconnect();
  
      checkRetry = false;
      conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
      connect();
    }

    json = validateResponse(op, conn, false);
    if (json == null && getJsonAndDisconnect) {
      json = jsonParse(conn, false);
    }
  } finally {
    if (getJsonAndDisconnect) {
      disconnect();
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: WebHdfsFileSystem.java
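
jsonParse is another helper not shown here; a plausible equivalent reads the connection's input stream (or its error stream, for failed requests) and parses it into a map. This sketch uses Jackson purely for illustration; the original class may use a different JSON library.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

// Illustrative equivalent of jsonParse(conn, useErrorStream).
final class JsonParseSketch {
  private static final ObjectMapper MAPPER = new ObjectMapper();

  static Map<?, ?> jsonParse(final HttpURLConnection conn,
      final boolean useErrorStream) throws IOException {
    final InputStream in =
        useErrorStream ? conn.getErrorStream() : conn.getInputStream();
    if (in == null) {
      throw new IOException("The "
          + (useErrorStream ? "error" : "input") + " stream is null.");
    }
    return MAPPER.readValue(in, Map.class);
  }
}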

Example 6: write

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(new BufferedOutputStream(
      conn.getOutputStream(), bufferSize), statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 19, Source: WebHdfsFileSystem.java

Example 7: create

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  statistics.incrementWriteOps(1);

  final HttpOpParam.Op op = PutOpParam.Op.CREATE;
  return new Runner(op, f, 
      new PermissionParam(applyUMask(permission)),
      new OverwriteParam(overwrite),
      new BufferSizeParam(bufferSize),
      new ReplicationParam(replication),
      new BlockSizeParam(blockSize))
    .run()
    .write(bufferSize);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: WebHdfsFileSystem.java
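
For context, this create method is what a client reaches through the public FileSystem API. A minimal usage sketch, with a placeholder NameNode address and path:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateUsageSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder address; any WebHDFS-enabled NameNode works.
    try (FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());
        FSDataOutputStream out = fs.create(new Path("/tmp/demo.txt"))) {
      out.writeBytes("hello webhdfs\n");
    } // close() triggers the response validation shown in Example 6
  }
}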

Example 8: listStatus

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json = run(op, f);
  final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for(int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>)array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: WebHdfsFileSystem.java
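
The nested lookups above follow the WebHDFS LISTSTATUS response shape. The sketch below replays the same key derivation ("FileStatus" + "es") on a hand-built payload; the payload is abbreviated, and Jackson is used only for illustration (the original parser returns an Object[] where Jackson yields a List).

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.FileStatus;

public class ListStatusJsonSketch {
  public static void main(String[] args) throws Exception {
    // Abbreviated LISTSTATUS payload; real responses carry more fields.
    final String payload = "{\"FileStatuses\":{\"FileStatus\":["
        + "{\"pathSuffix\":\"a.txt\",\"type\":\"FILE\",\"length\":24}]}}";

    final Map<?, ?> json = new ObjectMapper().readValue(payload, Map.class);
    final Map<?, ?> rootmap =
        (Map<?, ?>) json.get(FileStatus.class.getSimpleName() + "es");
    System.out.println(rootmap.get(FileStatus.class.getSimpleName()));
    // prints the list of per-file status maps
  }
}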

Example 9: init

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final InetSocketAddress nnRpcAddr,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnRpcAddr == null) {
    throw new IllegalArgumentException(NamenodeRpcAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);
  
  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token = 
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: DatanodeWebHdfsMethods.java

Example 10: twoStepWrite

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
/**
 * Two-step create/append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request, with data, to the URL from the
 * Location header.
 * <p/>
 * The two-step create/append exists to prevent clients from sending data
 * before the redirect. This issue is addressed by the "Expect: 100-continue"
 * header in HTTP/1.1; see RFC 2616, Section 8.2.3. Unfortunately, some
 * software libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP
 * client) do not correctly implement "Expect: 100-continue", so the
 * two-step create/append is a temporary workaround for those bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn,
      false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;
  
  //Step 2) Submit another HTTP request, with data, to the URL from the Location header.
  conn = (HttpURLConnection) new URL(redirect).openConnection();
  conn.setRequestProperty("Content-Type",
      MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
  connect();
  return conn;
}
 
Developer: hopshadoop, Project: hops, Lines: 32, Source: WebHdfsFileSystem.java

Example 11: getResponse

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
void getResponse(boolean getJsonAndDisconnect) throws IOException {
  try {
    connect();
    final int code = conn.getResponseCode();
    if (!redirected && op.getRedirect() &&
        code != op.getExpectedHttpResponseCode()) {
      final String redirect = conn.getHeaderField("Location");
      json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
          conn, false);
      disconnect();

      checkRetry = false;
      conn = (HttpURLConnection) new URL(redirect).openConnection();
      connect();
    }

    json = validateResponse(op, conn, false);
    if (json == null && getJsonAndDisconnect) {
      json = jsonParse(conn, false);
    }
  } finally {
    if (getJsonAndDisconnect) {
      disconnect();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: WebHdfsFileSystem.java

Example 12: write

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(
      new BufferedOutputStream(conn.getOutputStream(), bufferSize),
      statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: WebHdfsFileSystem.java

Example 13: listStatus

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json = run(op, f);
  final Map<?, ?> rootmap =
      (Map<?, ?>) json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array =
      (Object[]) rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for (int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>) array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
 
Developer: hopshadoop, Project: hops, Lines: 20, Source: WebHdfsFileSystem.java

Example 14: init

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final InetSocketAddress nnRpcAddr,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path +
        ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnRpcAddr == null) {
    throw new IllegalArgumentException(
        NamenodeRpcAddressParam.NAME + " is not specified.");
  }

  //clear content type
  response.setContentType(null);
  
  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        new Token<>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: DatanodeWebHdfsMethods.java

Example 15: twoStepWrite

import org.apache.hadoop.hdfs.web.resources.HttpOpParam; // import the required package/class
/**
 * Two-step create/append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request, with data, to the URL from the
 * Location header.
 *
 * The two-step create/append exists to prevent clients from sending data
 * before the redirect. This issue is addressed by the "Expect: 100-continue"
 * header in HTTP/1.1; see RFC 2616, Section 8.2.3. Unfortunately, some
 * software libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP
 * client) do not correctly implement "Expect: 100-continue", so the
 * two-step create/append is a temporary workaround for those bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;
  
  //Step 2) Submit another HTTP request, with data, to the URL from the Location header.
  conn = (HttpURLConnection)new URL(redirect).openConnection();
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //use 32kB-chunks
  connect();
  return conn;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 29, Source: WebHdfsFileSystem.java


Note: The org.apache.hadoop.hdfs.web.resources.HttpOpParam class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before using or redistributing the code, and do not repost without permission.