

Java JsonUtil.toJsonString Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.web.JsonUtil.toJsonString. If you are wondering how to use JsonUtil.toJsonString, what it is for, or where to find examples of it, the curated code samples below should help. You can also read more about the enclosing class, org.apache.hadoop.hdfs.web.JsonUtil.


The following presents 15 code examples of the JsonUtil.toJsonString method, sorted by popularity by default.
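
Before the examples, here is a minimal, self-contained sketch of the two call forms that appear most often below: the key/value overload toJsonString(String, Object) used by the WebHDFS handlers, and the exception overload toJsonString(Exception) used by the error handlers. It only illustrates the calls as they appear in these projects (the class name, file path, and printed output are made up for illustration); behavior may differ between Hadoop versions.

import java.io.FileNotFoundException;
import org.apache.hadoop.hdfs.web.JsonUtil;

public class JsonUtilToJsonStringDemo {
  public static void main(String[] args) {
    // Key/value overload, as used by the DELETE/TRUNCATE handlers below:
    // wraps the boolean result in a small JSON document.
    String booleanJson = JsonUtil.toJsonString("boolean", true);
    System.out.println(booleanJson);

    // Exception overload, as used by the exception handlers below:
    // serializes the exception type and message into a JSON error document.
    String exceptionJson =
        JsonUtil.toJsonString(new FileNotFoundException("/no/such/file"));
    System.out.println(exceptionJson);
  }
}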

Example 1: exceptionCaught

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
        throws Exception {
  Exception e = cause instanceof Exception ? (Exception) cause : new
    Exception(cause);
  final String output = JsonUtil.toJsonString(e);
  ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
          HTTP_1_1, INTERNAL_SERVER_ERROR, content);

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  if (e instanceof IllegalArgumentException) {
    resp.setStatus(BAD_REQUEST);
  } else if (e instanceof FileNotFoundException) {
    resp.setStatus(NOT_FOUND);
  } else if (e instanceof IOException) {
    resp.setStatus(FORBIDDEN);
  }
  resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
  resp.headers().set(CONNECTION, CLOSE);
  ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: FSImageHandler.java

Example 2: exceptionCaught

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
        throws Exception {
  Exception e = cause instanceof Exception ? (Exception) cause : new
    Exception(cause);
  final String output = JsonUtil.toJsonString(e);
  ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
          HTTP_1_1, INTERNAL_SERVER_ERROR, content);

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  if (e instanceof IllegalArgumentException) {
    resp.setStatus(BAD_REQUEST);
  } else if (e instanceof FileNotFoundException) {
    resp.setStatus(NOT_FOUND);
  }

  resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
  resp.headers().set(CONNECTION, CLOSE);
  ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: FSImageHandler.java

Example 3: delete

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");

  switch(op.getValue()) {
  case DELETE:
  {
    final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 23, Source: NamenodeWebHdfsMethods.java

Example 4: delete

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response delete(final UserGroupInformation ugi,
    final DelegationParam delegation, final UserParam username,
    final DoAsParam doAsUser, final String fullpath, final DeleteOpParam op,
    final RecursiveParam recursive) throws IOException {
  final NameNode namenode = (NameNode) context.getAttribute("name.node");

  switch (op.getValue()) {
    case DELETE: {
      final boolean b =
          namenode.getRpcServer().delete(fullpath, recursive.getValue());
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 18, Source: NamenodeWebHdfsMethods.java

Example 5: delete

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");

  switch(op.getValue()) {
  case DELETE:
  {
    final boolean b = getRPCServer(namenode).delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 23, Source: NamenodeWebHdfsMethods.java
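
The three DELETE variants above (Examples 3-5) all end by returning JsonUtil.toJsonString("boolean", b) as the HTTP response body. For context, here is a minimal client-side sketch, not taken from any of the projects above, that would exercise this server path through the standard Hadoop FileSystem API over the webhdfs:// scheme; the host, port, and path are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsDeleteClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode HTTP address; substitute your cluster's value.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode-host:9870"), conf);
    // Issues a WebHDFS DELETE request; the NameNode side answers with the JSON
    // produced by JsonUtil.toJsonString("boolean", b) as shown in Examples 3-5.
    boolean deleted = fs.delete(new Path("/tmp/example"), true /* recursive */);
    System.out.println("deleted = " + deleted);
    fs.close();
  }
}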

Example 6: post

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND:
  {
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT:
  {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE:
  {
    // We treat each rest request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(), 
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: NamenodeWebHdfsMethods.java

Example 7: delete

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case DELETE: {
    final boolean b = np.delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  case DELETESNAPSHOT: {
    np.deleteSnapshot(fullpath, snapshotName.getValue());
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: NamenodeWebHdfsMethods.java

Example 8: getXAttrs

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
/**
 * Return the JSON formatted XAttrs of the specified file.
 *
 * @param path
 *          a path specifies a file
 * @return JSON formatted XAttrs
 * @throws IOException
 *           if failed to serialize fileStatus to JSON.
 */
String getXAttrs(String path, List<String> names, String encoder)
        throws IOException {

  List<XAttr> xAttrs = getXAttrList(path);
  List<XAttr> filtered;
  if (names == null || names.size() == 0) {
    filtered = xAttrs;
  } else {
    filtered = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      XAttr search = XAttrHelper.buildXAttr(name);

      boolean found = false;
      for (XAttr aXAttr : xAttrs) {
        if (aXAttr.getNameSpace() == search.getNameSpace()
                && aXAttr.getName().equals(search.getName())) {

          filtered.add(aXAttr);
          found = true;
          break;
        }
      }

      if (!found) {
        throw new IOException(
                "At least one of the attributes provided was not found.");
      }
    }

  }
  return JsonUtil.toJsonString(filtered,
          new XAttrEncodingParam(encoder).getEncoding());
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: FSImageLoader.java

Example 9: getAclStatus

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
/**
 * Return the JSON formatted ACL status of the specified file.
 * @param path a path specifies a file
 * @return JSON formatted AclStatus
 * @throws IOException if failed to serialize fileStatus to JSON.
 */
String getAclStatus(String path) throws IOException {
  PermissionStatus p = getPermissionStatus(path);
  List<AclEntry> aclEntryList = getAclEntryList(path);
  FsPermission permission = p.getPermission();
  AclStatus.Builder builder = new AclStatus.Builder();
  builder.owner(p.getUserName()).group(p.getGroupName())
      .addEntries(aclEntryList).setPermission(permission)
      .stickyBit(permission.getStickyBit());
  AclStatus aclStatus = builder.build();
  return JsonUtil.toJsonString(aclStatus);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: FSImageLoader.java

Example 10: getXAttrs

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
/**
 * Return the JSON formatted XAttrs of the specified file.
 *
 * @param path
 *          a path specifies a file
 * @return JSON formatted XAttrs
 * @throws IOException
 *           if failed to serialize fileStatus to JSON.
 */
String getXAttrs(String path, List<String> names, String encoder)
    throws IOException {

  List<XAttr> xAttrs = getXAttrList(path);
  List<XAttr> filtered;
  if (names == null || names.size() == 0) {
    filtered = xAttrs;
  } else {
    filtered = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      XAttr search = XAttrHelper.buildXAttr(name);

      boolean found = false;
      for (XAttr aXAttr : xAttrs) {
        if (aXAttr.getNameSpace() == search.getNameSpace()
            && aXAttr.getName().equals(search.getName())) {

          filtered.add(aXAttr);
          found = true;
          break;
        }
      }

      if (!found) {
        throw new IOException(
            "At least one of the attributes provided was not found.");
      }
    }

  }
  return JsonUtil.toJsonString(filtered,
      new XAttrEncodingParam(encoder).getEncoding());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 43, Source: FSImageLoader.java

Example 11: createOriginalFSImage

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
/**
 * Create a populated namespace for later testing. Save its contents to a data
 * structure and store its fsimage location. We only want to generate the
 * fsimage file once and use it for multiple tests.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    // Create a name space with XAttributes
    Path dir = new Path("/dir1");
    hdfs.mkdirs(dir);
    hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
    hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    List<XAttr> attributes = new ArrayList<XAttr>();
    attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));

    attr1JSon = JsonUtil.toJsonString(attributes, null);

    attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));

    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 43, Source: TestOfflineImageViewerForXAttr.java

Example 12: toResponse

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GOT EXCEPITION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  if (e instanceof SecurityException) {
    e = toCause(e);
  }
  
  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }
 
  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: ExceptionHandler.java

Example 13: post

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND:
  {
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT:
  {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE:
  {
    if (newLength.getValue() == null) {
      throw new IllegalArgumentException(
          "newLength parameter is Missing");
    }
    // We treat each rest request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(), 
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 45, Source: NamenodeWebHdfsMethods.java

Example 14: toResponse

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GOT EXCEPITION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.UNAUTHORIZED;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.UNAUTHORIZED;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }
 
  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 46, Source: ExceptionHandler.java

Example 15: toResponse

import org.apache.hadoop.hdfs.web.JsonUtil; // import the package/class this method depends on
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GOT EXCEPITION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException) e;
    e = new IllegalArgumentException(
        "Invalid value for webhdfs parameter \"" +
            paramexception.getParameterName() + "\": " +
            e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException) e).unwrapRemoteException();
  }

  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.UNAUTHORIZED;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.UNAUTHORIZED;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }

  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js)
      .build();
}
 
Developer: hopshadoop, Project: hops, Lines: 48, Source: ExceptionHandler.java


Note: The org.apache.hadoop.hdfs.web.JsonUtil.toJsonString examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Follow each project's License when distributing or using the code, and do not reproduce without permission.