This article collects typical usage examples of the Java class org.apache.hadoop.fs.http.client.HttpFSFileSystem. If you have been wondering what exactly HttpFSFileSystem is, what it is for, and how to use it, the curated class examples below should help.
The HttpFSFileSystem class belongs to the org.apache.hadoop.fs.http.client package. Twelve code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
Example 1: aclStatusToJSON
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Converts an <code>AclStatus</code> object into a JSON object.
 *
 * @param aclStatus AclStatus object
 *
 * @return The JSON representation of the ACLs for the file
 */
@SuppressWarnings({"unchecked"})
private static Map<String, Object> aclStatusToJSON(AclStatus aclStatus) {
  Map<String, Object> json = new LinkedHashMap<String, Object>();
  Map<String, Object> inner = new LinkedHashMap<String, Object>();
  JSONArray entriesArray = new JSONArray();
  inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
  inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
  inner.put(HttpFSFileSystem.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
  for (AclEntry e : aclStatus.getEntries()) {
    entriesArray.add(e.toString());
  }
  inner.put(HttpFSFileSystem.ACL_ENTRIES_JSON, entriesArray);
  json.put(HttpFSFileSystem.ACL_STATUS_JSON, inner);
  return json;
}
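The returned Map nests plain Maps and JSONArrays, so json-simple can serialize it directly. A minimal sketch of the serialization step, with hypothetical owner/group values and the key strings written out literally (they are assumed to mirror the WebHDFS AclStatus field names held by the HttpFSFileSystem.*_JSON constants):

import java.util.LinkedHashMap;
import java.util.Map;
import org.json.simple.JSONObject;

public class AclJsonDemo {
  public static void main(String[] args) {
    // Stand-in for the Map produced by aclStatusToJSON.
    Map<String, Object> inner = new LinkedHashMap<String, Object>();
    inner.put("owner", "hdfs");        // hypothetical owner
    inner.put("group", "supergroup");  // hypothetical group
    inner.put("stickyBit", false);
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("AclStatus", inner);
    // json-simple walks nested Maps and Lists without extra configuration.
    System.out.println(JSONObject.toJSONString(json));
    // prints: {"AclStatus":{"owner":"hdfs","group":"supergroup","stickyBit":false}}
  }
}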
Example 2: toJsonInner
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Returns the inner part of the JSON for the status - used by both the
 * GETFILESTATUS and LISTSTATUS calls.
 * @param emptyPathSuffix whether PATH_SUFFIX_JSON should carry an empty
 *        value instead of the file name
 * @return The JSONish Map
 */
public Map<String, Object> toJsonInner(boolean emptyPathSuffix) {
  Map<String, Object> json = new LinkedHashMap<String, Object>();
  json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
      (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
  json.put(HttpFSFileSystem.TYPE_JSON,
      HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
  json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
  json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
  json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
  json.put(HttpFSFileSystem.PERMISSION_JSON,
      HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
  json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
  json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
      fileStatus.getModificationTime());
  json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
  json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
  if ((aclStatus != null) && !(aclStatus.getEntries().isEmpty())) {
    json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
  }
  return json;
}
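The PERMISSION_JSON value comes from HttpFSFileSystem.permissionToString, which renders an FsPermission as the octal string used throughout the WebHDFS JSON. A small sketch (the expected output is an assumption based on that octal convention):

import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionDemo {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0755);
    // Renders the permission the way toJsonInner stores it in the map.
    System.out.println(HttpFSFileSystem.permissionToString(perm)); // expected: 755
  }
}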
Example 3: xAttrsToJSON
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Converts xAttrs to a JSON object.
 *
 * @param xAttrs file xAttrs.
 * @param encoding format of xattr values.
 *
 * @return The JSON representation of the xAttrs.
 * @throws IOException thrown if an xAttr value cannot be encoded.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrsToJSON(Map<String, byte[]> xAttrs,
    XAttrCodec encoding) throws IOException {
  Map jsonMap = new LinkedHashMap();
  JSONArray jsonArray = new JSONArray();
  if (xAttrs != null) {
    for (Entry<String, byte[]> e : xAttrs.entrySet()) {
      Map json = new LinkedHashMap();
      json.put(HttpFSFileSystem.XATTR_NAME_JSON, e.getKey());
      if (e.getValue() != null) {
        json.put(HttpFSFileSystem.XATTR_VALUE_JSON,
            XAttrCodec.encodeValue(e.getValue(), encoding));
      }
      jsonArray.add(json);
    }
  }
  jsonMap.put(HttpFSFileSystem.XATTRS_JSON, jsonArray);
  return jsonMap;
}
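XAttrCodec does the value encoding. A minimal sketch of it in isolation (the 0x/0s output prefixes follow the WebHDFS xattr encoding convention and are stated here as an assumption):

import java.io.IOException;
import org.apache.hadoop.fs.XAttrCodec;

public class XAttrCodecDemo {
  public static void main(String[] args) throws IOException {
    byte[] value = "some value".getBytes("UTF-8");
    // HEX output is assumed to be prefixed with "0x", BASE64 with "0s".
    System.out.println(XAttrCodec.encodeValue(value, XAttrCodec.HEX));
    System.out.println(XAttrCodec.encodeValue(value, XAttrCodec.BASE64));
  }
}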
Example 4: test
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
private void test(String method, String operation, String contentType,
    boolean upload, boolean error) throws Exception {
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
  Mockito.reset(request);
  Mockito.when(request.getMethod()).thenReturn(method);
  Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM))
      .thenReturn(operation);
  Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME))
      .thenReturn(Boolean.toString(upload));
  Mockito.when(request.getContentType()).thenReturn(contentType);
  FilterChain chain = Mockito.mock(FilterChain.class);
  Filter filter = new CheckUploadContentTypeFilter();
  filter.doFilter(request, response, chain);
  if (error) {
    Mockito.verify(response).sendError(
        Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
        Mockito.contains("Data upload"));
  } else {
    Mockito.verify(chain).doFilter(request, response);
  }
}
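A hedged sketch of how a test method might drive this helper; the operation names and content types here are assumptions based on the upload semantics the filter enforces, not the upstream test's exact cases:

@Test
public void checkUploadContentType() throws Exception {
  // Upload with the required content type: the filter passes the request on.
  test("PUT", "CREATE", "application/octet-stream", true, false);
  // Upload with the wrong content type: the filter must answer 400 Bad Request.
  test("PUT", "CREATE", "text/plain", true, true);
  // Non-upload request: the content type is not inspected at all.
  test("GET", "OPEN", null, false, false);
}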
Example 5: testManagementOperationErrors
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
private void testManagementOperationErrors(AuthenticationHandler handler)
    throws Exception {
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
  Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM))
      .thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
  Mockito.when(request.getMethod()).thenReturn("FOO");
  Assert.assertFalse(handler.managementOperation(null, request, response));
  Mockito.verify(response).sendError(
      Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
      Mockito.startsWith("Wrong HTTP method"));
  Mockito.reset(response);
  Mockito.when(request.getMethod())
      .thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
  Assert.assertFalse(handler.managementOperation(null, request, response));
  Mockito.verify(response).sendError(
      Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
      Mockito.contains("requires SPNEGO"));
}
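The test leans on DelegationTokenOperation pairing each token operation with the HTTP verb it accepts. A tiny fragment as a sketch (the printed value is an assumption, based on GETDELEGATIONTOKEN being a read operation):

// GETDELEGATIONTOKEN is fetched with HTTP GET, which is why the bogus
// "FOO" method above is rejected with "Wrong HTTP method".
DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
System.out.println(op.getHttpMethod()); // expected: "GET"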
Example 6: fileStatusToJSONRaw
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
@SuppressWarnings({"unchecked", "deprecation"})
private static Map fileStatusToJSONRaw(FileStatus status,
    boolean emptyPathSuffix) {
  Map json = new LinkedHashMap();
  json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
      (emptyPathSuffix) ? "" : status.getPath().getName());
  json.put(HttpFSFileSystem.TYPE_JSON,
      HttpFSFileSystem.FILE_TYPE.getType(status).toString());
  json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
  json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
  json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
  json.put(HttpFSFileSystem.PERMISSION_JSON,
      HttpFSFileSystem.permissionToString(status.getPermission()));
  json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
  json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
      status.getModificationTime());
  json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
  json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
  return json;
}
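The TYPE_JSON value comes from HttpFSFileSystem.FILE_TYPE, which classifies a FileStatus. A minimal sketch using a synthetic status (the constructor arguments are placeholders):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;

public class FileTypeDemo {
  public static void main(String[] args) {
    // length 0, isdir=true, replication 0, blocksize 0, mtime 0.
    FileStatus dir = new FileStatus(0L, true, 0, 0L, 0L, new Path("/tmp"));
    // FILE_TYPE.getType maps the status onto the WebHDFS "type" field.
    System.out.println(HttpFSFileSystem.FILE_TYPE.getType(dir)); // DIRECTORY
  }
}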
Example 7: contentSummaryToJSON
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Converts a <code>ContentSummary</code> object into a JSON object.
 *
 * @param contentSummary the content summary
 *
 * @return The JSON representation of the content summary.
 */
@SuppressWarnings({"unchecked"})
private static Map contentSummaryToJSON(ContentSummary contentSummary) {
  Map json = new LinkedHashMap();
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
      contentSummary.getDirectoryCount());
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON,
      contentSummary.getFileCount());
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON,
      contentSummary.getLength());
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON,
      contentSummary.getQuota());
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON,
      contentSummary.getSpaceConsumed());
  json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON,
      contentSummary.getSpaceQuota());
  Map response = new LinkedHashMap();
  response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
  return response;
}
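On the client side, this JSON is what backs FileSystem.getContentSummary when the filesystem points at an HttpFS endpoint. A sketch of that round trip, with a placeholder host:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryDemo {
  public static void main(String[] args) throws IOException {
    // "httpfs-host:14000" is a placeholder for a reachable HttpFS server.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    ContentSummary cs = fs.getContentSummary(new Path("/"));
    System.out.println(cs.getDirectoryCount() + " dirs, "
        + cs.getFileCount() + " files, " + cs.getLength() + " bytes");
  }
}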
Example 8: enforceRootPath
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
  if (!path.equals("/")) {
    throw new UnsupportedOperationException(
        MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
            op, path));
  }
}
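A hedged sketch of a call site: in HttpFSServer this guard protects operations that only make sense against the filesystem root (the specific operation chosen here is an assumption):

// e.g. inside the GET handler, before dispatching the operation:
enforceRootPath(HttpFSFileSystem.Operation.GETHOMEDIRECTORY, path);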
Example 9: delete
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Binding to handle DELETE requests.
 *
 * @param path the path for operation.
 * @param op the HttpFS operation of the request.
 * @param params the HttpFS parameters of the request.
 *
 * @return the request response.
 *
 * @throws IOException thrown if an IO error occurred. Thrown exceptions are
 *         handled by {@link HttpFSExceptionProvider}.
 * @throws FileSystemAccessException thrown if a FileSystemAccess related
 *         error occurred. Thrown exceptions are handled by
 *         {@link HttpFSExceptionProvider}.
 */
@DELETE
@Path("{path:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@PathParam("path") String path,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
    case DELETE: {
      Boolean recursive =
          params.get(RecursiveParam.NAME, RecursiveParam.class);
      AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
      FSOperations.FSDelete command =
          new FSOperations.FSDelete(path, recursive);
      JSONObject json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    default: {
      throw new IOException(
          MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
              op.value()));
    }
  }
  return response;
}
Example 10: doFilter
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Enforces the content-type to be application/octet-stream for
 * POST and PUT requests.
 *
 * @param request servlet request.
 * @param response servlet response.
 * @param chain filter chain.
 *
 * @throws IOException thrown if an IO error occurs.
 * @throws ServletException thrown if a servlet error occurs.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response,
    FilterChain chain)
    throws IOException, ServletException {
  boolean contentTypeOK = true;
  HttpServletRequest httpReq = (HttpServletRequest) request;
  HttpServletResponse httpRes = (HttpServletResponse) response;
  String method = httpReq.getMethod();
  if (method.equals("PUT") || method.equals("POST")) {
    String op = httpReq.getParameter(HttpFSFileSystem.OP_PARAM);
    if (op != null &&
        UPLOAD_OPERATIONS.contains(StringUtils.toUpperCase(op))) {
      if ("true".equalsIgnoreCase(
          httpReq.getParameter(HttpFSParametersProvider.DataParam.NAME))) {
        String contentType = httpReq.getContentType();
        contentTypeOK =
            HttpFSFileSystem.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);
      }
    }
  }
  if (contentTypeOK) {
    chain.doFilter(httpReq, httpRes);
  } else {
    httpRes.sendError(HttpServletResponse.SC_BAD_REQUEST,
        "Data upload requests must have content-type set to '" +
        HttpFSFileSystem.UPLOAD_CONTENT_TYPE + "'");
  }
}
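The UPLOAD_OPERATIONS set is not part of this excerpt. A sketch of its assumed shape, based on CREATE and APPEND being the two data-carrying HttpFS operations:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class UploadOperations {
  // Assumed contents: the operations whose requests carry a body.
  static final Set<String> UPLOAD_OPERATIONS =
      new HashSet<String>(Arrays.asList("CREATE", "APPEND"));
}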
Example 11: toJson
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Return a Map suitable for conversion into JSON.
 * @return A JSONish Map
 */
@SuppressWarnings({"unchecked"})
public Map<String, Object> toJson() {
  Map<String, Object> json = new LinkedHashMap<String, Object>();
  Map<String, Object> inner = new LinkedHashMap<String, Object>();
  JSONArray statuses = new JSONArray();
  for (StatusPair s : statusPairs) {
    statuses.add(s.toJsonInner(false));
  }
  inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
  json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
  return json;
}
Example 12: fileChecksumToJSON
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; // import the required package/class
/**
 * Converts a <code>FileChecksum</code> object into a JSON object.
 *
 * @param checksum file checksum.
 *
 * @return The JSON representation of the file checksum.
 */
@SuppressWarnings({"unchecked"})
private static Map fileChecksumToJSON(FileChecksum checksum) {
  Map json = new LinkedHashMap();
  json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON,
      checksum.getAlgorithmName());
  json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
      org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
  json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
  Map response = new LinkedHashMap();
  response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
  return response;
}
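The three serialized fields map one-to-one onto the FileChecksum accessors a client sees. A sketch of the client side, with a placeholder endpoint and path:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumDemo {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    FileChecksum checksum = fs.getFileChecksum(new Path("/tmp/a.txt"));
    // Algorithm name, bytes and length are exactly what the server-side
    // converter above serializes.
    System.out.println(checksum.getAlgorithmName() + " / " + checksum.getLength());
  }
}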