This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.web.resources.PostOpParam.getValue. If you have been wondering what PostOpParam.getValue does and how to use it, the curated examples below should help; you can also read further into the enclosing class, org.apache.hadoop.hdfs.web.resources.PostOpParam.
The sections below show four code examples of PostOpParam.getValue.
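Before the server-side examples, a minimal standalone sketch of what getValue returns may be useful. This is not part of the listing below; the class name PostOpParamDemo and the hard-coded "APPEND" string are illustrative, assuming the standard WebHDFS parameter API:

import org.apache.hadoop.hdfs.web.resources.PostOpParam;

public class PostOpParamDemo {
  public static void main(String[] args) {
    // Parse the WebHDFS "op" query parameter; the hard-coded "APPEND"
    // stands in for a value taken from a real HTTP request.
    final PostOpParam op = new PostOpParam("APPEND");
    final PostOpParam.Op value = op.getValue();
    System.out.println(value);           // APPEND
    System.out.println(value.getType()); // POST (the HTTP method for this op)
  }
}

The server-side methods below switch on exactly this enum value to dispatch APPEND and CONCAT requests.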
Example 1: post
import org.apache.hadoop.hdfs.web.resources.PostOpParam; // import the package/class the method depends on
private Response post(final UserGroupInformation ugi,
    final DelegationParam delegation, final UserParam username,
    final DoAsParam doAsUser, final String fullpath, final PostOpParam op,
    final ConcatSourcesParam concatSrcs, final BufferSizeParam bufferSize)
    throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode) context.getAttribute("name.node");
  switch (op.getValue()) {
  case APPEND: {
    // APPEND is a two-step operation: redirect the client to a datanode
    // that will receive the appended bytes.
    final URI uri =
        redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath,
            op.getValue(), -1L, -1L, bufferSize);
    return Response.temporaryRedirect(uri)
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT: {
    // CONCAT completes on the namenode itself via its RPC server.
    namenode.getRpcServer().concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
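For context, a hedged client-side counterpart: appending through the standard FileSystem API issues the POST that the namenode method above answers with a temporary redirect, and the WebHDFS client follows that redirect to a datanode transparently. The host, port, and path below are placeholders:

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsAppendClient {
  public static void main(String[] args) throws Exception {
    // "namenode-host:50070" and "/tmp/example.log" are placeholder values.
    final Configuration conf = new Configuration();
    final FileSystem fs =
        FileSystem.get(URI.create("webhdfs://namenode-host:50070"), conf);
    try (FSDataOutputStream out = fs.append(new Path("/tmp/example.log"))) {
      out.write("appended line\n".getBytes(StandardCharsets.UTF_8));
    }
  }
}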
Example 2: post
import org.apache.hadoop.hdfs.web.resources.PostOpParam; // import the package/class the method depends on
private Response post(
    final InputStream in,
    final String nnId,
    final String fullpath,
    final PostOpParam op,
    final BufferSizeParam bufferSize
    ) throws IOException {
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  switch (op.getValue()) {
  case APPEND: {
    final Configuration conf = new Configuration(datanode.getConf());
    final int b = bufferSize.getValue(conf);
    DFSClient dfsclient = newDfsClient(nnId, conf);
    FSDataOutputStream out = null;
    try {
      out = dfsclient.append(fullpath, b, null, null);
      IOUtils.copyBytes(in, out, b);
      out.close();
      out = null; // cleared so the finally-block cleanup is a no-op on success
      dfsclient.close();
      dfsclient = null;
    } finally {
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfsclient);
    }
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
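A note on the cleanup idiom above: out and dfsclient are closed explicitly and then set to null on the success path, so the IOUtils.cleanup calls in the finally block (which log and swallow exceptions) only act when the copy failed partway. Close failures on the happy path therefore still propagate to the caller.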
Example 3: post
import org.apache.hadoop.hdfs.web.resources.PostOpParam; // import the package/class the method depends on
private Response post(
    final InputStream in,
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final InetSocketAddress nnRpcAddr,
    final String fullpath,
    final PostOpParam op,
    final BufferSizeParam bufferSize
    ) throws IOException {
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  switch (op.getValue()) {
  case APPEND: {
    final Configuration conf = new Configuration(datanode.getConf());
    final int b = bufferSize.getValue(conf);
    DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
    FSDataOutputStream out = null;
    try {
      out = dfsclient.append(fullpath, b, null, null);
      IOUtils.copyBytes(in, out, b);
      out.close();
      out = null;
      dfsclient.close();
      dfsclient = null;
    } finally {
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfsclient);
    }
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
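This example handles APPEND the same way as Example 2; the difference is how the DFSClient is obtained. Example 2 goes through the helper newDfsClient(nnId, conf), resolving a namenode by its ID, while this version constructs the client directly from the namenode's RPC socket address.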
Example 4: post
import org.apache.hadoop.hdfs.web.resources.PostOpParam; // import the package/class the method depends on
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode) context.getAttribute("name.node");
  switch (op.getValue()) {
  case APPEND: {
    final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
        fullpath, op.getValue(), -1L, -1L, bufferSize);
    return Response.temporaryRedirect(uri)
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT: {
    // Unlike Example 1, this variant obtains the RPC server through the
    // helper getRPCServer(namenode) rather than namenode.getRpcServer().
    getRPCServer(namenode).concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}