This article collects typical usage examples of the Java class org.apache.hadoop.fs.FsServerDefaults. If you are wondering what FsServerDefaults is for, how to use it, or where to find real-world examples of it, the curated code samples below may help.
The FsServerDefaults class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
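Before the individual examples, here is a minimal sketch of the pattern most of them share: obtain a FileSystem, ask it for its FsServerDefaults, and read individual settings off the returned object. The class name ShowServerDefaults is chosen here purely for illustration, and the sketch assumes that fs.defaultFS in the loaded configuration points at a reachable HDFS cluster; with no configuration it falls back to the local filesystem and reports its built-in defaults instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;

public class ShowServerDefaults {
  public static void main(String[] args) throws Exception {
    // Assumption: fs.defaultFS in the classpath configuration points at a
    // running HDFS cluster; otherwise this resolves to the local filesystem.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
      System.out.println("block size:         " + defaults.getBlockSize());
      System.out.println("bytes per checksum: " + defaults.getBytesPerChecksum());
      System.out.println("write packet size:  " + defaults.getWritePacketSize());
      System.out.println("replication:        " + defaults.getReplication());
      System.out.println("file buffer size:   " + defaults.getFileBufferSize());
    }
  }
}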
Example 1: testServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
/**
 * Test that server default values can be retrieved on the client side.
 */
@Test
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
      .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults();
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example 2: testServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
/**
 * Test that server default values can be retrieved on the client side.
 */
@Test
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
      .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults(new Path("/"));
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example 3: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
/**
 * Get server default values for a number of configuration parameters.
 *
 * @see ClientProtocol#getServerDefaults()
 */
public FsServerDefaults getServerDefaults() throws IOException {
  long now = Time.now();
  if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
    ClientActionHandler handler = new ClientActionHandler() {
      @Override
      public Object doAction(ClientProtocol namenode)
          throws RemoteException, IOException {
        return namenode.getServerDefaults();
      }
    };
    serverDefaults = (FsServerDefaults) doClientActionWithRetry(handler,
        "getServerDefaults");
    serverDefaultsLastUpdate = now;
  }
  return serverDefaults;
}
Example 4: testRespectsServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
@Test
public void testRespectsServerDefaults() throws Exception {
  FsServerDefaults targetDefs =
      fcTarget.getDefaultFileSystem().getServerDefaults(new Path("/"));
  FsServerDefaults viewDefs =
      fcView.getDefaultFileSystem().getServerDefaults(new Path("/data"));
  assertEquals(targetDefs.getReplication(), viewDefs.getReplication());
  assertEquals(targetDefs.getBlockSize(), viewDefs.getBlockSize());
  assertEquals(targetDefs.getBytesPerChecksum(), viewDefs.getBytesPerChecksum());
  assertEquals(targetDefs.getFileBufferSize(), viewDefs.getFileBufferSize());
  assertEquals(targetDefs.getWritePacketSize(), viewDefs.getWritePacketSize());
  assertEquals(targetDefs.getEncryptDataTransfer(), viewDefs.getEncryptDataTransfer());
  assertEquals(targetDefs.getTrashInterval(), viewDefs.getTrashInterval());
  assertEquals(targetDefs.getChecksumType(), viewDefs.getChecksumType());
  fcView.create(new Path("/data/file"), EnumSet.of(CreateFlag.CREATE)).close();
  FileStatus stat = fcTarget.getFileStatus(new Path(targetTestRoot, "data/file"));
  assertEquals(targetDefs.getReplication(), stat.getReplication());
}
Example 5: testServerDefaultsInternalDir
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
@Test
public void testServerDefaultsInternalDir() throws Exception {
  FsServerDefaults localDefs = LocalConfigKeys.getServerDefaults();
  FsServerDefaults viewDefs = fcView
      .getDefaultFileSystem().getServerDefaults(new Path("/internalDir"));
  assertEquals(localDefs.getReplication(), viewDefs.getReplication());
  assertEquals(localDefs.getBlockSize(), viewDefs.getBlockSize());
  assertEquals(localDefs.getBytesPerChecksum(), viewDefs.getBytesPerChecksum());
  assertEquals(localDefs.getFileBufferSize(), viewDefs.getFileBufferSize());
  assertEquals(localDefs.getWritePacketSize(), viewDefs.getWritePacketSize());
  assertEquals(localDefs.getEncryptDataTransfer(), viewDefs.getEncryptDataTransfer());
  assertEquals(localDefs.getTrashInterval(), viewDefs.getTrashInterval());
  assertEquals(localDefs.getChecksumType(), viewDefs.getChecksumType());
}
Example 6: testServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
/**
 * Test that server default values can be retrieved on the client side.
 */
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, FSConstants.DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, FSConstants.DEFAULT_WRITE_PACKET_SIZE);
  conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
  conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(FSConstants.DEFAULT_REPLICATION_FACTOR + 1)
      .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults();
    assertEquals(FSConstants.DEFAULT_BLOCK_SIZE, serverDefaults.getBlockSize());
    assertEquals(FSConstants.DEFAULT_BYTES_PER_CHECKSUM, serverDefaults.getBytesPerChecksum());
    assertEquals(FSConstants.DEFAULT_WRITE_PACKET_SIZE, serverDefaults.getWritePacketSize());
    assertEquals(FSConstants.DEFAULT_REPLICATION_FACTOR + 1, serverDefaults.getReplication());
    assertEquals(FSConstants.DEFAULT_FILE_BUFFER_SIZE, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example 7: HdfsFileWriter
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
  LOG.debug("Creating writer on {}", path);
  this.path = path;
  Configuration conf = fileSystem.getConf();
  FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
  if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
    flags.add(CreateFlag.SYNC_BLOCK);
  }
  outputStream = fileSystem.create(path,
      FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)),
      flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(),
      fsDefaults.getBlockSize(), null);
}
Example 8: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
public static FsServerDefaults getServerDefaults() throws IOException {
  return new FsServerDefaults(
      BLOCK_SIZE_DEFAULT,
      BYTES_PER_CHECKSUM_DEFAULT,
      CLIENT_WRITE_PACKET_SIZE_DEFAULT,
      REPLICATION_DEFAULT,
      STREAM_BUFFER_SIZE_DEFAULT,
      ENCRYPT_DATA_TRANSFER_DEFAULT,
      FS_TRASH_INTERVAL_DEFAULT,
      CHECKSUM_TYPE_DEFAULT);
}
Example 9: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
protected static FsServerDefaults getServerDefaults() throws IOException {
  return new FsServerDefaults(
      BLOCK_SIZE_DEFAULT,
      BYTES_PER_CHECKSUM_DEFAULT,
      CLIENT_WRITE_PACKET_SIZE_DEFAULT,
      REPLICATION_DEFAULT,
      STREAM_BUFFER_SIZE_DEFAULT,
      ENCRYPT_DATA_TRANSFER_DEFAULT,
      FS_TRASH_INTERVAL_DEFAULT,
      CHECKSUM_TYPE_DEFAULT);
}
Example 10: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
/**
 * Get server default values for a number of configuration parameters.
 * @see ClientProtocol#getServerDefaults()
 */
public FsServerDefaults getServerDefaults() throws IOException {
  long now = Time.monotonicNow();
  if ((serverDefaults == null) ||
      (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD)) {
    serverDefaults = namenode.getServerDefaults();
    serverDefaultsLastUpdate = now;
  }
  assert serverDefaults != null;
  return serverDefaults;
}
Example 11: NameNodeConnector
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
    List<Path> targetPaths, Configuration conf,
    int maxNotChangedIterations) throws IOException {
  this.nameNodeUri = nameNodeUri;
  this.idPath = idPath;
  this.targetPaths = targetPaths == null || targetPaths.isEmpty()
      ? Arrays.asList(new Path("/")) : targetPaths;
  this.maxNotChangedIterations = maxNotChangedIterations;
  this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
      NamenodeProtocol.class).getProxy();
  this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
      ClientProtocol.class, fallbackToSimpleAuth).getProxy();
  this.fs = (DistributedFileSystem) FileSystem.get(nameNodeUri, conf);
  final NamespaceInfo namespaceinfo = namenode.versionRequest();
  this.blockpoolID = namespaceinfo.getBlockPoolID();
  final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
  this.keyManager = new KeyManager(blockpoolID, namenode,
      defaults.getEncryptDataTransfer(), conf);
  // If it is for test, we do not create the id file.
  out = checkAndMarkRunning();
  if (out == null) {
    // Exit if there is another one running.
    throw new IOException("Another " + name + " is running.");
  }
}
Example 12: convert
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
public static FsServerDefaults convert(FsServerDefaultsProto fs) {
  if (fs == null) {
    return null;
  }
  return new FsServerDefaults(
      fs.getBlockSize(), fs.getBytesPerChecksum(),
      fs.getWritePacketSize(), (short) fs.getReplication(),
      fs.getFileBufferSize(),
      fs.getEncryptDataTransfer(),
      fs.getTrashInterval(),
      PBHelper.convert(fs.getChecksumType()));
}
Example 13: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
@Override
public GetServerDefaultsResponseProto getServerDefaults(
    RpcController controller, GetServerDefaultsRequestProto req)
    throws ServiceException {
  try {
    FsServerDefaults result = server.getServerDefaults();
    return GetServerDefaultsResponseProto.newBuilder()
        .setServerDefaults(PBHelper.convert(result))
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example 14: getServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
@Override
public FsServerDefaults getServerDefaults() throws IOException {
  GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
  try {
    return PBHelper.convert(
        rpcProxy.getServerDefaults(null, req).getServerDefaults());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example 15: testGetServerDefaults
import org.apache.hadoop.fs.FsServerDefaults; // import the required package/class
@Test(timeout = 60000)
public void testGetServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FsServerDefaults fsServerDefaults = dfs.getServerDefaults();
    Assert.assertNotNull(fsServerDefaults);
  } finally {
    cluster.shutdown();
  }
}