This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getDefaultBlockSize. If you are unsure what FileSystem.getDefaultBlockSize does, how to call it, or what real-world uses look like, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.FileSystem.
Six code examples of FileSystem.getDefaultBlockSize follow, ordered by popularity.
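Before the examples, a minimal self-contained sketch of the method itself may help. FileSystem.getDefaultBlockSize has two overloads: a deprecated no-argument form and a Path-based form that resolves per-path defaults. The path and the local configuration below are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DefaultBlockSizeDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf); // whatever fs.defaultFS points at

    // Preferred overload: resolves the default block size for a specific path.
    Path p = new Path("/tmp/demo.txt"); // illustrative path
    System.out.println("default block size for " + p + ": "
        + fs.getDefaultBlockSize(p));

    // Deprecated no-argument overload, still common in older code.
    @SuppressWarnings("deprecation")
    long legacy = fs.getDefaultBlockSize();
    System.out.println("legacy default block size: " + legacy);
  }
}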
Example 1: testBlockSize

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
  FileSystem fs = getFileSystem();
  // The no-argument overload is deprecated; this test exercises it deliberately.
  long defaultBlockSize = fs.getDefaultBlockSize();
  assertEquals("incorrect blocksize",
      S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
  long newBlockSize = defaultBlockSize * 2;
  fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);

  Path dir = path("testBlockSize");
  Path file = new Path(dir, "file");
  createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
  FileStatus fileStatus = fs.getFileStatus(file);
  assertEquals("Double default block size in stat(): " + fileStatus,
      newBlockSize,
      fileStatus.getBlockSize());

  // Check the listing and assert that the block size is picked up by
  // this route too.
  boolean found = false;
  FileStatus[] listing = fs.listStatus(dir);
  for (FileStatus stat : listing) {
    LOG.info("entry: {}", stat);
    if (file.equals(stat.getPath())) {
      found = true;
      assertEquals("Double default block size in ls(): " + stat,
          newBlockSize,
          stat.getBlockSize());
    }
  }
  assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
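Example 1 works because S3A has no physical blocks: the value returned by getDefaultBlockSize is simply read from configuration and used for things like split sizing. A minimal sketch of setting it up front; the key string equals Constants.FS_S3A_BLOCK_SIZE, and the bucket URI is hypothetical:

Configuration conf = new Configuration();
conf.setLong("fs.s3a.block.size", 128 * 1024 * 1024L); // 128 MB, illustrative
FileSystem s3a = FileSystem.get(URI.create("s3a://my-bucket/"), conf); // hypothetical bucket
long reported = s3a.getDefaultBlockSize(new Path("s3a://my-bucket/key"));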
Example 2: splitRealFiles

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

private void splitRealFiles(String[] args) throws IOException {
  Configuration conf = new Configuration();
  Job job = Job.getInstance(conf); // pass conf so the job and the filesystem agree
  FileSystem fs = FileSystem.get(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IOException("Wrong file system: " + fs.getClass().getName());
  }
  long blockSize = fs.getDefaultBlockSize();

  DummyInputFormat inFormat = new DummyInputFormat();
  for (int i = 0; i < args.length; i++) {
    FileInputFormat.addInputPaths(job, args[i]);
  }
  inFormat.setMinSplitSizeRack(blockSize);
  inFormat.setMaxSplitSize(10 * blockSize);

  List<InputSplit> splits = inFormat.getSplits(job);
  System.out.println("Total number of splits " + splits.size());
  for (int i = 0; i < splits.size(); ++i) {
    CombineFileSplit fileSplit = (CombineFileSplit) splits.get(i);
    System.out.println("Split[" + i + "] " + fileSplit);
  }
}
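Example 2 sizes combined splits relative to the filesystem's default block size. The same effect can be had with the stock CombineTextInputFormat instead of the test-only DummyInputFormat; a sketch, with the input directory being illustrative:

Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "combine-demo");
FileSystem fs = FileSystem.get(conf);
Path input = new Path("/data"); // illustrative input directory
long blockSize = fs.getDefaultBlockSize(input);

job.setInputFormatClass(CombineTextInputFormat.class);
CombineTextInputFormat.addInputPath(job, input);
CombineTextInputFormat.setMaxInputSplitSize(job, 10 * blockSize); // cap combined splits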
Example 3: execute

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

/**
 * Executes the filesystem operation: creates the file, falling back to the
 * target filesystem's defaults for replication and block size when they
 * are given as the -1 sentinel.
 *
 * @param fs filesystem instance to use.
 *
 * @return null (the operation has no result).
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  if (replication == -1) {
    replication = fs.getDefaultReplication(path);
  }
  if (blockSize == -1) {
    blockSize = fs.getDefaultBlockSize(path);
  }
  FsPermission fsPermission = new FsPermission(permission);
  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
  OutputStream os = fs.create(path, fsPermission, override, bufferSize,
      replication, blockSize, null);
  IOUtils.copyBytes(is, os, bufferSize, true);
  os.close();
  return null;
}
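The -1 sentinel idiom above, where the target filesystem supplies the replication and block-size defaults, is easy to extract into helpers. A minimal sketch; the helper names are my own, not HttpFS API:

// Hypothetical helpers: resolve -1 sentinels against the filesystem's defaults.
static short resolveReplication(FileSystem fs, Path path, short requested) {
  return requested == -1 ? fs.getDefaultReplication(path) : requested;
}

static long resolveBlockSize(FileSystem fs, Path path, long requested) {
  return requested == -1 ? fs.getDefaultBlockSize(path) : requested;
}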
Example 4: Writer

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);
  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // Check consistency of options: exactly one of file or stream is required.
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
      bufferSizeOption != null ||
      replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
        "compatible with stream");
  }

  FSDataOutputStream out;
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
        bufferSizeOption.getValue();
    short replication = replicationOption == null ?
        fs.getDefaultReplication(p) :
        (short) replicationOption.getValue();
    // Fall back to the filesystem's default block size for this path.
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
        blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the existing file and verify its header before appending.
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p),
                   new Reader.OnlyHeaderOption())) {
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }
  init(conf, out, ownStream);
}
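This Writer mirrors the option handling of Hadoop's own SequenceFile.Writer, where getDefaultBlockSize(Path) likewise backs the blockSize option. A sketch against that public API; the path and key/value types are illustrative:

Configuration conf = new Configuration();
Path p = new Path("/tmp/demo.seq"); // illustrative
FileSystem fs = p.getFileSystem(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(p),
    SequenceFile.Writer.keyClass(LongWritable.class),
    SequenceFile.Writer.valueClass(Text.class),
    SequenceFile.Writer.blockSize(fs.getDefaultBlockSize(p))); // explicit fallback
writer.close();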
Example 5: getBlockSize

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

/**
 * @return the block size of the source file if we need to preserve either
 *         the block size or the checksum type. Otherwise the default block
 *         size of the target FS.
 */
private static long getBlockSize(
    EnumSet<FileAttribute> fileAttributes,
    FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
  boolean preserve = fileAttributes.contains(FileAttribute.BLOCKSIZE)
      || fileAttributes.contains(FileAttribute.CHECKSUMTYPE);
  return preserve ? sourceFile.getBlockSize()
      : targetFS.getDefaultBlockSize(tmpTargetPath);
}
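A hedged usage sketch of the helper above; FileAttribute here is DistCp's DistCpOptions.FileAttribute, and the filesystems and paths are assumed to exist:

EnumSet<FileAttribute> attrs = EnumSet.of(FileAttribute.BLOCKSIZE);
FileStatus source = sourceFS.getFileStatus(new Path("/src/file")); // illustrative
long blockSize = getBlockSize(attrs, source, targetFS, tmpTargetPath);
// Preserves the source block size because BLOCKSIZE was requested; with an
// empty EnumSet it would return targetFS.getDefaultBlockSize(tmpTargetPath).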
Example 6: touchFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on

private static void touchFile(String path, boolean createMultipleBlocks,
    ChecksumOpt checksumOpt) throws Exception {
  FileSystem fs;
  DataOutputStream outputStream = null;
  try {
    fs = cluster.getFileSystem();
    final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    // Multiplication binds tighter than ?:, so the fallback branch is twice
    // the default block size of the target path.
    final long blockSize = createMultipleBlocks
        ? NON_DEFAULT_BLOCK_SIZE
        : fs.getDefaultBlockSize(qualifiedPath) * 2;
    FsPermission permission = FsPermission.getFileDefault().applyUMask(
        FsPermission.getUMask(fs.getConf()));
    outputStream = fs.create(qualifiedPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
        (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
        null, checksumOpt);
    byte[] bytes = new byte[DEFAULT_FILE_SIZE];
    outputStream.write(bytes);
    long fileSize = DEFAULT_FILE_SIZE;
    if (createMultipleBlocks) {
      while (fileSize < 2 * blockSize) {
        outputStream.write(bytes);
        outputStream.flush();
        fileSize += DEFAULT_FILE_SIZE;
      }
    }
    pathList.add(qualifiedPath);
    ++nFiles;
    FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
    System.out.println(fileStatus.getBlockSize());
    System.out.println(fileStatus.getReplication());
  } finally {
    IOUtils.cleanup(null, outputStream);
  }
}
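To confirm that touchFile really produced more than one block, the block locations can be inspected; a minimal sketch reusing qualifiedPath from the example:

FileStatus st = fs.getFileStatus(qualifiedPath);
BlockLocation[] blocks = fs.getFileBlockLocations(st, 0, st.getLen());
System.out.println("blocks: " + blocks.length); // > 1 when createMultipleBlocks was true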