This article collects typical usage examples of the Java method org.apache.hadoop.fs.ChecksumFileSystem.getRawFileSystem. If you are wondering what ChecksumFileSystem.getRawFileSystem does, how to call it, or what it looks like in practice, the curated examples below may help. You can also read further about the enclosing class, org.apache.hadoop.fs.ChecksumFileSystem.
Below are 5 code examples of ChecksumFileSystem.getRawFileSystem, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
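Before the full examples, a minimal sketch of the basic pattern may help, assuming nothing beyond a default Configuration. getRawFileSystem() returns the underlying FileSystem that the ChecksumFileSystem wraps, so operations performed through it bypass the .crc checksum bookkeeping. The class and file names below are illustrative, not part of any example on this page:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class RawFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // LocalFileSystem extends ChecksumFileSystem, so getLocal() hands us one.
    LocalFileSystem checked = FileSystem.getLocal(conf);
    // Unwrap the checksum layer; writes through 'raw' create no .crc files.
    FileSystem raw = checked.getRawFileSystem();
    Path p = new Path("/tmp/raw-example.txt");
    raw.create(p, true).close();  // no companion .crc file is written
    System.out.println("exists: " + raw.exists(p));
    raw.delete(p, false);
  }
}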
Example 1: run
import org.apache.hadoop.fs.ChecksumFileSystem; // import the package/class this method depends on
@Override
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  String localDir = conf.get("mapred.temp.dir");
  if (localDir == null) {
    localDir = conf.get("hadoop.tmp.dir");
    conf.set("mapred.temp.dir", localDir);
  }
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .racks(new String[]{"/foo"}).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
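The helper methods writeAndReadFile and writeAndReadLocalFile belong to the surrounding benchmark class and are not shown in these excerpts. As a rough sketch of what such a helper might look like, assuming the class-level BUFFER_SIZE field used above; this body is hypothetical, not the original Hadoop implementation:

// Hypothetical sketch: stream 'size' bytes to 'name' on the given
// FileSystem, read them back, and report elapsed time for each phase.
// Requires org.apache.hadoop.fs.FSDataInputStream/FSDataOutputStream imports;
// 'conf' is unused here but kept to match the call sites above.
private void writeAndReadFile(FileSystem fs, String name,
                              Configuration conf, long size)
    throws IOException {
  byte[] buffer = new byte[BUFFER_SIZE];
  Path path = new Path(name);  // resolved against the fs working directory
  long start = System.currentTimeMillis();
  FSDataOutputStream out = fs.create(path, true);
  try {
    for (long written = 0; written < size; written += buffer.length) {
      out.write(buffer, 0, (int) Math.min(buffer.length, size - written));
    }
  } finally {
    out.close();
  }
  System.out.println(name + " write: "
      + (System.currentTimeMillis() - start) + " ms");
  start = System.currentTimeMillis();
  FSDataInputStream in = fs.open(path);
  try {
    while (in.read(buffer, 0, buffer.length) >= 0) {
      // discard the data; only throughput is measured
    }
  } finally {
    in.close();
  }
  System.out.println(name + " read: "
      + (System.currentTimeMillis() - start) + " ms");
  fs.delete(path, true);
}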
Example 2: run
import org.apache.hadoop.fs.ChecksumFileSystem; // import the package/class this method depends on
@Override
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    // GenericTestUtils avoids casting to a specific log4j logger implementation
    GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  String localDir = conf.get("mapred.temp.dir");
  if (localDir == null) {
    localDir = conf.get("hadoop.tmp.dir");
    conf.set("mapred.temp.dir", localDir);
  }
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .racks(new String[]{"/foo"}).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
Example 3: run
import org.apache.hadoop.fs.ChecksumFileSystem; // import the package/class this method depends on
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  // relies on "mapred.temp.dir" being set (it has a default in mapred-default.xml)
  String localDir = conf.get("mapred.temp.dir");
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    // older constructor form: (conf, numDataNodes, format, racks)
    cluster = new MiniDFSCluster(conf, 1, true, new String[]{"/foo"});
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
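Examples 3 and 5 differ from the other variants in two ways: they read "mapred.temp.dir" without the hadoop.tmp.dir fallback, and Example 3 builds the test cluster with the older MiniDFSCluster constructor where the others use MiniDFSCluster.Builder.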
Example 4: run
import org.apache.hadoop.fs.ChecksumFileSystem; // import the package/class this method depends on
@Override
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  String localDir = conf.get("mapred.temp.dir");
  if (localDir == null) {
    localDir = conf.get("hadoop.tmp.dir");
    conf.set("mapred.temp.dir", localDir);
  }
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .racks(new String[]{"/foo"}).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
Example 5: run
import org.apache.hadoop.fs.ChecksumFileSystem; // import the package/class this method depends on
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
  String localDir = conf.get("mapred.temp.dir");
  dir = new LocalDirAllocator("mapred.temp.dir");
  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for (int i = 0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .racks(new String[]{"/foo"}).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for (int i = 0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
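All five variants implement the run method of Hadoop's Tool interface, so the benchmark would normally be launched through ToolRunner, which parses generic Hadoop options before delegating. A minimal sketch of such an entry point follows; the class name BenchmarkThroughput is an assumption, since the enclosing class is not shown in these excerpts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class Launcher {
  public static void main(String[] args) throws Exception {
    // ToolRunner strips generic options (-D key=value, -conf file, ...) so
    // that run() receives only the remaining args, e.g. the "reps" count.
    int exitCode = ToolRunner.run(new Configuration(),
        new BenchmarkThroughput(), args);  // assumed class name
    System.exit(exitCode);
  }
}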