本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream.create方法的典型用法代码示例。如果您正苦于以下问题:Java EditLogFileOutputStream.create方法的具体用法?Java EditLogFileOutputStream.create怎么用?Java EditLogFileOutputStream.create使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream的用法示例。
在下文中一共展示了EditLogFileOutputStream.create方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initEdits
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //导入方法依赖的package包/类
/**
 * Creates the "edits" and "fstime" files under the given storage directory
 * (presumably inside its current/ subdirectory — resolved by
 * getFileWithCurrent; confirm against that helper) and returns a stream
 * opened on the edits file via {@code EditLogFileOutputStream.create()}.
 *
 * @param editsDir storage directory to initialize
 * @return an open EditLogFileOutputStream on the new edits file
 * @throws IOException if either file cannot be created
 */
public static EditLogFileOutputStream initEdits(File editsDir) throws IOException {
  File edits = getFileWithCurrent(editsDir, "edits");
  File fstime = getFileWithCurrent(editsDir, "fstime");
  if (!edits.createNewFile()) {
    throw new IOException("Failed to create edits file");
  }
  EditLogFileOutputStream out = new EditLogFileOutputStream(edits, null);
  out.create();
  if (!fstime.createNewFile()) {
    // Close the stream we already opened so a failed init does not leak it
    // (the original threw here without closing `out`).
    out.close();
    throw new IOException("Failed to create fstime file");
  }
  return out;
}
示例2: beginRoll
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //导入方法依赖的package包/类
/**
 * Simulates the first half of an edit-log roll: closes the stream on the
 * current edits file and opens a fresh stream on a new "edits.new" file.
 *
 * @param editsDir storage directory holding the edit logs
 * @param editsOutput stream on the current edits file; closed by this method
 * @return an open EditLogFileOutputStream on the new edits.new file
 * @throws IOException if edits.new cannot be created
 */
private EditLogFileOutputStream beginRoll(File editsDir,
    EditLogFileOutputStream editsOutput)
    throws IOException {
  File rollTarget = getFileWithCurrent(editsDir, "edits.new");
  editsOutput.close();
  if (!rollTarget.createNewFile()) {
    throw new IOException("Failed to create edits.new file");
  }
  EditLogFileOutputStream rolled = new EditLogFileOutputStream(rollTarget, null);
  rolled.create();
  Assert.assertTrue(rollTarget.exists());
  return rolled;
}
示例3: initEdits
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //导入方法依赖的package包/类
/**
 * Creates a zero-txid in-progress edits file in the given storage
 * directory and returns a stream opened on it via
 * {@code EditLogFileOutputStream.create()}.
 *
 * @param editsDir storage directory to initialize
 * @return an open EditLogFileOutputStream on the new in-progress file
 * @throws IOException if the file cannot be created
 */
public static EditLogFileOutputStream initEdits(File editsDir)
    throws IOException {
  File inProgress = TestPreTransactionalServerLogReader.getFileWithCurrent(
      editsDir, "edits_inprogress_0000000000000000000");
  if (!inProgress.createNewFile()) {
    throw new IOException("Failed to create edits file");
  }
  EditLogFileOutputStream stream = new EditLogFileOutputStream(inProgress, null);
  stream.create();
  return stream;
}
示例4: testPreserveEditLogs
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; //导入方法依赖的package包/类
/**
 * Upgrade test: rewrites every pre-upgrade edit-log file with a bumped
 * layout version (CURRENT_LAYOUT_VERSION + 1), then brings up the cluster
 * and verifies the pre-upgrade transactions are still visible through the
 * inotify event stream.
 */
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      // Move the original log aside, then replay its ops into a fresh file
      // written under the newer layout version.
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      // try-with-resources: the original leaked `in` on every iteration.
      try (EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
          false)) {
        EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
            (int) tmpFile.length());
        out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
        FSEditLogOp logOp = in.readOp();
        while (logOp != null) {
          out.write(logOp);
          logOp = in.readOp();
        }
        out.setReadyToFlush();
        out.flushAndSync(true);
        out.close();
      }
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();
  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  // poll() can return null when no batch is ready; fail with a clear
  // message instead of an NPE on batch.getEvents().
  assertTrue("Expected a batch of pre-upgrade transactions.", batch != null);
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  // Expected value goes first per the JUnit assertEquals contract, so
  // failure messages report expected/actual correctly.
  assertEquals(Event.EventType.CREATE, events[0].getEventType());
  assertEquals("/TestUpgrade", ((CreateEvent) events[0]).getPath());
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}