This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.flush. If you are unsure what FSDataOutputStream.flush does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FSDataOutputStream.
The section below shows 15 code examples of the FSDataOutputStream.flush method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
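Before the numbered examples, here is a minimal, self-contained sketch of the usual create → write → flush → close pattern on FSDataOutputStream. It is an illustrative assumption rather than one of the collected examples: the class name FlushDemo and the path /tmp/flush-demo.txt are made up for this sketch, and the Configuration simply uses whatever fs.defaultFS is on the classpath. Keep in mind that flush() only pushes client-side buffered bytes to the underlying stream and does not by itself guarantee durability; HDFS offers hflush()/hsync() for stronger guarantees.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushDemo {
  public static void main(String[] args) throws Exception {
    // Uses whatever file system fs.defaultFS resolves to (the local FS by default).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/flush-demo.txt"); // hypothetical path
    try (FSDataOutputStream out = fs.create(file, true /* overwrite */)) {
      out.writeBytes("some test data\n");
      // flush() hands buffered bytes to the wrapped stream; the file is still open.
      out.flush();
    } // close() completes the file
  }
}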
Example 1: testFileCloseStatus
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test(timeout=60000)
public void testFileCloseStatus() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // create a new file.
    Path file = new Path("/simpleFlush.dat");
    FSDataOutputStream output = fs.create(file);
    // write to file
    output.writeBytes("Some test data");
    output.flush();
    assertFalse("File status should be open", fs.isFileClosed(file));
    output.close();
    assertTrue("File status should be closed", fs.isFileClosed(file));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 2: writeAppState
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeAppState() throws IllegalArgumentException, IOException {
  String interalStatePath =
      appContext.getConf().get(AngelConf.ANGEL_APP_SERILIZE_STATE_FILE);
  LOG.info("start to write app state to file " + interalStatePath);
  if (interalStatePath == null) {
    LOG.error("can not find app state serilize file, exit");
    return;
  }
  Path stateFilePath = new Path(interalStatePath);
  FileSystem fs = stateFilePath.getFileSystem(appContext.getConf());
  if (fs.exists(stateFilePath)) {
    fs.delete(stateFilePath, false);
  }
  FSDataOutputStream out = fs.create(stateFilePath);
  appContext.getApp().serilize(out);
  out.flush();
  out.close();
  LOG.info("write app state over");
}
Example 3: writeFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Example 4: writeIncompleteFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name,
    short repl) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
      blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  // need to make sure that we actually write out both file blocks
  // (see FSOutputSummer#flush)
  stm.flush();
  // Do not close stream, return it
  // so that it is not garbage collected
  return stm;
}
Example 5: testDelete
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Tests that we delete dangling files properly
 */
@Test
public void testDelete() throws Exception {
  Path danglingFile = new Path("/crashedInTheMiddle");
  // Create a file and leave it dangling and try to delete it.
  FSDataOutputStream stream = fs.create(danglingFile);
  stream.write(new byte[] { 1, 2, 3 });
  stream.flush();
  // Now we should still only see a zero-byte file in this place
  FileStatus fileStatus = fs.getFileStatus(danglingFile);
  assertNotNull(fileStatus);
  assertEquals(0, fileStatus.getLen());
  assertEquals(1, getNumTempBlobs());
  // Run WasbFsck -delete to delete the file.
  runFsck("-delete");
  // Now we should see no trace of the file.
  assertEquals(0, getNumTempBlobs());
  assertFalse(fs.exists(danglingFile));
}
Example 6: testBasicReadWriteIO
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public void testBasicReadWriteIO() throws IOException {
  FSDataOutputStream writeData = fs.create(new Path(TEST_PATH));
  writeData.write(TEST_DATA.getBytes());
  writeData.flush();
  writeData.close();
  FSDataInputStream readData = fs.open(new Path(TEST_PATH));
  BufferedReader br = new BufferedReader(new InputStreamReader(readData));
  String line = "";
  StringBuffer stringBuffer = new StringBuffer();
  while ((line = br.readLine()) != null) {
    stringBuffer.append(line);
  }
  br.close();
  assert(TEST_DATA.equals(stringBuffer.toString()));
}
Example 7: testBasicReadWriteIO
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public void testBasicReadWriteIO() throws IOException {
  FSDataOutputStream writeStream = fs.create(new Path(TEST_PATH));
  writeStream.write(TEST_DATA.getBytes());
  writeStream.flush();
  writeStream.close();
  FSDataInputStream readStream = fs.open(new Path(TEST_PATH));
  BufferedReader br = new BufferedReader(new InputStreamReader(readStream));
  String line = "";
  StringBuffer stringBuffer = new StringBuffer();
  while ((line = br.readLine()) != null) {
    stringBuffer.append(line);
  }
  br.close();
  assert(TEST_DATA.equals(stringBuffer.toString()));
}
Example 8: call
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
        HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
    fs.mkdirs(tmpDir);
    HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
    final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    out.flush();
    return out;
  } catch (RemoteException e) {
    if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      return null;
    } else {
      throw e;
    }
  }
}
Example 9: savePartitions
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void savePartitions(Path matrixPath, FileSystem fs,
    List<Integer> partitionIds, int startPos, int endPos,
    PSModelFilesMeta serverMatrixMeta) throws IOException {
  Path destFile = new Path(matrixPath,
      ModelFilesUtils.fileName(context.getPs().getServerId(), partitionIds.get(startPos)));
  Path tmpDestFile = HdfsUtil.toTmpPath(destFile);
  FSDataOutputStream out = fs.create(tmpDestFile);
  long streamPos = 0;
  ServerPartition partition = null;
  for (int i = startPos; i < endPos; i++) {
    LOG.info("Write partition " + partitionIds.get(i) + " of matrix " + matrixName + " to "
        + tmpDestFile);
    streamPos = out.getPos();
    partition = partitionMaps.get(partitionIds.get(i));
    PartitionKey partKey = partition.getPartitionKey();
    ModelPartitionMeta partMeta = new ModelPartitionMeta(partKey.getPartitionId(),
        partKey.getStartRow(), partKey.getEndRow(), partKey.getStartCol(), partKey.getEndCol(),
        partition.elementNum(), destFile.getName(), streamPos, 0);
    partition.save(out, partMeta);
    partMeta.setLength(out.getPos() - streamPos);
    serverMatrixMeta.addPartitionMeta(partitionIds.get(i), partMeta);
  }
  out.flush();
  out.close();
  HdfsUtil.rename(tmpDestFile, destFile, fs);
}
Example 10: commitMatrix
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Combine all output files of a model to a combine directory
 * @param matrixId matrix id
 * @param errorLogs error logs
 */
private void commitMatrix(int matrixId, Vector<String> errorLogs) {
  LOG.info("start commit matrix " + matrixId);
  // Init matrix files meta
  List<ParameterServerId> psIds =
      new ArrayList<>(context.getMatrixMetaManager().getMasterPsIds(matrixId));
  MatrixMeta meta = context.getMatrixMetaManager().getMatrix(matrixId);
  Map<String, String> kvMap = meta.getAttributes();
  ModelFilesMeta filesMeta = new ModelFilesMeta(matrixId, meta.getName(),
      meta.getRowType().getNumber(), meta.getRowNum(), meta.getColNum(),
      meta.getBlockRowNum(), meta.getBlockColNum(), kvMap);
  try {
    // Move output files
    Path srcPath = new Path(tmpOutputPath, ModelFilesConstent.resultDirName);
    Path destPath = new Path(tmpCombinePath, meta.getName());
    PartitionCommitOp partCommitOp =
        new PartitionCommitOp(srcPath, destPath, psIds, errorLogs, filesMeta, 0, psIds.size());
    fileOpExecutor.execute(partCommitOp);
    partCommitOp.join();
    // Write the meta file
    long startTs = System.currentTimeMillis();
    Path metaFile = new Path(destPath, ModelFilesConstent.modelMetaFileName);
    Path tmpMetaFile = HdfsUtil.toTmpPath(metaFile);
    FSDataOutputStream metaOut = fs.create(tmpMetaFile, (short) 1);
    filesMeta.write(metaOut);
    metaOut.flush();
    metaOut.close();
    HdfsUtil.rename(tmpMetaFile, metaFile, fs);
    LOG.info("commit meta file use time=" + (System.currentTimeMillis() - startTs));
  } catch (Throwable x) {
    errorLogs.add("move output files for matrix " + meta.getName() + " failed, error msg = "
        + x.getMessage());
    LOG.error("move output files for matrix " + meta.getName() + " failed.", x);
  }
}
Example 11: writeFileToHadoop
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public synchronized void writeFileToHadoop(List<ConsumerRecord<String, String>> buffer) {
  Configuration configuration = new Configuration();
  String str;
  StringBuffer stringBuffer = new StringBuffer();
  try {
    FileSystem fileSystem = FileSystem.get(configuration);
    Path path = new Path("/user/hive/output/data.dat");
    FSDataOutputStream fsDataOutputStream = fileSystem.create(path);
    //fileWriter = new FileWriter(file,false);
    //printWriter = new PrintWriter(fileWriter);
    for (int i = 0; i < buffer.size(); i++) {
      str = buffer.get(i).value() + "\t" + buffer.get(i).value() + "\n";
      stringBuffer.append(str);
      //printWriter.println(buffer.get(i).value() + "\t" + buffer.get(i).value());
    }
    fsDataOutputStream.write(stringBuffer.toString().getBytes(), 0,
        stringBuffer.toString().getBytes().length);
    fsDataOutputStream.flush();
    fsDataOutputStream.close();
    stringBuffer.delete(0, stringBuffer.length());
    insertIntoHive(); // load the written file into Hive
    //printWriter.flush();
  } catch (IOException e) {
    e.printStackTrace(); // log rather than silently swallowing the IOException
  }
}
Example 12: txt2dat
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public static void txt2dat(Path dir, String inputFile, String outputFile)
    throws IOException {
  FileSystem fileSystem = dir.getFileSystem(new Configuration());
  Path in = new Path(dir, inputFile);
  Path out = new Path(dir, outputFile);
  FSDataInputStream fsDataInputStream = fileSystem.open(in);
  InputStreamReader inputStreamReader = new InputStreamReader(fsDataInputStream);
  BufferedReader reader = new BufferedReader(inputStreamReader);
  FSDataOutputStream writer = fileSystem.create(out);
  try {
    String line;
    line = reader.readLine();
    while (line != null) {
      String[] keyVal = line.split("\\t");
      writer.writeLong(Long.parseLong(keyVal[0]));
      for (String aij : keyVal[1].split(",")) {
        writer.writeDouble(Double.parseDouble(aij));
      }
      line = reader.readLine();
    }
  } finally {
    reader.close();
    inputStreamReader.close();
    fsDataInputStream.close();
    writer.flush();
    writer.close();
  }
}
Example 13: writeFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Serialize parquet metadata to json and write to a file
 * @param parquetTableMetadata
 * @param p
 * @throws IOException
 */
private void writeFile(ParquetTableMetadata_v1 parquetTableMetadata, Path p) throws IOException {
  JsonFactory jsonFactory = new JsonFactory();
  jsonFactory.configure(Feature.AUTO_CLOSE_TARGET, false);
  jsonFactory.configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false);
  ObjectMapper mapper = new ObjectMapper(jsonFactory);
  FSDataOutputStream os = fs.create(p);
  mapper.writerWithDefaultPrettyPrinter().writeValue(os, parquetTableMetadata);
  os.flush();
  os.close();
}
Example 14: testBlockTokenInLastLocatedBlock
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. Block token is verified
 * by ensuring it is of correct kind.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException,
    InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // ensure that the first block is written out (see FSOutputSummer#flush)
    out.flush();
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
        fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0,
          1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
        .getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
Example 15: createLogFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Create simple log file
 *
 * @return
 * @throws IOException
 */
private Path createLogFile() throws IOException {
  FileContext files = FileContext.getLocalFSFileContext();
  Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());
  files.delete(ws, true);
  Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
  files.mkdir(workSpacePath, null, true);
  LOG.info("create logfile.log");
  Path logfile1 = new Path(workSpacePath, "logfile.log");
  FSDataOutputStream os = files.create(logfile1,
      EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
  os.flush();
  os.close();
  LOG.info("create logfile1.log");
  Path logfile2 = new Path(workSpacePath, "logfile1.log");
  os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
  os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
  os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
  os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
  os.flush();
  os.close();
  return workSpacePath;
}