This article collects typical usage examples of the Java class org.apache.hadoop.fs.FSDataOutputStream. If you have been wondering what FSDataOutputStream is for, or how to use it in practice, the curated examples below should help.
FSDataOutputStream belongs to the org.apache.hadoop.fs package. Fifteen code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
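Before diving into the examples, here is a minimal sketch of the most common pattern: an FSDataOutputStream is usually obtained from FileSystem.create() rather than constructed directly (Example 3 below shows the rarer direct construction); you write through it like any DataOutputStream and can query the current position with getPos(). The path and configuration below are illustrative placeholders, not taken from any example on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);           // default filesystem from core-site.xml
    Path file = new Path("/tmp/example.dat");       // hypothetical path
    FSDataOutputStream out = fs.create(file, true); // overwrite if the file exists
    try {
      out.writeUTF("hello hadoop");
      System.out.println("position after write: " + out.getPos());
    } finally {
      out.close();
    }
  }
}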
Example 1: testUnfinishedBlockRead
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Test that writes to an incomplete block are available to a reader.
 */
@Test (timeout = 30000)
public void testUnfinishedBlockRead()
    throws IOException {
  // create a new file in the root, write data, do not close
  Path file1 = new Path("/unfinished-block");
  FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
  // write a partial block and sync
  int partialBlockSize = blockSize / 2;
  writeFileAndSync(stm, partialBlockSize);
  // make sure a client can read it before it is closed
  checkCanRead(fileSystem, file1, partialBlockSize);
  stm.close();
}
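The writeFileAndSync helper is not shown above. A plausible reconstruction (an assumption based on the surrounding test, not the original helper) writes the requested number of bytes and then calls hflush(), which flushes client-side buffers so that new readers can see the data before the file is closed:

// hypothetical reconstruction of the helper used in Example 1
private void writeFileAndSync(FSDataOutputStream stm, int size) throws IOException {
  byte[] buffer = new byte[size];
  new java.util.Random().nextBytes(buffer); // arbitrary payload
  stm.write(buffer, 0, size);
  stm.hflush(); // make written bytes visible to other readers without closing
}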
Example 2: createFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
private static byte[] createFile(FileSystem fs, Path name, long length,
    short replication, long blocksize) throws IOException {
  final FSDataOutputStream out = fs.create(name, false, 4096,
      replication, blocksize);
  try {
    for (long n = length; n > 0; ) {
      ran.nextBytes(buffer);
      final int w = n < buffer.length ? (int) n : buffer.length;
      out.write(buffer, 0, w);
      md5.update(buffer, 0, w);
      n -= w;
    }
  } finally {
    IOUtils.closeStream(out);
  }
  return md5.digest();
}
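This helper references the class-level fields ran, md5, and buffer, which the snippet does not show. Plausible declarations (assumptions added for context, not the original code) would be:

// assumed field declarations backing the snippet above
private static final java.util.Random ran = new java.util.Random();
private static final byte[] buffer = new byte[4096];
private static final java.security.MessageDigest md5 = createMd5();

private static java.security.MessageDigest createMd5() {
  try {
    return java.security.MessageDigest.getInstance("MD5");
  } catch (java.security.NoSuchAlgorithmException e) {
    throw new IllegalStateException(e); // MD5 is always available in the JDK
  }
}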
Example 3: save
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Save a matrix partition to file.
 *
 * @param output the output stream
 * @param partitionMeta the partition metadata
 * @throws IOException if writing the partition fails
 */
public void save(DataOutputStream output, ModelPartitionMeta partitionMeta) throws IOException {
  // wrap the raw stream so getPos() reports absolute offsets, starting at the partition's offset
  FSDataOutputStream dataOutputStream = new FSDataOutputStream(output, null,
      partitionMeta != null ? partitionMeta.getOffset() : 0);
  dataOutputStream.writeInt(rows.size());
  long offset;
  for (Map.Entry<Integer, ServerRow> entry : rows.entrySet()) {
    offset = dataOutputStream.getPos();
    dataOutputStream.writeInt(entry.getKey());
    ServerRow row = entry.getValue();
    row.writeTo(dataOutputStream);
    if (partitionMeta != null) {
      partitionMeta.setRowMeta(new RowOffset(entry.getKey(), offset));
    }
  }
}
Example 4: copyPartitions
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
private void copyPartitions(Path mapOutputPath, Path indexPath)
    throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte[] buffer = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long) buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
Example 5: run
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Override
public void run() {
  System.out.println("Workload starting ");
  for (int i = 0; i < numberOfFiles; i++) {
    Path filename = new Path(id + "." + i);
    try {
      System.out.println("Workload processing file " + filename);
      FSDataOutputStream stm = createFile(fs, filename, replication);
      DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
      dfstream.setArtificialSlowdown(1000);
      writeFile(stm, myseed);
      stm.close();
      checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
    } catch (Throwable e) {
      System.out.println("Workload exception " + e);
      assertTrue(e.toString(), false);
    }
    // increment the stamp to indicate that another file is done.
    synchronized (this) {
      stamp++;
    }
  }
}
Example 6: makeRenamePending
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
public void makeRenamePending(FileFolder dst) throws IOException {
  // Propose (but don't do) the rename.
  Path home = fs.getHomeDirectory();
  String relativeHomeDir = getRelativePath(home.toString());
  NativeAzureFileSystem.FolderRenamePending pending =
      new NativeAzureFileSystem.FolderRenamePending(
          relativeHomeDir + "/" + this.getName(),
          relativeHomeDir + "/" + dst.getName(), null,
          (NativeAzureFileSystem) fs);
  // Get the rename pending file contents.
  String renameDescription = pending.makeRenamePendingFileContents();
  // Create a rename-pending file and write rename information to it.
  final String renamePendingStr = this.getName() + "-RenamePending.json";
  Path renamePendingFile = new Path(renamePendingStr);
  FSDataOutputStream out = fs.create(renamePendingFile, true);
  assertTrue(out != null);
  writeString(out, renameDescription);
}
Example 7: testRewritingClusterIdToPB
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  FSDataOutputStream s = null;
  try {
    s = fs.create(filePath);
    s.writeUTF(UUID.randomUUID().toString());
  } finally {
    if (s != null) {
      s.close();
    }
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(1, master.getServerManager().getOnlineServersList().size());
}
Example 8: create
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
FSDataOutputStream create(PathData item, boolean lazyPersist)
    throws IOException {
  try {
    if (lazyPersist) {
      EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
      return create(item.path,
          FsPermission.getFileDefault().applyUMask(
              FsPermission.getUMask(getConf())),
          createFlags,
          getConf().getInt("io.file.buffer.size", 4096),
          lazyPersist ? 1 : getDefaultReplication(item.path),
          getDefaultBlockSize(),
          null,
          null);
    } else {
      return create(item.path, true);
    }
  } finally { // might have been created but stream was interrupted
    deleteOnExit(item.path);
  }
}
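For comparison, creating a LAZY_PERSIST file directly through the public FileSystem API looks roughly like the sketch below. This is an illustration, not the shell's internal code path shown above, and whether the flag is honored depends on the underlying filesystem:

// illustrative: request lazy-persist (memory-first) storage via the public FileSystem API
static FSDataOutputStream createLazyPersist(FileSystem fs, Path path, Configuration conf)
    throws IOException {
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST);
  return fs.create(path,
      FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)),
      flags,
      conf.getInt("io.file.buffer.size", 4096),
      (short) 1,                     // lazy-persist files are typically single-replica
      fs.getDefaultBlockSize(path),
      null);                         // no progress callback
}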
Example 9: writeTrailer
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
    throws IOException {
  long offset = stream.getPos();
  // Write EOF Entry
  ProcedureWALEntry.newBuilder()
      .setType(ProcedureWALEntry.Type.EOF)
      .build().writeDelimitedTo(stream);
  // Write Tracker
  tracker.writeTo(stream);
  stream.write(TRAILER_VERSION);
  StreamUtils.writeLong(stream, TRAILER_MAGIC);
  StreamUtils.writeLong(stream, offset);
}
Example 10: createInternal
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
Example 11: append
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
  for (int i = 0; i < 10; i++) {
    try {
      return fs.append(p);
    } catch (RemoteException re) {
      if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
        AppendTestUtil.LOG.info("Will sleep and retry, i=" + i + ", p=" + p, re);
        Thread.sleep(1000);
      } else {
        throw re;
      }
    }
  }
  throw new IOException("Cannot append to " + p);
}
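A hedged usage sketch of this retry helper (the path is a hypothetical placeholder): append a record and flush it, relying on the loop above to wait out any in-progress lease recovery.

static void appendRecord(FileSystem fs) throws Exception {
  FSDataOutputStream out = append(fs, new Path("/logs/app.log")); // hypothetical path
  try {
    out.writeBytes("another record\n");
    out.hflush(); // make the appended bytes visible to readers
  } finally {
    out.close();
  }
}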
Example 12: wrapIfNecessary
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
 * variable.
 *
 * @param conf the job configuration
 * @param out the stream to wrap
 * @return the wrapped FSDataOutputStream, or the original stream if
 *         encrypted spill is disabled
 * @throws IOException if the IV cannot be written to the stream
 */
public static FSDataOutputStream wrapIfNecessary(Configuration conf,
    FSDataOutputStream out) throws IOException {
  if (isEncryptedSpillEnabled(conf)) {
    out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
    byte[] iv = createIV(conf);
    out.write(iv);
    if (LOG.isDebugEnabled()) {
      LOG.debug("IV written to Stream ["
          + Base64.encodeBase64URLSafeString(iv) + "]");
    }
    return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
        getBufferSize(conf), getEncryptionKey(), iv);
  } else {
    return out;
  }
}
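A hedged usage sketch follows. The enabling key is assumed to be "mapreduce.job.encrypted-intermediate-data" (as defined in Hadoop's MRJobConfig), and the job credentials are assumed to already carry the spill encryption key; verify both against your Hadoop version.

// sketch only: wrap a local spill stream so its contents are encrypted
static void writeEncryptedSpill(Configuration conf, byte[] payload) throws IOException {
  conf.setBoolean("mapreduce.job.encrypted-intermediate-data", true); // assumed config key
  FileSystem localFs = FileSystem.getLocal(conf);
  FSDataOutputStream raw = localFs.create(new Path("/tmp/spill.out"), true); // hypothetical path
  FSDataOutputStream out = CryptoUtils.wrapIfNecessary(conf, raw);
  try {
    out.write(payload); // transparently encrypted when spill encryption is on
  } finally {
    out.close();
  }
}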
Example 13: writeSnapshotInfo
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
/**
 * Write the snapshot description into the working directory of a snapshot.
 * @param snapshot description of the snapshot being taken
 * @param workingDir working directory of the snapshot
 * @param fs {@link FileSystem} on which the snapshot should be taken
 * @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
 *           failure
 */
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
    throws IOException {
  FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
  Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
  try {
    FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
    try {
      snapshot.writeTo(out);
    } finally {
      out.close();
    }
  } catch (IOException e) {
    // if we get an exception, try to remove the snapshot info
    if (!fs.delete(snapshotInfo, false)) {
      String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
Example 14: testCreatedFileIsImmediatelyVisible
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
  describe("verify that a newly created file exists as soon as open returns");
  Path path = path("testCreatedFileIsImmediatelyVisible");
  FSDataOutputStream out = null;
  try {
    out = getFileSystem().create(path,
        false,
        4096,
        (short) 1,
        1024);
    if (!getFileSystem().exists(path)) {
      if (isSupported(IS_BLOBSTORE)) {
        // object store: downgrade to a skip so that the failure is visible
        // in test results
        skip("Filesystem is an object store and newly created files are not immediately visible");
      }
      assertPathExists("expected path to be visible before anything written",
          path);
    }
  } finally {
    IOUtils.closeStream(out);
  }
}
Example 15: methodsToTest
import org.apache.hadoop.fs.FSDataOutputStream; // import the required package/class
@Parameters(name = "method: {0}")
public static Object[] methodsToTest() {
  List<Method> methods = FluentIterable
      .of(FSDataOutputStream.class.getDeclaredMethods())
      .filter(new Predicate<Method>() {
        @Override
        public boolean apply(Method input) {
          if (Modifier.isStatic(input.getModifiers())) {
            return false;
          }
          if (!Modifier.isPublic(input.getModifiers())) {
            return false;
          }
          return Arrays.asList(input.getExceptionTypes()).contains(IOException.class);
        }
      }).toList();
  return methods.toArray();
}