本文整理汇总了Java中org.apache.flink.core.fs.FileSystem.create方法的典型用法代码示例。如果您正苦于以下问题:Java FileSystem.create方法的具体用法?Java FileSystem.create怎么用?Java FileSystem.create使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.core.fs.FileSystem
的用法示例。
在下文中一共展示了FileSystem.create方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: readStateData
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Downloads the state data referenced by a remote state handle into a local restore file.
 *
 * <p>Both streams are registered with the backend's cancel-stream registry so that a task
 * cancellation can close them and abort the transfer; the finally block only closes a stream
 * if it is still registered (i.e. was not already closed by cancellation).
 *
 * @param restoreFilePath local target path; an existing file is overwritten
 * @param remoteFileHandle handle to the remote state data to copy
 * @throws IOException if reading the remote data or writing the local file fails
 */
private void readStateData(
		Path restoreFilePath,
		StreamStateHandle remoteFileHandle) throws IOException {

	FileSystem restoreFileSystem = restoreFilePath.getFileSystem();

	FSDataInputStream in = null;
	FSDataOutputStream out = null;

	try {
		in = remoteFileHandle.openInputStream();
		stateBackend.cancelStreamRegistry.registerCloseable(in);

		out = restoreFileSystem.create(restoreFilePath, FileSystem.WriteMode.OVERWRITE);
		stateBackend.cancelStreamRegistry.registerCloseable(out);

		// stream the remote data over in 8 KiB chunks
		final byte[] chunk = new byte[8 * 1024];
		for (int numRead = in.read(chunk); numRead != -1; numRead = in.read(chunk)) {
			out.write(chunk, 0, numRead);
		}
	} finally {
		if (in != null && stateBackend.cancelStreamRegistry.unregisterCloseable(in)) {
			in.close();
		}
		if (out != null && stateBackend.cancelStreamRegistry.unregisterCloseable(out)) {
			out.close();
		}
	}
}
示例2: testSimpleFileWriteAndRead
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Writes a single line to a new S3 object, reads it back, and verifies the round trip.
 * The object is deleted again in the finally block so the test leaves no residue.
 */
@Test
public void testSimpleFileWriteAndRead() throws Exception {
	final String testLine = "Hello Upload!";

	// register the S3 credentials before any file system is instantiated
	final Configuration conf = new Configuration();
	conf.setString("s3.access.key", ACCESS_KEY);
	conf.setString("s3.secret.key", SECRET_KEY);
	FileSystem.initialize(conf);

	final Path path = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/test.txt");
	final FileSystem fs = path.getFileSystem();

	try {
		// write the test line as UTF-8
		try (OutputStreamWriter writer = new OutputStreamWriter(
				fs.create(path, WriteMode.OVERWRITE), StandardCharsets.UTF_8)) {
			writer.write(testLine);
		}

		// read it back and compare
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
			assertEquals(testLine, reader.readLine());
		}
	}
	finally {
		fs.delete(path, false);
	}
}
示例3: copy
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Recursively copies the file or directory at {@code sourcePath} to {@code targetPath},
 * skipping targets that already exist.
 *
 * @param sourcePath source file or directory
 * @param targetPath target location; nothing is done if it already exists
 * @param executable whether copied files should be marked executable on the local FS
 * @throws IOException if listing, reading, or writing fails
 */
public static void copy(Path sourcePath, Path targetPath, boolean executable) throws IOException {
	// TODO rewrite this to make it participate in the closable registry and the lifecycle of a task.
	// we unwrap the file system to get raw streams without safety net
	FileSystem sFS = FileSystem.getUnguardedFileSystem(sourcePath.toUri());
	FileSystem tFS = FileSystem.getUnguardedFileSystem(targetPath.toUri());
	if (!tFS.exists(targetPath)) {
		if (sFS.getFileStatus(sourcePath).isDir()) {
			tFS.mkdirs(targetPath);
			FileStatus[] contents = sFS.listStatus(sourcePath);
			for (FileStatus content : contents) {
				String distPath = content.getPath().toString();
				if (content.isDir()) {
					// strip a trailing slash so lastIndexOf("/") below yields the entry name
					if (distPath.endsWith("/")) {
						distPath = distPath.substring(0, distPath.length() - 1);
					}
				}
				String localPath = targetPath.toString() + distPath.substring(distPath.lastIndexOf("/"));
				copy(content.getPath(), new Path(localPath), executable);
			}
		} else {
			try (FSDataOutputStream lfsOutput = tFS.create(targetPath, FileSystem.WriteMode.NO_OVERWRITE); FSDataInputStream fsInput = sFS.open(sourcePath)) {
				IOUtils.copyBytes(fsInput, lfsOutput);
				//noinspection ResultOfMethodCallIgnored
				new File(targetPath.toString()).setExecutable(executable);
			} catch (IOException ioe) {
				// FIX: previously this was logged and swallowed, so callers believed the
				// copy succeeded even when it failed. Log for context, then propagate.
				LOG.error("could not copy file to local file cache.", ioe);
				throw new IOException(
					"Could not copy file from " + sourcePath + " to " + targetPath + '.', ioe);
			}
		}
	}
}
示例4: open
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Opens the output stream this parallel instance writes to.
 *
 * <p>For local (non-distributed) file systems the output path is prepared first: existing
 * files are handled according to the configured write mode, and a directory is created when
 * several tasks (or OutputDirectoryMode.ALWAYS) write into the same path. The per-task file
 * name is then suffixed onto the base path where required.
 *
 * @param taskNumber zero-based index of this parallel instance
 * @param numTasks total number of parallel instances
 * @throws IOException if the output path cannot be initialized or the file cannot be created
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
	if (taskNumber < 0 || numTasks < 1) {
		throw new IllegalArgumentException("TaskNumber: " + taskNumber + ", numTasks: " + numTasks);
	}

	if (LOG.isDebugEnabled()) {
		LOG.debug("Opening stream for output (" + (taskNumber+1) + "/" + numTasks + "). WriteMode=" + writeMode +
			", OutputDirectoryMode=" + outputDirectoryMode);
	}

	final Path p = this.outputFilePath;
	if (p == null) {
		throw new IOException("The file path is null.");
	}

	final FileSystem fs = p.getFileSystem();

	// local file systems need their output location prepared up front
	if (!fs.isDistributedFS()) {
		final boolean singleFileOutput =
			numTasks == 1 && outputDirectoryMode == OutputDirectoryMode.PARONLY;

		if (singleFileOutput) {
			// prepare local output path; checks write mode and removes existing files for OVERWRITE
			if (!fs.initOutPathLocalFS(p, writeMode, false)) {
				throw new IOException("Output path '" + p.toString() + "' could not be initialized. Canceling task...");
			}
		} else {
			// numTasks > 1 || outDirMode == OutputDirectoryMode.ALWAYS
			if (!fs.initOutPathLocalFS(p, writeMode, true)) {
				throw new IOException("Output directory '" + p.toString() + "' could not be created. Canceling task...");
			}
		}
	}

	// suffix the path with the parallel instance index where multiple writers share the path
	final boolean perTaskFile = numTasks > 1 || outputDirectoryMode == OutputDirectoryMode.ALWAYS;
	this.actualFilePath = perTaskFile ? p.suffix("/" + getDirectoryFileName(taskNumber)) : p;

	this.stream = fs.create(this.actualFilePath, writeMode);

	// at this point, the file creation must have succeeded, or an exception has been thrown
	this.fileCreated = true;
}
示例5: testDirectoryListing
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Verifies directory creation, file listing, and recursive deletion against S3:
 * the directory must not pre-exist, must list exactly the files written into it,
 * and must be gone after recursive delete.
 */
@Test
public void testDirectoryListing() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access.key", ACCESS_KEY);
	conf.setString("s3.secret.key", SECRET_KEY);

	FileSystem.initialize(conf);

	final Path directory = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/testdir/");
	final FileSystem fs = directory.getFileSystem();

	// directory must not yet exist
	assertFalse(fs.exists(directory));

	try {
		// create directory
		assertTrue(fs.mkdirs(directory));

		// seems the presto file system does not assume existence of empty directories in S3
		assertTrue(fs.exists(directory));

		// directory empty
		assertEquals(0, fs.listStatus(directory).length);

		// create some files
		final int numFiles = 3;
		for (int i = 0; i < numFiles; i++) {
			Path file = new Path(directory, "/file-" + i);
			try (FSDataOutputStream out = fs.create(file, WriteMode.NO_OVERWRITE);
					OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
				writer.write("hello-" + i + "\n");
			}
		}

		FileStatus[] files = fs.listStatus(directory);
		assertNotNull(files);
		// FIX: assert against numFiles instead of duplicating the magic constant 3
		assertEquals(numFiles, files.length);

		for (FileStatus status : files) {
			assertFalse(status.isDir());
		}

		// now that there are files, the directory must exist
		assertTrue(fs.exists(directory));
	}
	finally {
		// clean up
		fs.delete(directory, true);
	}

	// now directory must be gone
	assertFalse(fs.exists(directory));
}
示例6: testSimpleFileWriteAndRead
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Round-trips a single line through a new S3 object: write, read back, compare.
 * The object is removed again in the finally block.
 */
@Test
public void testSimpleFileWriteAndRead() throws Exception {
	final String testLine = "Hello Upload!";

	// register the S3 credentials before any file system is instantiated
	final Configuration conf = new Configuration();
	conf.setString("s3.access-key", ACCESS_KEY);
	conf.setString("s3.secret-key", SECRET_KEY);
	FileSystem.initialize(conf);

	final Path path = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/test.txt");
	final FileSystem fs = path.getFileSystem();

	try {
		// write the test line as UTF-8
		try (OutputStreamWriter writer = new OutputStreamWriter(
				fs.create(path, WriteMode.OVERWRITE), StandardCharsets.UTF_8)) {
			writer.write(testLine);
		}

		// read it back and compare
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(fs.open(path), StandardCharsets.UTF_8))) {
			assertEquals(testLine, reader.readLine());
		}
	}
	finally {
		fs.delete(path, false);
	}
}
示例7: testDirectoryListing
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Verifies directory creation, file listing, and recursive deletion against S3 via the
 * presto file system. Note that presto does not report empty directories as existing,
 * hence the disabled existence check right after mkdirs.
 */
@Test
public void testDirectoryListing() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access-key", ACCESS_KEY);
	conf.setString("s3.secret-key", SECRET_KEY);

	FileSystem.initialize(conf);

	final Path directory = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/testdir/");
	final FileSystem fs = directory.getFileSystem();

	// directory must not yet exist
	assertFalse(fs.exists(directory));

	try {
		// create directory
		assertTrue(fs.mkdirs(directory));

		// seems the presto file system does not assume existence of empty directories in S3
		// assertTrue(fs.exists(directory));

		// directory empty
		assertEquals(0, fs.listStatus(directory).length);

		// create some files
		final int numFiles = 3;
		for (int i = 0; i < numFiles; i++) {
			Path file = new Path(directory, "/file-" + i);
			try (FSDataOutputStream out = fs.create(file, WriteMode.NO_OVERWRITE);
					OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
				writer.write("hello-" + i + "\n");
			}
		}

		FileStatus[] files = fs.listStatus(directory);
		assertNotNull(files);
		// FIX: assert against numFiles instead of duplicating the magic constant 3
		assertEquals(numFiles, files.length);

		for (FileStatus status : files) {
			assertFalse(status.isDir());
		}

		// now that there are files, the directory must exist
		assertTrue(fs.exists(directory));
	}
	finally {
		// clean up
		fs.delete(directory, true);
	}

	// now directory must be gone
	assertFalse(fs.exists(directory));
}
示例8: storeSavepointToHandle
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Stores the savepoint metadata file to a state handle.
 *
 * <p>The file is created with NO_OVERWRITE so an existing metadata file is never clobbered.
 * If anything fails after creation, the partially written file is deleted best-effort.
 *
 * @param directory Target directory to store savepoint in
 * @param filename Name of the metadata file within the target directory
 * @param savepoint Savepoint to be stored
 *
 * @return State handle to the checkpoint metadata
 * @throws IOException Failures during store are forwarded
 */
static <T extends Savepoint> FileStateHandle storeSavepointToHandle(
		String directory,
		String filename,
		T savepoint) throws IOException {

	checkNotNull(directory, "Target directory");
	checkNotNull(savepoint, "Savepoint");

	final Path basePath = new Path(directory);
	final Path metadataFilePath = new Path(basePath, filename);
	final FileSystem fs = FileSystem.get(basePath.toUri());

	boolean success = false;
	try (FSDataOutputStream rawOut = fs.create(metadataFilePath, WriteMode.NO_OVERWRITE);
			DataOutputStream metadataOut = new DataOutputStream(rawOut)) {

		// header: magic number and savepoint format version
		metadataOut.writeInt(MAGIC_NUMBER);
		metadataOut.writeInt(savepoint.getVersion());

		// payload: the serialized savepoint
		SavepointSerializer<T> serializer = SavepointSerializers.getSerializer(savepoint);
		serializer.serialize(savepoint, metadataOut);

		// everything was written successfully; keep the file
		final FileStateHandle handle = new FileStateHandle(metadataFilePath, metadataOut.size());
		success = true;
		return handle;
	}
	finally {
		// best-effort removal of a partially written metadata file
		if (!success && fs.exists(metadataFilePath)) {
			if (!fs.delete(metadataFilePath, true)) {
				LOG.warn("Failed to delete file {} after failed metadata write.", metadataFilePath);
			}
		}
	}
}
示例9: FixFileFsStateOutputStream
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Creates a state output stream that writes to one fixed, pre-determined file.
 *
 * @param fileSystem the file system to create the file on; must not be null
 * @param path the target file path; must not be null
 * @throws IOException if the file cannot be created (NO_OVERWRITE: an existing file is an error)
 */
public FixFileFsStateOutputStream(FileSystem fileSystem, Path path) throws IOException {
this.fileSystem = checkNotNull(fileSystem);
this.path = checkNotNull(path);
// NO_OVERWRITE: fail fast rather than clobber an existing state file
this.out = fileSystem.create(path, WriteMode.NO_OVERWRITE);
}
示例10: unzipPythonLibrary
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Extracts the bundled {@code python-source.zip} classpath resource into {@code targetDir},
 * creating directories for directory entries and files for file entries.
 *
 * @param targetDir directory to unzip the python library into
 * @throws IOException if the archive is missing from the classpath or extraction fails
 */
private static void unzipPythonLibrary(Path targetDir) throws IOException {
	FileSystem targetFs = targetDir.getFileSystem();
	ClassLoader classLoader = PythonPlanBinder.class.getClassLoader();

	// FIX: fail with a clear message instead of an NPE when the resource is absent
	java.io.InputStream zipArchive = classLoader.getResourceAsStream("python-source.zip");
	if (zipArchive == null) {
		throw new IOException("Failed to unzip flink python library: python-source.zip not found on classpath.");
	}

	// FIX: try-with-resources closes the zip stream on every path (the original leaked it
	// on the normal path) and closes each output stream (the original never closed them,
	// since copyBytes was called with close=false).
	try (ZipInputStream zis = new ZipInputStream(zipArchive)) {
		ZipEntry entry;
		while ((entry = zis.getNextEntry()) != null) {
			Path newFile = new Path(targetDir, entry.getName());
			if (entry.isDirectory()) {
				targetFs.mkdirs(newFile);
			} else {
				try (FSDataOutputStream fsDataOutputStream =
						targetFs.create(newFile, FileSystem.WriteMode.NO_OVERWRITE)) {
					LOG.debug("Unzipping to {}.", newFile);
					IOUtils.copyBytes(zis, fsDataOutputStream, false);
				} catch (Exception e) {
					throw new IOException("Failed to unzip flink python library.", e);
				}
			}
			zis.closeEntry();
		}
	}
}
示例11: testDeletePathIfEmpty
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Test that {@link FileUtils#deletePathIfEmpty(FileSystem, Path)} deletes the path if it is
 * empty. A path can only be empty if it is a directory which does not contain any
 * files/directories.
 */
@Test
public void testDeletePathIfEmpty() throws IOException {
	final Path basePath = new Path(hdfsURI);
	final Path directory = new Path(basePath, UUID.randomUUID().toString());
	final Path directoryFile = new Path(directory, UUID.randomUUID().toString());
	final Path singleFile = new Path(basePath, UUID.randomUUID().toString());

	FileSystem fs = basePath.getFileSystem();

	fs.mkdirs(directory);

	byte[] data = "HDFSTest#testDeletePathIfEmpty".getBytes(ConfigConstants.DEFAULT_CHARSET);

	for (Path file: Arrays.asList(singleFile, directoryFile)) {
		// FIX: try-with-resources closes the stream even when write() throws
		// (the original leaked the stream on a failed write)
		try (org.apache.flink.core.fs.FSDataOutputStream outputStream =
				fs.create(file, FileSystem.WriteMode.OVERWRITE)) {
			outputStream.write(data);
		}
	}

	// verify that the files have been created
	assertTrue(fs.exists(singleFile));
	assertTrue(fs.exists(directoryFile));

	// delete the single file
	assertFalse(FileUtils.deletePathIfEmpty(fs, singleFile));
	assertTrue(fs.exists(singleFile));

	// try to delete the non-empty directory
	assertFalse(FileUtils.deletePathIfEmpty(fs, directory));
	assertTrue(fs.exists(directory));

	// delete the file contained in the directory
	assertTrue(fs.delete(directoryFile, false));

	// now the deletion should work
	assertTrue(FileUtils.deletePathIfEmpty(fs, directory));
	assertFalse(fs.exists(directory));
}
示例12: testPointerPathResolution
import org.apache.flink.core.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Checks that a checkpoint can be resolved from three equivalent pointers: the metadata
 * file path itself, its parent directory, and the parent directory with a trailing slash.
 */
@Test
public void testPointerPathResolution() throws Exception {
	final FileSystem fs = FileSystem.getLocalFileSystem();
	final Path checkpointDir = new Path(tmp.newFolder().toURI());
	final Path metadataFile = new Path(checkpointDir, AbstractFsCheckpointStorage.METADATA_FILE_NAME);

	// three equivalent ways of pointing at the same checkpoint
	final String pointer1 = metadataFile.toString();
	final String pointer2 = metadataFile.getParent().toString();
	final String pointer3 = metadataFile.getParent().toString() + '/';

	final FsStateBackend backend = new FsStateBackend(checkpointDir);

	// random payload standing in for real metadata
	final byte[] data = new byte[23686];
	new Random().nextBytes(data);

	try (FSDataOutputStream out = fs.create(metadataFile, WriteMode.NO_OVERWRITE)) {
		out.write(data);
	}

	// each pointer must resolve to a handle with the written contents
	for (String pointer : new String[] {pointer1, pointer2, pointer3}) {
		StreamStateHandle handle = backend.resolveCheckpoint(pointer);
		assertNotNull(handle);
		validateContents(handle, data);
	}
}