This article collects typical usage examples of the Java method org.apache.flink.core.fs.FileSystem.exists. If you have been wondering what FileSystem.exists does and how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.core.fs.FileSystem.
The following shows 9 code examples of FileSystem.exists, ordered by popularity by default.
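All nine examples follow the same basic pattern: resolve a FileSystem implementation from a Path's URI scheme (local, HDFS, S3, ...), then probe the path with exists(). Here is a minimal, self-contained sketch of that pattern; the path below is a placeholder:

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import java.io.IOException;

public class ExistsExample {
	public static void main(String[] args) throws IOException {
		Path path = new Path("file:///tmp/some-file"); // placeholder path
		// pick the FileSystem implementation matching the URI scheme
		FileSystem fs = FileSystem.get(path.toUri());
		// exists() returns true if a file or directory is present at the path
		System.out.println(path + (fs.exists(path) ? " exists" : " does not exist"));
	}
}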
Example 1: registerCachedFile

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Registers a cached file at the program level.
 *
 * @param name  user-defined name of the file
 * @param entry contains all relevant information about the file
 * @throws java.io.IOException if the file does not exist or the name is already taken
 */
public void registerCachedFile(String name, DistributedCacheEntry entry) throws IOException {
	if (!this.cacheFile.containsKey(name)) {
		try {
			URI u = new URI(entry.filePath);
			if (!u.getPath().startsWith("/")) {
				u = new File(entry.filePath).toURI();
			}
			FileSystem fs = FileSystem.get(u);
			if (fs.exists(new Path(u.getPath()))) {
				this.cacheFile.put(name, new DistributedCacheEntry(u.toString(), entry.isExecutable));
			} else {
				throw new IOException("File " + u.toString() + " doesn't exist.");
			}
		} catch (URISyntaxException ex) {
			throw new IOException("Invalid path: " + entry.filePath, ex);
		}
	} else {
		throw new IOException("cache file " + name + " already exists!");
	}
}
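For orientation, a hedged usage sketch: the object exposing registerCachedFile is not shown in the snippet, so "plan" below is hypothetical, and the file path is a placeholder. DistributedCacheEntry takes the file path and an executable flag:

// hypothetical caller; 'plan' stands for whatever object exposes registerCachedFile
DistributedCacheEntry entry = new DistributedCacheEntry("hdfs:///shared/dictionary.txt", false);
plan.registerCachedFile("dictionary", entry);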
Example 2: removeSavepointFile

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Removes the savepoint meta data w/o loading and disposing it.
 *
 * @param path Path of savepoint to remove
 * @throws IOException Failures during disposal are forwarded
 */
public static void removeSavepointFile(String path) throws IOException {
	Preconditions.checkNotNull(path, "Path");
	try {
		LOG.info("Removing savepoint: {}.", path);
		Path filePath = new Path(path);
		FileSystem fs = FileSystem.get(filePath.toUri());
		if (fs.exists(filePath)) {
			if (!fs.delete(filePath, true)) {
				throw new IOException("Failed to delete " + filePath + ".");
			}
		} else {
			throw new IllegalArgumentException("Invalid path '" + filePath.toUri() + "'.");
		}
	} catch (Throwable t) {
		throw new IOException("Failed to dispose savepoint " + path + ".", t);
	}
}
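A hedged caller sketch: the enclosing class is not shown in the snippet, so SavepointStore below is an assumption, and the savepoint path is a placeholder. Note that the method wraps every failure, including the IllegalArgumentException raised for a missing path, in an IOException, so one catch clause suffices:

try {
	// hypothetical enclosing class and placeholder path
	SavepointStore.removeSavepointFile("hdfs:///flink/savepoints/savepoint-1234");
} catch (IOException e) {
	// I/O failures and invalid/missing paths both end up here
	LOG.warn("Could not remove savepoint.", e);
}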
Example 3: testStoreExternalizedCheckpointsToSameDirectory

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Tests that multiple externalized checkpoints can be stored to the same
 * directory.
 */
@Test
public void testStoreExternalizedCheckpointsToSameDirectory() throws Exception {
	String root = tmp.newFolder().getAbsolutePath();
	FileSystem fs = FileSystem.get(new Path(root).toUri());

	// Store
	SavepointV2 savepoint = new SavepointV2(
		1929292,
		CheckpointTestUtils.createOperatorStates(4, 24),
		Collections.<MasterState>emptyList());

	FileStateHandle store1 = SavepointStore.storeExternalizedCheckpointToHandle(root, savepoint);
	// assert the result instead of silently discarding the boolean
	assertTrue(fs.exists(store1.getFilePath()));
	assertTrue(store1.getFilePath().getPath().contains(SavepointStore.EXTERNALIZED_CHECKPOINT_METADATA_FILE));

	FileStateHandle store2 = SavepointStore.storeExternalizedCheckpointToHandle(root, savepoint);
	assertTrue(fs.exists(store2.getFilePath()));
	assertTrue(store2.getFilePath().getPath().contains(SavepointStore.EXTERNALIZED_CHECKPOINT_METADATA_FILE));
}
Example 4: copy

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
public static void copy(Path sourcePath, Path targetPath, boolean executable) throws IOException {
	// TODO rewrite this to make it participate in the closable registry and the lifecycle of a task.
	// we unwrap the file system to get raw streams without safety net
	FileSystem sFS = FileSystem.getUnguardedFileSystem(sourcePath.toUri());
	FileSystem tFS = FileSystem.getUnguardedFileSystem(targetPath.toUri());
	if (!tFS.exists(targetPath)) {
		if (sFS.getFileStatus(sourcePath).isDir()) {
			// copy a directory recursively, entry by entry
			tFS.mkdirs(targetPath);
			FileStatus[] contents = sFS.listStatus(sourcePath);
			for (FileStatus content : contents) {
				String distPath = content.getPath().toString();
				if (content.isDir()) {
					// strip a trailing slash so the last path segment can be extracted below
					if (distPath.endsWith("/")) {
						distPath = distPath.substring(0, distPath.length() - 1);
					}
				}
				// append the last path segment of the source entry to the target directory
				String localPath = targetPath.toString() + distPath.substring(distPath.lastIndexOf("/"));
				copy(content.getPath(), new Path(localPath), executable);
			}
		} else {
			// copy a single file, refusing to overwrite an existing target
			try (FSDataOutputStream lfsOutput = tFS.create(targetPath, FileSystem.WriteMode.NO_OVERWRITE); FSDataInputStream fsInput = sFS.open(sourcePath)) {
				IOUtils.copyBytes(fsInput, lfsOutput);
				//noinspection ResultOfMethodCallIgnored
				new File(targetPath.toString()).setExecutable(executable);
			} catch (IOException ioe) {
				LOG.error("could not copy file to local file cache.", ioe);
			}
		}
	}
}
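A short usage sketch with placeholder paths. Because each FileSystem is resolved from its URI scheme, source and target may live on different file systems, e.g. copying a remote directory into a local cache:

// placeholder paths
Path source = new Path("hdfs:///jobs/resources");
Path target = new Path("file:///tmp/flink-cache/resources");
copy(source, target, false); // 'false': do not mark the copied files as executable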
Example 5: deleteIfExists

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
private static void deleteIfExists(Path path) throws IOException {
	FileSystem fs = path.getFileSystem();
	if (fs.exists(path)) {
		fs.delete(path, true);
	}
}
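Note that the exists-then-delete pair is not atomic: the path can appear or vanish between the two calls (a time-of-check/time-of-use race). If the goal is simply "make sure the path is gone", a sketch that skips the check and relies on delete() alone is possible, assuming the FileSystem implementation in use treats deleting a missing path as a harmless no-op (verify this for your backend):

// hypothetical variant: delete unconditionally, ignoring the boolean result
private static void deleteQuietly(Path path) throws IOException {
	FileSystem fs = path.getFileSystem();
	fs.delete(path, true); // 'true' = recursive
}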
Example 6: storeSavepointToHandle

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Stores the savepoint metadata file to a state handle.
 *
 * @param directory Target directory to store savepoint in
 * @param filename  Name of the metadata file within the target directory
 * @param savepoint Savepoint to be stored
 *
 * @return State handle to the checkpoint metadata
 * @throws IOException Failures during store are forwarded
 */
static <T extends Savepoint> FileStateHandle storeSavepointToHandle(
		String directory,
		String filename,
		T savepoint) throws IOException {

	checkNotNull(directory, "Target directory");
	checkNotNull(savepoint, "Savepoint");

	final Path basePath = new Path(directory);
	final Path metadataFilePath = new Path(basePath, filename);
	final FileSystem fs = FileSystem.get(basePath.toUri());

	boolean success = false;
	try (FSDataOutputStream fdos = fs.create(metadataFilePath, WriteMode.NO_OVERWRITE);
			DataOutputStream dos = new DataOutputStream(fdos)) {
		// Write header
		dos.writeInt(MAGIC_NUMBER);
		dos.writeInt(savepoint.getVersion());

		// Write savepoint
		SavepointSerializer<T> serializer = SavepointSerializers.getSerializer(savepoint);
		serializer.serialize(savepoint, dos);

		// construct result handle
		FileStateHandle handle = new FileStateHandle(metadataFilePath, dos.size());

		// all good!
		success = true;
		return handle;
	}
	finally {
		// on failure, remove the partially written metadata file, if it exists
		if (!success && fs.exists(metadataFilePath)) {
			if (!fs.delete(metadataFilePath, true)) {
				LOG.warn("Failed to delete file {} after failed metadata write.", metadataFilePath);
			}
		}
	}
}
Example 7: loadSavepointWithHandle

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Loads the savepoint at the specified path. This method returns the savepoint, as well as the
 * handle to the metadata.
 *
 * @param savepointFileOrDirectory Path to the parent savepoint directory or the meta data file.
 * @param classLoader The class loader used to resolve serialized classes from legacy savepoint formats.
 * @return The loaded savepoint
 *
 * @throws IOException Failures during load are forwarded
 */
public static Tuple2<Savepoint, StreamStateHandle> loadSavepointWithHandle(
		String savepointFileOrDirectory,
		ClassLoader classLoader) throws IOException {

	checkNotNull(savepointFileOrDirectory, "savepointFileOrDirectory");
	checkNotNull(classLoader, "classLoader");

	Path path = new Path(savepointFileOrDirectory);

	LOG.info("Loading savepoint from {}", path);

	FileSystem fs = FileSystem.get(path.toUri());
	FileStatus status = fs.getFileStatus(path);

	// If this is a directory, we need to find the meta data file
	if (status.isDir()) {
		Path candidatePath = new Path(path, SAVEPOINT_METADATA_FILE);
		if (fs.exists(candidatePath)) {
			path = candidatePath;
			LOG.info("Using savepoint file in {}", path);
		} else {
			throw new IOException("Cannot find meta data file in directory " + path
				+ ". Please try to load the savepoint directly from the meta data file "
				+ "instead of the directory.");
		}
	}

	// load the savepoint
	final Savepoint savepoint;
	try (DataInputStream dis = new DataInputViewStreamWrapper(fs.open(path))) {
		int magicNumber = dis.readInt();

		if (magicNumber == MAGIC_NUMBER) {
			int version = dis.readInt();
			SavepointSerializer<?> serializer = SavepointSerializers.getSerializer(version);
			savepoint = serializer.deserialize(dis, classLoader);
		} else {
			throw new RuntimeException("Unexpected magic number. This can have multiple reasons: " +
				"(1) You are trying to load a Flink 1.0 savepoint, which is not supported by this " +
				"version of Flink. (2) The file you were pointing to is not a savepoint at all. " +
				"(3) The savepoint file has been corrupted.");
		}
	}

	// construct the stream handle to the metadata file
	// we get the size best-effort
	long size = 0;
	try {
		size = fs.getFileStatus(path).getLen();
	}
	catch (Exception ignored) {
		// we don't know the size, but we don't want to fail the savepoint loading for that
	}
	StreamStateHandle metadataHandle = new FileStateHandle(path, size);

	return new Tuple2<>(savepoint, metadataHandle);
}
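A hedged caller sketch (the savepoint path is a placeholder):

// the path may point at either the savepoint directory or its metadata file
Tuple2<Savepoint, StreamStateHandle> loaded = loadSavepointWithHandle(
	"hdfs:///flink/savepoints/savepoint-1234",
	Thread.currentThread().getContextClassLoader());
Savepoint savepoint = loaded.f0; // f1 holds the handle to the metadata file
LOG.info("Loaded savepoint version {}.", savepoint.getVersion());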
Example 8: testBlobServerRecovery

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
/**
 * Helper to test that the {@link BlobServer} recovery from its HA store works.
 *
 * <p>Uploads two BLOBs to one {@link BlobServer} and expects a second one to be able to retrieve
 * them via a shared HA store upon request of a {@link BlobCacheService}.
 *
 * @param config
 *        blob server configuration (including HA settings like {@link HighAvailabilityOptions#HA_STORAGE_PATH}
 *        and {@link HighAvailabilityOptions#HA_CLUSTER_ID}) used to set up <tt>blobStore</tt>
 * @param blobStore
 *        shared HA blob store to use
 *
 * @throws IOException
 *         in case of failures
 */
public static void testBlobServerRecovery(final Configuration config, final BlobStore blobStore) throws IOException {
	final String clusterId = config.getString(HighAvailabilityOptions.HA_CLUSTER_ID);
	String storagePath = config.getString(HighAvailabilityOptions.HA_STORAGE_PATH) + "/" + clusterId;
	Random rand = new Random();

	try (
		BlobServer server0 = new BlobServer(config, blobStore);
		BlobServer server1 = new BlobServer(config, blobStore);
		// use VoidBlobStore as the HA store to force download from server[1]'s HA store
		BlobCacheService cache1 = new BlobCacheService(
			new InetSocketAddress("localhost", server1.getPort()), config,
			new VoidBlobStore())) {

		server0.start();
		server1.start();

		// Random data
		byte[] expected = new byte[1024];
		rand.nextBytes(expected);
		byte[] expected2 = Arrays.copyOfRange(expected, 32, 288);

		BlobKey[] keys = new BlobKey[2];
		BlobKey nonHAKey;

		// Put job-related HA data
		JobID[] jobId = new JobID[] { new JobID(), new JobID() };
		keys[0] = put(server0, jobId[0], expected, PERMANENT_BLOB); // Request 1
		keys[1] = put(server0, jobId[1], expected2, PERMANENT_BLOB); // Request 2

		// put non-HA data
		nonHAKey = put(server0, jobId[0], expected2, TRANSIENT_BLOB);
		verifyKeyDifferentHashEquals(keys[1], nonHAKey);

		// check that the storage directory exists
		final Path blobServerPath = new Path(storagePath, "blob");
		FileSystem fs = blobServerPath.getFileSystem();
		assertTrue("Unknown storage dir: " + blobServerPath, fs.exists(blobServerPath));

		// Verify HA requests from cache1 (connected to server1) with no immediate access to the file
		verifyContents(cache1, jobId[0], keys[0], expected);
		verifyContents(cache1, jobId[1], keys[1], expected2);

		// Verify non-HA file is not accessible from server1
		verifyDeleted(cache1, jobId[0], nonHAKey);

		// Remove again
		server1.cleanupJob(jobId[0]);
		server1.cleanupJob(jobId[1]);

		// Verify everything is clean
		assertTrue("HA storage directory does not exist", fs.exists(new Path(storagePath)));
		if (fs.exists(blobServerPath)) {
			final org.apache.flink.core.fs.FileStatus[] recoveryFiles =
				fs.listStatus(blobServerPath);
			ArrayList<String> filenames = new ArrayList<>(recoveryFiles.length);
			for (org.apache.flink.core.fs.FileStatus file: recoveryFiles) {
				filenames.add(file.toString());
			}
			fail("Unclean state backend: " + filenames);
		}
	}
}
Example 9: run

import org.apache.flink.core.fs.FileSystem; // import the class the method depends on
@Override
public void run(SourceFunction.SourceContext<TimestampedFileInputSplit> context) throws Exception {
	Path p = new Path(path);
	FileSystem fileSystem = FileSystem.get(p.toUri());
	if (!fileSystem.exists(p)) {
		throw new FileNotFoundException("The provided file path " + path + " does not exist.");
	}

	checkpointLock = context.getCheckpointLock();
	switch (watchType) {
		case PROCESS_CONTINUOUSLY:
			while (isRunning) {
				synchronized (checkpointLock) {
					monitorDirAndForwardSplits(fileSystem, context);
				}
				Thread.sleep(interval);
			}

			// here we do not need to set the running to false and the
			// globalModificationTime to Long.MAX_VALUE because to arrive here,
			// either close() or cancel() have already been called, so this
			// is already done.

			break;
		case PROCESS_ONCE:
			synchronized (checkpointLock) {

				// the following check guarantees that if we restart
				// after a failure and we managed to have a successful
				// checkpoint, we will not reprocess the directory.

				if (globalModificationTime == Long.MIN_VALUE) {
					monitorDirAndForwardSplits(fileSystem, context);
					globalModificationTime = Long.MAX_VALUE;
				}
				isRunning = false;
			}
			break;
		default:
			isRunning = false;
			throw new RuntimeException("Unknown WatchType: " + watchType);
	}
}