

Java FileSystem.delete Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.core.fs.FileSystem.delete. If you are wondering what FileSystem.delete does, how to call it, or what its usage looks like in practice, the curated code examples below may help. You can also explore further examples of the enclosing class, org.apache.flink.core.fs.FileSystem.


The following presents 14 code examples of the FileSystem.delete method, sorted by popularity by default.
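As a quick orientation before the collected examples: FileSystem.delete(Path f, boolean recursive) removes a file or directory, where the second argument controls whether non-empty directories are deleted recursively, and the return value indicates whether the deletion succeeded. The minimal sketch below illustrates that call pattern; the temporary path and class name used here are hypothetical and not taken from any of the examples that follow.

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class FileSystemDeleteSketch {
	public static void main(String[] args) throws Exception {
		// Hypothetical path; any local file or directory would do.
		Path path = new Path("/tmp/flink-delete-sketch");

		// Resolve the file system responsible for this path (the local file system here).
		FileSystem fs = path.getFileSystem();

		// Second argument: true deletes directories recursively,
		// false deletes a single file or an empty directory.
		if (fs.exists(path)) {
			boolean deleted = fs.delete(path, true);
			System.out.println("Deleted: " + deleted);
		}
	}
}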

Example 1: testCompression

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testCompression() throws Exception {
	// given
	final Path outputPath = new Path(File.createTempFile("avro-output-file", "avro").getAbsolutePath());
	final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(outputPath, User.class);
	outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);

	final Path compressedOutputPath = new Path(File.createTempFile("avro-output-file", "compressed.avro").getAbsolutePath());
	final AvroOutputFormat<User> compressedOutputFormat = new AvroOutputFormat<>(compressedOutputPath, User.class);
	compressedOutputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
	compressedOutputFormat.setCodec(Codec.SNAPPY);

	// when
	output(outputFormat);
	output(compressedOutputFormat);

	// then
	assertTrue(fileSize(outputPath) > fileSize(compressedOutputPath));

	// cleanup
	FileSystem fs = FileSystem.getLocalFileSystem();
	fs.delete(outputPath, false);
	fs.delete(compressedOutputPath, false);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: AvroOutputFormatTest.java

Example 2: testCompression

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testCompression() throws Exception {
	// given
	final Path outputPath = new Path(File.createTempFile("avro-output-file", "avro").getAbsolutePath());
	final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(outputPath, User.class);
	outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);

	final Path compressedOutputPath = new Path(File.createTempFile("avro-output-file", "compressed.avro").getAbsolutePath());
	final AvroOutputFormat<User> compressedOutputFormat = new AvroOutputFormat<>(compressedOutputPath, User.class);
	compressedOutputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
	compressedOutputFormat.setCodec(AvroOutputFormat.Codec.SNAPPY);

	// when
	output(outputFormat);
	output(compressedOutputFormat);

	// then
	assertTrue(fileSize(outputPath) > fileSize(compressedOutputPath));

	// cleanup
	FileSystem fs = FileSystem.getLocalFileSystem();
	fs.delete(outputPath, false);
	fs.delete(compressedOutputPath, false);
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: AvroOutputFormatTest.java

Example 3: testGenericRecord

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testGenericRecord() throws IOException {
	final Path outputPath = new Path(File.createTempFile("avro-output-file", "generic.avro").getAbsolutePath());
	final AvroOutputFormat<GenericRecord> outputFormat = new AvroOutputFormat<>(outputPath, GenericRecord.class);
	Schema schema = new Schema.Parser().parse("{\"type\":\"record\", \"name\":\"user\", \"fields\": [{\"name\":\"user_name\", \"type\":\"string\"}, {\"name\":\"favorite_number\", \"type\":\"int\"}, {\"name\":\"favorite_color\", \"type\":\"string\"}]}");
	outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
	outputFormat.setSchema(schema);
	output(outputFormat, schema);

	GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
	DataFileReader<GenericRecord> dataFileReader = new DataFileReader<>(new File(outputPath.getPath()), reader);

	while (dataFileReader.hasNext()) {
		GenericRecord record = dataFileReader.next();
		assertEquals(record.get("user_name").toString(), "testUser");
		assertEquals(record.get("favorite_number"), 1);
		assertEquals(record.get("favorite_color").toString(), "blue");
	}

	//cleanup
	FileSystem fs = FileSystem.getLocalFileSystem();
	fs.delete(outputPath, false);

}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: AvroOutputFormatTest.java

Example 4: cleanUp

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@AfterClass
public static void cleanUp() throws IOException {
	if (!skipTest) {
		// initialize configuration with valid credentials
		final Configuration conf = new Configuration();
		conf.setString("s3.access.key", ACCESS_KEY);
		conf.setString("s3.secret.key", SECRET_KEY);
		FileSystem.initialize(conf);

		final Path directory = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR);
		final FileSystem fs = directory.getFileSystem();

		// clean up
		fs.delete(directory, true);

		// now directory must be gone
		assertFalse(fs.exists(directory));

		// reset configuration
		FileSystem.initialize(new Configuration());
	}
}
 
Developer: axbaretto, Project: flink, Lines: 23, Source: HadoopS3FileSystemITCase.java

Example 5: removeSavepointFile

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
/**
 * Removes the savepoint meta data w/o loading and disposing it.
 *
 * @param path Path of savepoint to remove
 * @throws IOException Failures during disposal are forwarded
 */
public static void removeSavepointFile(String path) throws IOException {
	Preconditions.checkNotNull(path, "Path");

	try {
		LOG.info("Removing savepoint: {}.", path);

		Path filePath = new Path(path);
		FileSystem fs = FileSystem.get(filePath.toUri());

		if (fs.exists(filePath)) {
			if (!fs.delete(filePath, true)) {
				throw new IOException("Failed to delete " + filePath + ".");
			}
		} else {
			throw new IllegalArgumentException("Invalid path '" + filePath.toUri() + "'.");
		}
	} catch (Throwable t) {
		throw new IOException("Failed to dispose savepoint " + path + ".", t);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 27, Source: SavepointStore.java

Example 6: FsNegativeRunningJobsRegistry

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
/**
 * Creates a new registry that writes its files to the given FileSystem at
 * the given working directory path.
 * 
 * <p>The initialization will attempt to write to the given working directory, in
 * order to catch setup/configuration errors early.
 *
 * @param fileSystem The FileSystem to use for the marker files.
 * @param workingDirectory The working directory for files to track the job status.
 *
 * @throws IOException Thrown, if the specified directory cannot be accessed.
 */
public FsNegativeRunningJobsRegistry(FileSystem fileSystem, Path workingDirectory) throws IOException {
	this.fileSystem = checkNotNull(fileSystem, "fileSystem");
	this.basePath = checkNotNull(workingDirectory, "workingDirectory");

	// to be safe, attempt to write to the working directory, to
	// catch problems early
	final Path testFile = new Path(workingDirectory, ".registry_test");
	try {
		createFile(testFile, false);
	}
	catch (IOException e) {
		throw new IOException("Unable to write to working directory: " + workingDirectory, e);
	}
	finally {
		fileSystem.delete(testFile, false);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 30, Source: FsNegativeRunningJobsRegistry.java

Example 7: testSimpleFileWriteAndRead

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testSimpleFileWriteAndRead() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access.key", ACCESS_KEY);
	conf.setString("s3.secret.key", SECRET_KEY);

	final String testLine = "Hello Upload!";

	FileSystem.initialize(conf);

	final Path path = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/test.txt");
	final FileSystem fs = path.getFileSystem();

	try {
		try (FSDataOutputStream out = fs.create(path, WriteMode.OVERWRITE);
				OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
			writer.write(testLine);
		}

		try (FSDataInputStream in = fs.open(path);
				InputStreamReader ir = new InputStreamReader(in, StandardCharsets.UTF_8);
				BufferedReader reader = new BufferedReader(ir)) {
			String line = reader.readLine();
			assertEquals(testLine, line);
		}
	}
	finally {
		fs.delete(path, false);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 31, Source: HadoopS3FileSystemITCase.java

Example 8: deleteIfExists

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
private static void deleteIfExists(Path path) throws IOException {
	FileSystem fs = path.getFileSystem();
	if (fs.exists(path)) {
		fs.delete(path, true);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 7, Source: PythonPlanBinder.java

Example 9: testDirectoryListing

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testDirectoryListing() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access.key", ACCESS_KEY);
	conf.setString("s3.secret.key", SECRET_KEY);

	FileSystem.initialize(conf);

	final Path directory = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/testdir/");
	final FileSystem fs = directory.getFileSystem();

	// directory must not yet exist
	assertFalse(fs.exists(directory));

	try {
		// create directory
		assertTrue(fs.mkdirs(directory));

		// seems the presto file system does not assume existence of empty directories in S3
		assertTrue(fs.exists(directory));

		// directory empty
		assertEquals(0, fs.listStatus(directory).length);

		// create some files
		final int numFiles = 3;
		for (int i = 0; i < numFiles; i++) {
			Path file = new Path(directory, "/file-" + i);
			try (FSDataOutputStream out = fs.create(file, WriteMode.NO_OVERWRITE);
					OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
				writer.write("hello-" + i + "\n");
			}
		}

		FileStatus[] files = fs.listStatus(directory);
		assertNotNull(files);
		assertEquals(3, files.length);

		for (FileStatus status : files) {
			assertFalse(status.isDir());
		}

		// now that there are files, the directory must exist
		assertTrue(fs.exists(directory));
	}
	finally {
		// clean up
		fs.delete(directory, true);
	}

	// now directory must be gone
	assertFalse(fs.exists(directory));
}
 
Developer: axbaretto, Project: flink, Lines: 54, Source: HadoopS3FileSystemITCase.java

Example 10: testSimpleFileWriteAndRead

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testSimpleFileWriteAndRead() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access-key", ACCESS_KEY);
	conf.setString("s3.secret-key", SECRET_KEY);

	final String testLine = "Hello Upload!";

	FileSystem.initialize(conf);

	final Path path = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/test.txt");
	final FileSystem fs = path.getFileSystem();

	try {
		try (FSDataOutputStream out = fs.create(path, WriteMode.OVERWRITE);
				OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
			writer.write(testLine);
		}

		try (FSDataInputStream in = fs.open(path);
				InputStreamReader ir = new InputStreamReader(in, StandardCharsets.UTF_8);
				BufferedReader reader = new BufferedReader(ir)) {
			String line = reader.readLine();
			assertEquals(testLine, line);
		}
	}
	finally {
		fs.delete(path, false);
	}
}
 
Developer: axbaretto, Project: flink, Lines: 31, Source: PrestoS3FileSystemITCase.java

Example 11: testDirectoryListing

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
@Test
public void testDirectoryListing() throws Exception {
	final Configuration conf = new Configuration();
	conf.setString("s3.access-key", ACCESS_KEY);
	conf.setString("s3.secret-key", SECRET_KEY);

	FileSystem.initialize(conf);

	final Path directory = new Path("s3://" + BUCKET + '/' + TEST_DATA_DIR + "/testdir/");
	final FileSystem fs = directory.getFileSystem();

	// directory must not yet exist
	assertFalse(fs.exists(directory));

	try {
		// create directory
		assertTrue(fs.mkdirs(directory));

		// seems the presto file system does not assume existence of empty directories in S3
//		assertTrue(fs.exists(directory));

		// directory empty
		assertEquals(0, fs.listStatus(directory).length);

		// create some files
		final int numFiles = 3;
		for (int i = 0; i < numFiles; i++) {
			Path file = new Path(directory, "/file-" + i);
			try (FSDataOutputStream out = fs.create(file, WriteMode.NO_OVERWRITE);
					OutputStreamWriter writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
				writer.write("hello-" + i + "\n");
			}
		}

		FileStatus[] files = fs.listStatus(directory);
		assertNotNull(files);
		assertEquals(3, files.length);

		for (FileStatus status : files) {
			assertFalse(status.isDir());
		}

		// now that there are files, the directory must exist
		assertTrue(fs.exists(directory));
	}
	finally {
		// clean up
		fs.delete(directory, true);
	}

	// now directory must be gone
	assertFalse(fs.exists(directory));
}
 
Developer: axbaretto, Project: flink, Lines: 54, Source: PrestoS3FileSystemITCase.java

Example 12: storeSavepointToHandle

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
/**
 * Stores the savepoint metadata file to a state handle.
 *
 * @param directory Target directory to store savepoint in
 * @param savepoint Savepoint to be stored
 *
 * @return State handle to the checkpoint metadata
 * @throws IOException Failures during store are forwarded
 */
static <T extends Savepoint> FileStateHandle storeSavepointToHandle(
		String directory,
		String filename,
		T savepoint) throws IOException {

	checkNotNull(directory, "Target directory");
	checkNotNull(savepoint, "Savepoint");

	final Path basePath = new Path(directory);
	final Path metadataFilePath = new Path(basePath, filename);

	final FileSystem fs = FileSystem.get(basePath.toUri());

	boolean success = false;
	try (FSDataOutputStream fdos = fs.create(metadataFilePath, WriteMode.NO_OVERWRITE);
			DataOutputStream dos = new DataOutputStream(fdos))
	{
		// Write header
		dos.writeInt(MAGIC_NUMBER);
		dos.writeInt(savepoint.getVersion());

		// Write savepoint
		SavepointSerializer<T> serializer = SavepointSerializers.getSerializer(savepoint);
		serializer.serialize(savepoint, dos);

		// construct result handle
		FileStateHandle handle = new FileStateHandle(metadataFilePath, dos.size());

		// all good!
		success = true;
		return handle;
	}
	finally {
		if (!success && fs.exists(metadataFilePath)) {
			if (!fs.delete(metadataFilePath, true)) {
				LOG.warn("Failed to delete file {} after failed metadata write.", metadataFilePath);
			}
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 50, Source: SavepointStore.java

Example 13: discardState

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
/**
 * Discard the state by deleting the file that stores the state. If the parent directory
 * of the state is empty after deleting the state file, it is also deleted.
 *
 * @throws Exception Thrown, if the file deletion (not the directory deletion) fails.
 */
@Override
public void discardState() throws Exception {
	FileSystem fs = getFileSystem();

	fs.delete(filePath, false);

	if (fs.getKind() == FileSystemKind.FILE_SYSTEM) {
		try {
			FileUtils.deletePathIfEmpty(fs, filePath.getParent());
		} catch (Exception ignored) {}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 19, Source: FileStateHandle.java

Example 14: deleteSavepointDirectory

import org.apache.flink.core.fs.FileSystem; // import the package/class this method depends on
/**
 * Deletes a savepoint directory.
 *
 * @param savepointDirectory Recursively deletes the given directory
 * @throws IOException FileSystem operation failures are forwarded
 */
public static void deleteSavepointDirectory(@Nonnull String savepointDirectory) throws IOException {
	Path path = new Path(savepointDirectory);
	FileSystem fs = FileSystem.get(path.toUri());
	fs.delete(path, true);
}
 
Developer: axbaretto, Project: flink, Lines: 12, Source: SavepointStore.java


Note: The org.apache.flink.core.fs.FileSystem.delete method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to the corresponding project's license before using or redistributing the code, and do not republish without permission.