本文整理汇总了Java中org.apache.hadoop.mapred.FileAlreadyExistsException类的典型用法代码示例。如果您正苦于以下问题：Java FileAlreadyExistsException类的具体用法？Java FileAlreadyExistsException怎么用？Java FileAlreadyExistsException使用的例子？那么，这里精选的类代码示例或许可以为您提供帮助。
FileAlreadyExistsException类属于org.apache.hadoop.mapred包,在下文中一共展示了FileAlreadyExistsException类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Validates the job's output specification: an output directory must be
 * configured and must not already exist on its file system. Also obtains
 * HDFS delegation tokens for that directory's namenode.
 *
 * @throws InvalidJobConfException if no output directory is configured
 * @throws FileAlreadyExistsException if the output directory already exists
 * @throws IOException on file-system access errors
 */
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  final Path out = getOutputPath(job);
  if (out == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }
  // Acquire delegation tokens so tasks can talk to out's namenode.
  TokenCache.obtainTokensForNamenodes(
      job.getCredentials(), new Path[] { out }, job.getConfiguration());
  final boolean alreadyThere =
      out.getFileSystem(job.getConfiguration()).exists(out);
  if (alreadyThere) {
    throw new FileAlreadyExistsException(
        "Output directory " + out + " already exists");
  }
}
示例2: create
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Create a new file. Three possibilities:
 * - This is a data node and you're trying to create a unqualified file => write locally.
 * - This is a client node and you're trying to create unqualified file => pick a random data node and write there.
 * - The path you provide is qualified => write to that node.
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress) throws IOException {
  final Path absolutePath = toAbsolutePath(f);
  checkPath(absolutePath);

  // Writing at the filesystem root is never permitted.
  if (absolutePath.isRoot()) {
    throw new AccessControlException("Cannot create " + f);
  }

  if (!isRemoteFile(f)) {
    if (isDirectory(absolutePath)) {
      throw new FileAlreadyExistsException("Directory already exists: " + f);
    }
    // Only canonicalized path/remote files are allowed
    throw new IOException("Cannot create non-canonical path " + f);
  }

  try {
    final RemotePath target = getRemotePath(absolutePath);
    return getDelegateFileSystem(target.address)
        .create(target.path, permission, overwrite, bufferSize, replication, blockSize, progress);
  } catch (IllegalArgumentException e) {
    // Surface address-resolution problems as IOException, keeping the cause.
    throw (IOException) (new IOException("Cannot create file " + absolutePath).initCause(e));
  }
}
示例3: append
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Append to an existing file. Three possibilities:
 * - This is a data node and you're trying to append a unqualified file => write locally.
 * - The path you provide is qualified => write to that node.
 *
 * If this is a client node and you try to write to a unqualified file, we'll throw
 *
 * @param f          the file to append to
 * @param bufferSize buffer size passed through to the delegate file system
 * @param progress   progress callback passed through to the delegate
 * @return an output stream positioned at the end of the remote file
 * @throws AccessControlException if {@code f} resolves to the filesystem root
 * @throws FileAlreadyExistsException if {@code f} names an existing local directory
 * @throws FileNotFoundException if the remote path cannot be resolved
 * @throws IOException if {@code f} is a non-canonical local path
 */
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
Path absolutePath = toAbsolutePath(f);
checkPath(absolutePath);
// Handle root: appending at the root is never permitted.
if (absolutePath.isRoot()) {
throw new AccessControlException("Cannot open " + f);
}
if(!isRemoteFile(f)){
if (isDirectory(absolutePath)) {
throw new FileAlreadyExistsException("Directory already exists: " + f);
}
// Only fully canonicalized/remote files are allowed
throw new IOException("Cannot create non-canonical path " + f);
}
try {
RemotePath remotePath = getRemotePath(absolutePath);
FileSystem delegate = getDelegateFileSystem(remotePath.address);
return delegate.append(remotePath.path, bufferSize, progress);
} catch (IllegalArgumentException e) {
// Address-resolution failure means the file does not exist from our view.
throw (FileNotFoundException) (new FileNotFoundException("No file " + absolutePath).initCause(e));
}
}
示例4: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Validates the output spec: requires an output directory whenever reducers
 * run, normalizes it, fetches its delegation tokens, and rejects the job if
 * the directory already exists.
 *
 * @param ignored unused file system argument (kept for API compatibility)
 * @param job the job configuration being validated
 * @throws InvalidJobConfException if reducers exist but no output dir is set
 * @throws FileAlreadyExistsException if the output directory already exists
 * @throws IOException on file-system access errors
 */
public void checkOutputSpecs(FileSystem ignored, JobConf job)
  throws FileAlreadyExistsException,
         InvalidJobConfException, IOException {
  Path output = getOutputPath(job);
  if (output == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (output == null) {
    // Map-only job with no output dir: nothing further to validate.
    return;
  }
  FileSystem fs = output.getFileSystem(job);
  // Normalize, then record the qualified path back into the job.
  output = fs.makeQualified(output);
  setOutputPath(job, output);
  // Delegation tokens for the output directory's namenode.
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] {output}, job);
  if (fs.exists(output)) {
    throw new FileAlreadyExistsException("Output directory " + output +
        " already exists");
  }
}
示例5: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Rejects the job unless an output directory is configured and absent,
 * collecting namenode delegation tokens along the way.
 *
 * @throws InvalidJobConfException if the output directory is not set
 * @throws FileAlreadyExistsException if the output directory exists
 * @throws IOException on file-system access errors
 */
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  Path target = getOutputPath(job);
  if (target == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }
  // Delegation token for target's file system.
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] {target},
      job.getConfiguration());
  if (target.getFileSystem(job.getConfiguration()).exists(target)) {
    throw new FileAlreadyExistsException("Output directory " + target +
        " already exists");
  }
}
示例6: execute
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Executes the job over all listed input paths.
 *
 * @param job the configured Hadoop job
 * @return 0 when the job ran to completion; 1 when there was no input to
 *         process or the output directory already exists (in which case the
 *         existing output is reused rather than recomputed)
 * @throws IOException if listing inputs or running the job fails
 */
public int execute( JobConf job )
    throws IOException
{
    try {
        Path[] listedPaths = getListedPaths( );
        if ( listedPaths == null || listedPaths.length == 0 ) {
            // Nothing to process.
            return 1;
        }
        for ( Path listedPath : listedPaths ) {
            FileInputFormat.addInputPath( job, listedPath );
        }
        JobClient.runJob( job );
        LOG.info( "Job executed" );
        return 0;
    } catch( FileAlreadyExistsException fae ) {
        // The output directory already exists in Hadoop: no need to process
        // the data again, the caller simply reuses the existing directory.
        LOG.info( "Output directory already exists; reusing existing results", fae );
        return 1;
    }
}
示例7: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Overridden to avoid throwing an exception if the specified directory
 * for export already exists.
 */
@Override
public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
  Path exportDir = getOutputPath(job);
  if (exportDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }
  // Unlike the base class, deliberately skip the "already exists" check:
  // exporting into a pre-existing directory is acceptable here. Only the
  // delegation tokens for the directory's namenode are still collected.
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{exportDir}, job.getConfiguration());
}
示例8: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Validates the output spec: requires an output directory whenever reducers
 * run, normalizes it, fetches delegation tokens, and requires the
 * {@code zephyr.job.uuid} configuration value. An existing output directory
 * is tolerated by design.
 */
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job) throws FileAlreadyExistsException, InvalidJobConfException, IOException {
  Path outputDir = getOutputPath(job);
  if (outputDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outputDir == null) {
    // Map-only job without an output dir: nothing more to check.
    return;
  }
  FileSystem fs = outputDir.getFileSystem(job);
  // Normalize and write the qualified path back into the job.
  outputDir = fs.makeQualified(outputDir);
  setOutputPath(job, outputDir);
  // Delegation token for the outDir's file system.
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{outputDir}, job);
  if (job.get("zephyr.job.uuid") == null)
    throw new InvalidJobConfException("This output format REQUIRES the value zephyr.job.uuid to be specified in the job configuration!");
  // Existence of the output directory is intentionally NOT checked here;
  // this format accepts writing into a pre-existing directory.
}
示例9: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Validates the output spec. An existing output directory is accepted only
 * when its sole content is the TeraSort partition file (and the simple
 * partitioner is not in use); anything else is treated as a conflict.
 *
 * @throws InvalidJobConfException if the output directory is not configured
 * @throws FileAlreadyExistsException if the directory exists with real content
 * @throws IOException on file-system access errors
 */
@Override
public void checkOutputSpecs(JobContext job
                             ) throws InvalidJobConfException, IOException {
  final Path output = getOutputPath(job);
  if (output == null) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  final Configuration conf = job.getConfiguration();
  // Delegation token for output's file system.
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { output }, conf);
  final FileSystem fs = output.getFileSystem(conf);
  if (!fs.exists(output)) {
    return;
  }
  // An existing output dir is considered empty iff its only content is the
  // pre-written partition file.
  boolean effectivelyEmpty = false;
  final FileStatus[] children = fs.listStatus(output);
  if (children != null && children.length == 1) {
    final FileStatus only = children[0];
    effectivelyEmpty = !only.isDirectory()
        && TeraInputFormat.PARTITION_FILENAME.equals(only.getPath().getName());
  }
  if (TeraSort.getUseSimplePartitioner(job) || !effectivelyEmpty) {
    throw new FileAlreadyExistsException("Output directory " + output
        + " already exists");
  }
}
示例10: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Validates the output spec for a table-backed output format: the only
 * requirement is that a target table name is configured.
 *
 * @throws IOException if no table name is present in the job configuration
 */
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job)
    throws FileAlreadyExistsException, InvalidJobConfException, IOException {
  if (job.get(OUTPUT_TABLE) == null) {
    throw new IOException("Must specify table name");
  }
}
示例11: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Checks to make sure the configuration is valid, the output path doesn't already exist, and that
 * a connection to BigQuery can be established.
 */
@Override
public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
  final Configuration conf = job.getConfiguration();

  // 1) The output configuration itself must be valid.
  BigQueryOutputConfiguration.validateConfiguration(conf);

  // 2) The GCS output path must not already exist.
  final Path gcsPath = BigQueryOutputConfiguration.getGcsOutputPath(conf);
  LOG.info("Using output path '{}'.", gcsPath);
  if (gcsPath.getFileSystem(conf).exists(gcsPath)) {
    throw new IOException("The output path '" + gcsPath + "' already exists.");
  }

  // 3) Compression has mixed support in BigQuery, so reject it outright.
  if (FileOutputFormat.getCompressOutput(job)) {
    throw new IOException("Compression isn't supported for this OutputFormat.");
  }

  // 4) We must be able to construct a BigQuery client.
  try {
    new BigQueryFactory().getBigQueryHelper(conf);
  } catch (GeneralSecurityException gse) {
    throw new IOException("Failed to create BigQuery client", gse);
  }

  // 5) Finally, let the delegate run its own checks.
  getDelegate(conf).checkOutputSpecs(job);
}
示例12: checkOutputSpecs
import org.apache.hadoop.mapred.FileAlreadyExistsException; //导入依赖的package包/类
/**
 * Overwrite semantics: if the parent check rejects the job because the
 * output directory exists, recursively delete that directory instead of
 * failing, so the job can overwrite previous results.
 */
@Override
public void checkOutputSpecs(JobContext job) throws IOException {
  try {
    super.checkOutputSpecs(job);
  } catch (FileAlreadyExistsException ignored) {
    // Expected when output already exists: wipe it and carry on.
    final Path existing = getOutputPath(job);
    existing.getFileSystem(job.getConfiguration()).delete(existing, true);
  }
}