This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.getFileSystem. If you have been wondering what Path.getFileSystem actually does, how to call it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.Path.
The following shows 15 code examples of Path.getFileSystem, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
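Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic pattern: Path.getFileSystem resolves the FileSystem implementation backing a path from its URI scheme and the supplied Configuration. The file path used here is purely hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathGetFileSystemDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path p = new Path("/tmp/example.txt");  // hypothetical path
    // Resolves the FileSystem (local, HDFS, ...) that backs this path.
    FileSystem fs = p.getFileSystem(conf);
    System.out.println(p + " exists? " + fs.exists(p));
  }
}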
Example 1: testDfsClientFailover
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);

  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);

  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
Example 2: convertInputToPaths
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private List<Path> convertInputToPaths() throws IOException {
  List<String> inputs = args.getInput();
  List<Path> paths = new ArrayList<>(inputs.size());
  for (String input : inputs) {
    Path p = new Path(input);
    FileSystem fs = p.getFileSystem(conf);
    FileStatus fstat = fs.getFileStatus(p);
    if (fstat.isFile()) {
      paths.add(p);
    } else if (fstat.isDirectory()) {
      for (FileStatus child : fs.listStatus(p)) {
        if (child.isFile()) {
          paths.add(child.getPath());
        }
      }
    } else {
      throw new IllegalStateException("Unable to handle that which is not file nor directory: " + p);
    }
  }
  return paths;
}
Example 3: getPossiblyCompressedOutputStream
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Returns an {@link OutputStream} for a file that might need
 * compression.
 */
static OutputStream getPossiblyCompressedOutputStream(Path file,
                                                      Configuration conf)
    throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  JobConf jConf = new JobConf(conf);
  if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
    // get the codec class
    Class<? extends CompressionCodec> codecClass =
        org.apache.hadoop.mapred.FileOutputFormat
            .getOutputCompressorClass(jConf, GzipCodec.class);
    // get the codec implementation
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);

    // add the appropriate extension
    file = file.suffix(codec.getDefaultExtension());

    if (isCompressionEmulationEnabled(conf)) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new DataOutputStream(codec.createOutputStream(fileOut));
    }
  }
  return fs.create(file, false);
}
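As a side note, the gzip branch in Example 3 only runs when output compression is enabled in the job configuration and the compression-emulation flag (checked via isCompressionEmulationEnabled) is also on. A hedged sketch of how a caller might enable compression looks like this; the output path is hypothetical and GzipCodec is just one possible codec:

JobConf jobConf = new JobConf();
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(jobConf, true);
org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(jobConf, GzipCodec.class);
// JobConf extends Configuration, so it can be passed straight to the helper above.
OutputStream out = getPossiblyCompressedOutputStream(new Path("/tmp/report.txt"), jobConf);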
Example 4: open
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public void open(String filePath, CompressionCodec codeC,
    CompressionType compType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  open(dstPath, codeC, compType, conf, hdfs);
}
Example 5: createWriters
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private static SequenceFile.Writer[] createWriters(Path testdir,
    Configuration conf, int srcs, Path[] src) throws IOException {
  for (int i = 0; i < srcs; ++i) {
    src[i] = new Path(testdir, Integer.toString(i + 10, 36));
  }
  SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
  for (int i = 0; i < srcs; ++i) {
    out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
        src[i], IntWritable.class, IntWritable.class);
  }
  return out;
}
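The SequenceFile.Writer constructor used in Example 5 is marked deprecated in recent Hadoop releases. As a hedged, roughly equivalent sketch, the assignment inside the second loop could instead use the options-based factory, which resolves the FileSystem from the path internally (again via Path.getFileSystem):

out[i] = SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(src[i]),
    SequenceFile.Writer.keyClass(IntWritable.class),
    SequenceFile.Writer.valueClass(IntWritable.class));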
Example 6: cleanupJob
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
@Deprecated
public void cleanupJob(JobContext context) throws IOException {
  if (hasOutputPath()) {
    Path pendingJobAttemptsPath = getPendingJobAttemptsPath();
    FileSystem fs = pendingJobAttemptsPath
        .getFileSystem(context.getConfiguration());
    fs.delete(pendingJobAttemptsPath, true);
  } else {
    LOG.warn("Output Path is null in cleanupJob()");
  }
}
Example 7: configure
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private void configure(JobConf conf, Path inDir, Path outDir, String input,
                       Class<? extends Mapper> map,
                       Class<? extends Reducer> reduce)
    throws IOException {
  // set up the input file system and write input text.
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    // write input into input file
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  // configure the mapred Job which creates a tempfile in map.
  conf.setJobName("testmap");
  conf.setMapperClass(map);
  conf.setReducerClass(reduce);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(0);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
      "/tmp")).toString().replace(' ', '+');
  conf.set("test.build.data", TEST_ROOT_DIR);
}
Example 8: delete
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Delete a file or a directory (recursively).
 * @param filePath file or directory path
 * @param conf hadoop configuration
 * @return true if the delete call completed without an IOException, false otherwise
 */
public static boolean delete(String filePath, Configuration conf) {
  filePath = normalizeFileName(filePath);
  try {
    Path path = new Path(filePath);
    FileSystem fs = path.getFileSystem(conf);
    fs.delete(path, true);
    return true;
  } catch (IOException e) {
    LOGGER.warn("Unable to delete file " + filePath + ": " + e.getMessage());
    return false;
  }
}
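Note that the helper above returns true whenever no exception is thrown, even if FileSystem.delete itself returned false (for example when the path does not exist). If that distinction matters, a hedged variant that forwards the FileSystem result could be written as follows; normalizeFileName and LOGGER are assumed to be the same project-specific members used above:

public static boolean deleteStrict(String filePath, Configuration conf) {
  filePath = normalizeFileName(filePath);
  try {
    Path path = new Path(filePath);
    FileSystem fs = path.getFileSystem(conf);
    return fs.delete(path, true);  // forward the actual delete result
  } catch (IOException e) {
    LOGGER.warn("Unable to delete file " + filePath + ": " + e.getMessage());
    return false;
  }
}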
Example 9: compareFileLengths
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private void compareFileLengths(FileStatus sourceFileStatus, Path target,
    Configuration configuration, long targetLen)
    throws IOException {
  final Path sourcePath = sourceFileStatus.getPath();
  FileSystem fs = sourcePath.getFileSystem(configuration);
  if (fs.getFileStatus(sourcePath).getLen() != targetLen)
    throw new IOException("Mismatch in length of source:" + sourcePath
        + " and target:" + target);
}
Example 10: setConf
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  Path rootDir;
  try {
    rootDir = FSUtils.getRootDir(conf);
    rootDir.getFileSystem(conf);
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
Example 11: ChunkWriter
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public ChunkWriter(Path outDir, Configuration conf) throws IOException {
  this.outDir = outDir;
  fs = outDir.getFileSystem(conf);
  blocksize = conf.getInt(GRIDMIX_GEN_BLOCKSIZE, 1 << 28);
  replicas = (short) conf.getInt(GRIDMIX_GEN_REPLICATION, 3);
  maxFileBytes = conf.getLong(GRIDMIX_GEN_CHUNK, 1L << 30);
  nextDestination();
}
Example 12: getSplits
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobConf jobConf = new JobConf(jobCtxt.getConfiguration());
  final JobClient client = new JobClient(jobConf);
  ClusterStatus stat = client.getClusterStatus(true);
  int numTrackers = stat.getTaskTrackers();
  final int fileCount = jobConf.getInt(GRIDMIX_DISTCACHE_FILE_COUNT, -1);

  // Total size of distributed cache files to be generated
  final long totalSize = jobConf.getLong(GRIDMIX_DISTCACHE_BYTE_COUNT, -1);
  // Get the path of the special file
  String distCacheFileList = jobConf.get(GRIDMIX_DISTCACHE_FILE_LIST);
  if (fileCount < 0 || totalSize < 0 || distCacheFileList == null) {
    throw new RuntimeException("Invalid metadata: #files (" + fileCount
        + "), total_size (" + totalSize + "), filelisturi ("
        + distCacheFileList + ")");
  }

  Path sequenceFile = new Path(distCacheFileList);
  FileSystem fs = sequenceFile.getFileSystem(jobConf);
  FileStatus srcst = fs.getFileStatus(sequenceFile);
  // Consider the number of TTs * mapSlotsPerTracker as number of mappers.
  int numMapSlotsPerTracker = jobConf.getInt(TTConfig.TT_MAP_SLOTS, 2);
  int numSplits = numTrackers * numMapSlotsPerTracker;

  List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
  LongWritable key = new LongWritable();
  BytesWritable value = new BytesWritable();

  // Average size of data to be generated by each map task
  final long targetSize = Math.max(totalSize / numSplits,
      DistributedCacheEmulator.AVG_BYTES_PER_MAP);
  long splitStartPosition = 0L;
  long splitEndPosition = 0L;
  long acc = 0L;
  long bytesRemaining = srcst.getLen();
  SequenceFile.Reader reader = null;
  try {
    reader = new SequenceFile.Reader(fs, sequenceFile, jobConf);
    while (reader.next(key, value)) {

      // If adding this file would put this split past the target size,
      // cut the last split and put this file in the next split.
      if (acc + key.get() > targetSize && acc != 0) {
        long splitSize = splitEndPosition - splitStartPosition;
        splits.add(new FileSplit(
            sequenceFile, splitStartPosition, splitSize, (String[]) null));
        bytesRemaining -= splitSize;
        splitStartPosition = splitEndPosition;
        acc = 0L;
      }
      acc += key.get();
      splitEndPosition = reader.getPosition();
    }
  } finally {
    if (reader != null) {
      reader.close();
    }
  }
  if (bytesRemaining != 0) {
    splits.add(new FileSplit(
        sequenceFile, splitStartPosition, bytesRemaining, (String[]) null));
  }
  return splits;
}
Example 13: getSplits
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  // generate splits
  List<InputSplit> splitList = new ArrayList<InputSplit>();

  for (FileStatus file : listStatus(job)) {
    if (file.isDirectory()) {
      continue;
    }
    Path path = file.getPath();
    FileSystem fs = path.getFileSystem(job.getConfiguration());
    FSDataInputStream fileIn = fs.open(path);
    LineReader in = new LineReader(fileIn, job.getConfiguration());
    int lineLen = 0;
    while (true) {
      Text lineText = new Text();
      lineLen = in.readLine(lineText);
      if (lineLen <= 0) {
        break;
      }
      Matcher m = LINE_PATTERN.matcher(lineText.toString());
      if ((m != null) && m.matches()) {
        TableName tableName = TableName.valueOf(m.group(1));
        int startRow = Integer.parseInt(m.group(2));
        int rows = Integer.parseInt(m.group(3));
        int totalRows = Integer.parseInt(m.group(4));
        int clients = Integer.parseInt(m.group(5));
        boolean flushCommits = Boolean.parseBoolean(m.group(6));
        boolean writeToWAL = Boolean.parseBoolean(m.group(7));
        boolean useTags = Boolean.parseBoolean(m.group(8));
        int noOfTags = Integer.parseInt(m.group(9));

        LOG.debug("tableName=" + tableName +
            " split[" + splitList.size() + "] " +
            " startRow=" + startRow +
            " rows=" + rows +
            " totalRows=" + totalRows +
            " clients=" + clients +
            " flushCommits=" + flushCommits +
            " writeToWAL=" + writeToWAL +
            " useTags=" + useTags +
            " noOfTags=" + noOfTags);

        PeInputSplit newSplit =
            new PeInputSplit(tableName, startRow, rows, totalRows, clients,
                flushCommits, writeToWAL, useTags, noOfTags);
        splitList.add(newSplit);
      }
    }
    in.close();
  }
  LOG.info("Total # of splits: " + splitList.size());
  return splitList;
}
Example 14: Writer
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
      bufferSizeOption != null ||
      replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
        "compatible with stream");
  }

  FSDataOutputStream out;
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
        bufferSizeOption.getValue();
    short replication = replicationOption == null ?
        fs.getDefaultReplication(p) :
        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
        blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())) {
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
Example 15: setOutputDirectory
import org.apache.hadoop.fs.Path; // import the package/class the method depends on
protected void setOutputDirectory() throws IOException {
  String actionType = conf.get(AngelConf.ANGEL_ACTION_TYPE, AngelConf.DEFAULT_ANGEL_ACTION_TYPE);
  RunningMode runningMode = RunningMode.valueOf(conf.get(AngelConf.ANGEL_RUNNING_MODE, AngelConf.DEFAULT_ANGEL_RUNNING_MODE));
  LOG.info("running mode = " + runningMode);
  boolean deleteOnExist =
      conf.getBoolean(AngelConf.ANGEL_JOB_OUTPUT_PATH_DELETEONEXIST,
          AngelConf.DEFAULT_ANGEL_JOB_OUTPUT_PATH_DELETEONEXIST);

  String path = null;
  if (!actionType.matches("predict")) {
    path = conf.get(AngelConf.ANGEL_SAVE_MODEL_PATH);
  } else {
    path = conf.get(AngelConf.ANGEL_PREDICT_PATH);
  }

  if (path == null) {
    throw new IOException("output directory is null. you must set "
        + AngelConf.ANGEL_SAVE_MODEL_PATH + " at training mode or set "
        + AngelConf.ANGEL_PREDICT_PATH + " at predict mode");
  }

  conf.set(AngelConf.ANGEL_JOB_OUTPUT_PATH, path);

  Path outputPath = new Path(path);
  FileSystem outFs = outputPath.getFileSystem(conf);
  if (outFs.exists(outputPath)) {
    if (deleteOnExist) {
      outFs.delete(outputPath, true);
    } else {
      throw new IOException("output path " + outputPath + " already exist, please check");
    }
  }

  Path outputParentPath = outputPath.getParent();
  if (!outFs.exists(outputParentPath)) {
    LOG.info("Make dir for model output parent path: " + outputParentPath);
    if (!outFs.mkdirs(outputParentPath)) {
      throw new IOException("Failed to make dir for model output parent path: " + outputParentPath);
    }
  }

  if (runningMode == RunningMode.ANGEL_PS_WORKER) {
    String logPathStr = conf.get(AngelConf.ANGEL_LOG_PATH);
    if (logPathStr != null) {
      Path logPath = new Path(logPathStr);
      FileSystem logFs = logPath.getFileSystem(conf);
      if (logFs.exists(logPath)) {
        if (deleteOnExist) {
          logFs.delete(logPath, true);
        } else {
          throw new IOException("log path " + logPath + " already exist, please check");
        }
      }
    }
  }

  Path tmpOutputPath = HdfsUtil.generateTmpDirectory(conf, getAppId(), outputPath);
  internalStateFile = new Path(HdfsUtil.generateTmpDirectory(conf, getAppId(), outputPath), "state");

  conf.set(AngelConf.ANGEL_JOB_TMP_OUTPUT_PATH, tmpOutputPath.toString());
  LOG.info(AngelConf.ANGEL_JOB_TMP_OUTPUT_PATH + "=" + tmpOutputPath.toString());

  LOG.info("internal state file is " + internalStateFile);
  conf.set(AngelConf.ANGEL_APP_SERILIZE_STATE_FILE, internalStateFile.toString());
}
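Finally, for context only: a minimal and entirely hypothetical configuration under which Example 15 would take the save-model branch. Only the AngelConf keys come from the code above; the values are made up.

Configuration conf = new Configuration();
// Any action type that does not match "predict" is treated as training by the code above.
conf.set(AngelConf.ANGEL_ACTION_TYPE, "train");                        // hypothetical value
conf.set(AngelConf.ANGEL_SAVE_MODEL_PATH, "hdfs:///tmp/model-output"); // hypothetical path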