This article collects typical usage examples of the Java class org.apache.hadoop.fs.Path. If you have been wondering what Path is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.
The Path class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, ordered by popularity.
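Before diving into the examples, here is a minimal, self-contained sketch of basic Path usage. The HDFS URI and file names are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathBasics {
  public static void main(String[] args) throws Exception {
    // a Path is a slash-separated name, optionally qualified with a scheme and authority
    Path dir = new Path("hdfs://namenode:8020/user/demo");
    Path file = new Path(dir, "data.txt");  // resolve a child name against a parent path

    System.out.println(file.getName());     // data.txt
    System.out.println(file.getParent());   // hdfs://namenode:8020/user/demo
    System.out.println(file.isAbsolute());  // true

    // a Path does no I/O by itself; it is resolved against a concrete FileSystem
    FileSystem fs = FileSystem.get(file.toUri(), new Configuration());
    System.out.println(fs.exists(file));
  }
}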
Example 1: checkOuterConsistency
import org.apache.hadoop.fs.Path; // import the required package/class
private static void checkOuterConsistency(Job job, Path[] src)
    throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  // the job should have produced exactly one non-empty part file
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf,
      new Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is " + outlist.length,
      1, outlist.length);
  assertTrue("output file with zero length " + outlist[0].getLen(),
      0 < outlist[0].getLen());
  SequenceFile.Reader r =
      new SequenceFile.Reader(cluster.getFileSystem(),
          outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  // every key's count must match the product computed from the source paths
  while (r.next(k, v)) {
    assertEquals("counts do not match", v.get(),
        countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}
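A note on the reader construction above: the SequenceFile.Reader(FileSystem, Path, Configuration) constructor is deprecated in Hadoop 2.x in favor of option-based construction. A minimal sketch of the replacement, reusing the outlist and job variables from the example:

// equivalent reader built with the non-deprecated Option-based API
SequenceFile.Reader r =
    new SequenceFile.Reader(job.getConfiguration(),
        SequenceFile.Reader.file(outlist[0].getPath()));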
Example 2: main
import org.apache.hadoop.fs.Path; // import the required package/class
public static void main(String[] args) throws Exception {
  BasicConfigurator.configure();
  Configuration conf = new Configuration();
  conf.setQuietMode(true);

  Job job = Job.getInstance(conf, "WordCount");
  job.setJarByClass(HadoopWordCount.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  job.setMapperClass(Map.class);
  job.setCombinerClass(Reduce.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setOutputFormatClass(TextOutputFormat.class);

  FileInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1] + "_" + System.currentTimeMillis()));

  long t = System.currentTimeMillis();
  job.waitForCompletion(true);
  System.out.println("TotalTime=" + (System.currentTimeMillis() - t));
}
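The Map and Reduce classes wired into the job above are not part of this snippet. Below is a plausible WordCount-style sketch of what they look like, assuming the usual org.apache.hadoop.mapreduce base classes and java.util.StringTokenizer; treat it as illustrative, not as the original author's code:

public static class Map extends Mapper<Object, Text, Text, IntWritable> {
  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();

  @Override
  protected void map(Object key, Text value, Context context)
      throws IOException, InterruptedException {
    // split each input line into tokens and emit a (word, 1) pair per token
    StringTokenizer it = new StringTokenizer(value.toString());
    while (it.hasMoreTokens()) {
      word.set(it.nextToken());
      context.write(word, ONE);
    }
  }
}

public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    // sum the partial counts produced by the mappers and the combiner
    int sum = 0;
    for (IntWritable v : values) {
      sum += v.get();
    }
    context.write(key, new IntWritable(sum));
  }
}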
Example 3: mkdirs
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private boolean mkdirs(FTPClient client, Path file, FsPermission permission)
    throws IOException {
  boolean created = true;
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.getName();
  if (!exists(client, absolute)) {
    Path parent = absolute.getParent();
    // create missing ancestors first, then the directory itself
    created = (parent == null ||
        mkdirs(client, parent, FsPermission.getDirDefault()));
    if (created) {
      String parentDir = parent.toUri().getPath();
      client.changeWorkingDirectory(parentDir);
      created = created && client.makeDirectory(pathName);
    }
  } else if (isFile(client, absolute)) {
    throw new ParentNotDirectoryException(String.format(
        "Can't make directory for path %s since it is a file.", absolute));
  }
  return created;
}
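The makeAbsolute helper used above is not shown. A plausible sketch, assuming it simply resolves relative paths against the FTP server's current working directory:

// hypothetical helper: absolute paths pass through, relative ones are
// resolved against the working directory
private Path makeAbsolute(Path workDir, Path path) {
  if (path.isAbsolute()) {
    return path;
  }
  return new Path(workDir, path);
}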
Example 4: buildModel
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * @param sparkContext active Spark Context
 * @param trainData training data on which to build a model
 * @param hyperParameters ordered list of hyper parameter values to use in building model
 * @param candidatePath directory where additional model files can be written
 * @return a {@link PMML} representation of a model trained on the given data
 */
@Override
public PMML buildModel(JavaSparkContext sparkContext,
                       JavaRDD<String> trainData,
                       List<?> hyperParameters,
                       Path candidatePath) {
  int numClusters = (Integer) hyperParameters.get(0);
  Preconditions.checkArgument(numClusters > 1);
  log.info("Building KMeans Model with {} clusters", numClusters);

  JavaRDD<Vector> trainingData = parsedToVectorRDD(trainData.map(MLFunctions.PARSE_FN));
  KMeansModel kMeansModel = KMeans.train(trainingData.rdd(), numClusters, maxIterations,
      numberOfRuns, initializationStrategy);

  return kMeansModelToPMML(kMeansModel, fetchClusterCountsFromModel(trainingData, kMeansModel));
}
Example 5: testCodecs
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit the maximum number of KeyValues to analyze
 * @param hfilePath an HFile path on the file system
 * @param compressionName compression algorithm used for comparison
 * @param doBenchmark run performance benchmarks
 * @param doVerify verify correctness
 * @throws IOException when the HFile path is incorrect
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 &&
          reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
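For illustration, a hedged invocation sketch; the HFile path below is made up, and in the real tool these arguments come from the command line:

Configuration conf = HBaseConfiguration.create();
// analyze at most 1000000 KeyValues with GZ as the comparison codec,
// verifying correctness and running benchmarks
testCodecs(conf, 1000000, "/hbase/data/default/t1/r1/cf/hfile1", "gz", true, true);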
Example 6: testReadWriteOps
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * Test NN ReadOps Count and WriteOps Count.
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps", rb);

  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  // perform create file operation
  createFile(file1_Path, 1024 * 1024, (short) 2);

  // perform read file operation on earlier created file
  readFile(fs, file1_Path);

  MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
      startWriteCounter);
}
Example 7: testFstat
import org.apache.hadoop.fs.Path; // import the required package/class
@Test(timeout = 30000)
public void testFstat() throws Exception {
  FileOutputStream fos = new FileOutputStream(
      new File(TEST_DIR, "testfstat"));
  NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));

  String owner = stat.getOwner();
  String expectedOwner = System.getProperty("user.name");
  if (Path.WINDOWS) {
    // on Windows, the file may be owned by the Administrators group
    // rather than the individual user
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  assertEquals(expectedOwner, owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file", S_IFREG,
      stat.getMode() & S_IFMT);
}
Example 8: codecTestMapFile
import org.apache.hadoop.fs.Path; // import the required package/class
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records +
      " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
      clazz.getSimpleName() + "-" + type + "-" + records);
  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
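The createMapFile helper is not included in the snippet. A plausible sketch, assuming it writes zero-padded keys such as "002" and "004" (which the lookups above depend on):

// hypothetical helper: writes `records` sorted, zero-padded keys
private static void createMapFile(Configuration conf, FileSystem fs, Path path,
    CompressionCodec codec, CompressionType type, int records) throws IOException {
  MapFile.Writer writer =
      new MapFile.Writer(conf, path,
          MapFile.Writer.keyClass(Text.class),
          MapFile.Writer.valueClass(Text.class),
          MapFile.Writer.compression(type, codec));
  Text key = new Text();
  for (int j = 0; j < records; j++) {
    // MapFile requires keys in sorted order, hence the zero padding
    key.set(String.format("%03d", j));
    writer.append(key, key);
  }
  writer.close();
}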
Example 9: blockReport_03
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * Test writes a file and closes it.
 * A block report is generated with a bad GS for a single block.
 * The block report is forced and the check for # of corrupted blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Example 10: testBooleanValues
import org.apache.hadoop.fs.Path; // import the required package/class
public void testBooleanValues() throws IOException {
  out = new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("test.bool1", "true");
  appendProperty("test.bool2", "false");
  appendProperty("test.bool3", " true ");
  appendProperty("test.bool4", " false ");
  appendProperty("test.bool5", "foo");
  appendProperty("test.bool6", "TRUE");
  appendProperty("test.bool7", "FALSE");
  appendProperty("test.bool8", "");
  endConfig();

  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);

  assertEquals(true, conf.getBoolean("test.bool1", false));
  assertEquals(false, conf.getBoolean("test.bool2", true));
  assertEquals(true, conf.getBoolean("test.bool3", false));   // values are trimmed
  assertEquals(false, conf.getBoolean("test.bool4", true));
  assertEquals(true, conf.getBoolean("test.bool5", true));    // non-boolean falls back to the default
  assertEquals(true, conf.getBoolean("test.bool6", false));   // matching is case-insensitive
  assertEquals(false, conf.getBoolean("test.bool7", true));
  assertEquals(false, conf.getBoolean("test.bool8", false));  // empty value falls back to the default
}
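The startConfig, appendProperty and endConfig helpers belong to the surrounding test class and simply write a Hadoop configuration XML file through the out writer. A minimal sketch of what they might look like (hypothetical bodies; only the shape matters):

private void startConfig() throws IOException {
  // open the XML document
  out.write("<?xml version=\"1.0\"?>\n");
  out.write("<configuration>\n");
}

private void appendProperty(String name, String val) throws IOException {
  // emit a single <property> element
  out.write("<property><name>" + name + "</name>" +
      "<value>" + val + "</value></property>\n");
}

private void endConfig() throws IOException {
  // close the document and flush it to disk
  out.write("</configuration>\n");
  out.close();
}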
Example 11: testCompareTo
import org.apache.hadoop.fs.Path; // import the required package/class
@Test
public void testCompareTo() throws IOException {
  Path path1 = new Path("path1");
  Path path2 = new Path("path2");
  FileStatus fileStatus1 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path1);
  FileStatus fileStatus2 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path2);
  assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
  assertTrue(fileStatus2.compareTo(fileStatus1) > 0);

  List<FileStatus> statList = new ArrayList<>();
  statList.add(fileStatus1);
  statList.add(fileStatus2);
  assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
}
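FileStatus compares by path, which is what makes both compareTo assertions and the binarySearch call valid. A tiny follow-up sketch using the variables above:

// sorting orders the statuses by path: path1 before path2
Collections.sort(statList);
assertEquals(path1, statList.get(0).getPath());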
Example 12: addInputPathRecursively
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * Add files in the input path recursively into the results.
 * @param result the list to store all files
 * @param fs the FileSystem
 * @param path the input path
 * @param inputFilter the input filter that can be used to filter files/dirs
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter)
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
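For illustration, a hedged usage sketch of the method above; the hiddenFileFilter name is hypothetical, modeled on the filter FileInputFormat applies internally to skip files starting with '.' or '_' (assumes Java 8 and an existing conf):

List<FileStatus> result = new ArrayList<>();
PathFilter hiddenFileFilter = p -> {
  // skip hidden and intermediate files, as FileInputFormat does by default
  String name = p.getName();
  return !name.startsWith("_") && !name.startsWith(".");
};
addInputPathRecursively(result, FileSystem.get(conf),
    new Path("/data/input"), hiddenFileFilter);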
Example 13: testCleanupRemainders
import org.apache.hadoop.fs.Path; // import the required package/class
@Test(timeout = 10000)
public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  String remainder1 = path.getAbsolutePath() +
      Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() +
      Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_",
      new String[] { path.getAbsolutePath() });
  // creating the SharedFileDescriptorFactory should have removed
  // the remainders
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
Example 14: getByNamespace
import org.apache.hadoop.fs.Path; // import the required package/class
@Override
public Map<String, HTableDescriptor> getByNamespace(String name)
    throws IOException {
  Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
  List<Path> tableDirs =
      FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
  for (Path d : tableDirs) {
    HTableDescriptor htd = null;
    try {
      htd = get(FSUtils.getTableName(d));
    } catch (FileNotFoundException fnfe) {
      // failure to retrieve one HTD shouldn't stop us from getting the rest
      LOG.warn("Trouble retrieving htd", fnfe);
    }
    if (htd == null) continue;
    htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
  }
  return htds;
}
Example 15: run
import org.apache.hadoop.fs.Path; // import the required package/class
/**
 * This is the main routine for launching a distributed random write job.
 * It runs 10 maps per node, and each node writes 1 gig of data to a DFS file.
 * The reduce doesn't do anything.
 *
 * @throws IOException
 */
public int run(String[] args) throws Exception {
  if (args.length == 0) {
    return printUsage();
  }
  Job job = createJob(getConf());
  FileOutputFormat.setOutputPath(job, new Path(args[0]));

  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  Date endTime = new Date();
  System.out.println("Job ended: " + endTime);
  System.out.println("The job took " +
      (endTime.getTime() - startTime.getTime()) / 1000 +
      " seconds.");
  return ret;
}
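Since run(String[]) is a Tool entry point, a job like this is typically launched through ToolRunner, which parses generic Hadoop options before delegating to run(). A hedged sketch, where RandomWriter stands for whatever Tool implementation hosts the run method above:

public static void main(String[] args) throws Exception {
  // ToolRunner handles -D options, -conf, etc., then calls run(remainingArgs)
  int res = ToolRunner.run(new Configuration(), new RandomWriter(), args);
  System.exit(res);
}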