This article collects typical usage examples of the Java class org.apache.hadoop.fs.FsConstants. If you have been wondering what the FsConstants class is for, how to use it, or where to find concrete usage examples, the curated snippets below should help.
The FsConstants class belongs to the org.apache.hadoop.fs package. Ten code examples are shown below, ordered by popularity by default.
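All ten examples share the same pattern: register one or more mount points with ConfigUtil.addLink, then obtain the client-side ViewFileSystem through FsConstants.VIEWFS_URI (the "viewfs:///" URI constant). The sketch below is a minimal, self-contained illustration of that pattern that avoids a MiniDFSCluster by mounting a local directory instead of HDFS; the class name ViewFsQuickStart, the /demo mount point, and the /tmp/viewfs-demo backing directory are illustrative choices and do not come from the examples themselves.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Back the mount point with the local file system;
    // FsConstants.LOCAL_FS_URI is the "file:///" constant.
    FileSystem localFs = FileSystem.get(FsConstants.LOCAL_FS_URI, conf);
    Path backing = localFs.makeQualified(new Path("/tmp/viewfs-demo")); // illustrative path
    localFs.mkdirs(backing);

    // Mount the backing directory at /demo and resolve the view through
    // FsConstants.VIEWFS_URI ("viewfs:///"), just as the examples below do.
    ConfigUtil.addLink(conf, "/demo", backing.toUri());
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);

    // A path under /demo is transparently redirected to the backing directory.
    viewFs.mkdirs(new Path("/demo/subdir"));
    System.out.println(localFs.exists(new Path(backing, "subdir"))); // expected: true
  }
}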
Example 1: setUp
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  // Make user and data dirs - we create links to them in the mount table
  fsTarget.mkdirs(new Path(targetTestRoot, "user"));
  fsTarget.mkdirs(new Path(targetTestRoot, "data"));
  fsTarget.mkdirs(new Path(targetTestRoot, "dir2"));
  fsTarget.mkdirs(new Path(targetTestRoot, "dir3"));
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot, "aFile"));
  // Now we use the mount fs to set links to user and dir
  // in the test root
  // Set up the defaultMT in the config with our mount point links
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
Example 2: clusterSetupAtBegining
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem();
  defaultWorkingDirectory = fHdfs.makeQualified(new Path("/user/" +
      UserGroupInformation.getCurrentUser().getShortUserName()));
  fHdfs.mkdirs(defaultWorkingDirectory);
  // Setup the ViewFS to be used for all tests.
  Configuration conf = ViewFileSystemTestSetup.createConfig();
  ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
  ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri() + "/tmp"));
  vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  assertEquals(ViewFileSystem.class, vfs.getClass());
}
Example 3: setUp
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@Before
public void setUp() throws Exception {
  fcTarget = fc;
  fcTarget2 = fc2;
  targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
  targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
  fcTarget.delete(targetTestRoot, true);
  fcTarget2.delete(targetTestRoot2, true);
  fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
  fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);
  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
Example 4: setUp
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@Before
public void setUp() throws Exception {
  fcTarget = fc;
  fcTarget2 = fc2;
  targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
  targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
  fcTarget.delete(targetTestRoot, true);
  fcTarget2.delete(targetTestRoot2, true);
  fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
  fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);
  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
Example 5: setUp
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@Before
public void setUp() throws Exception {
  fsTarget = fHdfs;
  fsTarget2 = fHdfs2;
  targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
  targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
  fsTarget.delete(targetTestRoot, true);
  fsTarget2.delete(targetTestRoot2, true);
  fsTarget.mkdirs(targetTestRoot);
  fsTarget2.mkdirs(targetTestRoot2);
  fsViewConf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
Example 6: setup
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@BeforeClass
public static void setup() throws URISyntaxException {
  try {
    Path fswd = FileSystem.get(getConf()).getWorkingDirectory();
    Configuration vConf = ViewFileSystemTestSetup.createConfig(false);
    ConfigUtil.addLink(vConf, "/usr", new URI(fswd.toString()));
    fs = FileSystem.get(FsConstants.VIEWFS_URI, vConf);
    fs.setWorkingDirectory(new Path("/usr"));
    listFile = new Path("target/tmp/listing").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    target = new Path("target/tmp/target").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    root = new Path("target/tmp").makeQualified(fs.getUri(),
        fs.getWorkingDirectory()).toString();
    TestDistCpUtils.delete(fs, root);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
  }
}
Example 7: clusterSetupAtBegining
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  CONF.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  CONF.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  CONF.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
      DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  CONF.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  CONF.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(DFS_REPLICATION_DEFAULT + 1).build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem();
  fileSystemTestHelper.createFile(fHdfs, testFileName);
  Configuration conf = ViewFileSystemTestSetup.createConfig();
  ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() +
      "/tmp"));
  vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  testFileDirPath = new Path(testFileDir);
  testFilePath = new Path(testFileName);
}
Example 8: setUp
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  // Make user and data dirs - we create links to them in the mount table
  fsTarget.mkdirs(new Path(targetTestRoot, "user"));
  fsTarget.mkdirs(new Path(targetTestRoot, "data"));
  fsTarget.mkdirs(new Path(targetTestRoot, "dir2"));
  fsTarget.mkdirs(new Path(targetTestRoot, "dir3"));
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot, "aFile"));
  // Now we use the mount fs to set links to user and dir
  // in the test root
  // Set up the defaultMT in the config with our mount point links
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
Example 9: clusterSetupAtBegining
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@BeforeClass
public static void clusterSetupAtBegining()
    throws IOException, LoginException, URISyntaxException {
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem();
  defaultWorkingDirectory = fHdfs.makeQualified(new Path(
      "/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
  fHdfs.mkdirs(defaultWorkingDirectory);
  // Setup the ViewFS to be used for all tests.
  Configuration conf = ViewFileSystemTestSetup.createConfig();
  ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
  ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri() + "/tmp"));
  vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  assertEquals(ViewFileSystem.class, vfs.getClass());
}
Example 10: setup
import org.apache.hadoop.fs.FsConstants; // import the required package/class

@BeforeClass
public static void setup() throws URISyntaxException {
  try {
    Path fswd = FileSystem.get(getConf()).getWorkingDirectory();
    Configuration vConf = ViewFileSystemTestSetup.createConfig(false);
    ConfigUtil.addLink(vConf, "/usr", new URI(fswd.toString()));
    fs = FileSystem.get(FsConstants.VIEWFS_URI, vConf);
    fs.setWorkingDirectory(new Path("/usr"));
    listFile = new Path("target/tmp/root/listing").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    target = new Path("target/tmp/root/target").makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    root = new Path("target/tmp/root").makeQualified(fs.getUri(),
        fs.getWorkingDirectory()).toString();
    TestDistCpUtils.delete(fs, root);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
  }
}
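A closing note on the mechanism shared by all ten examples: as far as I can tell from the viewfs sources, ConfigUtil.addLink is a thin wrapper that records each mount entry as a Configuration property of the form fs.viewfs.mounttable.<table>.link.<src> (the table name defaults to "default"), and ViewFileSystem reads those properties back when FileSystem.get(FsConstants.VIEWFS_URI, conf) builds its mount tree. The small check below makes that assumption explicit; the /data mount point and the hdfs://namenode:8020/data target are placeholders, and if the key layout differs in your Hadoop version the lookup simply prints null.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class MountTableKeyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ConfigUtil.addLink(conf, "/data", new URI("hdfs://namenode:8020/data"));

    // Assumption: addLink stores the link under the default mount table as
    // fs.viewfs.mounttable.default.link./data = hdfs://namenode:8020/data
    System.out.println(conf.get("fs.viewfs.mounttable.default.link./data"));
  }
}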