This article collects typical usage examples of the Java class org.apache.hadoop.fs.RawLocalFileSystem. If you are wondering what RawLocalFileSystem is for, how to use it, or what real code that uses it looks like, the curated examples below may help.
The RawLocalFileSystem class belongs to the org.apache.hadoop.fs package. A total of 15 code examples of the RawLocalFileSystem class are shown below, sorted by popularity by default.
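Before diving into the project-specific examples, here is a minimal, self-contained sketch of basic RawLocalFileSystem usage (the class name RawLocalFileSystemQuickStart and the /tmp path are illustrative assumptions, not taken from any of the examples below). Note that an instance created with new must be initialized before use:
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalFileSystemQuickStart {
  public static void main(String[] args) throws IOException {
    // RawLocalFileSystem accesses the local file system directly, without the
    // CRC checksum files created by the checksummed LocalFileSystem wrapper.
    RawLocalFileSystem fs = new RawLocalFileSystem();
    // A freshly constructed instance must be initialized before it can be used.
    fs.initialize(URI.create("file:///"), new Configuration());
    try {
      Path dir = new Path("/tmp/rawlocalfs-demo"); // illustrative path
      fs.mkdirs(dir);
      System.out.println("Directory exists: " + fs.exists(dir));
    } finally {
      fs.close();
    }
  }
}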
Example 1: startMiniCluster
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@BeforeClass
public static void startMiniCluster() throws Exception {
  File targetDir = new File(System.getProperty("user.dir"), "target");
  File macDir = new File(targetDir, DelimitedIngestMiniClusterTest.class.getSimpleName() + "_cluster");
  if (macDir.exists()) {
    FileUtils.deleteDirectory(macDir);
  }
  MiniAccumuloConfigImpl config = new MiniAccumuloConfigImpl(macDir, ROOT_PASSWORD);
  config.setNumTservers(1);
  config.setInstanceName(INSTANCE_NAME);
  config.setSiteConfig(Collections.singletonMap("fs.file.impl", RawLocalFileSystem.class.getName()));
  config.useMiniDFS(true);
  MAC = new MiniAccumuloClusterImpl(config);
  MAC.start();
  FS = FileSystem.get(MAC.getMiniDfs().getConfiguration(0));
  ARGS = new DelimitedIngestArguments();
  ARGS.setUsername("root");
  ARGS.setPassword(ROOT_PASSWORD);
  ARGS.setInstanceName(INSTANCE_NAME);
  ARGS.setZooKeepers(MAC.getZooKeepers());
  ARGS.setConfiguration(MAC.getMiniDfs().getConfiguration(0));
}
Example 2: testInitExistingWorkingDirectoryInSafeMode
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
  tearDown();
  // Set up the file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(true).when(fs).isDirectory(any(Path.class));
  try {
    initAndStartStore(fs);
  } catch (Exception e) {
    Assert.fail("Exception should not be thrown: " + e);
  }
  // Make sure that directory creation was not attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(0)).mkdirs(any(Path.class));
}
Example 3: testInitNonExistingWorkingDirectoryInSafeMode
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
  LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
  tearDown();
  // Set up the file system to inject startup conditions
  FileSystem fs = spy(new RawLocalFileSystem());
  doReturn(false).when(fs).isDirectory(any(Path.class));
  doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
  try {
    initAndStartStore(fs);
    Assert.fail("Exception should have been thrown");
  } catch (Exception e) {
    // Expected failure
  }
  // Make sure that directory creation was attempted
  verify(fs, times(1)).isDirectory(any(Path.class));
  verify(fs, times(1)).mkdirs(any(Path.class));
}
Example 4: sameVolRename
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
/**
 * Rename srcPath to dstPath on the same volume. This is the same
 * as RawLocalFileSystem's rename method, except that it will not
 * fall back to a copy, and it will create the target directory
 * if it doesn't exist.
 */
private void sameVolRename(Path srcPath,
    Path dstPath) throws IOException {
  RawLocalFileSystem rfs = (RawLocalFileSystem) this.rfs;
  File src = rfs.pathToFile(srcPath);
  File dst = rfs.pathToFile(dstPath);
  if (!dst.getParentFile().exists()) {
    if (!dst.getParentFile().mkdirs()) {
      throw new IOException("Unable to rename " + src + " to "
          + dst + ": couldn't create parent directory");
    }
  }
  if (!src.renameTo(dst)) {
    throw new IOException("Unable to rename " + src + " to " + dst);
  }
}
Example 5: generateConfig
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
private final String generateConfig(int rsPort) {
  StringBuilder sb = new StringBuilder();
  Map<String, Object> confMap = new TreeMap<String, Object>();
  confMap.put(HConstants.CLUSTER_DISTRIBUTED, true);
  if (rsPort > 0) {
    confMap.put(HConstants.REGIONSERVER_PORT, rsPort);
    confMap.put(HConstants.REGIONSERVER_INFO_PORT_AUTO, true);
  }
  confMap.put(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
  confMap.put(HConstants.MASTER_PORT, masterPort);
  confMap.put(HConstants.HREGION_MAX_FILESIZE, MAX_FILE_SIZE_OVERRIDE);
  confMap.put("fs.file.impl", RawLocalFileSystem.class.getName());
  sb.append("<configuration>\n");
  for (Map.Entry<String, Object> entry : confMap.entrySet()) {
    sb.append(" <property>\n");
    sb.append(" <name>" + entry.getKey() + "</name>\n");
    sb.append(" <value>" + entry.getValue() + "</value>\n");
    sb.append(" </property>\n");
  }
  sb.append("</configuration>\n");
  return sb.toString();
}
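The generated configuration above maps the file:// scheme to RawLocalFileSystem through the fs.file.impl key. For comparison, a minimal sketch of the same override applied programmatically to a Hadoop Configuration (a standalone illustration; the class name RawLocalFsOverride is an assumption, not part of the original test):
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalFsOverride {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Same override as the generated config above: map the file:// scheme
    // to RawLocalFileSystem so no .crc checksum files are created.
    conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
    FileSystem fs = FileSystem.get(URI.create("file:///"), conf);
    System.out.println(fs.getClass().getName()); // org.apache.hadoop.fs.RawLocalFileSystem
  }
}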
Example 6: copyToLocalFileSystem
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
/**
 * Download the file from dfs to local file.
 *
 * @param fs the source (DFS) file system
 * @param destinationPath local directory to download into
 * @param destinationFile name of the local file to create
 * @param dfsFile path of the file on the DFS
 * @param conf Hadoop configuration used for the copy
 * @return the downloaded local file
 * @throws IOException
 */
public static File copyToLocalFileSystem(FileSystem fs, String destinationPath, String destinationFile, String dfsFile, Configuration conf)
    throws IOException
{
  File destinationDir = new File(destinationPath);
  if (!destinationDir.exists() && !destinationDir.mkdirs()) {
    throw new RuntimeException("Unable to create local directory");
  }
  try (RawLocalFileSystem localFileSystem = new RawLocalFileSystem()) {
    // allow app user to access local dir
    FsPermission permissions = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    localFileSystem.setPermission(new Path(destinationDir.getAbsolutePath()), permissions);
    Path dfsFilePath = new Path(dfsFile);
    File localFile = new File(destinationDir, destinationFile);
    FileUtil.copy(fs, dfsFilePath, localFile, false, conf);
    // set permissions on actual file to be read-only for user
    permissions = new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE);
    localFileSystem.setPermission(new Path(localFile.getAbsolutePath()), permissions);
    return localFile;
  }
}
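A hypothetical invocation of the helper above, assumed to be called from the same class; the file system handle, directory, and file names are placeholders rather than values from the original project:
Configuration conf = new Configuration();
FileSystem dfs = FileSystem.get(conf); // the DFS the application reads from (placeholder)
// Download /user/demo/input.csv from the DFS into /tmp/app-local/input.csv
File localCopy = copyToLocalFileSystem(
    dfs,
    "/tmp/app-local",        // local destination directory
    "input.csv",             // name of the downloaded file
    "/user/demo/input.csv",  // file to fetch from the DFS
    conf);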
Example 7: ExternalSortExec
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
private ExternalSortExec(final TaskAttemptContext context, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());
  this.plan = plan;
  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getQueryContext().getInt(SessionVars.EXTSORT_BUFFER_SIZE) * StorageUnit.MB;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  this.localFS = new RawLocalFileSystem();
  this.intermediateMeta = CatalogUtil.newTableMeta(BuiltinStorages.DRAW, context.getConf());
  this.inputStats = new TableStats();
  this.sortAlgorithm = getSortAlgorithm(context.getQueryContext(), sortSpecs);
  LOG.info(sortAlgorithm.name() + " sort is selected");
}
Example 8: init
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
public void init() throws IOException {
  keySchema = PlannerUtil.sortSpecsToSchema(sortSpecs);
  keyProjector = new KeyProjector(inSchema, keySchema.toArray());
  BSTIndex bst = new BSTIndex(context.getConf());
  this.comp = new BaseTupleComparator(keySchema, sortSpecs);
  Path storeTablePath = new Path(context.getWorkDir(), "output");
  LOG.info("Output data directory: " + storeTablePath);
  FileSystem fs = new RawLocalFileSystem();
  fs.mkdirs(storeTablePath);
  this.appender = (FileAppender) ((FileTablespace) TablespaceManager.getDefault())
      .getAppender(meta, outSchema, new Path(storeTablePath, "output"));
  this.appender.enableStats(keySchema.getAllColumns());
  this.appender.init();
  this.indexWriter = bst.getIndexWriter(new Path(storeTablePath, "index"),
      BSTIndex.TWO_LEVEL_INDEX, keySchema, comp, true);
  this.indexWriter.init();
  super.init();
}
Example 9: ExternalSortExec
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
private ExternalSortExec(final TaskAttemptContext context, final AbstractStorageManager sm, final SortNode plan)
    throws PhysicalPlanningException {
  super(context, plan.getInSchema(), plan.getOutSchema(), null, plan.getSortKeys());
  this.plan = plan;
  this.meta = CatalogUtil.newTableMeta(StoreType.ROWFILE);
  this.defaultFanout = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT);
  if (defaultFanout < 2) {
    throw new PhysicalPlanningException(ConfVars.EXECUTOR_EXTERNAL_SORT_FANOUT.varname + " cannot be lower than 2");
  }
  // TODO - sort buffer and core num should be changed to use the allocated container resource.
  this.sortBufferBytesNum = context.getConf().getLongVar(ConfVars.EXECUTOR_EXTERNAL_SORT_BUFFER_SIZE) * 1048576L;
  this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
  this.executorService = Executors.newFixedThreadPool(this.allocatedCoreNum);
  this.inMemoryTable = new ArrayList<Tuple>(100000);
  this.sortTmpDir = getExecutorTmpDir();
  localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
  localFS = new RawLocalFileSystem();
}
Example 10: buildLogCopier
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());
  LogCopier.Builder builder = LogCopier.newBuilder()
      .useSrcFileSystem(this.fs)
      .useDestFileSystem(rawLocalFs)
      .readFrom(getHdfsLogDir(appWorkDir))
      .writeTo(sinkLogDir)
      .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
    builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
  }
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
    builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
  }
  return builder.build();
}
Example 11: test1
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
public void test1() throws IOException, ParseException {
  String schema = "s1:string, s2:string";
  String storage = "[s1, s2]COMPRESS BY gz SECURE BY uid:user1 gid:users perm:744 SERIALIZE BY pig";
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  Path path1 = new Path(path.toString() + "1");
  Runtime.getRuntime().exec("rm -rf " + path1.toString());
  fs = path.getFileSystem(conf);
  BasicTable.Writer writer = new BasicTable.Writer(path1, schema, storage,
      conf);
  writer.finish();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  PrintStream ps = new PrintStream(bos);
  System.out.println("start dumpinfo ===========");
  BasicTable.dumpInfo(path1.toString(), ps, conf);
  Assert.assertEquals(true, bos.toString().contains("Serializer: pig"));
  Assert.assertEquals(true, bos.toString().contains("Compressor: gz"));
  Assert.assertEquals(true, bos.toString().contains(
      "Schema : s1:string,s2:string"));
}
Example 12: testWriteRecord5
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testWriteRecord5() throws IOException, ParseException {
  String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4))";
  String STR_STORAGE = "[r1.f1]; [r2.r3]; [r1.f2, r2.r3.f3]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
Example 13: testWriteRecord6
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testWriteRecord6() throws IOException, ParseException {
  String STR_SCHEMA = "r1:record(f1:int, f2:long), r2:record(r3:record(f3:float, f4))";
  String STR_STORAGE = "[r1.f1]; [r1.f2, r2.r3.f3]; [r2.r3]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
Example 14: testWriteMap1
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testWriteMap1() throws IOException, ParseException {
  String STR_SCHEMA = " m2:map(map(map(string)))";
  String STR_STORAGE = "[m2#{k}#{j}]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}
Example 15: testWriteMap2
import org.apache.hadoop.fs.RawLocalFileSystem; // import the package/class this example depends on
@Test
public void testWriteMap2() throws IOException, ParseException {
  String STR_SCHEMA = " m2:map(map(map(string)))";
  String STR_STORAGE = "[m2.{k}]";
  conf = new Configuration();
  conf.setInt("table.output.tfile.minBlock.size", 64 * 1024);
  conf.setInt("table.input.split.minSize", 64 * 1024);
  conf.set("table.output.tfile.compression", "none");
  RawLocalFileSystem rawLFS = new RawLocalFileSystem();
  fs = new LocalFileSystem(rawLFS);
  path = new Path(fs.getWorkingDirectory(), this.getClass().getSimpleName());
  fs = path.getFileSystem(conf);
  // drop any previous tables
  BasicTable.drop(path, conf);
  // Build Table and column groups
  BasicTable.Writer writer = null;
  try {
    writer = new BasicTable.Writer(path, STR_SCHEMA, STR_STORAGE, conf);
    Assert.fail("Should throw exception");
  } catch (Exception e) {
    System.out.println(e);
  }
}