本文整理汇总了Java中org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil.toConfiguration方法的典型用法代码示例。如果您正苦于以下问题:Java ConfigurationUtil.toConfiguration方法的具体用法?Java ConfigurationUtil.toConfiguration怎么用?Java ConfigurationUtil.toConfiguration使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil
的用法示例。
在下文中一共展示了ConfigurationUtil.toConfiguration方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: fs
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Executes a Hadoop filesystem command. Whatever output the command
 * produces is written by FsShell itself to stdout or stderr.
 *
 * @param cmd the filesystem command together with its arguments, as a
 *            single whitespace-separated string
 * @return the shell exit code, or -1 when {@code cmd} is null
 * @throws IOException if running the command fails
 */
public static int fs(String cmd) throws IOException {
ScriptPigContext scriptCtx = getScriptContext();
FsShell fsShell = new FsShell(ConfigurationUtil.toConfiguration(
scriptCtx.getPigContext().getProperties()));
int exitCode = -1;
if (cmd == null) {
return exitCode;
}
String[] tokens = cmd.split("\\s+");
// FsShell expects the command name in "-command" form, e.g. "-ls".
if (!tokens[0].startsWith("-")) {
tokens[0] = "-" + tokens[0];
}
try {
exitCode = fsShell.run(tokens);
} catch (Exception e) {
throw new IOException("Run filesystem command failed", e);
}
return exitCode;
}
示例2: initRightLoader
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Prepares the loader for the right-hand side of the join so it reads
 * exactly the given input splits, in order.
 *
 * @param splitsToBeRead indexes of the splits the loader must replay
 * @throws IOException if the serialized PigContext cannot be restored
 */
private void initRightLoader(int [] splitsToBeRead) throws IOException{
// Recover the PigContext that was serialized into the job configuration.
PigContext pigCtx = (PigContext) ObjectSerializer.deserialize(
PigMapReduce.sJobConfInternal.get().get("pig.pigContext"));
Configuration rightConf = ConfigurationUtil.toConfiguration(pigCtx.getProperties());
// Propagate the delegation-token location so secure clusters can authenticate.
String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
if (tokenFile != null) {
rightConf.set(MRConfiguration.JOB_CREDENTIALS_BINARY, tokenFile);
}
// ReadToEndLoader replays the requested splits sequentially.
loader = new ReadToEndLoader(
(LoadFunc) PigContext.instantiateFuncFromSpec(rightLoaderFuncSpec),
rightConf, inpLocation, splitsToBeRead);
}
示例3: EmptyPigStats
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Builds a stats object for a run that produced no MR jobs (e.g. a
 * fetch-optimized query): timing collapses to "now" and a single
 * placeholder input and output entry are exposed as immutable lists.
 */
public EmptyPigStats(PigContext pigContext, POStore poStore) {
super.pigContext = pigContext;
long now = System.currentTimeMillis();
super.startTime = now;
super.endTime = now;
super.userId = System.getProperty("user.name");
Configuration statsConf = ConfigurationUtil.toConfiguration(pigContext.getProperties());
// Placeholder output entry so consumers always find exactly one element.
OutputStats outStats = new OutputStats(null, -1, -1, true);
outStats.setConf(statsConf);
outStats.setPOStore(poStore);
this.outputStatsList = Collections.unmodifiableList(Arrays.asList(outStats));
// Matching placeholder input entry.
InputStats inStats = new InputStats(null, -1, -1, true);
inStats.setConf(statsConf);
this.inputStatsList = Collections.unmodifiableList(Arrays.asList(inStats));
}
示例4: fs
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Runs a filesystem command through Hadoop's FsShell; command output goes
 * straight to stdout/stderr.
 *
 * @param cmd command plus arguments as one string
 * @return the command's exit status, or -1 when {@code cmd} is null
 * @throws IOException when command execution fails
 */
public static int fs(String cmd) throws IOException {
ScriptPigContext context = getScriptContext();
FsShell hdfsShell = new FsShell(ConfigurationUtil.toConfiguration(
context.getPigContext().getProperties()));
int status = -1;
if (cmd != null) {
String[] parts = cmd.split("\\s+");
// Normalize the command name to FsShell's leading-dash convention.
parts[0] = parts[0].startsWith("-") ? parts[0] : "-" + parts[0];
try {
status = hdfsShell.run(parts);
} catch (Exception e) {
throw new IOException("Run filesystem command failed", e);
}
}
return status;
}
示例5: testGroupConstWithParallel
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Verifies that "group by constant" is compiled with a single reducer even
 * when default_parallel asks for 100: every row maps to the same key, so
 * extra reducers would sit idle.
 *
 * @throws Throwable on any test failure
 */
@Test
public void testGroupConstWithParallel() throws Throwable {
PigContext pc = new PigContext(ExecType.MAPREDUCE, cluster.getProperties());
pc.defaultParallel = 100;
pc.connect();
String query = "a = load 'input';\n" + "b = group a by 1;" + "store b into 'output';";
PigServer pigServer = new PigServer( ExecType.MAPREDUCE, cluster.getProperties() );
PhysicalPlan pp = Util.buildPp( pigServer, query );
MROperPlan mrPlan = Util.buildMRPlan(pp, pc);
ConfigurationValidator.validatePigProperties(pc.getProperties());
Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
JobControlCompiler jcc = new JobControlCompiler(pc, conf);
JobControl jobControl = jcc.compile(mrPlan, "Test");
Job job = jobControl.getWaitingJobs().get(0);
int parallel = job.getJobConf().getNumReduceTasks();
// Fixed typo in the assertion message ("parallism" -> "parallelism").
assertEquals("parallelism", 1, parallel);
}
示例6: checkDefaultParallelResult
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Compiles the physical plan into Hadoop jobs and asserts that the
// configured default parallelism (100) became the job's reducer count.
@Override
public void checkDefaultParallelResult(PhysicalPlan pp, PigContext pc) throws Exception {
MROperPlan mrPlan = Util.buildMRPlan(pp, pc);
ConfigurationValidator.validatePigProperties(pc.getProperties());
JobControlCompiler compiler = new JobControlCompiler(
pc, ConfigurationUtil.toConfiguration(pc.getProperties()));
JobControl control = compiler.compile(mrPlan, "Test");
Job firstJob = control.getWaitingJobs().get(0);
// default_parallel should drive the reducer count of the compiled job.
int reducers = firstJob.getJobConf().getNumReduceTasks();
assertEquals(100, reducers);
Util.assertParallelValues(100, -1, -1, 100, firstJob.getJobConf());
}
示例7: perTestInitialize
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Fresh fixtures per test: properties with SchemaTuple code generation
// enabled, plus the Configuration and local-mode PigContext built from them.
@Before
public void perTestInitialize() {
props = new Properties();
// PIG_SCHEMA_TUPLE_ENABLED turns on generated SchemaTuple classes.
props.setProperty(PigConfiguration.PIG_SCHEMA_TUPLE_ENABLED, "true");
conf = ConfigurationUtil.toConfiguration(props);
pigContext = new PigContext(ExecType.LOCAL, props);
}
示例8: simpleTest2
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Runs a tiny filter/limit/dump script and checks that it is served by the
// fetch optimizer (EmptyPigStats, zero jobs) and that command-line -D
// overrides show up in the recorded Pig properties.
@Test
public void simpleTest2() throws Exception {
PrintWriter scriptWriter = new PrintWriter(new FileWriter(PIG_FILE));
scriptWriter.println("A = load '" + INPUT_FILE + "' as (a0:int, a1:int, a2:int);");
scriptWriter.println("B = filter A by a0 == 3;");
scriptWriter.println("C = limit B 1;");
scriptWriter.println("dump C;");
scriptWriter.close();
try {
String[] args = { "-Dstop.on.failure=true", "-Dopt.multiquery=false", "-Daggregate.warning=false", "-x", execType, PIG_FILE };
PigStats stats = PigRunner.run(args, new TestNotificationListener(execType));
// A fetch-optimized run spawns no MR jobs and reports EmptyPigStats.
assertTrue(stats instanceof EmptyPigStats);
assertTrue(stats.isSuccessful());
assertEquals(0, stats.getNumberJobs());
assertEquals(stats.getJobGraph().size(), 0);
// The -D overrides must be reflected in the recorded properties.
Configuration conf = ConfigurationUtil.toConfiguration(stats.getPigProperties());
assertTrue(conf.getBoolean("stop.on.failure", false));
assertTrue(!conf.getBoolean("aggregate.warning", true));
assertTrue(!conf.getBoolean(PigConfiguration.PIG_OPT_MULTIQUERY, true));
assertTrue(conf.getBoolean("opt.fetch", true));
} finally {
new File(PIG_FILE).delete();
Util.deleteFile(cluster, OUTPUT_FILE);
}
}
示例9: checkJobControlCompilerErrResult
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Removes both roots from the MR plan so compilation must fail, then checks
// that JobControlCompiler raises JobCreationException with error code 1068.
@Override
public void checkJobControlCompilerErrResult(PhysicalPlan pp, PigContext pc) throws Exception {
MROperPlan mrPlan = Util.buildMRPlan(pp, pc);
mrPlan.remove(mrPlan.getRoots().get(0));
mrPlan.remove(mrPlan.getRoots().get(0));
ConfigurationValidator.validatePigProperties(pc.getProperties());
Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
JobControlCompiler jcc = new JobControlCompiler(pc, conf);
boolean sawJobCreationException = false;
try {
jcc.compile(mrPlan, "Test");
} catch (JobCreationException jce) {
sawJobCreationException = true;
assertTrue(jce.getErrorCode() == 1068);
}
// Previously a missing exception passed silently; fail if compile succeeded.
assertTrue("expected JobCreationException (error 1068) from compile", sawJobCreationException);
}
示例10: load
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Reads the whole file into a bag using the given load function.
 *
 * @param lfunc load function used to deserialize each tuple
 * @param pigContext context whose properties configure the filesystem
 * @return a bag containing every tuple read from {@code file}
 * @throws IOException on read failure
 */
public DataBag load(LoadFunc lfunc, PigContext pigContext) throws IOException {
// ReadToEndLoader starts at split 0 and runs through the entire input.
ReadToEndLoader reader = new ReadToEndLoader(lfunc,
ConfigurationUtil.toConfiguration(pigContext.getProperties()), file, 0);
DataBag bag = BagFactory.getInstance().newDefaultBag();
for (Tuple t = reader.getNext(); t != null; t = reader.getNext()) {
bag.add(t);
}
return bag;
}
示例11: testSFPig
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Stores a single tab-separated row through PigStorage on MR, reads the
// output back with ReadToEndLoader, and checks the round trip is lossless.
@SuppressWarnings("unchecked")
@Test
public void testSFPig() throws Exception {
PigServer mrPigServer = new PigServer(ExecType.MAPREDUCE);
String inputStr = "amy\tbob\tcharlene\tdavid\terin\tfrank";
Util.createInputFile(cluster, "testSFPig-input.txt", new String[]
{inputStr});
// The expected tuple mirrors the tab-separated input row.
DataByteArray[] expectedFields = { new DataByteArray("amy"),
new DataByteArray("bob"), new DataByteArray("charlene"),
new DataByteArray("david"), new DataByteArray("erin"),
new DataByteArray("frank") };
Tuple expected = Util.loadTuple(
TupleFactory.getInstance().newTuple(expectedFields.length), expectedFields);
String outputLocation = "testSFPig-output.txt";
String query = "a = load 'testSFPig-input.txt';" +
"store a into '" + outputLocation + "';";
mrPigServer.setBatchOn();
Util.registerMultiLineQuery(mrPigServer, query);
mrPigServer.executeBatch();
// Read the stored output back and compare against the original row.
LoadFunc reader = new ReadToEndLoader(new PigStorage(),
ConfigurationUtil.toConfiguration(cluster.getProperties()), outputLocation, 0);
Tuple actual = reader.getNext();
Util.deleteFile(cluster, "testSFPig-input.txt");
Util.deleteFile(cluster, outputLocation);
assertEquals(expected, actual);
}
示例12: getExecConf
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Builds the JobConf used for MR execution. When
 * "pig.use.overriden.hadoop.configs" is "true" the caller-supplied
 * properties are trusted verbatim; otherwise hadoop-site.xml or
 * core-site.xml must be discoverable on the classpath.
 *
 * @param properties current Pig session properties
 * @return a JobConf with cluster and YARN resources added
 * @throws ExecException (code 4010) when no Hadoop configuration is found
 */
public JobConf getExecConf(Properties properties) throws ExecException {
JobConf jobConf;
String useOverriden = properties.getProperty("pig.use.overriden.hadoop.configs");
if ("true".equals(useOverriden)) {
// User explicitly provided the Hadoop configuration via properties.
jobConf = new JobConf(ConfigurationUtil.toConfiguration(properties));
} else {
// Otherwise a Hadoop config file must be present on the classpath.
Configuration probe = new Configuration();
ClassLoader cl = probe.getClassLoader();
URL hadoopSite = cl.getResource(HADOOP_SITE);
URL coreSite = cl.getResource(CORE_SITE);
if (hadoopSite == null && coreSite == null) {
throw new ExecException(
"Cannot find hadoop configurations in classpath "
+ "(neither hadoop-site.xml nor core-site.xml was found in the classpath)."
+ " If you plan to use local mode, please put -x local option in command line",
4010);
}
jobConf = new JobConf();
}
jobConf.addResource("pig-cluster-hadoop-site.xml");
jobConf.addResource(YARN_SITE);
return jobConf;
}
示例13: setUp
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Per-test fixture: a connected local-mode PigContext supplies the
// filesystem configuration that the tests run against.
@Before
public void setUp() throws Exception {
PigContext localCtx = new PigContext(ExecType.LOCAL, new Properties());
localCtx.connect();
// Copy the filesystem configuration so tests cannot mutate the original.
conf = new Configuration(
ConfigurationUtil.toConfiguration(localCtx.getFs().getConfiguration())
);
}
示例14: createInputFile
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
/**
 * Creates {@code fileName} on the filesystem described by the PigContext's
 * properties and writes each element of {@code input} as a line.
 *
 * @param pigContext context whose properties locate the target filesystem
 * @param fileName path of the file to create
 * @param input lines to write into the new file
 * @throws IOException if the filesystem cannot be reached or written
 */
public static void createInputFile(PigContext pigContext,
String fileName, String[] input) throws IOException {
FileSystem targetFs = FileSystem.get(
ConfigurationUtil.toConfiguration(pigContext.getProperties()));
createInputFile(targetFs, fileName, input);
}
示例15: simpleTest
import org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil; //导入方法依赖的package包/类
// Runs a group-and-count script with the fetch optimizer disabled and
// verifies job counts, output stats, aliases, and that command-line -D
// overrides are captured in the recorded Pig properties.
@Test
public void simpleTest() throws Exception {
PrintWriter scriptWriter = new PrintWriter(new FileWriter(PIG_FILE));
scriptWriter.println("A = load '" + INPUT_FILE + "' as (a0:int, a1:int, a2:int);");
scriptWriter.println("B = group A by a0;");
scriptWriter.println("C = foreach B generate group, COUNT(A);");
scriptWriter.println("store C into '" + OUTPUT_FILE + "';");
scriptWriter.close();
try {
String[] args = { "-Dstop.on.failure=true", "-Dopt.multiquery=false", "-Dopt.fetch=false", "-Daggregate.warning=false", "-x", execType, PIG_FILE };
PigStats stats = PigRunner.run(args, new TestNotificationListener(execType));
assertTrue(stats.isSuccessful());
// Exactly one MR job should have written the expected output.
assertEquals(1, stats.getNumberJobs());
String outputName = stats.getOutputNames().get(0);
assertEquals(OUTPUT_FILE, outputName);
assertEquals(12, stats.getBytesWritten());
assertEquals(3, stats.getRecordWritten());
assertEquals("A,B,C",
((JobStats) stats.getJobGraph().getSinks().get(0)).getAlias());
// The -D overrides must appear in the recorded properties.
Configuration conf = ConfigurationUtil.toConfiguration(stats.getPigProperties());
assertTrue(conf.getBoolean("stop.on.failure", false));
assertTrue(!conf.getBoolean("aggregate.warning", true));
assertTrue(!conf.getBoolean(PigConfiguration.PIG_OPT_MULTIQUERY, true));
assertTrue(!conf.getBoolean("opt.fetch", true));
} finally {
new File(PIG_FILE).delete();
Util.deleteFile(cluster, OUTPUT_FILE);
}
}