This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobConf.get. If you are wondering what JobConf.get does, how to call it, or what real uses of it look like, the hand-picked code samples below should help; you can also explore further examples for the containing class, org.apache.hadoop.mapred.JobConf.
Fifteen code examples of the JobConf.get method are shown below, sorted by popularity by default.
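Before the project examples, here is a minimal, self-contained sketch of the basic JobConf.get behaviour. It is not taken from any of the projects below, and the property names are invented purely for illustration:
import org.apache.hadoop.mapred.JobConf;

public class JobConfGetSketch {
  public static void main(String[] args) {
    JobConf conf = new JobConf();
    conf.set("example.table.name", "users"); // hypothetical key, for illustration only
    // get(String) returns the stored value, or null if the key is absent
    String table = conf.get("example.table.name");          // "users"
    String missing = conf.get("example.unset.key");         // null
    // get(String, String) falls back to the supplied default instead of null
    String fallback = conf.get("example.unset.key", "n/a"); // "n/a"
    System.out.println(table + " " + missing + " " + fallback);
  }
}
Most of the examples below follow this pattern: read a string property from the job configuration, then either fail fast or fall back to a default when it is missing.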
Example 1: configure
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
@Override // MapReduceBase
public void configure(JobConf conf) {
  try {
    config = new ConfigExtractor(conf);
    ConfigExtractor.dumpOptions(config);
    filesystem = config.getBaseDirectory().getFileSystem(conf);
  } catch (Exception e) {
    LOG.error("Unable to setup slive " + StringUtils.stringifyException(e));
    throw new RuntimeException("Unable to setup slive configuration", e);
  }
  if (conf.get(MRJobConfig.TASK_ATTEMPT_ID) != null) {
    this.taskId = TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
        .getTaskID().getId();
  } else {
    // So that branch-1/0.20 can run this same code as well
    this.taskId = TaskAttemptID.forName(conf.get("mapred.task.id"))
        .getTaskID().getId();
  }
}
Example 2: finalize
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
static private void finalize(Configuration conf, JobConf jobconf,
    final Path destPath, String presevedAttributes) throws IOException {
  if (presevedAttributes == null) {
    return;
  }
  EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes);
  if (!preseved.contains(FileAttribute.USER)
      && !preseved.contains(FileAttribute.GROUP)
      && !preseved.contains(FileAttribute.PERMISSION)) {
    return;
  }
  FileSystem dstfs = destPath.getFileSystem(conf);
  Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL));
  try (SequenceFile.Reader in =
      new SequenceFile.Reader(jobconf, Reader.file(dstdirlist))) {
    Text dsttext = new Text();
    FilePair pair = new FilePair();
    for (; in.next(dsttext, pair); ) {
      Path absdst = new Path(destPath, pair.output);
      updateDestStatus(pair.input, dstfs.getFileStatus(absdst),
          preseved, dstfs);
    }
  }
}
Example 3: validateInput
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public void validateInput(JobConf job) throws IOException {
  // expecting exactly one path
  Path[] tableNames = FileInputFormat.getInputPaths(job);
  if (tableNames == null || tableNames.length > 1) {
    throw new IOException("expecting one table name");
  }
  // connected to table?
  if (getHTable() == null) {
    throw new IOException("could not connect to table '" +
        tableNames[0].getName() + "'");
  }
  // expecting at least one column
  String colArg = job.get(COLUMN_LIST);
  if (colArg == null || colArg.length() == 0) {
    throw new IOException("expecting at least one column");
  }
}
Example 4: getInputPaths
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public static Path[] getInputPaths(JobConf context) {
  String dirs = context.get(INPUT_DIR, "");
  LOG.info("dirs=" + dirs);
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
Example 5: checkOutputSpecs
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job)
    throws FileAlreadyExistsException, InvalidJobConfException, IOException {
  String tableName = job.get(OUTPUT_TABLE);
  if (tableName == null) {
    throw new IOException("Must specify table name");
  }
}
Example 6: configure
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public void configure(JobConf job) {
  String userTypeName = job.get(USER_TYPE_NAME_KEY);
  if (null == userTypeName) {
    throw new RuntimeException("Unconfigured parameter: "
        + USER_TYPE_NAME_KEY);
  }
  LOG.info("User type name set to " + userTypeName);
  this.userRecord = null;
  try {
    Configuration conf = new Configuration();
    Class userClass = Class.forName(userTypeName, true,
        Thread.currentThread().getContextClassLoader());
    this.userRecord =
        (SqoopRecord) ReflectionUtils.newInstance(userClass, conf);
  } catch (ClassNotFoundException cnfe) {
    // handled by the next block.
    LOG.error("ClassNotFound exception: " + cnfe.toString());
  } catch (Exception e) {
    LOG.error("Got an exception reflecting user class: " + e.toString());
  }
  if (null == this.userRecord) {
    LOG.error("Could not instantiate user record of type " + userTypeName);
    throw new RuntimeException("Could not instantiate user record of type "
        + userTypeName);
  }
}
Example 7: getPipeCommand
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
String getPipeCommand(JobConf job) {
  String str = job.get("stream.combine.streamprocessor");
  try {
    if (str != null) {
      return URLDecoder.decode(str, "UTF-8");
    }
  } catch (UnsupportedEncodingException e) {
    System.err.println("stream.combine.streamprocessor" +
        " in jobconf not found");
  }
  return null;
}
Example 8: initialize
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
@Override
protected void initialize(JobConf job) throws IOException {
  Path[] tableNames = FileInputFormat.getInputPaths(job);
  String colArg = job.get(COLUMN_LIST);
  String[] colNames = colArg.split(" ");
  byte[][] m_cols = new byte[colNames.length][];
  for (int i = 0; i < m_cols.length; i++) {
    m_cols[i] = Bytes.toBytes(colNames[i]);
  }
  setInputColumns(m_cols);
  Connection connection = ConnectionFactory.createConnection(job);
  initializeTable(connection, TableName.valueOf(tableNames[0].getName()));
}
Example 9: getPipeCommand
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
String getPipeCommand(JobConf job) {
  String str = job.get("stream.map.streamprocessor");
  if (str == null) {
    return str;
  }
  try {
    return URLDecoder.decode(str, "UTF-8");
  } catch (UnsupportedEncodingException e) {
    System.err.println("stream.map.streamprocessor in jobconf not found");
    return null;
  }
}
Example 10: configure
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public void configure(JobConf jconf) {
  conf = jconf;
  try {
    // read the cached files (unzipped, unjarred and text)
    // and put it into a single file TEST_ROOT_DIR/test.txt
    String TEST_ROOT_DIR = jconf.get("test.build.data", "/tmp");
    Path file = new Path("file:///", TEST_ROOT_DIR);
    FileSystem fs = FileSystem.getLocal(conf);
    if (!fs.mkdirs(file)) {
      throw new IOException("Mkdirs failed to create " + file.toString());
    }
    Path fileOut = new Path(file, "test.txt");
    fs.delete(fileOut, true);
    DataOutputStream out = fs.create(fileOut);
    String[] symlinks = new String[6];
    symlinks[0] = ".";
    symlinks[1] = "testjar";
    symlinks[2] = "testzip";
    symlinks[3] = "testtgz";
    symlinks[4] = "testtargz";
    symlinks[5] = "testtar";
    for (int i = 0; i < symlinks.length; i++) {
      // read out the files from these archives
      File f = new File(symlinks[i]);
      File txt = new File(f, "test.txt");
      FileInputStream fin = new FileInputStream(txt);
      BufferedReader reader = new BufferedReader(new InputStreamReader(fin));
      String str = reader.readLine();
      reader.close();
      out.writeBytes(str);
      out.writeBytes("\n");
    }
    out.close();
  } catch (IOException ie) {
    System.out.println(StringUtils.stringifyException(ie));
  }
}
Example 11: getPipeCommand
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
String getPipeCommand(JobConf job) {
  String str = job.get("stream.reduce.streamprocessor");
  if (str == null) {
    return str;
  }
  try {
    return URLDecoder.decode(str, "UTF-8");
  } catch (UnsupportedEncodingException e) {
    System.err.println("stream.reduce.streamprocessor in jobconf not found");
    return null;
  }
}
Example 12: addInputPath
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
/**
 * Add a {@link Path} with a custom {@link InputFormat} to the list of
 * inputs for the map-reduce job.
 *
 * @param conf The configuration of the job
 * @param path {@link Path} to be added to the list of inputs for the job
 * @param inputFormatClass {@link InputFormat} class to use for this path
 */
public static void addInputPath(JobConf conf, Path path,
    Class<? extends InputFormat> inputFormatClass) {
  String inputFormatMapping = path.toString() + ";"
      + inputFormatClass.getName();
  String inputFormats = conf.get("mapreduce.input.multipleinputs.dir.formats");
  conf.set("mapreduce.input.multipleinputs.dir.formats",
      inputFormats == null ? inputFormatMapping : inputFormats + ","
          + inputFormatMapping);
  conf.setInputFormat(DelegatingInputFormat.class);
}
Example 13: configure
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public void configure(JobConf conf) {
  this.conf = conf;
  // this is tightly tied to map reduce
  // since it does not expose an api
  // to get the partition
  partId = conf.getInt(MRJobConfig.TASK_PARTITION, -1);
  // create a file name using the partition
  // we need to write to this directory
  tmpOutputDir = FileOutputFormat.getWorkOutputPath(conf);
  blockSize = conf.getLong(HAR_BLOCKSIZE_LABEL, blockSize);
  // get the output path and write to the tmp
  // directory
  partname = "part-" + partId;
  tmpOutput = new Path(tmpOutputDir, partname);
  rootPath = (conf.get(SRC_PARENT_LABEL, null) == null) ? null :
      new Path(conf.get(SRC_PARENT_LABEL));
  if (rootPath == null) {
    throw new RuntimeException("Unable to read parent " +
        "path for har from config");
  }
  try {
    destFs = tmpOutput.getFileSystem(conf);
    // this was a stale copy
    if (destFs.exists(tmpOutput)) {
      destFs.delete(tmpOutput, false);
    }
    partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096),
        destFs.getDefaultReplication(tmpOutput), blockSize);
  } catch (IOException ie) {
    throw new RuntimeException("Unable to open output file " + tmpOutput, ie);
  }
  buffer = new byte[buf_size];
}
Example 14: getSplits
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
/**
 * Produce splits such that each is no greater than the quotient of the
 * total size and the number of splits requested.
 * @param job The handle to the JobConf object
 * @param numSplits Number of splits requested
 */
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
  final int srcCount = job.getInt(OP_COUNT_LABEL, -1);
  final int targetcount = srcCount / numSplits;
  String srclist = job.get(OP_LIST_LABEL, "");
  if (srcCount < 0 || "".equals(srclist)) {
    throw new RuntimeException("Invalid metadata: #files(" + srcCount +
        ") listuri(" + srclist + ")");
  }
  Path srcs = new Path(srclist);
  FileSystem fs = srcs.getFileSystem(job);
  List<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
  Text key = new Text();
  FileOperation value = new FileOperation();
  long prev = 0L;
  int count = 0; // count src
  try (SequenceFile.Reader in = new SequenceFile.Reader(fs, srcs, job)) {
    for (; in.next(key, value); ) {
      long curr = in.getPosition();
      long delta = curr - prev;
      if (++count > targetcount) {
        count = 0;
        splits.add(new FileSplit(srcs, prev, delta, (String[]) null));
        prev = curr;
      }
    }
  }
  long remaining = fs.getFileStatus(srcs).getLen() - prev;
  if (remaining != 0) {
    splits.add(new FileSplit(srcs, prev, remaining, (String[]) null));
  }
  LOG.info("numSplits=" + numSplits + ", splits.size()=" + splits.size());
  return splits.toArray(new FileSplit[splits.size()]);
}
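To make the arithmetic concrete: if OP_COUNT_LABEL reports 10 operations and numSplits is 3, then targetcount is 10 / 3 = 3, so a split boundary is emitted each time a fourth record is read from the sequence file. The loop produces two splits of four operations each, and the trailing remainder becomes a third split covering the last two, so roughly the requested number of splits is returned.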
Example 15: HiveReaderSetting
import org.apache.hadoop.mapred.JobConf; // import the dependent package/class
public HiveReaderSetting( final FileSplit split, final JobConf job ){
  config = new Configuration();
  disableSkipBlock = job.getBoolean( "mds.disable.block.skip" , false );
  disableFilterPushdown = job.getBoolean( "mds.disable.filter.pushdown" , false );
  Set<String> pathNameSet = createPathSet( split.getPath() );
  List<ExprNodeGenericFuncDesc> filterExprs = new ArrayList<ExprNodeGenericFuncDesc>();
  String filterExprSerialized = job.get( TableScanDesc.FILTER_EXPR_CONF_STR );
  if( filterExprSerialized != null ){
    filterExprs.add( SerializationUtilities.deserializeExpression(filterExprSerialized) );
  }
  MapWork mapWork;
  try{
    mapWork = Utilities.getMapWork(job);
  }catch( Exception e ){
    mapWork = null;
  }
  if( mapWork == null ){
    node = createExpressionNode( filterExprs );
    isVectorModeFlag = false;
    return;
  }
  node = createExpressionNode( filterExprs );
  for( Map.Entry<String,PartitionDesc> pathsAndParts : mapWork.getPathToPartitionInfo().entrySet() ){
    if( ! pathNameSet.contains( pathsAndParts.getKey() ) ){
      continue;
    }
    Properties props = pathsAndParts.getValue().getTableDesc().getProperties();
    if( props.containsKey( "mds.expand" ) ){
      config.set( "spread.reader.expand.column" , props.getProperty( "mds.expand" ) );
    }
    if( props.containsKey( "mds.flatten" ) ){
      config.set( "spread.reader.flatten.column" , props.getProperty( "mds.flatten" ) );
    }
  }
  config.set( "spread.reader.read.column.names" , createReadColumnNames( job.get( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , null ) ) );
  // Next Hive version:
  // Utilities.getUseVectorizedInputFileFormat(job)
  isVectorModeFlag = Utilities.isVectorMode( job );
}