This article collects and organizes typical usage examples of the Java class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat. If you have been wondering what SequenceFileAsBinaryInputFormat is for, how to use it, or where to find examples of it in use, the curated samples below should help.
SequenceFileAsBinaryInputFormat belongs to the org.apache.hadoop.mapreduce.lib.input package. Twelve code examples of the class are presented below, ordered by popularity by default.
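Before the individual examples, a minimal end-to-end sketch may help. SequenceFileAsBinaryInputFormat hands each record's key and value to the mapper as raw BytesWritable buffers, skipping deserialization. The wiring below is illustrative only: BinaryScanJob and RawBytesMapper are invented names, and the pass-through logic is an assumption, not code from any of the projects excerpted here.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BinaryScanJob {
  // Receives each record's key and value as raw, undeserialized byte buffers.
  static class RawBytesMapper
      extends Mapper<BytesWritable, BytesWritable, BytesWritable, BytesWritable> {
    @Override
    protected void map(final BytesWritable key, final BytesWritable value, final Context context)
        throws IOException, InterruptedException {
      context.write(key, value); // pass the raw bytes through unchanged
    }
  }

  public static void main(final String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "binary-scan");
    job.setJarByClass(BinaryScanJob.class);
    job.setInputFormatClass(SequenceFileAsBinaryInputFormat.class);
    job.setMapperClass(RawBytesMapper.class);
    job.setNumReduceTasks(0); // map-only pass-through
    job.setOutputKeyClass(BytesWritable.class);
    job.setOutputValueClass(BytesWritable.class);
    SequenceFileAsBinaryInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}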
Example 1: readFileToSearch
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
private static SortedSet<byte[]> readFileToSearch(final Configuration conf,
    final FileSystem fs, final LocatedFileStatus keyFileStatus) throws IOException,
    InterruptedException {
  SortedSet<byte[]> result = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  // Collect the rows whose value is flagged Counts.UNDEFINED; those are the missing entries.
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  try (SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader rr =
      new SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader()) {
    InputSplit is =
        new FileSplit(keyFileStatus.getPath(), 0, keyFileStatus.getLen(), new String[] {});
    rr.initialize(is, context);
    while (rr.nextKeyValue()) {
      BytesWritable bw = rr.getCurrentValue();
      if (Verify.VerifyReducer.whichType(bw.getBytes()) == Verify.Counts.UNDEFINED) {
        // Copy only the valid prefix: getBytes() can return a backing array larger than getLength().
        BytesWritable currentKey = rr.getCurrentKey();
        byte[] key = new byte[currentKey.getLength()];
        System.arraycopy(currentKey.getBytes(), 0, key, 0, currentKey.getLength());
        result.add(key);
      }
    }
  }
  return result;
}
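Note that this helper drives a SequenceFileAsBinaryRecordReader directly, outside of any running job, which makes it useful for post-hoc verification of job output. A hedged sketch of calling it over a directory of key files follows; keysDir is a hypothetical path, and conf and fs are assumed to already be in scope.
// Hypothetical driver: union the missing rows found in every key file under keysDir.
Path keysDir = new Path("/tmp/keys"); // hypothetical location of the key files
SortedSet<byte[]> missing = new TreeSet<>(Bytes.BYTES_COMPARATOR);
RemoteIterator<LocatedFileStatus> it = fs.listFiles(keysDir, false);
while (it.hasNext()) {
  LocatedFileStatus status = it.next();
  if (status.isFile()) {
    missing.addAll(readFileToSearch(conf, fs, status));
  }
}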
Example 2: readFileToSearch
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
private static SortedSet<byte[]> readFileToSearch(final Configuration conf,
    final FileSystem fs, final LocatedFileStatus keyFileStatus) throws IOException,
    InterruptedException {
  SortedSet<byte[]> result = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  // Collect the rows whose value is flagged Counts.UNDEFINED; those are the missing entries.
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  try (SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader rr =
      new SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader()) {
    InputSplit is =
        new FileSplit(keyFileStatus.getPath(), 0, keyFileStatus.getLen(), new String[] {});
    rr.initialize(is, context);
    while (rr.nextKeyValue()) {
      BytesWritable bw = rr.getCurrentValue();
      if (Verify.VerifyReducer.whichType(bw.getBytes()) == Verify.Counts.UNDEFINED) {
        // Copy only the valid prefix: getBytes() can return a backing array larger than getLength().
        BytesWritable currentKey = rr.getCurrentKey();
        byte[] key = new byte[currentKey.getLength()];
        System.arraycopy(currentKey.getBytes(), 0, key, 0, currentKey.getLength());
        result.add(key);
      }
    }
  }
  return result;
}
Example 3: addInputPath
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, org.apache.hadoop.fs.Path)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void addInputPath(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.addInputPath(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 4: addInputPaths
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for {@link SequenceFileAsBinaryInputFormat#addInputPaths(org.apache.hadoop.mapreduce.Job, String)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void addInputPaths(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.addInputPaths(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
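Both wrappers delegate, via FileInputFormatHelper, to static methods that SequenceFileAsBinaryInputFormat inherits from FileInputFormat. The equivalent direct Java calls look like the sketch below; the paths are invented for illustration.
Job job = Job.getInstance(new Configuration(), "example");
SequenceFileAsBinaryInputFormat.addInputPath(job, new Path("/data/part-one"));
// addInputPaths accepts a comma-separated list of paths in a single string.
SequenceFileAsBinaryInputFormat.addInputPaths(job, "/data/part-two,/data/part-three");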
Example 5: getInputPathFilter
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#getInputPathFilter(org.apache.hadoop.mapreduce.JobContext)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return the class name of the input path filter, or undefined if none is set
 */
@JSStaticFunction
public static Object getInputPathFilter(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  return FileInputFormatHelper.getInputPathFilter(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 6: getInputPaths
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for {@link SequenceFileAsBinaryInputFormat#getInputPaths(org.apache.hadoop.mapreduce.JobContext)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return the array of configured input paths
 */
@JSStaticFunction
public static Object getInputPaths(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  return FileInputFormatHelper.getInputPaths(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
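On the getter side, the configured paths can be read straight back off the job, since Job implements JobContext. A small illustrative sketch, reusing the job variable from the sketch above:
Path[] configured = SequenceFileAsBinaryInputFormat.getInputPaths(job);
for (Path p : configured) {
  System.out.println("configured input path: " + p);
}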
Example 7: getMaxSplitSize
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for {@link SequenceFileAsBinaryInputFormat#getMaxSplitSize(org.apache.hadoop.mapreduce.JobContext)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return the max split size
 */
@JSStaticFunction
public static Object getMaxSplitSize(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  return FileInputFormatHelper.getMaxSplitSize(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 8: getMinSplitSize
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for {@link SequenceFileAsBinaryInputFormat#getMinSplitSize(org.apache.hadoop.mapreduce.JobContext)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return the min split size
 */
@JSStaticFunction
public static Object getMinSplitSize(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  return FileInputFormatHelper.getMinSplitSize(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 9: setInputPathFilter
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#setInputPathFilter(org.apache.hadoop.mapreduce.Job, Class)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void setInputPathFilter(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.setInputPathFilter(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 10: setInputPaths
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#setInputPaths(org.apache.hadoop.mapreduce.Job,
 * org.apache.hadoop.fs.Path...)} and
 * {@link SequenceFileAsBinaryInputFormat#setInputPaths(org.apache.hadoop.mapreduce.Job, String)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void setInputPaths(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.setInputPaths(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 11: setMaxInputSplitSize
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#setMaxInputSplitSize(org.apache.hadoop.mapreduce.Job, long)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void setMaxInputSplitSize(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.setMaxInputSplitSize(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
Example 12: setMinInputSplitSize
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; // import the required package/class
/**
 * Java wrapper for
 * {@link SequenceFileAsBinaryInputFormat#setMinInputSplitSize(org.apache.hadoop.mapreduce.Job, long)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 */
@JSStaticFunction
public static void setMinInputSplitSize(final Context ctx, final Scriptable thisObj, final Object[] args,
    final Function func) {
  FileInputFormatHelper.setMinInputSplitSize(SequenceFileAsBinaryInputFormat.class, ctx, thisObj, args);
}
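For completeness, the setter pair from Examples 11 and 12 maps to the direct calls below; the 64 MB and 256 MB bounds are arbitrary illustrations. With SequenceFiles the effective record boundaries still snap to the file's sync markers, so these values steer split sizing rather than fix it exactly.
SequenceFileAsBinaryInputFormat.setMinInputSplitSize(job, 64L * 1024 * 1024);  // at least ~64 MB per split
SequenceFileAsBinaryInputFormat.setMaxInputSplitSize(job, 256L * 1024 * 1024); // at most ~256 MB per split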