This article collects typical usage examples of the Java method org.apache.hadoop.io.ByteWritable.get. If you are wondering what ByteWritable.get does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.ByteWritable.
A total of 7 code examples of ByteWritable.get are shown below, ordered by popularity by default.
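Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what ByteWritable.get does: it simply returns the wrapped primitive byte.

import org.apache.hadoop.io.ByteWritable;

public class ByteWritableGetSketch {
    public static void main(String[] args) {
        // Wrap a primitive byte in a Hadoop Writable.
        ByteWritable w = new ByteWritable((byte) 42);
        // get() returns the wrapped primitive value unchanged.
        byte b = w.get();
        System.out.println(b);           // 42
        // The wrapped value can be replaced in place with set().
        w.set((byte) 7);
        System.out.println(w.get());     // 7
    }
}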
Example 1: getPartition
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
public int getPartition(ByteWritable key, RowNumberWritable value, int numPartitions) {
    // Counter-marker keys carry their target partition inside the value;
    // all other keys are partitioned based on the value itself.
    if (key.get() == (byte) RowNumberJob.COUNTER_MARKER) {
        return value.getPartition();
    } else {
        return Partitioner.partitionForValue(value, numPartitions);
    }
}
Example 2: configure
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
@SuppressWarnings("deprecation")
public void configure(JobConf job)
{
    super.configure(job);
    //get the number of columns per block
    //load the offset mapping
    byte matrixIndex = representativeMatrixes.get(0);
    try
    {
        Path thisPath = new Path(job.get(MRConfigurationNames.MR_MAP_INPUT_FILE));
        FileSystem fs = IOUtilFunctions.getFileSystem(thisPath, job);
        thisPath = thisPath.makeQualified(fs);
        String filename = thisPath.toString();
        Path headerPath = new Path(job.getStrings(CSVReblockMR.SMALLEST_FILE_NAME_PER_INPUT)[matrixIndex]).makeQualified(fs);
        if(headerPath.toString().equals(filename))
            headerFile = true;

        ByteWritable key = new ByteWritable();
        OffsetCount value = new OffsetCount();
        Path p = new Path(job.get(CSVReblockMR.ROWID_FILE_NAME));
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(fs, p, job);
            while (reader.next(key, value)) {
                // key.get() identifies which input matrix this offset entry belongs to
                if(key.get() == matrixIndex && filename.equals(value.filename))
                    offsetMap.put(value.fileOffset, value.count);
            }
        }
        finally {
            IOUtilFunctions.closeSilently(reader);
        }
    }
    catch (IOException e) {
        throw new RuntimeException(e);
    }

    CSVReblockInstruction ins = csv_reblock_instructions.get(0).get(0);
    _delim = ins.delim;
    ignoreFirstLine = ins.hasHeader;

    idxRow = new IndexedBlockRow();
    int maxBclen = 0;
    for(ArrayList<CSVReblockInstruction> insv : csv_reblock_instructions)
        for(CSVReblockInstruction in : insv)
        {
            if(maxBclen < in.bclen)
                maxBclen = in.bclen;
        }

    //always dense since common csv usecase
    idxRow.getRow().data.reset(1, maxBclen, false);
}
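Example 2 scans a SequenceFile with the deprecated SequenceFile.Reader(FileSystem, Path, Configuration) constructor and filters entries by key.get(). Below is a minimal sketch of the same key-based scan using the newer option-based reader; the class and method names (OffsetScanSketch, scan) are illustrative and not part of the original code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class OffsetScanSketch {
    // Print every value whose ByteWritable key matches matrixIndex.
    static void scan(Configuration conf, Path p, byte matrixIndex) throws IOException {
        ByteWritable key = new ByteWritable();
        try (SequenceFile.Reader reader =
                 new SequenceFile.Reader(conf, SequenceFile.Reader.file(p))) {
            // Instantiate a value of whatever Writable type the file was written with.
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                // key.get() exposes the raw byte used as the matrix index.
                if (key.get() == matrixIndex) {
                    System.out.println(value);
                }
            }
        }
    }
}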
Example 3: unwrap
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
public Integer unwrap(ByteWritable writableValue) {
    // widen the byte to an int (value-preserving)
    return (int) writableValue.get();
}
Example 4: unwrap
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
public Long unwrap(ByteWritable writableValue) {
    // widen the byte to a long (value-preserving)
    return (long) writableValue.get();
}
Example 5: unwrap
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
public Short unwrap(ByteWritable writableValue) {
    // widen the byte to a short (value-preserving)
    return (short) writableValue.get();
}
Example 6: unwrap
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
@Override
public Byte unwrap(ByteWritable writableValue) {
    // the byte is auto-boxed to a Byte
    return writableValue.get();
}
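Examples 3 through 6 are variations of one pattern: read the primitive with get() and widen (or box) it to the target type, which is always value-preserving for a byte. Here is a self-contained sketch of the same conversions; the class and method names are made up for illustration.

import org.apache.hadoop.io.ByteWritable;

public class UnwrapSketch {
    // Stand-ins for the unwrap methods shown in Examples 3-6.
    static Integer unwrapToInteger(ByteWritable w) { return (int) w.get(); }
    static Long unwrapToLong(ByteWritable w) { return (long) w.get(); }
    static Short unwrapToShort(ByteWritable w) { return (short) w.get(); }
    static Byte unwrapToByte(ByteWritable w) { return w.get(); }

    public static void main(String[] args) {
        ByteWritable w = new ByteWritable((byte) -5);
        // Widening conversions preserve the value, including the sign.
        System.out.println(unwrapToInteger(w)); // -5
        System.out.println(unwrapToLong(w));    // -5
        System.out.println(unwrapToShort(w));   // -5
        System.out.println(unwrapToByte(w));    // -5
    }
}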
Example 7: toJavaScript
import org.apache.hadoop.io.ByteWritable; // the package/class this method depends on
/**
 * Takes in a {@link ByteWritable} and returns a {@code byte}.
 *
 * @param scope the JavaScript scope
 * @param writable the value to convert
 *
 * @return the JavaScript equivalent of the wrapped byte
 */
@Override
public Object toJavaScript(final Scriptable scope, final ByteWritable writable) {
    return writable.get();
}