This article collects typical usage examples of the Scala class org.apache.hadoop.fs.FSDataInputStream. If you are wondering what the FSDataInputStream class is for and how to use it in Scala, the curated class examples here may help.
Two code examples of the FSDataInputStream class are shown below, sorted by popularity.
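Before the examples, here is a minimal sketch of FSDataInputStream itself: opening a file, seeking to an absolute offset, and doing a positioned read. This sketch is illustrative only and is not taken from the examples below; the path /tmp/example.bin is a hypothetical placeholder.

// A minimal sketch of FSDataInputStream usage, assuming a readable file
// at the hypothetical path /tmp/example.bin.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object FSDataInputStreamSketch {
  def main(args: Array[String]): Unit = {
    val fs = FileSystem.get(new Configuration())
    val in = fs.open(new Path("/tmp/example.bin"))
    try {
      in.seek(16L) // move the stream to byte offset 16
      val buf = new Array[Byte](8)
      // Positioned read: fills buf from offset 0 of the file without
      // moving the stream's current position.
      in.readFully(0L, buf, 0, buf.length)
    } finally {
      in.close()
    }
  }
}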
Example 1: DataBuffer
//Package declaration and imported dependency classes
package com.esri.gdb

import java.nio.{ByteBuffer, ByteOrder}

import org.apache.hadoop.fs.FSDataInputStream

// Wraps an FSDataInputStream in a reusable little-endian ByteBuffer.
class DataBuffer(dataInput: FSDataInputStream) extends Serializable {

  private var bytes = new Array[Byte](1024)
  private var byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)

  // Read `length` bytes from the stream into the backing array, growing it
  // if needed, and return the wrapping buffer positioned at 0.
  def readBytes(length: Int): ByteBuffer = {
    if (length > bytes.length) {
      bytes = new Array[Byte](length)
      byteBuffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN)
    }
    else {
      byteBuffer.clear()
    }
    dataInput.readFully(bytes, 0, length)
    byteBuffer
  }

  // Move the underlying stream to an absolute position; returns this for chaining.
  def seek(position: Long): DataBuffer = {
    dataInput.seek(position)
    this
  }

  def close(): Unit = {
    dataInput.close()
  }
}

object DataBuffer {
  def apply(dataInput: FSDataInputStream): DataBuffer = {
    new DataBuffer(dataInput)
  }
}
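A minimal usage sketch for DataBuffer (not part of the original example): open a file on HDFS and decode a little-endian Int through the returned ByteBuffer. The path /data/sample.gdb is a hypothetical placeholder.

// Usage sketch, assuming a readable file at the hypothetical
// path /data/sample.gdb.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object DataBufferDemo {
  def main(args: Array[String]): Unit = {
    val fs = FileSystem.get(new Configuration())
    val buffer = DataBuffer(fs.open(new Path("/data/sample.gdb")))
    try {
      // readBytes fills the backing array and returns the wrapping
      // ByteBuffer, so the first 4 bytes decode as a little-endian Int.
      val header = buffer.readBytes(4).getInt
      println(s"header = $header")
    } finally {
      buffer.close()
    }
  }
}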
Example 2: HdfsIndexInput
//Package declaration and imported dependency classes
package top.myetl.lucenerdd.store

import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}
import org.apache.lucene.store.{BufferedIndexInput, IndexInput}
import org.apache.spark.Logging

// A Lucene IndexInput that reads an index file stored on HDFS.
class HdfsIndexInput(fileSystem: FileSystem, path: Path, resourceDescription: String)
  extends BufferedIndexInput(resourceDescription) with Logging {

  logInfo("hdfsIndexInput: " + path)

  val fileLength: Long = fileSystem.getFileStatus(path).getLen
  val inputStream: FSDataInputStream = fileSystem.open(path)

  // Flags clones so that only the original instance closes the shared stream.
  var isClone: Boolean = false

  // No-op: readInternal performs positioned reads, so no explicit seek is needed.
  override def seekInternal(pos: Long): Unit = {
  }

  override def readInternal(b: Array[Byte], offset: Int, length: Int): Unit = {
    inputStream.readFully(getFilePointer, b, offset, length)
  }

  override def length(): Long = fileLength

  override def close(): Unit = {
    if (!isClone) {
      logDebug("close HdfsIndexInput .." + path)
      inputStream.close()
    }
  }

  // Clones share the underlying stream; mark them so close() is a no-op on them.
  override def clone(): BufferedIndexInput = {
    logDebug("clone HdfsIndexInput " + path)
    val in: HdfsIndexInput = super.clone().asInstanceOf[HdfsIndexInput]
    in.isClone = true
    in
  }
}
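A minimal usage sketch for HdfsIndexInput (not part of the original example): read the first bytes of a Lucene index file stored on HDFS. The path /index/_0.cfs and the resource description string are hypothetical placeholders.

// Usage sketch, assuming a Lucene index file exists at the hypothetical
// HDFS path /index/_0.cfs.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object HdfsIndexInputDemo {
  def main(args: Array[String]): Unit = {
    val fs = FileSystem.get(new Configuration())
    val path = new Path("/index/_0.cfs")
    val input = new HdfsIndexInput(fs, path, s"HdfsIndexInput(path=$path)")
    try {
      val firstBytes = new Array[Byte](8)
      // readBytes is inherited from BufferedIndexInput and ultimately
      // delegates to readInternal above.
      input.readBytes(firstBytes, 0, firstBytes.length)
      println(s"read ${firstBytes.length} bytes, file length = ${input.length()}")
    } finally {
      input.close()
    }
  }
}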