本文整理汇总了Scala中org.apache.parquet.filter2.compat.FilterCompat类的典型用法代码示例。如果您正苦于以下问题:Scala FilterCompat类的具体用法?Scala FilterCompat怎么用?Scala FilterCompat使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了FilterCompat类的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Scala代码示例。
示例1: apply
// Set the package name and import the required classes
package io.eels.component.parquet.avro
import io.eels.Predicate
import io.eels.component.parquet.{ParquetPredicateBuilder, ParquetReaderConfig}
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.avro.{AvroParquetReader, AvroReadSupport}
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.hadoop.ParquetReader
/**
 * Opens a Parquet reader that yields Avro [[GenericRecord]]s for the file at `path`.
 *
 * @param path             location of the parquet file to read
 * @param predicate        optional row filter; when present it is compiled into a parquet
 *                         [[FilterCompat.Filter]], otherwise no filtering is applied
 * @param projectionSchema optional Avro schema used to project a subset of columns
 * @param conf             base Hadoop configuration; it is copied, never mutated
 */
def apply(path: Path,
          predicate: Option[Predicate],
          projectionSchema: Option[Schema])(implicit conf: Configuration): ParquetReader[GenericRecord] = {

  // Projections are passed to the reader by setting the projected schema
  // onto a private copy of the supplied configuration.
  def configuration(): Configuration = {
    val readerConf = new Configuration(conf)
    projectionSchema.foreach { schema =>
      AvroReadSupport.setAvroReadSchema(readerConf, schema)
      AvroReadSupport.setRequestedProjection(readerConf, schema)
    }
    //conf.set(ParquetInputFormat.DICTIONARY_FILTERING_ENABLED, "true")
    readerConf.set(org.apache.parquet.hadoop.ParquetFileReader.PARQUET_READ_PARALLELISM, config.parallelism.toString)
    readerConf
  }

  // A real filter is only installed when the caller supplied a predicate;
  // otherwise the no-op filter lets every record through.
  def filter(): FilterCompat.Filter =
    predicate.fold(FilterCompat.NOOP)(p => FilterCompat.get(ParquetPredicateBuilder.build(p)))

  AvroParquetReader.builder[GenericRecord](path)
    .withCompatibility(false)
    .withConf(configuration())
    .withFilter(filter())
    .build()
    .asInstanceOf[ParquetReader[GenericRecord]]
}
}
示例2: apply
// Set the package name and import the required classes
package io.eels.component.parquet
import com.sksamuel.exts.Logging
import io.eels.{Predicate, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.hadoop.api.ReadSupport
import org.apache.parquet.hadoop.{ParquetInputFormat, ParquetReader}
import org.apache.parquet.schema.Type
/**
 * Opens a Parquet reader that yields eel [[Row]]s for the file at `path`.
 *
 * @param path                location of the parquet file to read
 * @param predicate           optional row filter; when present it is compiled into a parquet
 *                            [[FilterCompat.Filter]], otherwise no filtering is applied
 * @param readSchema          optional parquet read schema used to project a subset of columns
 * @param dictionaryFiltering whether parquet dictionary filtering should be enabled
 * @param conf                base Hadoop configuration; it is copied, never mutated
 */
def apply(path: Path,
          predicate: Option[Predicate],
          readSchema: Option[Type],
          dictionaryFiltering: Boolean)(implicit conf: Configuration): ParquetReader[Row] = {
  logger.debug(s"Opening parquet reader for $path")

  // Projections are passed to the reader by setting the projected schema
  // onto a private copy of the supplied configuration.
  def configuration(): Configuration = {
    val readerConf = new Configuration(conf)
    readSchema.foreach { schema =>
      readerConf.set(ReadSupport.PARQUET_READ_SCHEMA, schema.toString)
    }
    readerConf.set(ParquetInputFormat.DICTIONARY_FILTERING_ENABLED, dictionaryFiltering.toString)
    readerConf.set(org.apache.parquet.hadoop.ParquetFileReader.PARQUET_READ_PARALLELISM, config.parallelism.toString)
    readerConf
  }

  // A real filter is only installed when the caller supplied a predicate;
  // otherwise the no-op filter lets every record through.
  def filter(): FilterCompat.Filter =
    predicate.fold(FilterCompat.NOOP)(p => FilterCompat.get(ParquetPredicateBuilder.build(p)))

  ParquetReader.builder(new RowReadSupport, path)
    .withConf(configuration())
    .withFilter(filter())
    .build()
}
}