This article collects typical usage examples of the Java method org.apache.spark.sql.types.StructType.size. If you have been wondering what StructType.size does, how to use it, or where to find working examples, the curated code samples below may help. You can also read more about the enclosing class, org.apache.spark.sql.types.StructType.

Below are 3 code examples of the StructType.size method, sorted by popularity.
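Before the examples, a note on what the method actually returns: StructType.size() reports the number of top-level fields in the schema. A minimal sketch (the schema and field names here are illustrative, not taken from the examples below):

import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

StructType schema = new StructType()
    .add("id", DataTypes.LongType)
    .add("name", DataTypes.StringType);
// size() counts only top-level fields; nested struct fields are not flattened.
int numFields = schema.size(); // 2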
Example 1: UnsafeFixedWidthAggregationMap
import org.apache.spark.sql.types.StructType; // import the package/class this method depends on
/**
 * Create a new UnsafeFixedWidthAggregationMap.
 *
 * @param emptyAggregationBuffer the default value for new keys (a "zero" of the agg. function)
 * @param aggregationBufferSchema the schema of the aggregation buffer, used for row conversion.
 * @param groupingKeySchema the schema of the grouping key, used for row conversion.
 * @param taskMemoryManager the memory manager used to allocate our Unsafe memory structures.
 * @param initialCapacity the initial capacity of the map (a sizing hint to avoid re-hashing).
 * @param pageSizeBytes the data page size, in bytes; limits the maximum record size.
 * @param enablePerfMetrics if true, performance metrics will be recorded (has minor perf impact)
 */
public UnsafeFixedWidthAggregationMap(
    InternalRow emptyAggregationBuffer,
    StructType aggregationBufferSchema,
    StructType groupingKeySchema,
    TaskMemoryManager taskMemoryManager,
    int initialCapacity,
    long pageSizeBytes,
    boolean enablePerfMetrics) {
  this.aggregationBufferSchema = aggregationBufferSchema;
  this.currentAggregationBuffer = new UnsafeRow(aggregationBufferSchema.size());
  this.groupingKeyProjection = UnsafeProjection.createFromSchema(groupingKeySchema);
  this.groupingKeySchema = groupingKeySchema;
  this.map =
      new BytesToBytesMap(taskMemoryManager, initialCapacity, pageSizeBytes, enablePerfMetrics);
  this.enablePerfMetrics = enablePerfMetrics;
  // Initialize the buffer for the aggregation value.
  final UnsafeProjection valueProjection = UnsafeProjection.createFromSchema(aggregationBufferSchema);
  this.emptyAggregationBuffer = valueProjection.apply(emptyAggregationBuffer).getBytes();
}
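The point to notice above is that size() supplies the field count the UnsafeRow aggregation buffer is created with. A standalone sketch of that relationship, using a hypothetical two-field buffer schema:

import org.apache.spark.sql.catalyst.expressions.UnsafeRow;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

StructType bufferSchema = new StructType()
    .add("count", DataTypes.LongType)
    .add("sum", DataTypes.DoubleType);
// An UnsafeRow must be constructed with exactly as many fields as its schema declares,
// which is what aggregationBufferSchema.size() provides in the constructor above.
UnsafeRow buffer = new UnsafeRow(bufferSchema.size());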
Example 2: getPrefixComparator
import org.apache.spark.sql.types.StructType; // import the package/class this method depends on
/**
 * Creates the prefix comparator for the first field in the given schema, in ascending order.
 */
public static PrefixComparator getPrefixComparator(StructType schema) {
  if (schema.size() != 0) {
    return getPrefixComparator(
        new SortOrder(new BoundReference(0, schema.get(0).dataType),
            SortOrder.SortDirection.Ascending));
  } else {
    return (a, b) -> 0;
  }
}
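A hypothetical call site for the method above, showing both branches. The PrefixComparator import below is the stock Spark location and may differ in the project this example was taken from:

import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.util.collection.unsafe.sort.PrefixComparator;

// Non-empty schema: size() != 0, so we get a comparator on field 0, ascending.
StructType keySchema = new StructType().add("k", DataTypes.IntegerType);
PrefixComparator cmp = getPrefixComparator(keySchema);
// Empty schema: size() == 0, so the no-op comparator treats all prefixes as equal.
PrefixComparator noop = getPrefixComparator(new StructType());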
Example 3: UnsafeKVExternalSorter
import org.apache.spark.sql.types.StructType; // import the package/class this method depends on
public UnsafeKVExternalSorter(
    StructType keySchema,
    StructType valueSchema,
    //BlockManager blockManager,
    long pageSizeBytes,
    @Nullable BytesToBytesMap map) throws IOException {
  this.keySchema = keySchema;
  this.valueSchema = valueSchema;
  final TaskContext taskContext = TaskContext.get();
  prefixComputer = SortPrefixUtils.createPrefixGenerator(keySchema);
  PrefixComparator prefixComparator = SortPrefixUtils.getPrefixComparator(keySchema);
  BaseOrdering ordering = BaseOrdering.create(keySchema);
  KVComparator recordComparator = new KVComparator(ordering, keySchema.size());
  TaskMemoryManager taskMemoryManager = taskContext.taskMemoryManager();

  if (map == null) {
    sorter = UnsafeExternalSorter.create(
        taskMemoryManager,
        //blockManager,
        taskContext,
        recordComparator,
        prefixComparator,
        /* initialSize */ 4096,
        pageSizeBytes);
  } else {
    // During spilling, the array in the map will not be used, so we can borrow it and use it
    // as the underlying array for the in-memory sorter (it is always large enough).
    // Since we will not grow the array, it is fine to pass `null` as the consumer.
    final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter(
        null, taskMemoryManager, recordComparator, prefixComparator, map.getArray());

    // We cannot use the destructive iterator here because we are reusing the existing memory
    // pages in BytesToBytesMap to hold records during sorting.
    // The only new memory we are allocating is the pointer/prefix array.
    BytesToBytesMap.MapIterator iter = map.iterator();
    final int numKeyFields = keySchema.size();
    UnsafeRow row = new UnsafeRow(numKeyFields);
    while (iter.hasNext()) {
      final BytesToBytesMap.Location loc = iter.next();
      final Object baseObject = loc.getKeyAddress().getBaseObject();
      final long baseOffset = loc.getKeyAddress().getBaseOffset();

      // Get the encoded memory address: baseObject + baseOffset point to the beginning of
      // the key data in the map, but the KV-pair's length data is stored in the word
      // immediately before that address.
      MemoryBlock page = loc.getMemoryPage();
      long address = taskMemoryManager.encodePageNumberAndOffset(page, baseOffset - 8);

      // Compute the prefix for the key.
      row.pointTo(baseObject, baseOffset, loc.getKeyLength());
      final long prefix = prefixComputer.computePrefix(row);

      inMemSorter.insertRecord(address, prefix);
    }

    sorter = UnsafeExternalSorter.createWithExistingInMemorySorter(
        taskMemoryManager,
        //blockManager,
        taskContext,
        new KVComparator(ordering, keySchema.size()),
        prefixComparator,
        /* initialSize */ 4096,
        pageSizeBytes,
        inMemSorter);

    // Reset the map so we can reuse it to insert new records. The inMemSorter will not be
    // used anymore, so its underlying array can be handed back to the map.
    map.reset();
  }
}
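A hypothetical construction of the sorter above; the schemas and page size are illustrative values. Note that the constructor calls TaskContext.get(), so this only works inside a running Spark task:

import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

StructType keySchema = new StructType().add("key", DataTypes.LongType);
StructType valueSchema = new StructType().add("value", DataTypes.DoubleType);
// Passing null for the map makes the sorter allocate fresh pages; passing an existing
// BytesToBytesMap instead reuses its memory pages, as in the else-branch above.
UnsafeKVExternalSorter sorter =
    new UnsafeKVExternalSorter(keySchema, valueSchema, 64 * 1024 * 1024L, null);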