本文整理汇总了Golang中github.com/sburnett/transformer.GroupRecords函数的典型用法代码示例。如果您正苦于以下问题:Golang GroupRecords函数的具体用法?Golang GroupRecords怎么用?Golang GroupRecords使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GroupRecords函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: joinMacWithFlowId
func joinMacWithFlowId(inputChan, outputChan chan *store.Record) {
var (
session SessionKey
localIp []byte
)
grouper := transformer.GroupRecords(inputChan, &session, &localIp)
for grouper.NextGroup() {
var macAddress []byte
for grouper.NextRecord() {
record := grouper.Read()
switch record.DatabaseIndex {
case 0:
lex.DecodeOrDie(record.Value, &macAddress)
case 1:
if macAddress != nil {
var (
remoteIp []byte
sequenceNumber int32
timestamp int64
flowId int32
)
lex.DecodeOrDie(record.Key, &sequenceNumber, &remoteIp, ×tamp, &flowId)
outputChan <- &store.Record{
Key: lex.EncodeOrDie(&session, macAddress, remoteIp, timestamp, int64(math.MaxInt64), sequenceNumber, flowId),
}
}
}
}
}
}
示例2: joinDomainsWithWhitelist
// joinDomainsWithWhitelist matches looked-up domains against a per-session
// whitelist. The whitelist record (DatabaseIndex 0) is decoded and sorted;
// each subsequent domain record (DatabaseIndex 1) is checked for any
// whole-label suffix (the full domain, or the part after a '.') that appears
// in the whitelist, and a record is emitted for each match.
func joinDomainsWithWhitelist(inputChan, outputChan chan *store.Record) {
	var session SessionKey
	grouper := transformer.GroupRecords(inputChan, &session)
	for grouper.NextGroup() {
		var whitelist []string
		for grouper.NextRecord() {
			record := grouper.Read()
			if record.DatabaseIndex == 0 {
				lex.DecodeOrDie(record.Value, &whitelist)
				sort.Sort(sort.StringSlice(whitelist))
				continue
			}
			if record.DatabaseIndex != 1 || whitelist == nil {
				continue
			}
			var domain string
			remainder := lex.DecodeOrDie(record.Key, &domain)
			for start := 0; start < len(domain); start++ {
				// Only the full domain or a suffix beginning right after a
				// dot counts as a candidate.
				if start > 0 && domain[start-1] != '.' {
					continue
				}
				suffix := domain[start:]
				pos := sort.SearchStrings(whitelist, suffix)
				if pos < len(whitelist) && whitelist[pos] == suffix {
					outputChan <- &store.Record{
						Key: lex.Concatenate(grouper.CurrentGroupPrefix, remainder, lex.EncodeOrDie(whitelist[pos])),
					}
				}
			}
		}
	}
}
示例3: summarizeFilesystemUsage
func summarizeFilesystemUsage(inputChan, outputChan chan *store.Record) {
var filesystem string
var timestamp int64
grouper := transformer.GroupRecords(inputChan, &filesystem, ×tamp)
for grouper.NextGroup() {
usage := make(map[string]int64)
for grouper.NextRecord() {
record := grouper.Read()
var node string
lex.DecodeOrDie(record.Key, &node)
var used int64
lex.DecodeOrDie(record.Value, &used)
if used > usage[node] {
usage[node] = used
}
}
for node, used := range usage {
outputChan <- &store.Record{
Key: lex.EncodeOrDie(filesystem, timestamp, node),
Value: lex.EncodeOrDie(used),
}
}
}
}
示例4: joinAddressIdsWithMacAddresses
// joinAddressIdsWithMacAddresses replaces address IDs with their MAC
// addresses: the MAC record (DatabaseIndex 0) supplies the raw MAC bytes for
// the (session, addressId) group, and each subsequent record (DatabaseIndex 1)
// is re-emitted with the session prefix, MAC, and the key remainder after the
// sequence number.
func joinAddressIdsWithMacAddresses(inputChan, outputChan chan *store.Record) {
	var (
		session   SessionKey
		addressId int32
	)
	grouper := transformer.GroupRecords(inputChan, &session, &addressId)
	for grouper.NextGroup() {
		var mac []byte
		for grouper.NextRecord() {
			record := grouper.Read()
			if record.DatabaseIndex == 0 {
				mac = record.Value
				continue
			}
			if record.DatabaseIndex != 1 || mac == nil {
				continue
			}
			var unusedSequenceNumber int32
			rest := lex.DecodeOrDie(record.Key, &unusedSequenceNumber)
			outputChan <- &store.Record{
				Key: lex.Concatenate(lex.EncodeOrDie(&session), mac, rest),
			}
		}
	}
}
示例5: flattenLookupsToNodeMacAndTimestamp
// flattenLookupsToNodeMacAndTimestamp aggregates lookup counts for each
// (node, MAC, domain) group into hourly buckets, then emits one record per
// bucket with the summed count.
func flattenLookupsToNodeMacAndTimestamp(inputChan, outputChan chan *store.Record) {
	var nodeId, macAddress, domain string
	grouper := transformer.GroupRecords(inputChan, &nodeId, &macAddress, &domain)
	for grouper.NextGroup() {
		countsPerHour := make(map[int64]int64)
		for grouper.NextRecord() {
			record := grouper.Read()
			var (
				anonymizationContext string
				sessionId            int64
				sequenceNumber       int32
			)
			lex.DecodeOrDie(record.Key, &anonymizationContext, &sessionId, &sequenceNumber)
			var count int64
			lex.DecodeOrDie(record.Value, &count)
			// Derive the sample time from the session start plus 30 seconds
			// per sequence number, truncated to the hour.
			bucket := truncateTimestampToHour(sessionId + convertSecondsToMicroseconds(30)*int64(sequenceNumber))
			countsPerHour[bucket] += count
		}
		for bucket, total := range countsPerHour {
			outputChan <- &store.Record{
				Key:   lex.EncodeOrDie(nodeId, macAddress, domain, bucket),
				Value: lex.EncodeOrDie(total),
			}
		}
	}
}
示例6: joinMacWithLookups
// joinMacWithLookups rekeys DNS lookup records by MAC address: the MAC record
// (DatabaseIndex 0) is decoded for the (session, addressId) group, and each
// subsequent lookup record (DatabaseIndex 1) is re-emitted keyed by node, MAC,
// domain, anonymization context, session ID, and sequence number.
func joinMacWithLookups(inputChan, outputChan chan *store.Record) {
	var (
		session   SessionKey
		addressId int32
	)
	grouper := transformer.GroupRecords(inputChan, &session, &addressId)
	for grouper.NextGroup() {
		var mac []byte
		for grouper.NextRecord() {
			record := grouper.Read()
			if record.DatabaseIndex == 0 {
				lex.DecodeOrDie(record.Value, &mac)
				continue
			}
			if record.DatabaseIndex != 1 || mac == nil {
				continue
			}
			var (
				sequenceNumber int32
				domain         string
			)
			lex.DecodeOrDie(record.Key, &sequenceNumber, &domain)
			outputChan <- &store.Record{
				Key:   lex.EncodeOrDie(session.NodeId, mac, domain, session.AnonymizationContext, session.SessionId, sequenceNumber),
				Value: record.Value,
			}
		}
	}
}
示例7: aggregateStatisticsReducer
// aggregateStatisticsReducer sums AggregateStatistics protobufs for each node
// ID and emits one combined, re-marshaled record per node.
func aggregateStatisticsReducer(inputChan, outputChan chan *store.Record) {
	var nodeId []byte
	grouper := transformer.GroupRecords(inputChan, &nodeId)
	for grouper.NextGroup() {
		totals := newAggregateStatistics()
		for grouper.NextRecord() {
			record := grouper.Read()
			var current AggregateStatistics
			if err := proto.Unmarshal(record.Value, &current); err != nil {
				panic(err)
			}
			// Accumulate every counter into the running totals.
			*totals.Traces += *current.Traces
			*totals.Packets += *current.Packets
			*totals.PacketSeriesDropped += *current.PacketSeriesDropped
			*totals.PcapDropped += *current.PcapDropped
			*totals.InterfaceDropped += *current.InterfaceDropped
			*totals.Flows += *current.Flows
			*totals.DroppedFlows += *current.DroppedFlows
			*totals.Bytes += *current.Bytes
		}
		encoded, err := proto.Marshal(totals)
		if err != nil {
			panic(err)
		}
		outputChan <- &store.Record{
			Key:   lex.EncodeOrDie(nodeId),
			Value: encoded,
		}
	}
}
示例8: inferReboots
func inferReboots(inputChan, outputChan chan *store.Record) {
var node string
grouper := transformer.GroupRecords(inputChan, &node)
for grouper.NextGroup() {
lastUptime := int64(-1)
maxReboot := int64(-1)
for grouper.NextRecord() {
record := grouper.Read()
var timestamp int64
lex.DecodeOrDie(record.Key, ×tamp)
var uptime int64
lex.DecodeOrDie(record.Value, &uptime)
if lastUptime >= 0 && lastUptime > uptime {
if maxReboot > -1 {
outputChan <- &store.Record{
Key: lex.EncodeOrDie(node, maxReboot),
}
}
maxReboot = int64(-1)
}
reboot := timestamp - uptime
if maxReboot < reboot {
maxReboot = reboot
}
lastUptime = uptime
}
if maxReboot > -1 {
outputChan <- &store.Record{
Key: lex.EncodeOrDie(node, maxReboot),
}
}
}
}
示例9: joinMacAndSizes
func joinMacAndSizes(inputChan, outputChan chan *store.Record) {
var session SessionKey
var flowId int32
grouper := transformer.GroupRecords(inputChan, &session, &flowId)
for grouper.NextGroup() {
var currentMacAddresses [][]byte
for grouper.NextRecord() {
record := grouper.Read()
if record.DatabaseIndex == 0 {
lex.DecodeOrDie(record.Value, ¤tMacAddresses)
continue
}
if currentMacAddresses == nil {
continue
}
var sequenceNumber int32
lex.DecodeOrDie(record.Key, &sequenceNumber)
var timestamps, sizes []int64
lex.DecodeOrDie(record.Value, ×tamps, &sizes)
if len(timestamps) != len(sizes) {
panic(fmt.Errorf("timestamps and sizes must be the same size"))
}
for _, currentMacAddress := range currentMacAddresses {
for idx, timestamp := range timestamps {
outputChan <- &store.Record{
Key: lex.EncodeOrDie(&session, currentMacAddress, timestamp, flowId, sequenceNumber),
Value: lex.EncodeOrDie(sizes[idx]),
}
}
}
}
}
}
示例10: summarizeSizesByTimestamp
func summarizeSizesByTimestamp(inputChan, outputChan chan *store.Record) {
quantileComputer := NewQuantileSample(101)
quantilesToKeep := []int{0, 1, 5, 10, 25, 50, 75, 90, 95, 99, 100}
var experiment, node string
var timestamp int64
grouper := transformer.GroupRecords(inputChan, &experiment, &node, ×tamp)
for grouper.NextGroup() {
for grouper.NextRecord() {
record := grouper.Read()
var statsValue StatsValue
lex.DecodeOrDie(record.Value, &statsValue)
quantileComputer.Append(statsValue.Size)
}
count := int64(quantileComputer.Count())
quantiles := quantileComputer.Quantiles()
quantileComputer.Reset()
statistics := []interface{}{count}
for idx := range quantilesToKeep {
statistics = append(statistics, quantiles[idx])
}
outputChan <- &store.Record{
Key: lex.EncodeOrDie(experiment, node, timestamp),
Value: lex.EncodeOrDie(statistics...),
}
}
}
示例11: summarizeInterarrivalTimes
func summarizeInterarrivalTimes(inputChan, outputChan chan *store.Record) {
quantileComputer := NewQuantileSample(21)
var experiment, node string
grouper := transformer.GroupRecords(inputChan, &experiment, &node)
for grouper.NextGroup() {
for grouper.NextRecord() {
record := grouper.Read()
var timestamp int64
lex.DecodeOrDie(record.Key, ×tamp)
quantileComputer.Append(timestamp)
}
count := int64(quantileComputer.Count())
quantiles := quantileComputer.Quantiles()
statistics := []interface{}{count}
for _, q := range quantiles {
statistics = append(statistics, q)
}
quantileComputer.Reset()
outputChan <- &store.Record{
Key: lex.EncodeOrDie(experiment, node),
Value: lex.EncodeOrDie(statistics...),
}
}
}
示例12: joinMacAndFlowId
// joinMacAndFlowId joins flow ID lists with the MAC address of the local IP.
// The MAC record (DatabaseIndex 0) supplies the raw MAC bytes for the
// (session, ipAddress) group; each later record's flow ID list is expanded
// into one output record per flow ID, keyed by session, flow ID, sequence
// number, and MAC.
func joinMacAndFlowId(inputChan, outputChan chan *store.Record) {
	var session SessionKey
	var ipAddress []byte
	grouper := transformer.GroupRecords(inputChan, &session, &ipAddress)
	for grouper.NextGroup() {
		var mac []byte
		for grouper.NextRecord() {
			record := grouper.Read()
			switch {
			case record.DatabaseIndex == 0:
				mac = record.Value
			case mac != nil:
				var sequenceNumber int32
				lex.DecodeOrDie(record.Key, &sequenceNumber)
				var flowIds []int32
				lex.DecodeOrDie(record.Value, &flowIds)
				for _, flowId := range flowIds {
					outputChan <- &store.Record{
						Key: lex.Concatenate(lex.EncodeOrDie(&session, flowId, sequenceNumber), mac),
					}
				}
			}
		}
	}
}
示例13: joinDomainsWithSizes
func joinDomainsWithSizes(inputChan, outputChan chan *store.Record) {
var (
session SessionKey
flowId int32
)
grouper := transformer.GroupRecords(inputChan, &session, &flowId)
for grouper.NextGroup() {
var domains, macAddresses [][]byte
for grouper.NextRecord() {
record := grouper.Read()
switch record.DatabaseIndex {
case 0:
lex.DecodeOrDie(record.Value, &domains, &macAddresses)
case 1:
if domains != nil && macAddresses != nil {
var (
sequenceNumber int32
timestamp int64
)
lex.DecodeOrDie(record.Key, &sequenceNumber, ×tamp)
for idx, domain := range domains {
outputChan <- &store.Record{
Key: lex.EncodeOrDie(session.NodeId, domain, timestamp, macAddresses[idx], session.AnonymizationContext, session.SessionId, flowId, sequenceNumber),
Value: record.Value,
}
}
}
}
}
}
}
示例14: aggregateStatisticsReduceBySession
// aggregateStatisticsReduceBySession combines AggregateStatistics protobufs
// for each session into one record. Most counters are simply summed, but the
// pcap and interface drop counters are treated as monotonic gauges that can
// wrap: a decrease between consecutive records is interpreted as a 32-bit
// counter wraparound, and a MaxUint32 baseline is added for each wrap.
func aggregateStatisticsReduceBySession(inputChan, outputChan chan *store.Record) {
var session SessionKey
grouper := transformer.GroupRecords(inputChan, &session)
for grouper.NextGroup() {
aggregateStatistics := newAggregateStatistics()
// Running maxima of the drop gauges within the current wrap epoch.
var pcapDropped, interfaceDropped int64
// Previous record's gauge values, used to detect wraparound.
var lastPcapDropped, lastInterfaceDropped int64
// Accumulated MaxUint32 offsets, one per detected wraparound.
var pcapDroppedBaseline, interfaceDroppedBaseline int64
for grouper.NextRecord() {
record := grouper.Read()
var statistics AggregateStatistics
if err := proto.Unmarshal(record.Value, &statistics); err != nil {
panic(err)
}
// Gauge went backwards: assume the 32-bit counter wrapped. Bank a
// MaxUint32 baseline and restart the epoch maximum from zero.
if lastPcapDropped > *statistics.PcapDropped {
pcapDroppedBaseline += math.MaxUint32
pcapDropped = 0
}
lastPcapDropped = *statistics.PcapDropped
pcapDropped = maxInt64(pcapDropped, *statistics.PcapDropped)
if lastInterfaceDropped > *statistics.InterfaceDropped {
interfaceDroppedBaseline += math.MaxUint32
interfaceDropped = 0
}
lastInterfaceDropped = *statistics.InterfaceDropped
interfaceDropped = maxInt64(interfaceDropped, *statistics.InterfaceDropped)
// Remaining counters are plain sums across the session's records.
*aggregateStatistics.Traces += *statistics.Traces
*aggregateStatistics.Packets += *statistics.Packets
*aggregateStatistics.PacketSeriesDropped += *statistics.PacketSeriesDropped
*aggregateStatistics.Flows += *statistics.Flows
*aggregateStatistics.DroppedFlows += *statistics.DroppedFlows
*aggregateStatistics.Bytes += *statistics.Bytes
}
// Final gauge value = banked wraparound baselines + last epoch maximum.
*aggregateStatistics.PcapDropped = pcapDroppedBaseline + pcapDropped
*aggregateStatistics.InterfaceDropped = interfaceDroppedBaseline + interfaceDropped
encodedStatistics, err := proto.Marshal(aggregateStatistics)
if err != nil {
panic(err)
}
outputChan <- &store.Record{
Key: lex.EncodeOrDie(&session),
Value: encodedStatistics,
}
}
}
示例15: flattenLookupsToNodeAndMac
// flattenLookupsToNodeAndMac sums lookup counts over each
// (node, MAC, domain) group and emits a single record with the total.
func flattenLookupsToNodeAndMac(inputChan, outputChan chan *store.Record) {
	var nodeId, macAddress, domain string
	grouper := transformer.GroupRecords(inputChan, &nodeId, &macAddress, &domain)
	for grouper.NextGroup() {
		total := int64(0)
		for grouper.NextRecord() {
			var count int64
			lex.DecodeOrDie(grouper.Read().Value, &count)
			total += count
		}
		outputChan <- &store.Record{
			Key:   lex.EncodeOrDie(nodeId, macAddress, domain),
			Value: lex.EncodeOrDie(total),
		}
	}
}