This article collects typical usage examples of the Manager.Seeker method from the Go package github.com/sburnett/transformer/store. If you are wondering what Manager.Seeker does and how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing type, github.com/sburnett/transformer/store.Manager.
The 15 code examples of Manager.Seeker shown below are sorted by popularity by default.
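Before the examples, here is a minimal sketch of the pattern they all share: Manager.Seeker opens a named LevelDB table for read-only, seekable access, and the returned store is wired in as the Reader of a transformer.PipelineStage. The sketch uses only calls that appear in the examples below; the table names "example-input" and "example-output" and the exampleTransform function are placeholders for illustration, not part of the library's documented API, and the usual imports of the transformer and store packages are omitted to match the style of the examples.

func ExampleSeekerPipeline(levelDbManager store.Manager) transformer.Pipeline {
	// Seeker opens the named table for seekable, read-only access.
	inputStore := levelDbManager.Seeker("example-input")
	// ReadingWriter opens a table this pipeline both reads and writes.
	outputStore := levelDbManager.ReadingWriter("example-output")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "ExampleStage",
			Reader: inputStore,
			Transformer: transformer.TransformFunc(exampleTransform), // exampleTransform is a placeholder
			Writer: outputStore,
		},
	}
}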
Example 1: BytesPerDevicePipeline
func BytesPerDevicePipeline(levelDbManager store.Manager, bytesPerDevicePostgresStore store.Writer) transformer.Pipeline {
	// Read-only, seekable views of tables produced by other pipelines.
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	// Intermediate and output tables owned by this pipeline.
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdevice-session")
	addressTableStore := levelDbManager.SeekingWriter("bytesperdevice-address-table")
	flowTableStore := levelDbManager.SeekingWriter("bytesperdevice-flow-table")
	packetsStore := levelDbManager.SeekingWriter("bytesperdevice-packets")
	flowIdToMacStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-mac")
	flowIdToMacsStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-macs")
	bytesPerDeviceUnreducedStore := levelDbManager.SeekingWriter("bytesperdevice-unreduced")
	bytesPerDeviceSessionStore := levelDbManager.ReadingWriter("bytesperdevice-reduced-sessions")
	bytesPerDeviceStore := levelDbManager.ReadingWriter("bytesperdevice")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-consolidated-trace-key-ranges")
	// Restrict input to traces inside the consistent ranges, excluding key ranges already covered.
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "BytesPerDeviceMapper",
			Reader: newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDeviceMapper, 3),
			Writer: store.NewMuxingWriter(addressTableStore, flowTableStore, packetsStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name: "JoinMacAndFlowId",
			Reader: store.NewPrefixIncludingReader(store.NewDemuxingSeeker(addressTableStore, flowTableStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndFlowId),
			Writer: flowIdToMacStore,
		},
		transformer.PipelineStage{
			Name: "FlattenMacAddresses",
			Reader: store.NewPrefixIncludingReader(flowIdToMacStore, sessionsStore),
			Transformer: transformer.TransformFunc(flattenMacAddresses),
			Writer: flowIdToMacsStore,
		},
		transformer.PipelineStage{
			Name: "JoinMacAndSizes",
			Reader: store.NewPrefixIncludingReader(store.NewDemuxingSeeker(flowIdToMacsStore, packetsStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndSizes),
			Writer: bytesPerDeviceUnreducedStore,
		},
		transformer.PipelineStage{
			Name: "ReduceBytesPerDeviceSession",
			Reader: store.NewPrefixIncludingReader(bytesPerDeviceUnreducedStore, sessionsStore),
			Transformer: transformer.TransformFunc(reduceBytesPerDeviceSession),
			Writer: bytesPerDeviceSessionStore,
		},
		transformer.PipelineStage{
			Name: "ReduceBytesPerDevice",
			Reader: bytesPerDeviceSessionStore,
			Transformer: transformer.TransformFunc(reduceBytesPerDevice),
			Writer: bytesPerDeviceStore,
		},
		transformer.PipelineStage{
			Name: "BytesPerDevicePostgres",
			Reader: bytesPerDeviceStore,
			Writer: bytesPerDevicePostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Example 2: PackagesPipeline
func PackagesPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	installedPackagesStore := levelDbManager.ReadingWriter("installed-packages")
	versionChangesStore := levelDbManager.ReadingWriter("version-changes")
	var node, packageName string
	var timestamp int64
	var version string
	csvStore := csvManager.Writer("packages.csv", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	sqliteStore := sqliteManager.Writer("packages", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "OpkgListInstalled",
			Reader: ReadOnlySomeLogs(logsStore, "opkg_list-installed"),
			Transformer: transformer.MakeDoFunc(extractInstalledPackages),
			Writer: installedPackagesStore,
		},
		transformer.PipelineStage{
			Name: "DetectVersionChanges",
			Reader: installedPackagesStore,
			Transformer: transformer.TransformFunc(detectChangedPackageVersions),
			Writer: versionChangesStore,
		},
		transformer.PipelineStage{
			Name: "WriteVersionChangesSqlite",
			Reader: versionChangesStore,
			Writer: sqliteStore,
		},
		transformer.PipelineStage{
			Name: "WriteVersionChangesCsv",
			Reader: versionChangesStore,
			Writer: csvStore,
		},
	}
}
Example 3: UptimePipeline
func UptimePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	uptimeStore := levelDbManager.ReadingWriter("uptime")
	var node string
	var timestamp, uptime int64
	csvStore := csvManager.Writer("uptime.csv", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	sqliteStore := sqliteManager.Writer("uptime", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "Uptime",
			Reader: ReadOnlySomeLogs(logsStore, "uptime"),
			Transformer: transformer.MakeMapFunc(extractUptime),
			Writer: uptimeStore,
		},
		transformer.PipelineStage{
			Name: "WriteUptimeCsv",
			Reader: uptimeStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name: "WriteUptimeSqlite",
			Reader: uptimeStore,
			Writer: sqliteStore,
		},
	}
}
Example 4: MemoryUsagePipeline
func MemoryUsagePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	memoryUsageStore := levelDbManager.ReadingWriter("memory")
	var node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("memory.csv", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	sqliteStore := sqliteManager.Writer("memory", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "Memory",
			Reader: ReadOnlySomeLogs(logsStore, "top"),
			Transformer: transformer.MakeDoFunc(extractMemoryUsage),
			Writer: memoryUsageStore,
		},
		transformer.PipelineStage{
			Name: "WriteMemoryUsageCsv",
			Reader: memoryUsageStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name: "WriteMemoryUsageSqlite",
			Reader: memoryUsageStore,
			Writer: sqliteStore,
		},
	}
}
Example 5: RebootsPipeline
func RebootsPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	uptimeStore := levelDbManager.Seeker("uptime")
	rebootsStore := levelDbManager.ReadingWriter("reboots")
	var node string
	var timestamp int64
	rebootsCsvStore := csvManager.Writer("reboots.csv", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)
	rebootsSqliteStore := sqliteManager.Writer("reboots", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "InferReboots",
			Reader: uptimeStore,
			Transformer: transformer.TransformFunc(inferReboots),
			Writer: rebootsStore,
		},
		transformer.PipelineStage{
			Name: "WriteRebootsCsv",
			Reader: rebootsStore,
			Writer: rebootsCsvStore,
		},
		transformer.PipelineStage{
			Name: "WriteRebootsSqlite",
			Reader: rebootsStore,
			Writer: rebootsSqliteStore,
		},
	}
}
Example 6: BytesPerMinutePipeline
func BytesPerMinutePipeline(levelDbManager store.Manager, bytesPerHourPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	mappedStore := levelDbManager.ReadingWriter("bytesperminute-mapped")
	bytesPerMinuteStore := levelDbManager.ReadingWriter("bytesperminute")
	bytesPerHourStore := levelDbManager.ReadingWriter("bytesperhour")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-consolidated-trace-key-ranges")
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "BytesPerMinuteMapper",
			Reader: store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeDoTransformer(bytesPerMinuteMapper(transformer.NewNonce())),
			Writer: mappedStore,
		},
		transformer.PipelineStage{
			Name: "BytesPerMinuteReducer",
			Reader: mappedStore,
			Transformer: transformer.TransformFunc(bytesPerMinuteReducer),
			Writer: bytesPerMinuteStore,
		},
		transformer.PipelineStage{
			Name: "BytesPerHourReducer",
			Reader: bytesPerMinuteStore,
			Transformer: transformer.TransformFunc(bytesPerHourReducer),
			Writer: bytesPerHourStore,
		},
		transformer.PipelineStage{
			Name: "BytesPerHourPostgres",
			Reader: bytesPerHourStore,
			Writer: bytesPerHourPostgresStore,
		},
	}, TraceKeyRangesPipeline(store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore), traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Example 7: FilterNodesPipeline
func FilterNodesPipeline(nodeId string, levelDbManager store.Manager) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	filteredStore := levelDbManager.Writer(fmt.Sprintf("filtered-%s", nodeId))
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "FilterNode",
			Reader: FilterNodes(tracesStore, nodeId),
			Writer: filteredStore,
		},
	}
}
Example 8: AvailabilityPipeline
func AvailabilityPipeline(levelDbManager store.Manager, jsonWriter io.Writer, timestamp int64) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	intervalsStore := levelDbManager.ReadingWriter("availability-intervals")
	consolidatedStore := levelDbManager.ReadingDeleter("availability-consolidated")
	nodesStore := levelDbManager.ReadingDeleter("availability-nodes")
	excludeRangesStore := levelDbManager.ReadingDeleter("availability-done")
	consistentRangesStore := levelDbManager.ReadingDeleter("consistent-ranges")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "AvailabilityIntervals",
			Reader: store.NewRangeExcludingReader(tracesStore, excludeRangesStore),
			Transformer: transformer.TransformFunc(availabilityIntervals),
			Writer: intervalsStore,
		},
		transformer.PipelineStage{
			Name: "ConsolidateAvailabilityIntervals",
			Reader: intervalsStore,
			Transformer: transformer.TransformFunc(consolidateAvailabilityIntervals),
			Writer: store.NewTruncatingWriter(consolidatedStore),
		},
		transformer.PipelineStage{
			Name: "AvailabilityReducer",
			Reader: consolidatedStore,
			Transformer: transformer.TransformFunc(availabilityReducer),
			Writer: store.NewTruncatingWriter(nodesStore),
		},
		transformer.PipelineStage{
			Name: "AvailabilityJson",
			Reader: nodesStore,
			Writer: &availabilityJsonStore{writer: jsonWriter, timestamp: timestamp},
		},
		transformer.PipelineStage{
			Name: "GenerateExcludedRanges",
			Reader: consolidatedStore,
			Transformer: transformer.MakeMapFunc(generateExcludedRanges),
			Writer: store.NewTruncatingWriter(excludeRangesStore),
		},
		transformer.PipelineStage{
			Name: "GenerateConsistentRanges",
			Reader: excludeRangesStore,
			Transformer: transformer.MakeDoFunc(generateConsistentRanges),
			Writer: store.NewTruncatingWriter(consistentRangesStore),
		},
	}
}
Example 9: DevicesCountPipeline
func DevicesCountPipeline(levelDbManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	devicesCountStore := levelDbManager.Writer("devices-count")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "ExtractEthernetCount",
			Reader: ReadOnlySomeLogs(logsStore, "swconfig_ports"),
			Transformer: transformer.MakeDoFunc(extractEthernetCount),
			Writer: devicesCountStore,
		},
		transformer.PipelineStage{
			Name: "ExtractWirelessCount",
			Reader: ReadOnlySomeLogs(logsStore, "iw_station_count"),
			Transformer: transformer.MakeDoFunc(extractWirelessCount),
			Writer: devicesCountStore,
		},
	}
}
Example 10: DisjointPackagesPipeline
func DisjointPackagesPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	disjointPackagesStore := levelDbManager.ReadingWriter("disjoint-packages")
	var filename, node string
	var timestamp int64
	csvStore := csvManager.Writer("not-disjoint.csv", []string{"filename", "node", "timestamp"}, []string{}, &filename, &node, &timestamp)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "DisjointPackages",
			Reader: logsStore,
			Transformer: transformer.MakeDoFunc(detectDisjointPackagesError),
			Writer: disjointPackagesStore,
		},
		transformer.PipelineStage{
			Name: "WriteDisjointPackagesCsv",
			Reader: disjointPackagesStore,
			Writer: csvStore,
		},
	}
}
Example 11: IpRoutePipeline
func IpRoutePipeline(levelDbManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	defaultRoutesStore := levelDbManager.ReadingWriter("default-routes")
	var node string
	var timestamp int64
	var gateway string
	sqliteStore := sqliteManager.Writer("defaultroutes", []string{"node", "timestamp"}, []string{"gateway"}, &node, &timestamp, &gateway)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "ExtractDefaultRoute",
			Reader: ReadOnlySomeLogs(logsStore, "iproute"),
			Transformer: transformer.MakeDoFunc(extractDefaultRoute),
			Writer: defaultRoutesStore,
		},
		transformer.PipelineStage{
			Name: "WriteDefaultRoutesSqlite",
			Reader: defaultRoutesStore,
			Writer: sqliteStore,
		},
	}
}
Example 12: FilesystemUsagePipeline
func FilesystemUsagePipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	filesystemUsageStore := levelDbManager.ReadingWriter("filesystem")
	var mount, node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("filesystem.csv", []string{"mount", "node", "timestamp"}, []string{"used", "free"}, &mount, &node, &timestamp, &used, &free)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "Filesystem",
			Reader: ReadOnlySomeLogs(logsStore, "df"),
			Transformer: transformer.MakeDoFunc(extractFilesystemUsage),
			Writer: filesystemUsageStore,
		},
		transformer.PipelineStage{
			Name: "WriteFilesystemUsageCsv",
			Reader: filesystemUsageStore,
			Writer: csvStore,
		},
	}
}
Example 13: AggregateStatisticsPipeline
func AggregateStatisticsPipeline(levelDbManager store.Manager, jsonWriter io.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceAggregatesStore := levelDbManager.SeekingWriter("statistics-trace-aggregates")
	sessionAggregatesStore := levelDbManager.ReadingWriter("statistics-session-aggregates")
	nodeAggregatesStore := levelDbManager.ReadingWriter("statistics-node-aggregates")
	sessionsStore := levelDbManager.ReadingDeleter("statistics-sessions")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "AggregateStatisticsMapper",
			Reader: store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeMapFunc(aggregateStatisticsMapper),
			Writer: traceAggregatesStore,
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name: "AggregateStatisticsReduceBySession",
			Reader: store.NewPrefixIncludingReader(traceAggregatesStore, sessionsStore),
			Transformer: transformer.TransformFunc(aggregateStatisticsReduceBySession),
			Writer: sessionAggregatesStore,
		},
		transformer.PipelineStage{
			Name: "AggregateStatisticsReducer",
			Reader: sessionAggregatesStore,
			Transformer: transformer.TransformFunc(aggregateStatisticsReducer),
			Writer: nodeAggregatesStore,
		},
		transformer.PipelineStage{
			Name: "AggregateStatisticsJson",
			Reader: nodeAggregatesStore,
			Writer: &aggregateStatisticsJsonStore{writer: jsonWriter},
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Example 14: LookupsPerDevicePipeline
func LookupsPerDevicePipeline(levelDbManager store.Manager) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	addressIdStore := levelDbManager.Seeker("bytesperdomain-address-id-table")
	addressIdToDomainStore := levelDbManager.SeekingWriter("lookupsperdevice-address-id-to-domain")
	lookupsPerDeviceSharded := levelDbManager.ReadingWriter("lookupsperdevice-sharded")
	lookupsPerDeviceStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device")
	lookupsPerDevicePerHourStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device-per-hour")
	consistentTracesStore := store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "LookupsPerDeviceMapper",
			Reader: consistentTracesStore,
			Transformer: transformer.MakeDoFunc(lookupsPerDeviceMapper),
			Writer: addressIdToDomainStore,
		},
		transformer.PipelineStage{
			Name: "JoinMacWithLookups",
			Reader: store.NewDemuxingSeeker(addressIdStore, addressIdToDomainStore),
			Transformer: transformer.TransformFunc(joinMacWithLookups),
			Writer: lookupsPerDeviceSharded,
		},
		transformer.PipelineStage{
			Name: "FlattenLookupsToNodeAndMac",
			Reader: lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeAndMac),
			Writer: lookupsPerDeviceStore,
		},
		transformer.PipelineStage{
			Name: "FlattenLookupsToNodeMacAndTimestamp",
			Reader: lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeMacAndTimestamp),
			Writer: lookupsPerDevicePerHourStore,
		},
	}
}
Example 15: BytesPerDomainPipeline
func BytesPerDomainPipeline(levelDbManager store.Manager, bytesPerDomainPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-consolidated-trace-key-ranges")
	addressIdTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-id-table")
	aRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-a-record-table")
	cnameRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-cname-record-table")
	flowIpsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-ips-table")
	addressIpTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-ip-table")
	bytesPerTimestampShardedStore := levelDbManager.SeekingWriter("bytesperdomain-bytes-per-timestamp-sharded")
	whitelistStore := levelDbManager.SeekingWriter("bytesperdomain-whitelist")
	aRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-a-records-with-mac")
	cnameRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-cname-records-with-mac")
	allDnsMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-dns-mappings")
	allWhitelistedMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-whitelisted-mappings")
	flowMacsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-macs-table")
	flowDomainsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-table")
	flowDomainsGroupedTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-grouped-table")
	bytesPerDomainShardedStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-sharded")
	bytesPerDomainPerDeviceStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-per-device")
	bytesPerDomainStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdomain-sessions")
	excludeOldSessions := func(stor store.Seeker) store.Seeker {
		return store.NewPrefixIncludingReader(stor, sessionsStore)
	}
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name: "BytesPerDomainMapper",
			Reader: newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDomainMapper, 7),
			Writer: store.NewMuxingWriter(addressIdTableStore, aRecordTableStore, cnameRecordTableStore, flowIpsTableStore, addressIpTableStore, bytesPerTimestampShardedStore, whitelistStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name: "JoinAAddressIdsWithMacAddresses",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, aRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer: aRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name: "JoinCnameAddressIdsWithMacAddresses",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, cnameRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer: cnameRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name: "JoinARecordsWithCnameRecords",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(aRecordsWithMacStore, cnameRecordsWithMacStore)),
			Transformer: transformer.TransformFunc(joinARecordsWithCnameRecords),
			Writer: allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name: "EmitARecords",
			Reader: excludeOldSessions(aRecordsWithMacStore),
			Transformer: transformer.MakeDoFunc(emitARecords),
			Writer: allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name: "JoinDomainsWithWhitelist",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(whitelistStore, allDnsMappingsStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithWhitelist),
			Writer: allWhitelistedMappingsStore,
		},
		transformer.PipelineStage{
			Name: "JoinMacWithFlowId",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(addressIpTableStore, flowIpsTableStore)),
			Transformer: transformer.TransformFunc(joinMacWithFlowId),
			Writer: flowMacsTableStore,
		},
		transformer.PipelineStage{
			Name: "JoinWhitelistedDomainsWithFlows",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(allWhitelistedMappingsStore, flowMacsTableStore)),
			Transformer: transformer.TransformFunc(joinWhitelistedDomainsWithFlows),
			Writer: flowDomainsTableStore,
		},
		transformer.PipelineStage{
			Name: "GroupDomainsAndMacAddresses",
			Reader: excludeOldSessions(flowDomainsTableStore),
			Transformer: transformer.TransformFunc(groupDomainsAndMacAddresses),
			Writer: flowDomainsGroupedTableStore,
		},
		transformer.PipelineStage{
			Name: "JoinDomainsWithSizes",
			Reader: excludeOldSessions(store.NewDemuxingSeeker(flowDomainsGroupedTableStore, bytesPerTimestampShardedStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithSizes),
			Writer: bytesPerDomainShardedStore,
		},
		transformer.PipelineStage{
			Name: "FlattenIntoBytesPerDevice",
			Reader: bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerDevice),
			Writer: bytesPerDomainPerDeviceStore,
		},
		transformer.PipelineStage{
			Name: "FlattenIntoBytesPerTimestamp",
			Reader: bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerTimestamp),
			Writer: bytesPerDomainStore,
//......... the remainder of this code example is omitted .........