This page collects typical usage examples of the Go method Manager.ReadingWriter from the package github.com/sburnett/transformer/store. If you are unsure what Manager.ReadingWriter does or how to call it, the curated examples below should help. You can also browse further examples of the containing type, github.com/sburnett/transformer/store.Manager.
The 15 code examples of Manager.ReadingWriter below are sorted by popularity by default. You can upvote the examples you find useful; your ratings help surface better Go examples.
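All fifteen examples follow the same pattern: a store.Manager hands out views of named stores under different access modes, and ReadingWriter returns a store that one pipeline stage can write and a later stage can read back. Below is a minimal sketch of that pattern, assuming only the calls that appear in the examples on this page; the function name CopyPipeline and the store names "input" and "output" are hypothetical, not part of the library.
func CopyPipeline(levelDbManager store.Manager) transformer.Pipeline {
	// Read-only view of an existing store.
	inputStore := levelDbManager.Seeker("input")
	// ReadingWriter: written by the stage below, readable by any later stage.
	outputStore := levelDbManager.ReadingWriter("output")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "CopyInput",
			Reader: inputStore,
			Writer: outputStore,
		},
	}
}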
Example 1: BytesPerDevicePipeline
// BytesPerDevicePipeline counts traffic bytes per device: traces are mapped into
// address, flow, and packet tables, joined with device MAC addresses, reduced
// within each session, reduced across sessions, and exported to PostgreSQL.
func BytesPerDevicePipeline(levelDbManager store.Manager, bytesPerDevicePostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdevice-session")
	addressTableStore := levelDbManager.SeekingWriter("bytesperdevice-address-table")
	flowTableStore := levelDbManager.SeekingWriter("bytesperdevice-flow-table")
	packetsStore := levelDbManager.SeekingWriter("bytesperdevice-packets")
	flowIdToMacStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-mac")
	flowIdToMacsStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-macs")
	bytesPerDeviceUnreducedStore := levelDbManager.SeekingWriter("bytesperdevice-unreduced")
	bytesPerDeviceSessionStore := levelDbManager.ReadingWriter("bytesperdevice-reduced-sessions")
	bytesPerDeviceStore := levelDbManager.ReadingWriter("bytesperdevice")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerDeviceMapper",
			Reader:      newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDeviceMapper, 3),
			Writer:      store.NewMuxingWriter(addressTableStore, flowTableStore, packetsStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "JoinMacAndFlowId",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(addressTableStore, flowTableStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndFlowId),
			Writer:      flowIdToMacStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenMacAddresses",
			Reader:      store.NewPrefixIncludingReader(flowIdToMacStore, sessionsStore),
			Transformer: transformer.TransformFunc(flattenMacAddresses),
			Writer:      flowIdToMacsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacAndSizes",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(flowIdToMacsStore, packetsStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndSizes),
			Writer:      bytesPerDeviceUnreducedStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDeviceSession",
			Reader:      store.NewPrefixIncludingReader(bytesPerDeviceUnreducedStore, sessionsStore),
			Transformer: transformer.TransformFunc(reduceBytesPerDeviceSession),
			Writer:      bytesPerDeviceSessionStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDevice",
			Reader:      bytesPerDeviceSessionStore,
			Transformer: transformer.TransformFunc(reduceBytesPerDevice),
			Writer:      bytesPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerDevicePostgres",
			Reader: bytesPerDeviceStore,
			Writer: bytesPerDevicePostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
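A detail worth noting in this example: store.NewRangeExcludingReader hides traces whose keys fall inside traceKeyRangesStore, and the TraceKeyRangesPipeline stages appended at the end appear to maintain those ranges, so a re-run of the pipeline processes only traces it has not seen before.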
Example 2: UptimePipeline
// UptimePipeline extracts per-node uptime readings from the "uptime" logs and
// writes them to LevelDB, CSV, and SQLite.
func UptimePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	uptimeStore := levelDbManager.ReadingWriter("uptime")
	var node string
	var timestamp, uptime int64
	csvStore := csvManager.Writer("uptime.csv", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	sqliteStore := sqliteManager.Writer("uptime", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Uptime",
			Reader:      ReadOnlySomeLogs(logsStore, "uptime"),
			Transformer: transformer.MakeMapFunc(extractUptime),
			Writer:      uptimeStore,
		},
		transformer.PipelineStage{
			Name:   "WriteUptimeCsv",
			Reader: uptimeStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteUptimeSqlite",
			Reader: uptimeStore,
			Writer: sqliteStore,
		},
	}
}
Example 3: PackagesPipeline
// PackagesPipeline records installed package versions from the opkg logs and
// detects version changes over time, writing the changes to CSV and SQLite.
func PackagesPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	installedPackagesStore := levelDbManager.ReadingWriter("installed-packages")
	versionChangesStore := levelDbManager.ReadingWriter("version-changes")
	var node, packageName string
	var timestamp int64
	var version string
	csvStore := csvManager.Writer("packages.csv", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	sqliteStore := sqliteManager.Writer("packages", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "OpkgListInstalled",
			Reader:      ReadOnlySomeLogs(logsStore, "opkg_list-installed"),
			Transformer: transformer.MakeDoFunc(extractInstalledPackages),
			Writer:      installedPackagesStore,
		},
		transformer.PipelineStage{
			Name:        "DetectVersionChanges",
			Reader:      installedPackagesStore,
			Transformer: transformer.TransformFunc(detectChangedPackageVersions),
			Writer:      versionChangesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteVersionChangesSqlite",
			Reader: versionChangesStore,
			Writer: sqliteStore,
		},
		transformer.PipelineStage{
			Name:   "WriteVersionChangesCsv",
			Reader: versionChangesStore,
			Writer: csvStore,
		},
	}
}
Example 4: IndexTarballsPipeline
// IndexTarballsPipeline scans health log tarballs on disk and indexes their
// contents into the logs store, tracking which tarballs were already indexed.
func IndexTarballsPipeline(tarballsPath string, levelDbManager store.Manager) transformer.Pipeline {
	allTarballsPattern := filepath.Join(tarballsPath, "all", "health", "*", "*", "health_*.tar.gz")
	dailyTarballsPattern := filepath.Join(tarballsPath, "by-date", "*", "health", "*", "health_*.tar.gz")
	tarnamesStore := levelDbManager.ReadingWriter("tarnames")
	tarnamesIndexedStore := levelDbManager.ReadingWriter("tarnames-indexed")
	logsStore := levelDbManager.Writer("logs")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "ScanLogTarballs",
			Reader: store.NewGlobReader(allTarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:   "ScanDailyLogTarballs",
			Reader: store.NewGlobReader(dailyTarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:        "ReadLogTarballs",
			Reader:      store.NewDemuxingReader(tarnamesStore, tarnamesIndexedStore),
			Transformer: transformer.MakeMultipleOutputsGroupDoFunc(IndexTarballs, 2),
			Writer:      store.NewMuxingWriter(logsStore, tarnamesIndexedStore),
		},
	}
}
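Here tarnamesStore accumulates every tarball name found on disk, while tarnames-indexed appears to record which tarballs have already been indexed; demuxing the two readers into IndexTarballs and muxing its outputs back into logsStore and tarnamesIndexedStore lets the stage skip previously indexed tarballs on later runs.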
Example 5: MemoryUsagePipeline
// MemoryUsagePipeline extracts used and free memory from the "top" logs and
// writes the results to LevelDB, CSV, and SQLite.
func MemoryUsagePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	memoryUsageStore := levelDbManager.ReadingWriter("memory")
	var node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("memory.csv", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	sqliteStore := sqliteManager.Writer("memory", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Memory",
			Reader:      ReadOnlySomeLogs(logsStore, "top"),
			Transformer: transformer.MakeDoFunc(extractMemoryUsage),
			Writer:      memoryUsageStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageCsv",
			Reader: memoryUsageStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageSqlite",
			Reader: memoryUsageStore,
			Writer: sqliteStore,
		},
	}
}
Example 6: RebootsPipeline
// RebootsPipeline infers reboot times from the uptime records and writes them
// to CSV and SQLite.
func RebootsPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	uptimeStore := levelDbManager.Seeker("uptime")
	rebootsStore := levelDbManager.ReadingWriter("reboots")
	var node string
	var timestamp int64
	rebootsCsvStore := csvManager.Writer("reboots.csv", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)
	rebootsSqliteStore := sqliteManager.Writer("reboots", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "InferReboots",
			Reader:      uptimeStore,
			Transformer: transformer.TransformFunc(inferReboots),
			Writer:      rebootsStore,
		},
		transformer.PipelineStage{
			Name:   "WriteRebootsCsv",
			Reader: rebootsStore,
			Writer: rebootsCsvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteRebootsSqlite",
			Reader: rebootsStore,
			Writer: rebootsSqliteStore,
		},
	}
}
Example 7: AvailabilityPipeline
// AvailabilityPipeline computes per-node availability intervals from traces,
// consolidates them, emits a JSON summary, and regenerates the excluded and
// consistent key ranges that other pipelines consume.
func AvailabilityPipeline(levelDbManager store.Manager, jsonWriter io.Writer, timestamp int64) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	intervalsStore := levelDbManager.ReadingWriter("availability-intervals")
	consolidatedStore := levelDbManager.ReadingDeleter("availability-consolidated")
	nodesStore := levelDbManager.ReadingDeleter("availability-nodes")
	excludeRangesStore := levelDbManager.ReadingDeleter("availability-done")
	consistentRangesStore := levelDbManager.ReadingDeleter("consistent-ranges")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AvailabilityIntervals",
			Reader:      store.NewRangeExcludingReader(tracesStore, excludeRangesStore),
			Transformer: transformer.TransformFunc(availabilityIntervals),
			Writer:      intervalsStore,
		},
		transformer.PipelineStage{
			Name:        "ConsolidateAvailabilityIntervals",
			Reader:      intervalsStore,
			Transformer: transformer.TransformFunc(consolidateAvailabilityIntervals),
			Writer:      store.NewTruncatingWriter(consolidatedStore),
		},
		transformer.PipelineStage{
			Name:        "AvailabilityReducer",
			Reader:      consolidatedStore,
			Transformer: transformer.TransformFunc(availabilityReducer),
			Writer:      store.NewTruncatingWriter(nodesStore),
		},
		transformer.PipelineStage{
			Name:   "AvailabilityJson",
			Reader: nodesStore,
			Writer: &availabilityJsonStore{writer: jsonWriter, timestamp: timestamp},
		},
		transformer.PipelineStage{
			Name:        "GenerateExcludedRanges",
			Reader:      consolidatedStore,
			Transformer: transformer.MakeMapFunc(generateExcludedRanges),
			Writer:      store.NewTruncatingWriter(excludeRangesStore),
		},
		transformer.PipelineStage{
			Name:        "GenerateConsistentRanges",
			Reader:      excludeRangesStore,
			Transformer: transformer.MakeDoFunc(generateConsistentRanges),
			Writer:      store.NewTruncatingWriter(consistentRangesStore),
		},
	}
}
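Unlike the append-style stages elsewhere on this page, the later stages here wrap their ReadingDeleter stores in store.NewTruncatingWriter, which suggests that the consolidated intervals, node summaries, and range stores are rebuilt from scratch on each run rather than extended incrementally.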
Example 8: IndexTarballsPipeline
// IndexTarballsPipeline scans trace tarballs on disk and indexes their contents
// into the traces store, tracking which tarballs were already indexed.
func IndexTarballsPipeline(tarballsPath string, levelDbManager store.Manager) transformer.Pipeline {
	tarballsPattern := filepath.Join(tarballsPath, "*", "*", "*.tar.gz")
	tarnamesStore := levelDbManager.ReadingWriter("tarnames")
	tarnamesIndexedStore := levelDbManager.ReadingWriter("tarnames-indexed")
	tracesStore := levelDbManager.Writer("traces")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "ScanTraceTarballs",
			Reader: store.NewGlobReader(tarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:        "IndexTraces",
			Transformer: transformer.MakeMultipleOutputsGroupDoFunc(IndexTarballs, 2),
			Reader:      store.NewDemuxingReader(tarnamesStore, tarnamesIndexedStore),
			Writer:      store.NewMuxingWriter(tracesStore, tarnamesIndexedStore),
		},
	}
}
Example 9: DisjointPackagesPipeline
// DisjointPackagesPipeline scans the logs for the disjoint-packages error
// condition and writes the matching entries to not-disjoint.csv.
func DisjointPackagesPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	disjointPackagesStore := levelDbManager.ReadingWriter("disjoint-packages")
	var filename, node string
	var timestamp int64
	csvStore := csvManager.Writer("not-disjoint.csv", []string{"filename", "node", "timestamp"}, []string{}, &filename, &node, &timestamp)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "DisjointPackages",
			Reader:      logsStore,
			Transformer: transformer.MakeDoFunc(detectDisjointPackagesError),
			Writer:      disjointPackagesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteDisjointPackagesCsv",
			Reader: disjointPackagesStore,
			Writer: csvStore,
		},
	}
}
Example 10: IpRoutePipeline
// IpRoutePipeline extracts each node's default gateway from the "iproute" logs
// and writes it to SQLite.
func IpRoutePipeline(levelDbManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	defaultRoutesStore := levelDbManager.ReadingWriter("default-routes")
	var node string
	var timestamp int64
	var gateway string
	sqliteStore := sqliteManager.Writer("defaultroutes", []string{"node", "timestamp"}, []string{"gateway"}, &node, &timestamp, &gateway)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "ExtractDefaultRoute",
			Reader:      ReadOnlySomeLogs(logsStore, "iproute"),
			Transformer: transformer.MakeDoFunc(extractDefaultRoute),
			Writer:      defaultRoutesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteDefaultRoutesSqlite",
			Reader: defaultRoutesStore,
			Writer: sqliteStore,
		},
	}
}
Example 11: FilesystemUsagePipeline
// FilesystemUsagePipeline extracts used and free space per mount point from the
// "df" logs and writes the results to CSV.
func FilesystemUsagePipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	filesystemUsageStore := levelDbManager.ReadingWriter("filesystem")
	var mount, node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("filesystem.csv", []string{"mount", "node", "timestamp"}, []string{"used", "free"}, &mount, &node, &timestamp, &used, &free)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Filesystem",
			Reader:      ReadOnlySomeLogs(logsStore, "df"),
			Transformer: transformer.MakeDoFunc(extractFilesystemUsage),
			Writer:      filesystemUsageStore,
		},
		transformer.PipelineStage{
			Name:   "WriteFilesystemUsageCsv",
			Reader: filesystemUsageStore,
			Writer: csvStore,
		},
	}
}
Example 12: SummarizeHealthPipeline
// SummarizeHealthPipeline buckets memory and filesystem usage by day,
// summarizes each day, and writes the summaries to CSV.
func SummarizeHealthPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	memoryStore := levelDbManager.Reader("memory")
	memoryUsageByDayStore := levelDbManager.ReadingWriter("memory-usage-by-day")
	memoryUsageByDaySummarizedStore := levelDbManager.ReadingWriter("memory-usage-by-day-summarized")
	filesystemStore := levelDbManager.Reader("filesystem")
	filesystemUsageByDayStore := levelDbManager.ReadingWriter("filesystem-usage-by-day")
	filesystemUsageByDaySummarizedStore := levelDbManager.ReadingWriter("filesystem-usage-by-day-summarized")
	var timestamp, usage int64
	var filesystem, node string
	memoryUsageSummaryCsv := csvManager.Writer("memory-usage-summary.csv", []string{"timestamp", "node"}, []string{"usage"}, &timestamp, &node, &usage)
	filesystemUsageSummaryCsv := csvManager.Writer("filesystem-usage-summary.csv", []string{"filesystem", "timestamp", "node"}, []string{"usage"}, &filesystem, &timestamp, &node, &usage)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "OrderMemoryUsageByTimestamp",
			Reader:      memoryStore,
			Transformer: transformer.MakeMapFunc(orderRecordsByDay),
			Writer:      memoryUsageByDayStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeMemoryUsage",
			Reader:      memoryUsageByDayStore,
			Transformer: transformer.TransformFunc(summarizeMemoryUsage),
			Writer:      memoryUsageByDaySummarizedStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageSummaryCsv",
			Reader: memoryUsageByDaySummarizedStore,
			Writer: memoryUsageSummaryCsv,
		},
		transformer.PipelineStage{
			Name:        "OrderFilesystemUsageByTimestamp",
			Reader:      filesystemStore,
			Transformer: transformer.MakeMapFunc(orderFilesystemRecordsByDay),
			Writer:      filesystemUsageByDayStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeFilesystemUsage",
			Reader:      filesystemUsageByDayStore,
			Transformer: transformer.TransformFunc(summarizeFilesystemUsage),
			Writer:      filesystemUsageByDaySummarizedStore,
		},
		transformer.PipelineStage{
			Name:   "WriteFilesystemUsageSummaryCsv",
			Reader: filesystemUsageByDaySummarizedStore,
			Writer: filesystemUsageSummaryCsv,
		},
	}
}
Example 13: BytesPerMinutePipeline
// BytesPerMinutePipeline counts traffic bytes per minute, rolls them up per
// hour, and exports the hourly counts to PostgreSQL.
func BytesPerMinutePipeline(levelDbManager store.Manager, bytesPerHourPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	mappedStore := levelDbManager.ReadingWriter("bytesperminute-mapped")
	bytesPerMinuteStore := levelDbManager.ReadingWriter("bytesperminute")
	bytesPerHourStore := levelDbManager.ReadingWriter("bytesperhour")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-consolidated-trace-key-ranges")
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerMinuteMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeDoTransformer(bytesPerMinuteMapper(transformer.NewNonce())),
			Writer:      mappedStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerMinuteReducer",
			Reader:      mappedStore,
			Transformer: transformer.TransformFunc(bytesPerMinuteReducer),
			Writer:      bytesPerMinuteStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerHourReducer",
			Reader:      bytesPerMinuteStore,
			Transformer: transformer.TransformFunc(bytesPerHourReducer),
			Writer:      bytesPerHourStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerHourPostgres",
			Reader: bytesPerHourStore,
			Writer: bytesPerHourPostgresStore,
		},
	}, TraceKeyRangesPipeline(store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore), traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Example 14: AggregateStatisticsPipeline
// AggregateStatisticsPipeline aggregates per-trace statistics first by session
// and then by node, emitting the node aggregates as JSON.
func AggregateStatisticsPipeline(levelDbManager store.Manager, jsonWriter io.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceAggregatesStore := levelDbManager.SeekingWriter("statistics-trace-aggregates")
	sessionAggregatesStore := levelDbManager.ReadingWriter("statistics-session-aggregates")
	nodeAggregatesStore := levelDbManager.ReadingWriter("statistics-node-aggregates")
	sessionsStore := levelDbManager.ReadingDeleter("statistics-sessions")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AggregateStatisticsMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeMapFunc(aggregateStatisticsMapper),
			Writer:      traceAggregatesStore,
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReduceBySession",
			Reader:      store.NewPrefixIncludingReader(traceAggregatesStore, sessionsStore),
			Transformer: transformer.TransformFunc(aggregateStatisticsReduceBySession),
			Writer:      sessionAggregatesStore,
		},
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReducer",
			Reader:      sessionAggregatesStore,
			Transformer: transformer.TransformFunc(aggregateStatisticsReducer),
			Writer:      nodeAggregatesStore,
		},
		transformer.PipelineStage{
			Name:   "AggregateStatisticsJson",
			Reader: nodeAggregatesStore,
			Writer: &aggregateStatisticsJsonStore{writer: jsonWriter},
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Example 15: LookupsPerDevicePipeline
// LookupsPerDevicePipeline counts domain-name lookups per device, flattened
// both by node and MAC address and by node, MAC address, and hour.
func LookupsPerDevicePipeline(levelDbManager store.Manager) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	addressIdStore := levelDbManager.Seeker("bytesperdomain-address-id-table")
	addressIdToDomainStore := levelDbManager.SeekingWriter("lookupsperdevice-address-id-to-domain")
	lookupsPerDeviceSharded := levelDbManager.ReadingWriter("lookupsperdevice-sharded")
	lookupsPerDeviceStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device")
	lookupsPerDevicePerHourStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device-per-hour")
	consistentTracesStore := store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "LookupsPerDeviceMapper",
			Reader:      consistentTracesStore,
			Transformer: transformer.MakeDoFunc(lookupsPerDeviceMapper),
			Writer:      addressIdToDomainStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacWithLookups",
			Reader:      store.NewDemuxingSeeker(addressIdStore, addressIdToDomainStore),
			Transformer: transformer.TransformFunc(joinMacWithLookups),
			Writer:      lookupsPerDeviceSharded,
		},
		transformer.PipelineStage{
			Name:        "FlattenLookupsToNodeAndMac",
			Reader:      lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeAndMac),
			Writer:      lookupsPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenLookupsToNodeMacAndTimestamp",
			Reader:      lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeMacAndTimestamp),
			Writer:      lookupsPerDevicePerHourStore,
		},
	}
}
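Taken together, the examples show the access modes a Manager can hand out for a named store: Reader and Seeker for read-only input, Writer for write-only output, SeekingWriter and ReadingWriter when a later stage must read the data back, and ReadingDeleter when a store must also support truncation. Picking the narrowest mode each stage needs appears to be the intended style.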