This article collects typical usage examples of the Python method nupic.algorithms.anomaly_likelihood.AnomalyLikelihood.computeLogLikelihood. If you are wondering what AnomalyLikelihood.computeLogLikelihood does and how to use it, the curated code examples below should help; you can also read further about the enclosing class, nupic.algorithms.anomaly_likelihood.AnomalyLikelihood.
The following 5 code examples of AnomalyLikelihood.computeLogLikelihood are shown, sorted by popularity by default.
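Before the examples, here is a minimal, self-contained sketch of the usual call pattern. It assumes NuPIC is installed; the record values below are made up, and in real use the raw anomaly score comes from an OPF model's result.inferences["anomalyScore"], as the examples that follow show.

import datetime
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood

anomalyLikelihood = AnomalyLikelihood()

# Hypothetical (timestamp, metric value, raw anomaly score) records
records = [
  (datetime.datetime(2015, 1, 1, 0, 0), 10.0, 0.02),
  (datetime.datetime(2015, 1, 1, 0, 5), 10.5, 0.05),
  (datetime.datetime(2015, 1, 1, 0, 10), 95.0, 0.88),
]

for timestamp, value, rawScore in records:
  # Probability that this record is anomalous, given the history so far.
  # NOTE: during the detector's initial learning period this returns a
  # constant 0.5 (see the comment in Example 5 below)
  likelihood = anomalyLikelihood.anomalyProbability(value, rawScore, timestamp)
  # Log-scaled version; spreads out likelihood values crowded near 1.0
  logLikelihood = anomalyLikelihood.computeLogLikelihood(likelihood)
  print timestamp, likelihood, logLikelihood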
Example 1: runAvogadroAnomaly
# Required import: from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood [as alias]
# computeLogLikelihood is then called on the AnomalyLikelihood instance
import csv
import datetime
import math
import os

from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood


def runAvogadroAnomaly(metric, options):
  """
  Create a new HTM Model, fetch the data from the local DB, process it in
  NuPIC, and save the results to a new CSV output file.

  :param metric: AvogadroAgent metric class
  :param options: CLI Options
  """
  model = createModel(metric)  # helper defined elsewhere in this module
  model.enableInference({"predictedField": metric.name})

  fetched = metric.fetch(prefix=options.prefix, start=None)
  resultFile = open(os.path.join(options.prefix, metric.name + "-result.csv"),
                    "wb")
  csvWriter = csv.writer(resultFile)
  csvWriter.writerow(["timestamp", metric.name, "raw_anomaly_score",
                      "anomaly_likelihood", "color"])
  headers = ("timestamp", metric.name)

  anomalyLikelihood = AnomalyLikelihood()

  for (ts, value) in fetched:
    try:
      value = float(value)
    except (ValueError, TypeError):
      continue

    if not math.isnan(value):
      modelInput = dict(zip(headers, (ts, value)))
      modelInput[metric.name] = float(value)
      modelInput["timestamp"] = datetime.datetime.fromtimestamp(
          float(modelInput["timestamp"]))
      result = model.run(modelInput)
      anomalyScore = result.inferences["anomalyScore"]

      likelihood = anomalyLikelihood.anomalyProbability(
          modelInput[metric.name], anomalyScore, modelInput["timestamp"])
      logLikelihood = anomalyLikelihood.computeLogLikelihood(likelihood)

      if logLikelihood > .5:
        color = "red"
      elif logLikelihood > .4:  # equivalent to .4 < logLikelihood <= .5 here
        color = "yellow"
      else:
        color = "green"

      csvWriter.writerow([modelInput["timestamp"], float(value),
                          anomalyScore, logLikelihood, color])
    else:
      # Value was NaN; flush buffered rows so the output file stays current
      resultFile.flush()
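A note on the color thresholds above: computeLogLikelihood compresses likelihood values, which tend to crowd the top of the [0, 1] range, onto a coarser log scale. In the NuPIC sources it is implemented essentially as the sketch below (verify the constant against your installed version):

import math

def computeLogLikelihood(likelihood):
  # Equivalent to math.log(1.0000000001 - likelihood) / math.log(1.0 - 0.9999999999).
  # A likelihood of ~0.9999 maps to ~0.4 and ~0.99999 maps to ~0.5, so the
  # "yellow" and "red" buckets above correspond to very confident anomalies.
  return math.log(1.0000000001 - likelihood) / -23.02585084720009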
Example 2: _ModelRunner
# Required import: from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood [as alias]
# computeLogLikelihood is then called on the AnomalyLikelihood instance
#......... (part of the code is omitted here) .........
model.enableInference(modelSpec["inferenceArgs"])
return model
@staticmethod
def _createCsvReader(fileObj):
# We'll be operating on csvs with arbitrarily long fields
csv.field_size_limit(2**27)
# Make sure readline() works on windows too
os.linesep = "\n"
return csv.reader(fileObj, dialect="excel")
@classmethod
def _emitOutputMessage(cls, dataRow, anomalyProbability):
"""Emit output message to stdout
:param list dataRow: the two-tuple data row on which anomalyProbability was
computed, whose first element is datetime timestamp and second element is
the float scalar value
:param float anomalyProbability: computed anomaly probability value
"""
message = "%s\n" % (json.dumps([dataRow[0].isoformat(), dataRow[1], anomalyProbability]),)
sys.stdout.write(message)
sys.stdout.flush()
def _computeAnomalyProbability(self, fields):
""" Compute anomaly log likelihood score
:param tuple fields: Two-tuple input metric data row
(<datetime-timestamp>, <float-scalar>)
:returns: Log-scaled anomaly probability
:rtype: float
"""
# Generate raw anomaly score
inputRecord = self._modelRecordEncoder.encode(fields)
rawAnomalyScore = self._model.run(inputRecord).inferences["anomalyScore"]
# Generate anomaly likelihood score
anomalyProbability = self._anomalyLikelihood.anomalyProbability(
value=fields[1],
anomalyScore=rawAnomalyScore,
timestamp=fields[0])
return self._anomalyLikelihood.computeLogLikelihood(anomalyProbability)
def run(self):
""" Run the model: ingest and process the input metric data and emit output
messages containing anomaly scores
"""
numRowsToSkip = self._inputSpec["rowOffset"]
datetimeFormat = self._inputSpec["datetimeFormat"]
inputRowTimestampIndex = self._inputSpec["timestampIndex"]
inputRowValueIndex = self._inputSpec["valueIndex"]
g_log.info("Processing model=%s", self._modelId)
for inputRow in self._csvReader:
g_log.debug("Got inputRow=%r", inputRow)
if numRowsToSkip > 0:
numRowsToSkip -= 1
g_log.debug("Skipping header row %s; %s rows left to skip",
inputRow, numRowsToSkip)
continue
# Extract timestamp and value
# NOTE: the order must match the `inputFields` that we passed to the
# Aggregator constructor
fields = [
date_time_utils.parseDatetime(inputRow[inputRowTimestampIndex],
datetimeFormat),
float(inputRow[inputRowValueIndex])
]
# Aggregate
aggRow, _ = self._aggregator.next(fields, None)
g_log.debug("Aggregator returned %s for %s", aggRow, fields)
if aggRow is not None:
self._emitOutputMessage(
dataRow=aggRow,
anomalyProbability=self._computeAnomalyProbability(aggRow))
# Reap remaining data from aggregator
aggRow, _ = self._aggregator.next(None, curInputBookmark=None)
g_log.debug("Aggregator reaped %s in final call", aggRow)
if aggRow is not None:
self._emitOutputMessage(
dataRow=aggRow,
anomalyProbability=self._computeAnomalyProbability(aggRow))
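For concreteness: per _emitOutputMessage above, each line this runner writes to stdout is a JSON array of [ISO-8601 timestamp, aggregated scalar value, log-scaled anomaly probability]. With hypothetical values, one output line would look like:

["2015-01-01T00:05:00", 10.5, 0.0124]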
Example 3: _ModelRunner
# Required import: from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood [as alias]
# computeLogLikelihood is then called on the AnomalyLikelihood instance
#......... (part of the code is omitted here) .........
self._modelId = modelId
# NOTE: ModelRecordEncoder is implemented in the pull request
# https://github.com/numenta/nupic/pull/2432 that is not yet in master.
self._modelRecordEncoder = record_stream.ModelRecordEncoder(
fields=self._INPUT_RECORD_SCHEMA)
self._model = self._createModel(stats=stats)
self._anomalyLikelihood = AnomalyLikelihood()
@classmethod
def _createModel(cls, stats):
"""Instantiate and configure an OPF model
:param dict stats: Metric data stats per stats_schema.json in the
unicorn_backend package.
:returns: OPF Model instance
"""
# Generate swarm params
swarmParams = getScalarMetricWithTimeOfDayAnomalyParams(
metricData=[0],
minVal=stats["min"],
maxVal=stats["max"],
minResolution=stats.get("minResolution"))
model = ModelFactory.create(modelConfig=swarmParams["modelConfig"])
model.enableLearning()
model.enableInference(swarmParams["inferenceArgs"])
return model
@classmethod
def _readInputMessages(cls):
"""Create a generator that waits for and yields input messages from
stdin
yields two-tuple (<timestamp>, <scalar-value>), where <timestamp> is the
`datetime.datetime` timestamp of the metric data sample and <scalar-value>
is the floating point value of the metric data sample.
"""
while True:
message = sys.stdin.readline()
if message:
timestamp, scalarValue = json.loads(message)
yield (datetime.utcfromtimestamp(timestamp), scalarValue)
else:
# Front End closed the pipe (or died)
break
@classmethod
def _emitOutputMessage(cls, rowIndex, anomalyProbability):
"""Emit output message to stdout
:param int rowIndex: 0-based index of corresponding input sample
:param float anomalyProbability: computed anomaly probability value
"""
message = "%s\n" % (json.dumps([rowIndex, anomalyProbability]),)
sys.stdout.write(message)
sys.stdout.flush()
def _computeAnomalyProbability(self, inputRow):
""" Compute anomaly log likelihood score
:param tuple inputRow: Two-tuple input metric data row
(<datetime-timestamp>, <float-scalar>)
:returns: Log-scaled anomaly probability
:rtype: float
"""
# Generate raw anomaly score
inputRecord = self._modelRecordEncoder.encode(inputRow)
rawAnomalyScore = self._model.run(inputRecord).inferences["anomalyScore"]
# Generate anomaly likelihood score
anomalyProbability = self._anomalyLikelihood.anomalyProbability(
value=inputRow[1],
anomalyScore=rawAnomalyScore,
timestamp=inputRow[0])
return self._anomalyLikelihood.computeLogLikelihood(anomalyProbability)
def run(self):
""" Run the model: ingest and process the input metric data and emit output
messages containing anomaly scores
"""
g_log.info("Processing model=%s", self._modelId)
for rowIndex, inputRow in enumerate(self._readInputMessages()):
anomalyProbability = self._computeAnomalyProbability(inputRow)
self._emitOutputMessage(rowIndex=rowIndex,
anomalyProbability=anomalyProbability)
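The wire format here is one JSON array per line in each direction: _readInputMessages expects [&lt;unix-epoch seconds&gt;, &lt;scalar value&gt;] on stdin, and _emitOutputMessage writes [&lt;0-based row index&gt;, &lt;log-scaled anomaly probability&gt;] to stdout. A hypothetical exchange:

[1420070400.0, 10.5]        # stdin
[0, 0.0124]                 # stdout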
Example 4: runAnomaly
# Required import: from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood [as alias]
# computeLogLikelihood is then called on the AnomalyLikelihood instance
def runAnomaly(options):
"""
Create and run a CLA Model on the given dataset (based on the hotgym anomaly
client in NuPIC).
"""
# Load the model params JSON
with open("model_params.json") as fp:
modelParams = json.load(fp)
# Update the resolution value for the encoder
  sensorParams = modelParams['modelParams']['sensorParams']
  numBuckets = sensorParams['encoders']['value'].pop('numBuckets')
  resolution = options.resolution
  if resolution is None:
    resolution = max(0.001,
                     (options.max - options.min) / numBuckets)
  print "Using resolution value: {0}".format(resolution)
  sensorParams['encoders']['value']['resolution'] = resolution
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': 'value'})
  with open(options.inputFile) as fin:
# Open file and setup headers
# Here we write the log likelihood value as the 'anomaly score'
# The actual CLA outputs are labeled 'raw anomaly score'
reader = csv.reader(fin)
csvWriter = csv.writer(open(options.outputFile,"wb"))
csvWriter.writerow(["timestamp", "value",
"_raw_score", "likelihood_score", "log_likelihood_score"])
headers = reader.next()
# The anomaly likelihood object
anomalyLikelihood = AnomalyLikelihood()
# Iterate through each record in the CSV file
print "Starting processing at",datetime.datetime.now()
for i, record in enumerate(reader, start=1):
# Convert input data to a dict so we can pass it into the model
inputData = dict(zip(headers, record))
inputData["value"] = float(inputData["value"])
inputData["dttm"] = dateutil.parser.parse(inputData["dttm"])
#inputData["dttm"] = datetime.datetime.now()
# Send it to the CLA and get back the raw anomaly score
result = model.run(inputData)
anomalyScore = result.inferences['anomalyScore']
# Compute the Anomaly Likelihood
likelihood = anomalyLikelihood.anomalyProbability(
inputData["value"], anomalyScore, inputData["dttm"])
logLikelihood = anomalyLikelihood.computeLogLikelihood(likelihood)
      if likelihood > 0.9999:
        print "Anomaly detected:", inputData['dttm'], inputData['value'], likelihood
# Write results to the output CSV file
csvWriter.writerow([inputData["dttm"], inputData["value"],
anomalyScore, likelihood, logLikelihood])
# Progress report
      if i % 1000 == 0:
        print i, "records processed"

    print "Completed processing", i, "records at", datetime.datetime.now()
    print "Anomaly scores for", options.inputFile,
    print "have been written to", options.outputFile
Example 5: str
# Required import: from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood [as alias]
# computeLogLikelihood is then called on the AnomalyLikelihood instance
anomalyScore, likelihood, logLikelihood = 'None', 'None', 'None'
pred_result = shifter.shift(result)
if result.inferences["multiStepBestPredictions"][1]:
prediction = result.inferences["multiStepBestPredictions"][1]
print prediction
else:
prediction = 'None'
if not PREDICT or prediction == 'None':
# Anomaly-Stats:
anomalyScore = result.inferences["anomalyScore"]
AnomalyScores.append(anomalyScore)
# By default 0.5 for the first 600 iterations! TODO: Still not quite sure if that's alright...
likelihood = anomalyLikelihood.anomalyProbability(event[0] + numpy.array([event[1]]), anomalyScore, modelInput["timestamp"])
logLikelihood = anomalyLikelihood.computeLogLikelihood(likelihood)
LikelihoodScores.append([modelInput["timestamp"], modelInput["event"], likelihood])
prediction = 'None'
# NOTE: "mag" is stored under the more general name "scalar"; values are typecast for the DB
data = {"eventType": str(event.type),
"lat": float(event.latitude),
"lng": float(event.longitude),
"depth": float(event.depth),
"scalar": float(event.mag),
"timestamp": str(event.time),
"AnomalyScore": float(anomalyScore),
"Anomaly_mean": (float(numpy.mean(AnomalyScores)), WINDOWSIZE),
"AnomalyLikelihood": float(likelihood),
"logLikelihood": float(logLikelihood),