This article collects typical usage examples of the Python method nupic.encoders.date.DateEncoder.encodeIntoArray. If you are wondering what exactly DateEncoder.encodeIntoArray does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also read further about the containing class, nupic.encoders.date.DateEncoder.
Four code examples of DateEncoder.encodeIntoArray are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
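Before the full examples, here is a minimal sketch of the call pattern they all share. The timeOfDay parameters are borrowed from the detector examples below and the date value is arbitrary; note that encodeIntoArray fills a caller-supplied, zero-filled numpy array in place rather than returning a new one.

import datetime
import numpy as np
from nupic.encoders.date import DateEncoder

# Encode only the time of day: 21 active bits, radius of ~9.49 hours.
encoder = DateEncoder(timeOfDay=(21, 9.49))

# The output buffer must match the encoder's width.
output = np.zeros(encoder.getWidth(), dtype=np.uint32)
encoder.encodeIntoArray(datetime.datetime(2015, 4, 1, 13, 30), output)

print(output.nonzero()[0])  # indices of the active bits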
Example 1: NumentaTMLowLevelDetector
# Required import: from nupic.encoders.date import DateEncoder [as alias]
# Or: from nupic.encoders.date.DateEncoder import encodeIntoArray [as alias]
class NumentaTMLowLevelDetector(AnomalyDetector):
  """The 'numentaTM' detector, but not using the CLAModel or network API."""

  def __init__(self, *args, **kwargs):
    super(NumentaTMLowLevelDetector, self).__init__(*args, **kwargs)
    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.sp = None
    self.spOutput = None
    self.tm = None
    self.anomalyLikelihood = None

    # Set this to False if you want to get results based on raw scores
    # without using AnomalyLikelihood. This will give worse results, but is
    # useful for checking the efficacy of AnomalyLikelihood. You will need
    # to re-optimize the thresholds when running with this setting.
    self.useLikelihood = True

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    # Initialize the RDSE with a resolution calculated from the data min
    # and max; the resolution is specific to the data stream.
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution, seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    # Initialize the timestamp encoder.
    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = (self.timestampEncoder.getWidth() +
                  self.valueEncoder.getWidth())

    self.sp = SpatialPooler(**{
      "globalInhibition": True,
      "columnDimensions": [2048],
      "inputDimensions": [inputWidth],
      "potentialRadius": inputWidth,
      "numActiveColumnsPerInhArea": 40,
      "seed": 1956,
      "potentialPct": 0.8,
      "boostStrength": 0.0,
      "synPermActiveInc": 0.003,
      "synPermConnected": 0.2,
      "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.tm = TemporalMemory(**{
      "activationThreshold": 20,
      "cellsPerColumn": 32,
      "columnDimensions": (2048,),
      "initialPermanence": 0.24,
      "maxSegmentsPerCell": 128,
      "maxSynapsesPerSegment": 128,
      "minThreshold": 13,
      "maxNewSynapseCount": 31,
      "permanenceDecrement": 0.008,
      "permanenceIncrement": 0.04,
      "seed": 1960,
    })

    if self.useLikelihood:
      learningPeriod = math.floor(self.probationaryPeriod / 2.0)
      self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
        claLearningPeriod=learningPeriod,
        estimationSamples=self.probationaryPeriod - learningPeriod,
        reestimationPeriod=100
      )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    # Encode the input data record.
    self.valueEncoder.encodeIntoArray(
        inputData["value"], self.encodedValue)
    self.timestampEncoder.encodeIntoArray(
        inputData["timestamp"], self.encodedTimestamp)

    # Run the encoded data through the spatial pooler.
    self.sp.compute(np.concatenate((self.encodedTimestamp,
                                    self.encodedValue,)),
                    True, self.spOutput)
    # ......... (remainder of this example omitted) .........
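For orientation, here is a minimal sketch of how a NAB-style detector like this is driven. The constructor arguments and the record layout are assumptions based on NAB's AnomalyDetector interface, not something shown in this excerpt, and dataSet is a placeholder.

import datetime

# Assumed NAB-style construction: the base class derives inputMin/inputMax
# and probationaryPeriod from the data set and the probationary percent.
detector = NumentaTMLowLevelDetector(dataSet=dataSet,  # placeholder
                                     probationaryPercent=0.15)
detector.initialize()

# handleRecord returns (finalScore, rawScore), matching the extra
# "raw_score" header declared in getAdditionalHeaders().
scores = detector.handleRecord({
  "timestamp": datetime.datetime(2015, 4, 1, 13, 30),
  "value": 42.0,
})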
Example 2: DistalTimestamps1CellPerColumnDetector
# Required import: from nupic.encoders.date import DateEncoder [as alias]
# Or: from nupic.encoders.date.DateEncoder import encodeIntoArray [as alias]
class DistalTimestamps1CellPerColumnDetector(AnomalyDetector):
  """The 'numenta' detector, with the following changes:

  - Use pure Temporal Memory, not the classic TP that uses backtracking.
  - Don't spatial pool the timestamp. Pass it in as distal input.
  - 1 cell per column.
  - Use w=41 in the scalar encoding, rather than w=21, to make up for the
    lost timestamp input to the spatial pooler.
  """

  def __init__(self, *args, **kwargs):
    super(DistalTimestamps1CellPerColumnDetector, self).__init__(*args,
                                                                 **kwargs)
    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.activeExternalCells = []
    self.prevActiveExternalCells = []
    self.sp = None
    self.spOutput = None
    self.etm = None
    self.anomalyLikelihood = None

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution,
                                                       w=41,
                                                       seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = self.valueEncoder.getWidth()

    self.sp = SpatialPooler(**{
      "globalInhibition": True,
      "columnDimensions": [2048],
      "inputDimensions": [inputWidth],
      "potentialRadius": inputWidth,
      "numActiveColumnsPerInhArea": 40,
      "seed": 1956,
      "potentialPct": 0.8,
      "boostStrength": 0.0,
      "synPermActiveInc": 0.003,
      "synPermConnected": 0.2,
      "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.etm = ExtendedTemporalMemory(**{
      "activationThreshold": 13,
      "cellsPerColumn": 1,
      "columnDimensions": (2048,),
      "basalInputDimensions": (self.timestampEncoder.getWidth(),),
      "initialPermanence": 0.21,
      "maxSegmentsPerCell": 128,
      "maxSynapsesPerSegment": 32,
      "minThreshold": 10,
      "maxNewSynapseCount": 20,
      "permanenceDecrement": 0.1,
      "permanenceIncrement": 0.1,
      "seed": 1960,
      "checkInputs": False,
    })

    learningPeriod = math.floor(self.probationaryPeriod / 2.0)
    self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
      claLearningPeriod=learningPeriod,
      estimationSamples=self.probationaryPeriod - learningPeriod,
      reestimationPeriod=100
    )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    self.valueEncoder.encodeIntoArray(inputData["value"],
                                      self.encodedValue)
    self.timestampEncoder.encodeIntoArray(inputData["timestamp"],
                                          self.encodedTimestamp)

    self.prevActiveExternalCells = self.activeExternalCells
    self.activeExternalCells = self.encodedTimestamp.nonzero()[0]
    # ......... (remainder of this example omitted) .........
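The omitted tail is where the distal-timestamp idea pays off: only the scalar encoding is spatially pooled, and the timestamp bits enter the ExtendedTemporalMemory as basal (distal) input. A hedged sketch of that continuation, assuming the nupic.research ExtendedTemporalMemory compute signature of the era; the keyword names below are assumptions, not shown in this excerpt.

    # (continuing inside handleRecord)
    # Spatial pooling sees only the scalar value; the timestamp bypasses
    # the SP entirely.
    self.sp.compute(self.encodedValue, True, self.spOutput)
    activeColumns = self.spOutput.nonzero()[0]

    # Feed the timestamp bits in as external basal input so segments can
    # grow distal synapses onto them.
    self.etm.compute(sorted(activeColumns),
                     activeCellsExternalBasal=self.activeExternalCells,
                     reinforceCandidatesExternalBasal=self.prevActiveExternalCells,
                     growthCandidatesExternalBasal=self.prevActiveExternalCells)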
Example 3: runHotgym
# Required import: from nupic.encoders.date import DateEncoder [as alias]
# Or: from nupic.encoders.date.DateEncoder import encodeIntoArray [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    inputDimensions=(encodingWidth,),
    columnDimensions=(spParams["columnCount"],),
    potentialPct=spParams["potentialPct"],
    potentialRadius=encodingWidth,
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    boostStrength=spParams["boostStrength"],
    seed=spParams["seed"],
    wrapAround=True
  )

  tm = TemporalMemory(
    columnDimensions=(tmParams["columnCount"],),
    cellsPerColumn=tmParams["cellsPerColumn"],
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    connectedPermanence=spParams["synPermConnected"],
    minThreshold=tmParams["minThreshold"],
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    # Skip the two NuPIC metadata rows that follow the column names.
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords:
        break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the
      # encoders to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
        [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero.
      # This will be populated by the compute method below. It must have
      # the same dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute the Spatial Pooling algorithm over the input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute the Temporal Memory algorithm over the active mini-columns.
      tm.compute(activeColumnIndices, learn=True)
      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
      # ......... (remainder of this example omitted) .........
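The omitted tail follows the NuPIC quick-start this excerpt is taken from: the active cells and the bucket index go to the SDR classifier, which yields a one-step-ahead prediction. A hedged sketch of that continuation; the result-indexing conventions below are assumptions and may differ across NuPIC versions.

      # (continuing inside the for loop)
      classifierResult = classifier.compute(
        recordNum=count,
        patternNZ=activeCells,
        classification={
          "bucketIdx": bucketIdx,
          "actValue": consumption
        },
        learn=True,
        infer=True
      )

      # Keep the most confident 1-step-ahead prediction.
      oneStepConfidence, oneStep = sorted(
        zip(classifierResult[1], classifierResult["actualValues"]),
        key=lambda x: x[0],
        reverse=True
      )[0]
      results.append([oneStep, oneStepConfidence * 100])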
Example 4: runHotgym
# Required import: from nupic.encoders.date import DateEncoder [as alias]
# Or: from nupic.encoders.date.DateEncoder import encodeIntoArray [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    # How large the input encoding will be.
    inputDimensions=(encodingWidth,),
    # How many mini-columns will be in the Spatial Pooler.
    columnDimensions=(spParams["columnCount"],),
    # What percent of the column's receptive field is available for
    # potential synapses?
    potentialPct=spParams["potentialPct"],
    # This means that the input space has no topology.
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    # Roughly 2%, given that there is only one inhibition area because we
    # have turned on globalInhibition (40 / 2048 = 0.0195).
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    # How quickly synapses grow and degrade.
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    # boostStrength controls the strength of boosting. Boosting encourages
    # efficient usage of SP columns.
    boostStrength=spParams["boostStrength"],
    # Random number generator seed.
    seed=spParams["seed"],
    # TODO: is this useful?
    # Determines if inputs at the beginning and end of an input dimension
    # should be considered neighbors when mapping columns to inputs.
    wrapAround=False
  )

  tm = TemporalMemory(
    # Must be the same dimensions as the SP.
    columnDimensions=(tmParams["columnCount"],),
    # How many cells in each mini-column.
    cellsPerColumn=tmParams["cellsPerColumn"],
    # A segment is active if it has >= activationThreshold connected
    # synapses that are active due to infActiveState.
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    # TODO: This comes from the SP params; is this normal?
    connectedPermanence=spParams["synPermConnected"],
    # Minimum number of active synapses for a segment to be considered
    # during the search for the best-matching segments.
    minThreshold=tmParams["minThreshold"],
    # The max number of synapses added to a segment during learning.
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    # Skip the two NuPIC metadata rows that follow the column names.
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords:
        break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the
      # encoders to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      # ......... (remainder of this example omitted) .........
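Both runHotgym variants rely on imports and module-level constants that the excerpts do not show. A hedged sketch of the assumed surrounding scaffolding follows; the module paths reflect NuPIC 1.x (older releases used nupic.research paths), and both file paths are placeholders.

import csv
import datetime
import numpy
import yaml

from nupic.encoders.date import DateEncoder
from nupic.encoders.random_distributed_scalar import (
  RandomDistributedScalarEncoder)
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.algorithms.sdr_classifier_factory import SDRClassifierFactory

# Placeholder paths: the real examples point these at the hotgym model
# params YAML and the gym energy-consumption CSV.
_PARAMS_PATH = "model_params.yaml"
_INPUT_FILE_PATH = "rec-center-hourly.csv"

if __name__ == "__main__":
  runHotgym(100)  # process the first 100 records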