This article collects typical code examples of the Python method nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder.encodeIntoArray. If you are wondering what RandomDistributedScalarEncoder.encodeIntoArray does and how to use it in Python, the curated examples below should help. You can also explore further usage examples of its containing class, nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder.
The following shows 8 code examples of RandomDistributedScalarEncoder.encodeIntoArray, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
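Before diving into the examples, here is a minimal, self-contained sketch of the call pattern they all share. This sketch is illustrative, not taken from the examples below; the resolution value 0.88 is an arbitrary choice. The caller allocates a zero-filled numpy array of the encoder's width, and encodeIntoArray writes the active bits into it in place rather than returning a new array.

# Minimal usage sketch (Python 2, matching the NuPIC codebase; resolution chosen arbitrarily).
import numpy
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder

encoder = RandomDistributedScalarEncoder(resolution=0.88)
output = numpy.zeros(encoder.getWidth(), dtype=numpy.uint32)
encoder.encodeIntoArray(23.0, output)   # fills `output` in place
print output.nonzero()[0]               # indices of the active bits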
Example 1: testVerbosity
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
def testVerbosity(self):
  """
  Test that nothing is printed out when verbosity=0
  """
  _stdout = sys.stdout
  sys.stdout = _stringio = StringIO()
  encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0,
                                           verbosity=0)
  output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
  encoder.encodeIntoArray(23.0, output)
  encoder.getBucketIndices(23.0)
  sys.stdout = _stdout

  self.assertEqual(len(_stringio.getvalue()), 0,
                   "zero verbosity doesn't lead to zero output")
Example 2: runSimpleSequence
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
def runSimpleSequence(self, resets, repetitions=1):
  scalarEncoder = RandomDistributedScalarEncoder(0.88, n=2048, w=41)
  instances = self._createInstances(cellsPerColumn=32)
  times = [0.0] * len(self.contestants)

  duration = 10000 * repetitions
  increment = 4
  sequenceLength = 25
  sequence = (i % (sequenceLength * 4)
              for i in xrange(0, duration * increment, increment))
  t = 0

  encodedValue = numpy.zeros(2048, dtype=numpy.int32)

  for value in sequence:
    scalarEncoder.encodeIntoArray(value, output=encodedValue)
    activeBits = encodedValue.nonzero()[0]

    for i in xrange(len(self.contestants)):
      tmInstance = instances[i]
      computeFn = self.contestants[i][2]

      if resets:
        if value == 0:
          tmInstance.reset()

      start = time.clock()
      computeFn(tmInstance, encodedValue, activeBits)
      times[i] += time.clock() - start

    printProgressBar(t, duration, 50)
    t += 1

  clearProgressBar(50)

  results = []
  for i in xrange(len(self.contestants)):
    name = self.contestants[i][3]
    results.append((name,
                    times[i],))

  return results
Example 3: runHotgym
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
def runHotgym(self, cellsPerColumn, repetitions=1):
  scalarEncoder = RandomDistributedScalarEncoder(0.88, n=2048, w=41)
  instances = self._createInstances(cellsPerColumn=cellsPerColumn)
  times = [0.0] * len(self.contestants)

  t = 0
  duration = HOTGYM_LENGTH * repetitions

  for _ in xrange(repetitions):
    with open(HOTGYM_PATH) as fin:
      reader = csv.reader(fin)
      reader.next()
      reader.next()
      reader.next()

      encodedValue = numpy.zeros(2048, dtype=numpy.uint32)

      for timeStr, valueStr in reader:
        value = float(valueStr)
        scalarEncoder.encodeIntoArray(value, output=encodedValue)
        activeBits = encodedValue.nonzero()[0]

        for i in xrange(len(self.contestants)):
          tmInstance = instances[i]
          computeFn = self.contestants[i][2]

          start = time.clock()
          computeFn(tmInstance, encodedValue, activeBits)
          times[i] += time.clock() - start

        printProgressBar(t, duration, 50)
        t += 1

  clearProgressBar(50)

  results = []
  for i in xrange(len(self.contestants)):
    name = self.contestants[i][3]
    results.append((name,
                    times[i],))

  return results
Example 4: BaseNetwork
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
#......... (part of the code omitted here) .........
"cellsPerColumn": self.cellsPerColumn,
"columnDimensions": (self.numColumns,),
"initialPermanence": 0.24,
"maxSegmentsPerCell": 128,
"maxSynapsesPerSegment": 128,
"minThreshold": 13,
"maxNewSynapseCount": 31,
"permanenceDecrement": 0.008,
"permanenceIncrement": 0.04,
"seed": 1960,
}
self.tm = TemporalMemory(**self.tmParams)
# Sanity
if self.runSanity:
self.sanity = sanity.SPTMInstance(self.sp, self.tm)
def handleRecord(self, scalarValue, label=None, skipEncoding=False,
learningMode=True):
"""Process one record."""
if self.runSanity:
self.sanity.waitForUserContinue()
# Encode the input data record if it hasn't already been encoded.
if not skipEncoding:
self.encodeValue(scalarValue)
# Run the encoded data through the spatial pooler
self.sp.compute(self.encoderOutput, learningMode, self.spOutput)
self.spOutputNZ = self.spOutput.nonzero()[0]
# WARNING: this needs to happen here, before the TM runs.
self.previouslyPredictiveCells = self.tm.getPredictiveCells()
# Run SP output through temporal memory
self.tm.compute(self.spOutputNZ)
self.predictedActiveCells = _computePredictedActiveCells(
self.tm.getActiveCells(), self.previouslyPredictiveCells)
# Anomaly score
self.anomalyScore = _computeAnomalyScore(self.spOutputNZ,
self.previouslyPredictiveCells,
self.cellsPerColumn)
# Run Sanity
if self.runSanity:
self.sanity.appendTimestep(self.getEncoderOutputNZ(),
self.getSpOutputNZ(),
self.previouslyPredictiveCells,
{
'value': scalarValue,
'label':label
})
def encodeValue(self, scalarValue):
self.encoder.encodeIntoArray(scalarValue, self.encoderOutput)
def getEncoderResolution(self):
"""
Compute the Random Distributed Scalar Encoder (RDSE) resolution. It's
calculated from the data min and max, specific to the data stream.
"""
if self.inputMin is None or self.inputMax is None:
return self.defaultEncoderResolution
else:
rangePadding = abs(self.inputMax - self.inputMin) * 0.2
minVal = self.inputMin - rangePadding
maxVal = (self.inputMax + rangePadding
if self.inputMin != self.inputMax
else self.inputMin + 1)
numBuckets = 130.0
return max(self.defaultEncoderResolution, (maxVal - minVal) / numBuckets)
def getEncoderOutputNZ(self):
return self.encoderOutput.nonzero()[0]
def getSpOutputNZ(self):
return self.spOutputNZ
def getTmPredictiveCellsNZ(self):
return self.tm.getPredictiveCells()
def getTmActiveCellsNZ(self):
return self.tm.getActiveCells()
def getTmPredictedActiveCellsNZ(self):
return self.predictedActiveCells
def getRawAnomalyScore(self):
return self.anomalyScore
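The resolution logic in getEncoderResolution above also appears in Examples 5 and 7: the input range is padded by 20% on each side and divided into roughly 130 buckets. As a worked illustration (the data range below is hypothetical, not from the source):

# Hypothetical worked example of the resolution formula used above.
inputMin, inputMax = 10.0, 90.0                  # assumed data range
rangePadding = abs(inputMax - inputMin) * 0.2    # 16.0
minVal = inputMin - rangePadding                 # -6.0
maxVal = inputMax + rangePadding                 # 106.0
resolution = (maxVal - minVal) / 130.0           # 112.0 / 130 ~= 0.862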
Example 5: NumentaTMLowLevelDetector
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
class NumentaTMLowLevelDetector(AnomalyDetector):
  """The 'numentaTM' detector, but not using the CLAModel or network API."""

  def __init__(self, *args, **kwargs):
    super(NumentaTMLowLevelDetector, self).__init__(*args, **kwargs)
    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.sp = None
    self.spOutput = None
    self.tm = None
    self.anomalyLikelihood = None

    # Set this to False if you want to get results based on raw scores
    # without using AnomalyLikelihood. This will give worse results, but is
    # useful for checking the efficacy of AnomalyLikelihood. You will need
    # to re-optimize the thresholds when running with this setting.
    self.useLikelihood = True

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    # Initialize the RDSE with a resolution; calculated from the data min and
    # max, the resolution is specific to the data stream.
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution, seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    # Initialize the timestamp encoder
    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49,))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = (self.timestampEncoder.getWidth() +
                  self.valueEncoder.getWidth())

    self.sp = SpatialPooler(**{
      "globalInhibition": True,
      "columnDimensions": [2048],
      "inputDimensions": [inputWidth],
      "potentialRadius": inputWidth,
      "numActiveColumnsPerInhArea": 40,
      "seed": 1956,
      "potentialPct": 0.8,
      "boostStrength": 0.0,
      "synPermActiveInc": 0.003,
      "synPermConnected": 0.2,
      "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.tm = TemporalMemory(**{
      "activationThreshold": 20,
      "cellsPerColumn": 32,
      "columnDimensions": (2048,),
      "initialPermanence": 0.24,
      "maxSegmentsPerCell": 128,
      "maxSynapsesPerSegment": 128,
      "minThreshold": 13,
      "maxNewSynapseCount": 31,
      "permanenceDecrement": 0.008,
      "permanenceIncrement": 0.04,
      "seed": 1960,
    })

    if self.useLikelihood:
      learningPeriod = math.floor(self.probationaryPeriod / 2.0)
      self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
        claLearningPeriod=learningPeriod,
        estimationSamples=self.probationaryPeriod - learningPeriod,
        reestimationPeriod=100
      )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    # Encode the input data record
    self.valueEncoder.encodeIntoArray(
      inputData["value"], self.encodedValue)
    self.timestampEncoder.encodeIntoArray(
      inputData["timestamp"], self.encodedTimestamp)

    # Run the encoded data through the spatial pooler
    self.sp.compute(np.concatenate((self.encodedTimestamp,
                                    self.encodedValue,)),
                    True, self.spOutput)
    #......... (part of the code omitted here) .........
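The elided tail of handleRecord is where the raw anomaly score would be turned into the returned tuple. As a hedged sketch of the usual NAB pattern (rawScore is assumed to come from the omitted lines; this is not the detector's actual code):

# Sketch only: assumes `rawScore` was computed by the omitted lines above.
if self.useLikelihood:
  anomalyScore = self.anomalyLikelihood.anomalyProbability(
    inputData["value"], rawScore, inputData["timestamp"])
else:
  anomalyScore = rawScore
return (anomalyScore, rawScore)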
Example 6: runHotgym
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    inputDimensions=(encodingWidth,),
    columnDimensions=(spParams["columnCount"],),
    potentialPct=spParams["potentialPct"],
    potentialRadius=encodingWidth,
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    boostStrength=spParams["boostStrength"],
    seed=spParams["seed"],
    wrapAround=True
  )

  tm = TemporalMemory(
    columnDimensions=(tmParams["columnCount"],),
    cellsPerColumn=tmParams["cellsPerColumn"],
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    connectedPermanence=spParams["synPermConnected"],
    minThreshold=tmParams["minThreshold"],
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords: break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the encoders
      # to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
        [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero. This
      # will be populated by the compute method below. It must have the same
      # dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute Spatial Pooling algorithm over input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute Temporal Memory algorithm over active mini-columns.
      tm.compute(activeColumnIndices, learn=True)
      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
      #......... (part of the code omitted here) .........
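The omitted remainder of this example would typically feed the active cells and bucket index into the SDR classifier created above. A hedged sketch of that step, reusing names from the surrounding code (not the example's actual elided lines):

# Sketch only: one plausible continuation using NuPIC's SDR classifier API.
classifierResult = classifier.compute(
  recordNum=count, patternNZ=activeCells,
  classification={"bucketIdx": bucketIdx, "actValue": consumption},
  learn=True, infer=True
)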
Example 7: DistalTimestamps1CellPerColumnDetector
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
class DistalTimestamps1CellPerColumnDetector(AnomalyDetector):
  """The 'numenta' detector, with the following changes:

  - Use pure Temporal Memory, not the classic TP that uses backtracking.
  - Don't spatial pool the timestamp. Pass it in as distal input.
  - 1 cell per column.
  - Use w=41 in the scalar encoding, rather than w=21, to make up for the
    lost timestamp input to the spatial pooler.
  """

  def __init__(self, *args, **kwargs):
    super(DistalTimestamps1CellPerColumnDetector, self).__init__(*args,
                                                                 **kwargs)
    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.activeExternalCells = []
    self.prevActiveExternalCells = []
    self.sp = None
    self.spOutput = None
    self.etm = None
    self.anomalyLikelihood = None

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution,
                                                       w=41,
                                                       seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49,))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = self.valueEncoder.getWidth()

    self.sp = SpatialPooler(**{
      "globalInhibition": True,
      "columnDimensions": [2048],
      "inputDimensions": [inputWidth],
      "potentialRadius": inputWidth,
      "numActiveColumnsPerInhArea": 40,
      "seed": 1956,
      "potentialPct": 0.8,
      "boostStrength": 0.0,
      "synPermActiveInc": 0.003,
      "synPermConnected": 0.2,
      "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.etm = ExtendedTemporalMemory(**{
      "activationThreshold": 13,
      "cellsPerColumn": 1,
      "columnDimensions": (2048,),
      "basalInputDimensions": (self.timestampEncoder.getWidth(),),
      "initialPermanence": 0.21,
      "maxSegmentsPerCell": 128,
      "maxSynapsesPerSegment": 32,
      "minThreshold": 10,
      "maxNewSynapseCount": 20,
      "permanenceDecrement": 0.1,
      "permanenceIncrement": 0.1,
      "seed": 1960,
      "checkInputs": False,
    })

    learningPeriod = math.floor(self.probationaryPeriod / 2.0)
    self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
      claLearningPeriod=learningPeriod,
      estimationSamples=self.probationaryPeriod - learningPeriod,
      reestimationPeriod=100
    )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    self.valueEncoder.encodeIntoArray(inputData["value"],
                                      self.encodedValue)
    self.timestampEncoder.encodeIntoArray(inputData["timestamp"],
                                          self.encodedTimestamp)

    self.prevActiveExternalCells = self.activeExternalCells
    self.activeExternalCells = self.encodedTimestamp.nonzero()[0]
    #......... (part of the code omitted here) .........
Example 8: runHotgym
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or alternatively: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import encodeIntoArray [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
    enParams = modelParams["sensorParams"]["encoders"]
    spParams = modelParams["spParams"]
    tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
    timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
    weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
    enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
    # How large the input encoding will be.
    inputDimensions=(encodingWidth,),
    # How many mini-columns will be in the Spatial Pooler.
    columnDimensions=(spParams["columnCount"],),
    # What percent of the column's receptive field is available for potential
    # synapses?
    potentialPct=spParams["potentialPct"],
    # This means that the input space has no topology.
    globalInhibition=spParams["globalInhibition"],
    localAreaDensity=spParams["localAreaDensity"],
    # Roughly 2%, given that there is only one inhibition area because we have
    # turned on globalInhibition (40 / 2048 = 0.0195).
    numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
    # How quickly synapses grow and degrade.
    synPermInactiveDec=spParams["synPermInactiveDec"],
    synPermActiveInc=spParams["synPermActiveInc"],
    synPermConnected=spParams["synPermConnected"],
    # boostStrength controls the strength of boosting. Boosting encourages
    # efficient usage of SP columns.
    boostStrength=spParams["boostStrength"],
    # Random number generator seed.
    seed=spParams["seed"],
    # TODO: is this useful?
    # Determines if inputs at the beginning and end of an input dimension should
    # be considered neighbors when mapping columns to inputs.
    wrapAround=False
  )

  tm = TemporalMemory(
    # Must be the same dimensions as the SP.
    columnDimensions=(tmParams["columnCount"],),
    # How many cells in each mini-column.
    cellsPerColumn=tmParams["cellsPerColumn"],
    # A segment is active if it has >= activationThreshold connected synapses
    # that are active due to infActiveState.
    activationThreshold=tmParams["activationThreshold"],
    initialPermanence=tmParams["initialPerm"],
    # TODO: This comes from the SP params, is this normal?
    connectedPermanence=spParams["synPermConnected"],
    # Minimum number of active synapses for a segment to be considered during
    # search for the best-matching segments.
    minThreshold=tmParams["minThreshold"],
    # The max number of synapses added to a segment during learning.
    maxNewSynapseCount=tmParams["newSynapseCount"],
    permanenceIncrement=tmParams["permanenceInc"],
    permanenceDecrement=tmParams["permanenceDec"],
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
    seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords: break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the encoders
      # to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      #......... (part of the code omitted here) .........