本文整理汇总了Python中nupic.algorithms.temporal_memory.TemporalMemory.getActiveCells方法的典型用法代码示例。如果您正苦于以下问题:Python TemporalMemory.getActiveCells方法的具体用法?Python TemporalMemory.getActiveCells怎么用?Python TemporalMemory.getActiveCells使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nupic.algorithms.temporal_memory.TemporalMemory
的用法示例。
在下文中一共展示了TemporalMemory.getActiveCells方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testZeroActiveColumns
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
def testZeroActiveColumns(self):
  """Computing with an empty set of active columns must clear all per-step
  cell state: active cells, winner cells, and predictive cells.
  """
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)

  previousActiveColumns = [0]
  previousActiveCells = [0, 1, 2, 3]
  expectedActiveCells = [4]

  # Grow a segment on cell 4 with connected synapses (permanence .5 >=
  # connectedPermanence) to every cell of column 0, so cell 4 becomes
  # predictive after column 0 activates.
  segment = tm.createSegment(expectedActiveCells[0])
  tm.connections.createSynapse(segment, previousActiveCells[0], .5)
  tm.connections.createSynapse(segment, previousActiveCells[1], .5)
  tm.connections.createSynapse(segment, previousActiveCells[2], .5)
  tm.connections.createSynapse(segment, previousActiveCells[3], .5)

  # First step with column 0 active: all three state sets must be non-empty.
  tm.compute(previousActiveColumns, True)
  self.assertFalse(len(tm.getActiveCells()) == 0)
  self.assertFalse(len(tm.getWinnerCells()) == 0)
  self.assertFalse(len(tm.getPredictiveCells()) == 0)

  # Second step with zero active columns: all state sets must be empty.
  zeroColumns = []
  tm.compute(zeroColumns, True)
  self.assertTrue(len(tm.getActiveCells()) == 0)
  self.assertTrue(len(tm.getWinnerCells()) == 0)
  self.assertTrue(len(tm.getPredictiveCells()) == 0)
示例2: testActivateCorrectlyPredictiveCells
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
def testActivateCorrectlyPredictiveCells(self):
  """A cell that was predictive (had an active segment) becomes the sole
  active cell of its column when that column activates next step.
  """
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)

  previousActiveColumns = [0]
  activeColumns = [1]
  previousActiveCells = [0, 1, 2, 3]
  expectedActiveCells = [4]

  # Cell 4 (first cell of column 1) gets a segment with connected synapses
  # to all cells of column 0, making it predictive after column 0 fires.
  activeSegment = tm.createSegment(expectedActiveCells[0])
  tm.connections.createSynapse(activeSegment, previousActiveCells[0], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[1], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[2], .5)
  tm.connections.createSynapse(activeSegment, previousActiveCells[3], .5)

  tm.compute(previousActiveColumns, True)
  self.assertEqual(expectedActiveCells, tm.getPredictiveCells())

  # The prediction was correct, so only the predicted cell activates
  # (no bursting of column 1).
  tm.compute(activeColumns, True)
  self.assertEqual(expectedActiveCells, tm.getActiveCells())
示例3: testBurstUnpredictedColumns
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
def testBurstUnpredictedColumns(self):
  """An active column with no predictive cells bursts: every cell in the
  column becomes active.
  """
  tm = TemporalMemory(
    columnDimensions=[32],
    cellsPerColumn=4,
    activationThreshold=3,
    initialPermanence=.21,
    connectedPermanence=.5,
    minThreshold=2,
    maxNewSynapseCount=3,
    permanenceIncrement=.10,
    permanenceDecrement=.10,
    predictedSegmentDecrement=0.0,
    seed=42)

  activeColumns = [0]
  # With 4 cells per column, column 0 owns cells 0-3; an unpredicted
  # activation must turn all of them on.
  burstingCells = [0, 1, 2, 3]

  tm.compute(activeColumns, True)
  self.assertEqual(burstingCells, tm.getActiveCells())
示例4: range
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
# Step 3: send this simple sequence to the temporal memory for learning.
# We repeat the sequence 10 times.
for _rep in range(10):

  # Send each letter in the sequence in order.
  for j in range(5):
    # Convert the j-th binary input vector into the set of indices of its
    # on-bits. (Renamed the comprehension variables so they no longer
    # shadow the loop indices.)
    activeColumns = set(
      idx for idx, bit in zip(count(), x[j]) if bit == 1)

    # The compute method performs one step of learning and/or inference. Note:
    # here we just perform learning but you can perform prediction/inference and
    # learning in the same step if you want (online learning).
    tm.compute(activeColumns, learn = True)

    # The following print statements can be ignored.
    # Useful for tracing internal states.
    print("active cells " + str(tm.getActiveCells()))
    print("predictive cells " + str(tm.getPredictiveCells()))
    print("winner cells " + str(tm.getWinnerCells()))
    print("# of active segments " + str(tm.connections.numSegments()))

  # The reset command tells the TM that a sequence just ended and essentially
  # zeros out all the states. It is not strictly necessary but it's a bit
  # messier without resets, and the TM learns quicker with resets.
  tm.reset()
#######################################################################
#
# Step 3: send the same sequence of vectors and look at predictions made by
# temporal memory
for j in range(5):
示例5: runHotgym
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
def runHotgym(numRecords):
with open(_PARAMS_PATH, "r") as f:
modelParams = yaml.safe_load(f)["modelParams"]
enParams = modelParams["sensorParams"]["encoders"]
spParams = modelParams["spParams"]
tmParams = modelParams["tmParams"]
timeOfDayEncoder = DateEncoder(
timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
weekendEncoder = DateEncoder(
weekend=enParams["timestamp_weekend"]["weekend"])
scalarEncoder = RandomDistributedScalarEncoder(
enParams["consumption"]["resolution"])
encodingWidth = (timeOfDayEncoder.getWidth()
+ weekendEncoder.getWidth()
+ scalarEncoder.getWidth())
sp = SpatialPooler(
inputDimensions=(encodingWidth,),
columnDimensions=(spParams["columnCount"],),
potentialPct=spParams["potentialPct"],
potentialRadius=encodingWidth,
globalInhibition=spParams["globalInhibition"],
localAreaDensity=spParams["localAreaDensity"],
numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
synPermInactiveDec=spParams["synPermInactiveDec"],
synPermActiveInc=spParams["synPermActiveInc"],
synPermConnected=spParams["synPermConnected"],
boostStrength=spParams["boostStrength"],
seed=spParams["seed"],
wrapAround=True
)
tm = TemporalMemory(
columnDimensions=(tmParams["columnCount"],),
cellsPerColumn=tmParams["cellsPerColumn"],
activationThreshold=tmParams["activationThreshold"],
initialPermanence=tmParams["initialPerm"],
connectedPermanence=spParams["synPermConnected"],
minThreshold=tmParams["minThreshold"],
maxNewSynapseCount=tmParams["newSynapseCount"],
permanenceIncrement=tmParams["permanenceInc"],
permanenceDecrement=tmParams["permanenceDec"],
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
seed=tmParams["seed"]
)
classifier = SDRClassifierFactory.create()
results = []
with open(_INPUT_FILE_PATH, "r") as fin:
reader = csv.reader(fin)
headers = reader.next()
reader.next()
reader.next()
for count, record in enumerate(reader):
if count >= numRecords: break
# Convert data string into Python date object.
dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
# Convert data value string into float.
consumption = float(record[1])
# To encode, we need to provide zero-filled numpy arrays for the encoders
# to populate.
timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
weekendBits = numpy.zeros(weekendEncoder.getWidth())
consumptionBits = numpy.zeros(scalarEncoder.getWidth())
# Now we call the encoders to create bit representations for each value.
timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
weekendEncoder.encodeIntoArray(dateString, weekendBits)
scalarEncoder.encodeIntoArray(consumption, consumptionBits)
# Concatenate all these encodings into one large encoding for Spatial
# Pooling.
encoding = numpy.concatenate(
[timeOfDayBits, weekendBits, consumptionBits]
)
# Create an array to represent active columns, all initially zero. This
# will be populated by the compute method below. It must have the same
# dimensions as the Spatial Pooler.
activeColumns = numpy.zeros(spParams["columnCount"])
# Execute Spatial Pooling algorithm over input space.
sp.compute(encoding, True, activeColumns)
activeColumnIndices = numpy.nonzero(activeColumns)[0]
# Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)
activeCells = tm.getActiveCells()
# Get the bucket info for this input value for classification.
bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
#.........这里部分代码省略.........
示例6: testAddSegmentToCellWithFewestSegments
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
def testAddSegmentToCellWithFewestSegments(self):
  """When a bursting column grows a new segment, it must grow on one of the
  cells with the fewest existing segments, chosen randomly; over many seeds
  both segment-free cells (1 and 2) must be picked at least once.
  """
  grewOnCell1 = False
  grewOnCell2 = False

  # Run with many seeds so the random tie-break exercises both candidates.
  for seed in xrange(100):
    tm = TemporalMemory(
      columnDimensions=[32],
      cellsPerColumn=4,
      activationThreshold=3,
      initialPermanence=.2,
      connectedPermanence=.50,
      minThreshold=2,
      maxNewSynapseCount=4,
      permanenceIncrement=.10,
      permanenceDecrement=.10,
      predictedSegmentDecrement=0.02,
      seed=seed)

    prevActiveColumns = [1, 2, 3, 4]
    activeColumns = [0]
    prevActiveCells = [4, 5, 6, 7]
    nonMatchingCells = [0, 3]
    activeCells = [0, 1, 2, 3]

    # Cells 0 and 3 already own one (non-matching) segment each, so the
    # new segment should grow on cell 1 or cell 2 (zero segments).
    segment1 = tm.createSegment(nonMatchingCells[0])
    tm.connections.createSynapse(segment1, prevActiveCells[0], .5)
    segment2 = tm.createSegment(nonMatchingCells[1])
    tm.connections.createSynapse(segment2, prevActiveCells[1], .5)

    tm.compute(prevActiveColumns, True)
    tm.compute(activeColumns, True)

    # Column 0 bursts: all of its cells become active.
    self.assertEqual(activeCells, tm.getActiveCells())

    # One new segment grew; the pre-existing segments are untouched.
    self.assertEqual(3, tm.connections.numSegments())
    self.assertEqual(1, tm.connections.numSegments(0))
    self.assertEqual(1, tm.connections.numSegments(3))
    self.assertEqual(1, tm.connections.numSynapses(segment1))
    self.assertEqual(1, tm.connections.numSynapses(segment2))

    # The new segment is on cell 1 or cell 2 -- record which.
    segments = list(tm.connections.segmentsForCell(1))
    if len(segments) == 0:
      segments2 = list(tm.connections.segmentsForCell(2))
      self.assertFalse(len(segments2) == 0)
      grewOnCell2 = True
      segments.append(segments2[0])
    else:
      grewOnCell1 = True

    self.assertEqual(1, len(segments))
    synapses = list(tm.connections.synapsesForSegment(segments[0]))
    self.assertEqual(4, len(synapses))

    # The new segment connects to exactly one cell in each previously
    # active column, at initialPermanence.
    columnChecklist = set(prevActiveColumns)
    for synapse in synapses:
      synapseData = tm.connections.dataForSynapse(synapse)
      self.assertAlmostEqual(.2, synapseData.permanence)
      column = tm.columnForCell(synapseData.presynapticCell)
      self.assertTrue(column in columnChecklist)
      columnChecklist.remove(column)
    self.assertTrue(len(columnChecklist) == 0)

  # Across 100 seeds, both candidate cells must have been chosen.
  self.assertTrue(grewOnCell1)
  self.assertTrue(grewOnCell2)
示例7: runHotgym
# 需要导入模块: from nupic.algorithms.temporal_memory import TemporalMemory [as 别名]
# 或者: from nupic.algorithms.temporal_memory.TemporalMemory import getActiveCells [as 别名]
#.........这里部分代码省略.........
# be considered neighbors when mapping columns to inputs.
wrapAround=False
)
tm = TemporalMemory(
# Must be the same dimensions as the SP
columnDimensions=(tmParams["columnCount"],),
# How many cells in each mini-column.
cellsPerColumn=tmParams["cellsPerColumn"],
# A segment is active if it has >= activationThreshold connected synapses
# that are active due to infActiveState
activationThreshold=tmParams["activationThreshold"],
initialPermanence=tmParams["initialPerm"],
# TODO: This comes from the SP params, is this normal
connectedPermanence=spParams["synPermConnected"],
# Minimum number of active synapses for a segment to be considered during
# search for the best-matching segments.
minThreshold=tmParams["minThreshold"],
# The max number of synapses added to a segment during learning
maxNewSynapseCount=tmParams["newSynapseCount"],
permanenceIncrement=tmParams["permanenceInc"],
permanenceDecrement=tmParams["permanenceDec"],
predictedSegmentDecrement=0.0,
maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
seed=tmParams["seed"]
)
classifier = SDRClassifierFactory.create()
results = []
with open(_INPUT_FILE_PATH, "r") as fin:
reader = csv.reader(fin)
headers = reader.next()
reader.next()
reader.next()
for count, record in enumerate(reader):
if count >= numRecords: break
# Convert data string into Python date object.
dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
# Convert data value string into float.
consumption = float(record[1])
# To encode, we need to provide zero-filled numpy arrays for the encoders
# to populate.
timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
weekendBits = numpy.zeros(weekendEncoder.getWidth())
consumptionBits = numpy.zeros(scalarEncoder.getWidth())
# Now we call the encoders create bit representations for each value.
timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
weekendEncoder.encodeIntoArray(dateString, weekendBits)
scalarEncoder.encodeIntoArray(consumption, consumptionBits)
# Concatenate all these encodings into one large encoding for Spatial
# Pooling.
encoding = numpy.concatenate(
[timeOfDayBits, weekendBits, consumptionBits]
)
# Create an array to represent active columns, all initially zero. This
# will be populated by the compute method below. It must have the same
# dimensions as the Spatial Pooler.
activeColumns = numpy.zeros(spParams["columnCount"])
# Execute Spatial Pooling algorithm over input space.
sp.compute(encoding, True, activeColumns)
activeColumnIndices = numpy.nonzero(activeColumns)[0]
# Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)
activeCells = tm.getActiveCells()
# Get the bucket info for this input value for classification.
bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
# Run classifier to translate active cells back to scalar value.
classifierResult = classifier.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx,
"actValue": consumption
},
learn=True,
infer=True
)
# Print the best prediction for 1 step out.
oneStepConfidence, oneStep = sorted(
zip(classifierResult[1], classifierResult["actualValues"]),
reverse=True
)[0]
print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
results.append([oneStep, oneStepConfidence * 100, None, None])
return results