This page collects typical usage examples of the Python class nupic.research.TP.TP. If you have been wondering what exactly the Python TP class does, how to use it, or where to find examples of it in real code, the curated class examples below may help.
A total of 15 code examples of the TP class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
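Before the individual examples, here is a minimal, self-contained sketch of how the TP class is typically driven. It combines the constructor parameters from Example 15 with the compute/reset/getPredictedState calls used throughout the examples below; the toy sequence and all parameter values are illustrative only and are not taken from any single example.

import numpy
from nupic.research.TP import TP

# Build a small temporal pooler: 50 columns, 2 cells per column.
tp = TP(numberOfCols=50, cellsPerColumn=2,
        initialPerm=0.5, connectedPerm=0.5,
        minThreshold=10, newSynapseCount=10,
        permanenceInc=0.1, permanenceDec=0.0,
        activationThreshold=8,
        globalDecay=0, burnIn=1,
        checkSynapseConsistency=False,
        pamLength=10)

# A toy repeating sequence of five sparse binary vectors.
sequence = []
for start in xrange(0, 50, 10):
  pattern = numpy.zeros(50, dtype="int32")
  pattern[start:start + 10] = 1
  sequence.append(pattern)

# Learn the sequence several times, resetting between repetitions.
for _ in xrange(10):
  for pattern in sequence:
    tp.compute(pattern, enableLearn=True, computeInfOutput=False)
  tp.reset()

# Run inference and read out the columns predicted for the next step.
for pattern in sequence:
  tp.compute(pattern, enableLearn=False, computeInfOutput=True)
  predictedColumns = tp.getPredictedState().max(axis=1).nonzero()[0]
  print "active:", pattern.nonzero()[0], "-> predicted:", predictedColumns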
Example 1: __init__
def __init__(self,
             numberOfCols=500,
             burnIn=2,                # Used for evaluating the prediction score
             collectStats=False,      # If true, collect training and inference stats
             seed=42,
             verbosity=VERBOSITY,
             predictionMethod='random',  # "random" or "zeroth"
             **kwargs):

  # Init the base class
  TP.__init__(self,
              numberOfCols=numberOfCols,
              cellsPerColumn=1,
              burnIn=burnIn,
              collectStats=collectStats,
              seed=seed,
              verbosity=verbosity)

  self.predictionMethod = predictionMethod

  #---------------------------------------------------------------------------
  # Create basic data structures for keeping track of column statistics

  # Number of times each column has been active during learning
  self.columnCount = numpy.zeros(numberOfCols, dtype="int32")

  # Running average of input density
  self.averageDensity = 0.05
Example 2: testCheckpointMiddleOfSequence
def testCheckpointMiddleOfSequence(self):
  # Create a model and give it some inputs to learn.
  tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
  sequences = [self.generateSequence() for _ in xrange(5)]
  train = list(itertools.chain.from_iterable(sequences[:3] +
                                             [sequences[3][:5]]))
  for bottomUpInput in train:
    if bottomUpInput is None:
      tp1.reset()
    else:
      tp1.compute(bottomUpInput, True, True)

  # Serialize and deserialize the TP.
  checkpointPath = os.path.join(self._tmpDir, 'a')
  tp1.saveToFile(checkpointPath)
  tp2 = pickle.loads(pickle.dumps(tp1))
  tp2.loadFromFile(checkpointPath)

  # Check that the TPs are the same.
  self.assertTPsEqual(tp1, tp2)

  # Feed some data into the models.
  test = list(itertools.chain.from_iterable([sequences[3][5:]] +
                                            sequences[3:]))
  for bottomUpInput in test:
    if bottomUpInput is None:
      tp1.reset()
      tp2.reset()
    else:
      result1 = tp1.compute(bottomUpInput, True, True)
      result2 = tp2.compute(bottomUpInput, True, True)
      self.assertTPsEqual(tp1, tp2)
      self.assertTrue(numpy.array_equal(result1, result2))
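The test above relies on the two-step checkpoint pattern used throughout these examples: saveToFile/loadFromFile serialize the heavyweight internal state to an external file, while pickle handles the Python-level attributes of the wrapper object. Distilled to its core (a sketch using exactly the calls from the test; tp1 stands for a trained TP and checkpointPath for any writable path):

import pickle

tp1.saveToFile(checkpointPath)         # internal state goes to an external file
tp2 = pickle.loads(pickle.dumps(tp1))  # Python-level attributes travel via pickle
tp2.loadFromFile(checkpointPath)       # restore the saved state into the copy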
Example 3: reset
def reset(self):
  """ Reset the state of all cells.

  This is normally used between sequences while training. All internal states
  are reset to 0.
  """
  if self.verbosity >= 3:
    print "TP Reset"
  self._setStatePointers()
  self.cells4.reset()
  TP.reset(self)
Example 4: testCheckpointMiddleOfSequence2
def testCheckpointMiddleOfSequence2(self):
  """More complex test of checkpointing in the middle of a sequence."""
  tp1 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
           False, 1960, 0, False, '', 3, 10, 5, 0, 32, 128, 32, 'normal')
  tp2 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
           False, 1960, 0, False, '', 3, 10, 5, 0, 32, 128, 32, 'normal')

  with resource_stream(__name__, 'data/tp_input.csv') as fin:
    reader = csv.reader(fin)
    records = []
    for bottomUpInStr in fin:
      bottomUpIn = numpy.array(eval('[' + bottomUpInStr.strip() + ']'),
                               dtype='int32')
      records.append(bottomUpIn)

  i = 1
  for r in records[:250]:
    print i
    i += 1
    output1 = tp1.compute(r, True, True)
    output2 = tp2.compute(r, True, True)
    self.assertTrue(numpy.array_equal(output1, output2))

  print 'Serializing and deserializing models.'

  savePath1 = os.path.join(self._tmpDir, 'tp1.bin')
  tp1.saveToFile(savePath1)
  tp3 = pickle.loads(pickle.dumps(tp1))
  tp3.loadFromFile(savePath1)

  savePath2 = os.path.join(self._tmpDir, 'tp2.bin')
  tp2.saveToFile(savePath2)
  tp4 = pickle.loads(pickle.dumps(tp2))
  tp4.loadFromFile(savePath2)

  self.assertTPsEqual(tp1, tp3)
  self.assertTPsEqual(tp2, tp4)

  for r in records[250:]:
    print i
    i += 1
    out1 = tp1.compute(r, True, True)
    out2 = tp2.compute(r, True, True)
    out3 = tp3.compute(r, True, True)
    out4 = tp4.compute(r, True, True)
    self.assertTrue(numpy.array_equal(out1, out2))
    self.assertTrue(numpy.array_equal(out1, out3))
    self.assertTrue(numpy.array_equal(out1, out4))

  self.assertTPsEqual(tp1, tp2)
  self.assertTPsEqual(tp1, tp3)
  self.assertTPsEqual(tp2, tp4)
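The positional constructor calls above are hard to read. Assuming the argument order matches the keyword parameter list shown in Example 9 below (an assumption; this mapping has not been checked against the library source), the first call corresponds roughly to the following keyword form:

# Hypothetical keyword equivalent of TP(2048, 32, 0.21, ...) above, derived by
# lining the positional values up with the signature shown in Example 9.
tp1 = TP(numberOfCols=2048, cellsPerColumn=32,
         initialPerm=0.21, connectedPerm=0.5,
         minThreshold=11, newSynapseCount=20,
         permanenceInc=0.1, permanenceDec=0.1,
         permanenceMax=1.0, globalDecay=0.0,
         activationThreshold=14, doPooling=False,
         segUpdateValidDuration=5, burnIn=2,
         collectStats=False, seed=1960, verbosity=0,
         checkSynapseConsistency=False,
         trivialPredictionMethods='', pamLength=3,
         maxInfBacktrack=10, maxLrnBacktrack=5,
         maxAge=0, maxSeqLength=32,
         maxSegmentsPerCell=128, maxSynapsesPerSegment=32,
         outputType='normal')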
Example 5: __init__
def __init__(self,
             numberOfCols=16384, cellsPerColumn=8,
             initialPerm=0.5, connectedPerm=0.5,
             minThreshold=164, newSynapseCount=164,
             permanenceInc=0.1, permanenceDec=0.0,
             activationThreshold=164,
             pamLength=10,
             checkpointDir=None):
  self.tp = TP(numberOfCols=numberOfCols, cellsPerColumn=cellsPerColumn,
               initialPerm=initialPerm, connectedPerm=connectedPerm,
               minThreshold=minThreshold, newSynapseCount=newSynapseCount,
               permanenceInc=permanenceInc, permanenceDec=permanenceDec,
               # 1/2 of the on bits = (16384 * .02) / 2 ~= 164
               activationThreshold=activationThreshold,
               globalDecay=0, burnIn=1,
               #verbosity=3,  # higher values print more debugging output
               checkSynapseConsistency=False,
               pamLength=pamLength)

  self.checkpointDir = checkpointDir
  self.checkpointPklPath = None
  self.checkpointDataPath = None
  self._initCheckpoint()
Example 6: _initEphemerals
def _initEphemerals(self):
  """
  Initialize all ephemeral members after being restored to a pickled state.
  """
  TP._initEphemerals(self)

  # ---------------------------------------------------------------------------
  # cells4 specific initialization

  # If True, let C++ allocate memory for activeState, predictedState, and
  # learnState. In this case we can retrieve copies of these states but can't
  # set them directly from Python. If False, Python can allocate them as
  # numpy arrays and we can pass pointers to the C++ using setStatePointers
  self.allocateStatesInCPP = False

  # Set this to True for debugging or accessing learning states
  self.retrieveLearningStates = False

  if self.makeCells4Ephemeral:
    self.cells4 = Cells4(self.numberOfCols,
                         self.cellsPerColumn,
                         self.activationThreshold,
                         self.minThreshold,
                         self.newSynapseCount,
                         self.segUpdateValidDuration,
                         self.initialPerm,
                         self.connectedPerm,
                         self.permanenceMax,
                         self.permanenceDec,
                         self.permanenceInc,
                         self.globalDecay,
                         self.doPooling,
                         self.seed,
                         self.allocateStatesInCPP,
                         self.checkSynapseConsistency)

    self.cells4.setVerbosity(self.verbosity)
    self.cells4.setPamLength(self.pamLength)
    self.cells4.setMaxAge(self.maxAge)
    self.cells4.setMaxInfBacktrack(self.maxInfBacktrack)
    self.cells4.setMaxLrnBacktrack(self.maxLrnBacktrack)
    self.cells4.setMaxSeqLength(self.maxSeqLength)
    self.cells4.setMaxSegmentsPerCell(self.maxSegmentsPerCell)
    self.cells4.setMaxSynapsesPerCell(self.maxSynapsesPerSegment)

    self._setStatePointers()
Example 7: _getEphemeralMembers
def _getEphemeralMembers(self):
  """
  List of our member variables that don't need to be saved.
  """
  e = TP._getEphemeralMembers(self)
  if self.makeCells4Ephemeral:
    e.extend(['cells4'])
  return e
Example 8: basicTest2
def basicTest2(self, tp, numPatterns=100, numRepetitions=3, activity=15,
               testTrimming=False, testRebuild=False):
  """Basic test (basic run of learning and inference)"""
  # Create PY TP object that mirrors the one sent in.
  tpPy = TP(numberOfCols=tp.numberOfCols, cellsPerColumn=tp.cellsPerColumn,
            initialPerm=tp.initialPerm, connectedPerm=tp.connectedPerm,
            minThreshold=tp.minThreshold, newSynapseCount=tp.newSynapseCount,
            permanenceInc=tp.permanenceInc, permanenceDec=tp.permanenceDec,
            permanenceMax=tp.permanenceMax, globalDecay=tp.globalDecay,
            activationThreshold=tp.activationThreshold,
            doPooling=tp.doPooling,
            segUpdateValidDuration=tp.segUpdateValidDuration,
            pamLength=tp.pamLength, maxAge=tp.maxAge,
            maxSeqLength=tp.maxSeqLength,
            maxSegmentsPerCell=tp.maxSegmentsPerCell,
            maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
            seed=tp.seed, verbosity=tp.verbosity)

  # Ensure we are copying over learning states for TPDiff
  tp.retrieveLearningStates = True

  verbosity = VERBOSITY

  # Learn

  # Build up sequences
  sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                          length=tp.numberOfCols,
                                          activity=activity)
  for r in xrange(numRepetitions):
    for i in xrange(sequence.nRows()):

      #if i > 11:
      #  setVerbosity(6, tp, tpPy)

      if i % 10 == 0:
        tp.reset()
        tpPy.reset()

      if verbosity >= 2:
        print "\n\n ===================================\nPattern:",
        print i, "Round:", r, "input:", sequence.getRow(i)

      y1 = tp.learn(sequence.getRow(i))
      y2 = tpPy.learn(sequence.getRow(i))

      # Ensure everything continues to work well even if we continuously
      # rebuild outSynapses structure
      if testRebuild:
        tp.cells4.rebuildOutSynapses()

      if testTrimming:
        tp.trimSegments()
        tpPy.trimSegments()

      if verbosity > 2:
        print "\n ------ CPP states ------ ",
        tp.printStates()
        print "\n ------ PY states ------ ",
        tpPy.printStates()

        if verbosity > 6:
          print "C++ cells: "
          tp.printCells()
          print "PY cells: "
          tpPy.printCells()

      if verbosity >= 3:
        print "Num segments in PY and C++", tpPy.getNumSegments(), \
            tp.getNumSegments()

      # Check if the two TP's are identical or not. This check is slow so
      # we do it every other iteration. Make it every iteration for debugging
      # as needed.
      self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))

      # Check that outputs are identical
      self.assertLess(abs((y1 - y2).sum()), 3)

  print "Learning completed"

  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))

  # TODO: Need to check - currently failing this
  #checkCell0(tpPy)

  # Remove unconnected synapses and check TP's again

  # Test rebuild out synapses
  print "Rebuilding outSynapses"
  tp.cells4.rebuildOutSynapses()
  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

  print "Trimming segments"
  tp.trimSegments()
  tpPy.trimSegments()
  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

  # Save and reload after learning
  print "Pickling and unpickling"
  tp.makeCells4Ephemeral = False
  #......... the rest of this example is omitted here .........
Example 9: __init__
def __init__(self,
             numberOfCols=500,
             cellsPerColumn=10,
             initialPerm=0.11,        # TODO: check perm numbers with Ron
             connectedPerm=0.50,
             minThreshold=8,
             newSynapseCount=15,
             permanenceInc=0.10,
             permanenceDec=0.10,
             permanenceMax=1.0,       # never exceed this value
             globalDecay=0.10,
             activationThreshold=12,  # 3/4 of newSynapseCount TODO make fraction
             doPooling=False,         # allows turning pooling off
             segUpdateValidDuration=5,
             burnIn=2,                # Used for evaluating the prediction score
             collectStats=False,      # If true, collect training and inference stats
             seed=42,
             verbosity=VERBOSITY,
             checkSynapseConsistency=False,
             # List (as string) of trivial predictions to compute alongside
             # the full TP. See TrivialPredictor.py for a list of allowed methods
             trivialPredictionMethods='',
             pamLength=1,
             maxInfBacktrack=10,
             maxLrnBacktrack=5,
             maxAge=100000,
             maxSeqLength=32,
             # Fixed size mode params
             maxSegmentsPerCell=-1,
             maxSynapsesPerSegment=-1,
             # Output control
             outputType='normal',
            ):

  #---------------------------------------------------------------------------
  # Save our __init__ args for debugging
  self._initArgsDict = _extractCallingMethodArgs()

  #---------------------------------------------------------------------------
  # These two variables are for testing

  # If set to True, Cells4 will perform (time consuming) invariance checks
  self.checkSynapseConsistency = checkSynapseConsistency

  # If set to False, Cells4 will *not* be treated as an ephemeral member
  # and full TP10X pickling is possible. This is useful for testing
  # pickle/unpickle without saving Cells4 to an external file
  self.makeCells4Ephemeral = True

  #---------------------------------------------------------------------------
  # Init the base class
  TP.__init__(self,
              numberOfCols=numberOfCols,
              cellsPerColumn=cellsPerColumn,
              initialPerm=initialPerm,
              connectedPerm=connectedPerm,
              minThreshold=minThreshold,
              newSynapseCount=newSynapseCount,
              permanenceInc=permanenceInc,
              permanenceDec=permanenceDec,
              permanenceMax=permanenceMax,  # never exceed this value
              globalDecay=globalDecay,
              activationThreshold=activationThreshold,
              doPooling=doPooling,
              segUpdateValidDuration=segUpdateValidDuration,
              burnIn=burnIn,
              collectStats=collectStats,
              seed=seed,
              verbosity=verbosity,
              trivialPredictionMethods=trivialPredictionMethods,
              pamLength=pamLength,
              maxInfBacktrack=maxInfBacktrack,
              maxLrnBacktrack=maxLrnBacktrack,
              maxAge=maxAge,
              maxSeqLength=maxSeqLength,
              maxSegmentsPerCell=maxSegmentsPerCell,
              maxSynapsesPerSegment=maxSynapsesPerSegment,
              outputType=outputType,
              )
Example 10: reset
def reset(self):
  """ Reset the state of all cells.

  This is normally used between sequences while training. All internal states
  are reset to 0.
  """
  TP.reset(self)
Example 11: testCheckpointMiddleOfSequence2
def testCheckpointMiddleOfSequence2(self):
  """More complex test of checkpointing in the middle of a sequence."""
  tp1 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
           False, 1960, 0, False, "", 3, 10, 5, 0, 32, 128, 32, "normal")
  tp2 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
           False, 1960, 0, False, "", 3, 10, 5, 0, 32, 128, 32, "normal")

  with resource_stream(__name__, "data/tp_input.csv") as fin:
    reader = csv.reader(fin)
    records = []
    for bottomUpInStr in fin:
      bottomUpIn = numpy.array(eval("[" + bottomUpInStr.strip() + "]"),
                               dtype="int32")
      records.append(bottomUpIn)

  for r in records[:250]:
    output1 = tp1.compute(r, True, True)
    output2 = tp2.compute(r, True, True)
    self.assertTrue(numpy.array_equal(output1, output2))

  tp3 = pickle.loads(pickle.dumps(tp1))
  tp4 = pickle.loads(pickle.dumps(tp2))

  i = 0
  for r in records[250:]:
    print i
    i += 1
    out1 = tp1.compute(r, True, True)
    out2 = tp2.compute(r, True, True)
    out3 = tp3.compute(r, True, True)
    out4 = tp4.compute(r, True, True)
    self.assertTPsEqual(tp1, tp2)
    self.assertTrue(numpy.array_equal(out1, out2))
    self.assertTrue(numpy.array_equal(out1, out3))
    self.assertTrue(numpy.array_equal(out1, out4))
Example 12: ScalarEncoder
import pyaudio
import audioop
import math

from nupic.encoders import ScalarEncoder
from nupic.research.TP import TP
from termcolor import colored

# Create our NuPIC entities
enc = ScalarEncoder(n=50, w=3, minval=0, maxval=100,
                    clipInput=True, forced=True)
tp = TP(numberOfCols=50, cellsPerColumn=4, initialPerm=0.5,
        connectedPerm=0.5, minThreshold=5, newSynapseCount=5,
        permanenceInc=0.1, permanenceDec=0.1,
        activationThreshold=3, globalDecay=0.1, burnIn=1,
        checkSynapseConsistency=False, pamLength=3)

# Set up our PyAudio stream
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1,
                rate=int(p.get_device_info_by_index(0)['defaultSampleRate']),
                input=True, frames_per_buffer=1024*5)

print "%-48s %48s" % (colored("DECIBELS", "green"),
                      colored("PREDICTION", "red"))

b = 0
while 1:
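  # The loop body is truncated in the snippet above. What follows is only a
  # plausible sketch of one iteration, based on the setup code (assumptions:
  # 16-bit mono audio, loudness taken as the RMS of each buffer converted to
  # decibels, and the TP's predicted columns printed next to it).
  data = stream.read(1024 * 5)                  # one buffer of raw samples
  rms = audioop.rms(data, 2)                    # RMS amplitude of 16-bit samples
  db = int(20 * math.log10(rms)) if rms > 0 else 0
  encoding = enc.encode(db)                     # 50-bit scalar encoding of loudness
  tp.compute(encoding, enableLearn=True, computeInfOutput=True)
  predictedColumns = tp.getPredictedState().max(axis=1).nonzero()[0]
  print "%-48s %48s" % (colored(str(db), "green"),
                        colored(str(list(predictedColumns)), "red"))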
Example 13: main
def main(SEED, VERBOSITY):
  # Create the TP
  tp = TP(numberOfCols=100,
          cellsPerColumn=1,
          initialPerm=0.3,
          connectedPerm=0.5,
          minThreshold=4,
          newSynapseCount=7,
          permanenceInc=0.1,
          permanenceDec=0.05,
          activationThreshold=5,
          globalDecay=0,
          burnIn=1,
          seed=SEED,
          verbosity=VERBOSITY,
          checkSynapseConsistency=True,
          pamLength=1000)

  print
  trainingSet = _getSimplePatterns(10, 10)
  for seq in trainingSet[0:5]:
    _printOneTrainingVector(seq)

  # Train the TP
  print
  print 'Learning 1 ... A->A->A'
  for _ in range(2):
    for seq in trainingSet[0:5]:
      for _ in range(10):
        #tp.learn(seq)
        tp.compute(seq, enableLearn=True, computeInfOutput=False)
      tp.reset()

  print
  print 'Learning 2 ... A->B->C'
  for _ in range(10):
    for seq in trainingSet[0:5]:
      tp.compute(seq, enableLearn=True, computeInfOutput=False)
    tp.reset()

  # Run inference with the TP
  # With Learning 1 alone, the TP only predicts A->A; after also running
  # Learning 2, it comes to predict A->A,B.
  print
  print 'Running inference'
  for seq in trainingSet[0:5]:
    # tp.reset()
    # tp.resetStats()
    tp.compute(seq, enableLearn=False, computeInfOutput=True)
    tp.printStates(False, False)
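Example 13 calls two helpers, _getSimplePatterns and _printOneTrainingVector, whose definitions are not included in the snippet. Below is a minimal sketch of what they might look like; these are hypothetical reconstructions, chosen only to be consistent with the 100-column TP and the _getSimplePatterns(10, 10) call above.

import numpy

def _getSimplePatterns(numOnes, numPatterns):
  """Return numPatterns binary vectors of length numOnes * numPatterns,
  where pattern i has a block of numOnes consecutive bits set."""
  numCols = numOnes * numPatterns
  patterns = []
  for i in xrange(numPatterns):
    x = numpy.zeros(numCols, dtype="uint32")
    x[i * numOnes:(i + 1) * numOnes] = 1
    patterns.append(x)
  return patterns

def _printOneTrainingVector(x):
  """Print a binary vector as a compact row of 1s and dots."""
  print ''.join('1' if k != 0 else '.' for k in x)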
Example 14: Model
class Model():

  def __init__(self,
               numberOfCols=16384, cellsPerColumn=8,
               initialPerm=0.5, connectedPerm=0.5,
               minThreshold=164, newSynapseCount=164,
               permanenceInc=0.1, permanenceDec=0.0,
               activationThreshold=164,
               pamLength=10,
               checkpointDir=None):
    self.tp = TP(numberOfCols=numberOfCols, cellsPerColumn=cellsPerColumn,
                 initialPerm=initialPerm, connectedPerm=connectedPerm,
                 minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                 permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                 # 1/2 of the on bits = (16384 * .02) / 2 ~= 164
                 activationThreshold=activationThreshold,
                 globalDecay=0, burnIn=1,
                 #verbosity=3,  # higher values print more debugging output
                 checkSynapseConsistency=False,
                 pamLength=pamLength)

    self.checkpointDir = checkpointDir
    self.checkpointPklPath = None
    self.checkpointDataPath = None
    self._initCheckpoint()

  def _initCheckpoint(self):
    if self.checkpointDir:
      if not os.path.exists(self.checkpointDir):
        os.makedirs(self.checkpointDir)
      self.checkpointPklPath = self.checkpointDir + "/model.pkl"
      self.checkpointDataPath = self.checkpointDir + "/model.data"

  def canCheckpoint(self):
    return self.checkpointDir != None

  def hasCheckpoint(self):
    return (os.path.exists(self.checkpointPklPath) and
            os.path.exists(self.checkpointDataPath))

  def load(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")
    if not self.hasCheckpoint():
      raise Exception("Could not find checkpoint file")
    with open(self.checkpointPklPath, 'rb') as f:
      self.tp = pickle.load(f)
    self.tp.loadFromFile(self.checkpointDataPath)

  def save(self):
    if not self.checkpointDir:
      raise Exception("No checkpoint directory specified")
    self.tp.saveToFile(self.checkpointDataPath)
    with open(self.checkpointPklPath, 'wb') as f:
      pickle.dump(self.tp, f)

  def feedTerm(self, term, learn=True):
    """ Feed a Term to the model, returning the next predicted Term """
    tp = self.tp
    array = numpy.array(term.toArray(), dtype="uint32")
    tp.resetStats()
    tp.compute(array, enableLearn=learn, computeInfOutput=True)
    #print "ret: " + repr(ret)
    #if ret.all() == array.all():
    #  print "EQUAL to input"
    ret = tp.getStats()
    #ret = tp.printStates()
    print "ret: " + repr(ret)
    print
    print
    print "*****************************************"
    predictedCells = tp.getPredictedState()
    predictedColumns = predictedCells.max(axis=1)
    predictedBitmap = predictedColumns.nonzero()[0].tolist()
    return Term().createFromBitmap(predictedBitmap)

  def resetSequence(self):
    print "RESET"
    self.tp.reset()
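A short usage sketch for the Model class above, focusing on the checkpoint lifecycle that the example fully defines. The directory path is purely illustrative, and the Term objects passed to feedTerm are assumed to come from the surrounding application:

model = Model(checkpointDir="/tmp/tp_checkpoint")  # hypothetical directory
# ... feed terms with model.feedTerm(term) and call model.resetSequence()
#     at sequence boundaries ...
if model.canCheckpoint():
  model.save()           # writes model.pkl and model.data into checkpointDir

# Later, possibly in a fresh process:
restored = Model(checkpointDir="/tmp/tp_checkpoint")
if restored.hasCheckpoint():
  restored.load()        # restores the pickled wrapper plus the TP state file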
Example 15: xrange
# In[20]:

for column in xrange(4):
  connected = np.zeros((24,), dtype="int")
  sp.getConnectedSynapses(column, connected)
  print connected

print 'STARTING TEMPORAL POOLING'

# In[21]:

tp = TP(numberOfCols=50, cellsPerColumn=2,
        initialPerm=0.5, connectedPerm=0.5,
        minThreshold=10, newSynapseCount=10,
        permanenceInc=0.1, permanenceDec=0.0,
        activationThreshold=8,
        globalDecay=0, burnIn=1,
        checkSynapseConsistency=False,
        pamLength=10)

# In[22]:

for i in range(1):
  for note in encoded_list:
    tp.compute(note, enableLearn=True, computeInfOutput=False)

# This function prints the segments associated with every cell.
# If you really want to understand the TP, uncomment this line. By following
# every step you can get an excellent understanding for exactly how the TP
# learns.
# tp.printCells()
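To see what the pooler learned from encoded_list, a possible follow-up cell is sketched below. This is only an illustration: encoded_list is assumed to be the list of 50-bit numpy arrays produced by an encoder in an earlier cell of the notebook, and the cell number is hypothetical.

# In[23]:
for note in encoded_list:
  tp.compute(note, enableLearn=False, computeInfOutput=True)
  predictedColumns = tp.getPredictedState().max(axis=1).nonzero()[0]
  print "predicted columns:", predictedColumns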