This article collects typical usage examples of the Python method nupic.research.TP.TP.reset. If you are unsure exactly how to use TP.reset, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also look at the containing class, nupic.research.TP.TP,
for further usage examples.
Seven code examples of TP.reset are shown below, ordered roughly by popularity.
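All of the examples share the same basic pattern: feed one element of a sequence per compute() call, and call reset() at sequence boundaries so the temporal pooler does not learn a spurious transition from the end of one sequence into the start of the next. The following is a minimal sketch of that pattern; the parameter values and the random binary encoding are illustrative only, not taken from any of the examples below.

# Minimal sketch of the compute()/reset() loop (assumes a nupic install that
# still ships the legacy nupic.research.TP.TP class).
import numpy
from nupic.research.TP import TP

tp = TP(numberOfCols=50, cellsPerColumn=4, verbosity=0)  # illustrative parameters

# Three toy "sequences" of five random binary patterns each (illustrative input).
sequences = [[numpy.random.randint(2, size=50).astype("uint32") for _ in range(5)]
             for _ in range(3)]

for sequence in sequences:
  for pattern in sequence:
    tp.compute(pattern, enableLearn=True, computeInfOutput=False)
  tp.reset()  # clear all cell state before the next, unrelated sequence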
Example 1: testCheckpointMiddleOfSequence
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
def testCheckpointMiddleOfSequence(self):
  # Create a model and give it some inputs to learn.
  tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
  sequences = [self.generateSequence() for _ in xrange(5)]
  train = list(itertools.chain.from_iterable(sequences[:3] +
                                             [sequences[3][:5]]))
  for bottomUpInput in train:
    if bottomUpInput is None:
      tp1.reset()
    else:
      tp1.compute(bottomUpInput, True, True)

  # Serialize and deserialize the TP.
  checkpointPath = os.path.join(self._tmpDir, 'a')
  tp1.saveToFile(checkpointPath)
  tp2 = pickle.loads(pickle.dumps(tp1))
  tp2.loadFromFile(checkpointPath)

  # Check that the TPs are the same.
  self.assertTPsEqual(tp1, tp2)

  # Feed some data into the models.
  test = list(itertools.chain.from_iterable([sequences[3][5:]] +
                                            sequences[3:]))
  for bottomUpInput in test:
    if bottomUpInput is None:
      tp1.reset()
      tp2.reset()
    else:
      result1 = tp1.compute(bottomUpInput, True, True)
      result2 = tp2.compute(bottomUpInput, True, True)

      self.assertTPsEqual(tp1, tp2)
      self.assertTrue(numpy.array_equal(result1, result2))
Example 2: reset
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
def reset(self):
  """ Reset the state of all cells.

  This is normally used between sequences while training. All internal states
  are reset to 0.
  """
  if self.verbosity >= 3:
    print "TP Reset"
  self._setStatePointers()
  self.cells4.reset()
  TP.reset(self)
Example 3: main
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
def main(SEED, VERBOSITY):
  # Create the TP
  tp = TP(numberOfCols=100,
          cellsPerColumn=1,
          initialPerm=0.3,
          connectedPerm=0.5,
          minThreshold=4,
          newSynapseCount=7,
          permanenceInc=0.1,
          permanenceDec=0.05,
          activationThreshold=5,
          globalDecay=0,
          burnIn=1,
          seed=SEED,
          verbosity=VERBOSITY,
          checkSynapseConsistency=True,
          pamLength=1000)

  print
  trainingSet = _getSimplePatterns(10, 10)
  for seq in trainingSet[0:5]:
    _printOneTrainingVector(seq)

  # Train the TP
  print
  print 'Learning 1 ... A->A->A'
  for _ in range(2):
    for seq in trainingSet[0:5]:
      for _ in range(10):
        #tp.learn(seq)
        tp.compute(seq, enableLearn=True, computeInfOutput=False)
      tp.reset()

  print
  print 'Learning 2 ... A->B->C'
  for _ in range(10):
    for seq in trainingSet[0:5]:
      tp.compute(seq, enableLearn=True, computeInfOutput=False)
    tp.reset()

  # Run prediction with the TP.
  # With Learning 1 alone the TP only predicts A->A,
  # but after also running Learning 2 it predicts A->A,B.
  print
  print 'Running inference'
  for seq in trainingSet[0:5]:
    # tp.reset()
    # tp.resetStats()
    tp.compute(seq, enableLearn=False, computeInfOutput=True)
    tp.printStates(False, False)
Example 4: basicTest2
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
def basicTest2(self, tp, numPatterns=100, numRepetitions=3, activity=15,
               testTrimming=False, testRebuild=False):
  """Basic test (basic run of learning and inference)"""
  # Create PY TP object that mirrors the one sent in.
  tpPy = TP(numberOfCols=tp.numberOfCols, cellsPerColumn=tp.cellsPerColumn,
            initialPerm=tp.initialPerm, connectedPerm=tp.connectedPerm,
            minThreshold=tp.minThreshold, newSynapseCount=tp.newSynapseCount,
            permanenceInc=tp.permanenceInc, permanenceDec=tp.permanenceDec,
            permanenceMax=tp.permanenceMax, globalDecay=tp.globalDecay,
            activationThreshold=tp.activationThreshold,
            doPooling=tp.doPooling,
            segUpdateValidDuration=tp.segUpdateValidDuration,
            pamLength=tp.pamLength, maxAge=tp.maxAge,
            maxSeqLength=tp.maxSeqLength,
            maxSegmentsPerCell=tp.maxSegmentsPerCell,
            maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
            seed=tp.seed, verbosity=tp.verbosity)

  # Ensure we are copying over learning states for TPDiff
  tp.retrieveLearningStates = True

  verbosity = VERBOSITY

  # Learn

  # Build up sequences
  sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
                                          length=tp.numberOfCols,
                                          activity=activity)
  for r in xrange(numRepetitions):
    for i in xrange(sequence.nRows()):

      #if i > 11:
      #  setVerbosity(6, tp, tpPy)

      if i % 10 == 0:
        tp.reset()
        tpPy.reset()

      if verbosity >= 2:
        print "\n\n    ===================================\nPattern:",
        print i, "Round:", r, "input:", sequence.getRow(i)

      y1 = tp.learn(sequence.getRow(i))
      y2 = tpPy.learn(sequence.getRow(i))

      # Ensure everything continues to work well even if we continuously
      # rebuild outSynapses structure
      if testRebuild:
        tp.cells4.rebuildOutSynapses()

      if testTrimming:
        tp.trimSegments()
        tpPy.trimSegments()

      if verbosity > 2:
        print "\n   ------  CPP states  ------ ",
        tp.printStates()
        print "\n   ------  PY states  ------ ",
        tpPy.printStates()

        if verbosity > 6:
          print "C++ cells: "
          tp.printCells()
          print "PY cells: "
          tpPy.printCells()

      if verbosity >= 3:
        print "Num segments in PY and C++", tpPy.getNumSegments(), \
            tp.getNumSegments()

      # Check if the two TP's are identical or not. This check is slow so
      # we do it every other iteration. Make it every iteration for debugging
      # as needed.
      self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))

      # Check that outputs are identical
      self.assertLess(abs((y1 - y2).sum()), 3)

  print "Learning completed"

  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))

  # TODO: Need to check - currently failing this
  #checkCell0(tpPy)

  # Remove unconnected synapses and check TP's again

  # Test rebuild out synapses
  print "Rebuilding outSynapses"
  tp.cells4.rebuildOutSynapses()
  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

  print "Trimming segments"
  tp.trimSegments()
  tpPy.trimSegments()
  self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))

  # Save and reload after learning
  print "Pickling and unpickling"
  tp.makeCells4Ephemeral = False
#......... remainder of this example omitted .........
Example 5: reset
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
def reset(self):
  """ Reset the state of all cells.

  This is normally used between sequences while training. All internal states
  are reset to 0.
  """
  TP.reset(self)
Example 6: int
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
encoded = enc.encode(decibel)

# Add our encoded representation to the temporal pooler.
tp.compute(encoded, enableLearn=True, computeInfOutput=True)

# For the curious:
#tp.printCells()
#tp.printStates(printPrevious=False, printLearnState=False)

predictedCells = tp.getPredictedState()
decval = 0
if predictedCells.any():
  decval = predictedCells.max(axis=1).nonzero()[0][-1]
  # This is more correct, but seems wonky...
  #decval = int(enc.decode(predictedCells.max(axis=1).
  #                        nonzero()[0])[0]["[0:100]"][0][0][1])

print "%-48s %48s" % (colored(("*" * (decibel / 2))[:38], "green"),
                      colored(("#" * decval)[:38], "red"))

if b >= 20:
  b = 0
  # If we have enough samples, reset the encoder to help it learn.
  tp.reset()
  print " " * 35, "RESET!"
Example 7: Model
# Required import: from nupic.research.TP import TP [as alias]
# Or: from nupic.research.TP.TP import reset [as alias]
class Model():

  def __init__(self,
               numberOfCols=16384, cellsPerColumn=8,
               initialPerm=0.5, connectedPerm=0.5,
               minThreshold=164, newSynapseCount=164,
               permanenceInc=0.1, permanenceDec=0.0,
               activationThreshold=164,
               pamLength=10,
               checkpointDir=None):
    self.tp = TP(numberOfCols=numberOfCols, cellsPerColumn=cellsPerColumn,
                 initialPerm=initialPerm, connectedPerm=connectedPerm,
                 minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                 permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                 # 1/2 of the on bits = (16384 * .02) / 2
                 activationThreshold=activationThreshold,
                 globalDecay=0, burnIn=1,
                 #verbosity=3,  # who knows what this does...
                 checkSynapseConsistency=False,
                 pamLength=pamLength)

    self.checkpointDir = checkpointDir
    self.checkpointPklPath = None
    self.checkpointDataPath = None
    self._initCheckpoint()

  def _initCheckpoint(self):
    if self.checkpointDir:
      if not os.path.exists(self.checkpointDir):
        os.makedirs(self.checkpointDir)
      self.checkpointPklPath = self.checkpointDir + "/model.pkl"
      self.checkpointDataPath = self.checkpointDir + "/model.data"

  def canCheckpoint(self):
    return self.checkpointDir != None

  def hasCheckpoint(self):
    return (os.path.exists(self.checkpointPklPath) and
            os.path.exists(self.checkpointDataPath))

  def load(self):
    if not self.checkpointDir:
      raise(Exception("No checkpoint directory specified"))
    if not self.hasCheckpoint():
      raise(Exception("Could not find checkpoint file"))
    with open(self.checkpointPklPath, 'rb') as f:
      self.tp = pickle.load(f)
    self.tp.loadFromFile(self.checkpointDataPath)

  def save(self):
    if not self.checkpointDir:
      raise(Exception("No checkpoint directory specified"))
    self.tp.saveToFile(self.checkpointDataPath)
    with open(self.checkpointPklPath, 'wb') as f:
      pickle.dump(self.tp, f)

  def feedTerm(self, term, learn=True):
    """ Feed a Term to the model, returning the next predicted Term """
    tp = self.tp
    array = numpy.array(term.toArray(), dtype="uint32")
    tp.resetStats()
    tp.compute(array, enableLearn=learn, computeInfOutput=True)
    #print "ret: " + repr(ret)
    #if ret.all() == array.all():
    #  print "EQUAL to input"
    ret = tp.getStats()
    #ret = tp.printStates()
    print "ret: " + repr(ret)
    print
    print
    print "*****************************************"
    predictedCells = tp.getPredictedState()
    predictedColumns = predictedCells.max(axis=1)
    predictedBitmap = predictedColumns.nonzero()[0].tolist()
    return Term().createFromBitmap(predictedBitmap)

  def resetSequence(self):
    print "RESET"
    self.tp.reset()
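For context, a checkpoint round trip with this Model class might look like the sketch below. It only uses methods shown above; the checkpoint directory is illustrative, and the terms fed between construction and save are omitted because the Term class is not defined in this snippet.

# Hedged usage sketch for the Model class above (illustrative path, elided input).
model = Model(checkpointDir="/tmp/tp_model")
# ... feed terms with model.feedTerm(...) and call model.resetSequence()
#     at sequence boundaries ...
if model.canCheckpoint():
  model.save()                      # writes model.data (TP state) plus model.pkl

restored = Model(checkpointDir="/tmp/tp_model")
if restored.hasCheckpoint():
  restored.load()                   # unpickles the TP, then loadFromFile(model.data)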