This article collects representative code examples of the Python method nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder.getWidth. If you are struggling with questions like "What exactly does RandomDistributedScalarEncoder.getWidth do?" or "How do I use RandomDistributedScalarEncoder.getWidth?", the curated examples below should help. You can also read more about the containing class, nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder.
Ten code examples of RandomDistributedScalarEncoder.getWidth are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
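Before the examples, here is a minimal sketch of the typical getWidth pattern (assuming nupic is installed; the resolution value and variable names are illustrative only). getWidth() returns the total number of bits in the encoder's output, which is exactly the size needed for the numpy array that encodeIntoArray fills in:

import numpy
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder

# Illustrative parameters; in the nupic versions these examples target,
# the RDSE defaults to n=400 total bits with w=21 active bits.
encoder = RandomDistributedScalarEncoder(resolution=0.5)
output = numpy.zeros(encoder.getWidth(), dtype=numpy.uint32)
encoder.encodeIntoArray(17.3, output)
print encoder.getWidth()  # n, the total number of bits
print output.sum()        # w, the number of active bits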
Example 1: testResolution
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def testResolution(self):
  """
  Test that numbers within the same resolution return the same encoding.
  Numbers outside the resolution should return different encodings.
  """
  encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)

  # Since 23.0 is the first number encoded, it becomes the offset.
  # Since the resolution is 1, 23.1 and 22.9 should get the same bucket index
  # and the same encoding.
  e23 = encoder.encode(23.0)
  e23p1 = encoder.encode(23.1)
  e22p9 = encoder.encode(22.9)
  e24 = encoder.encode(24.0)
  self.assertEqual(e23.sum(), encoder.w)
  self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
                   "Numbers within resolution don't have the same encoding")
  self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
                   "Numbers within resolution don't have the same encoding")
  self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
                      "Numbers outside resolution have the same encoding")
  e22p5 = encoder.encode(22.5)
  self.assertNotEqual((e23 == e22p5).sum(), encoder.getWidth(),
                      "Numbers outside resolution have the same encoding")
Example 2: testSeed
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def testSeed(self):
  """
  Test that initializing twice with the same seed returns identical encodings,
  and that encodings differ when no seed is specified (seed=-1).
  """
  encoder1 = RandomDistributedScalarEncoder(name="encoder1", resolution=1.0,
                                            seed=42)
  encoder2 = RandomDistributedScalarEncoder(name="encoder2", resolution=1.0,
                                            seed=42)
  encoder3 = RandomDistributedScalarEncoder(name="encoder3", resolution=1.0,
                                            seed=-1)
  encoder4 = RandomDistributedScalarEncoder(name="encoder4", resolution=1.0,
                                            seed=-1)

  e1 = encoder1.encode(23.0)
  e2 = encoder2.encode(23.0)
  e3 = encoder3.encode(23.0)
  e4 = encoder4.encode(23.0)

  self.assertEqual((e1 == e2).sum(), encoder1.getWidth(),
                   "Same seed gives rise to different encodings")
  self.assertNotEqual((e1 == e3).sum(), encoder1.getWidth(),
                      "Different seeds give rise to same encodings")
  self.assertNotEqual((e3 == e4).sum(), encoder1.getWidth(),
                      "Seeds of -1 give rise to same encodings")
Example 3: testMapBucketIndexToNonZeroBits
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def testMapBucketIndexToNonZeroBits(self):
  """
  Test that mapBucketIndexToNonZeroBits works and that max buckets and
  clipping are handled properly.
  """
  enc = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
  # Set a low number of max buckets
  enc._initializeBucketMap(10, None)
  enc.encode(0.0)
  enc.encode(-7.0)
  enc.encode(7.0)

  self.assertEqual(len(enc.bucketMap), enc._maxBuckets,
                   "_maxBuckets exceeded")
  self.assertTrue(
      (enc.mapBucketIndexToNonZeroBits(-1) == enc.bucketMap[0]).all(),
      "mapBucketIndexToNonZeroBits did not handle negative index")
  self.assertTrue(
      (enc.mapBucketIndexToNonZeroBits(1000) == enc.bucketMap[9]).all(),
      "mapBucketIndexToNonZeroBits did not handle out-of-bounds index")

  e23 = enc.encode(23.0)
  e6 = enc.encode(6)
  self.assertEqual((e23 == e6).sum(), enc.getWidth(),
                   "Values not clipped correctly during encoding")

  e_8 = enc.encode(-8)
  e_7 = enc.encode(-7)
  self.assertEqual((e_8 == e_7).sum(), enc.getWidth(),
                   "Values not clipped correctly during encoding")

  self.assertEqual(enc.getBucketIndices(-8)[0], 0,
                   "getBucketIndices returned negative bucket index")
  self.assertEqual(enc.getBucketIndices(23)[0], enc._maxBuckets - 1,
                   "getBucketIndices returned bucket index that is too large")
Example 4: testVerbosity
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def testVerbosity(self):
  """
  Test that nothing is printed out when verbosity=0.
  """
  _stdout = sys.stdout
  sys.stdout = _stringio = StringIO()
  encoder = RandomDistributedScalarEncoder(name="mv", resolution=1.0,
                                           verbosity=0)
  output = numpy.zeros(encoder.getWidth(), dtype=defaultDtype)
  encoder.encodeIntoArray(23.0, output)
  encoder.getBucketIndices(23.0)
  sys.stdout = _stdout

  self.assertEqual(len(_stringio.getvalue()), 0,
                   "zero verbosity doesn't lead to zero output")
Example 5: testGetMethods
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def testGetMethods(self):
  """
  Test that the getWidth, getDescription, and getDecoderOutputFieldTypes
  methods work.
  """
  enc = RandomDistributedScalarEncoder(name='theName', resolution=1.0, n=500)
  self.assertEqual(enc.getWidth(), 500,
                   "getWidth doesn't return the correct result")
  self.assertEqual(enc.getDescription(), [('theName', 0)],
                   "getDescription doesn't return the correct result")
  self.assertEqual(enc.getDecoderOutputFieldTypes(),
                   (FieldMetaType.float, ),
                   "getDecoderOutputFieldTypes doesn't return the correct result")
Example 6: BaseNetwork
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
class BaseNetwork(object):

  def __init__(self, inputMin=None, inputMax=None, runSanity=False):
    self.inputMin = inputMin
    self.inputMax = inputMax
    self.runSanity = runSanity

    self.encoder = None
    self.encoderOutput = None
    self.sp = None
    self.spOutput = None
    self.spOutputNZ = None
    self.tm = None
    self.anomalyScore = None
    if runSanity:
      self.sanity = None
    self.defaultEncoderResolution = 0.0001
    self.numColumns = 2048
    self.cellsPerColumn = 32
    self.predictedActiveCells = None
    self.previouslyPredictiveCells = None

  def initialize(self):
    # Scalar encoder
    resolution = self.getEncoderResolution()
    self.encoder = RandomDistributedScalarEncoder(resolution, seed=42)
    self.encoderOutput = np.zeros(self.encoder.getWidth(), dtype=np.uint32)

    # Spatial pooler
    spInputWidth = self.encoder.getWidth()
    self.spParams = {
        "globalInhibition": True,
        "columnDimensions": [self.numColumns],
        "inputDimensions": [spInputWidth],
        "potentialRadius": spInputWidth,
        "numActiveColumnsPerInhArea": 40,
        "seed": 1956,
        "potentialPct": 0.8,
        "boostStrength": 5.0,
        "synPermActiveInc": 0.003,
        "synPermConnected": 0.2,
        "synPermInactiveDec": 0.0005,
    }
    self.sp = SpatialPooler(**self.spParams)
    self.spOutput = np.zeros(self.numColumns, dtype=np.uint32)

    # Temporal memory
    self.tmParams = {
        "activationThreshold": 20,
        "cellsPerColumn": self.cellsPerColumn,
        "columnDimensions": (self.numColumns,),
        "initialPermanence": 0.24,
        "maxSegmentsPerCell": 128,
        "maxSynapsesPerSegment": 128,
        "minThreshold": 13,
        "maxNewSynapseCount": 31,
        "permanenceDecrement": 0.008,
        "permanenceIncrement": 0.04,
        "seed": 1960,
    }
    self.tm = TemporalMemory(**self.tmParams)

    # Sanity
    if self.runSanity:
      self.sanity = sanity.SPTMInstance(self.sp, self.tm)

  def handleRecord(self, scalarValue, label=None, skipEncoding=False,
                   learningMode=True):
    """Process one record."""
    if self.runSanity:
      self.sanity.waitForUserContinue()

    # Encode the input data record if it hasn't already been encoded.
    if not skipEncoding:
      self.encodeValue(scalarValue)

    # Run the encoded data through the spatial pooler.
    self.sp.compute(self.encoderOutput, learningMode, self.spOutput)
    self.spOutputNZ = self.spOutput.nonzero()[0]

    # WARNING: this needs to happen here, before the TM runs.
    self.previouslyPredictiveCells = self.tm.getPredictiveCells()

    # Run the SP output through the temporal memory.
    self.tm.compute(self.spOutputNZ)
    self.predictedActiveCells = _computePredictedActiveCells(
        self.tm.getActiveCells(), self.previouslyPredictiveCells)

    # Anomaly score
    self.anomalyScore = _computeAnomalyScore(self.spOutputNZ,
                                             self.previouslyPredictiveCells,
                                             self.cellsPerColumn)

    # Run sanity
    # ... remainder of the code omitted ...
Example 7: NumentaTMLowLevelDetector
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
class NumentaTMLowLevelDetector(AnomalyDetector):
  """The 'numentaTM' detector, but not using the CLAModel or network API."""

  def __init__(self, *args, **kwargs):
    super(NumentaTMLowLevelDetector, self).__init__(*args, **kwargs)

    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.sp = None
    self.spOutput = None
    self.tm = None
    self.anomalyLikelihood = None

    # Set this to False if you want to get results based on raw scores
    # without using AnomalyLikelihood. This will give worse results, but it
    # is useful for checking the efficacy of AnomalyLikelihood. You will
    # need to re-optimize the thresholds when running with this setting.
    self.useLikelihood = True

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    # Initialize the RDSE with a resolution calculated from the data min
    # and max; the resolution is specific to the data stream.
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution, seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    # Initialize the timestamp encoder
    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49,))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = (self.timestampEncoder.getWidth() +
                  self.valueEncoder.getWidth())
    self.sp = SpatialPooler(**{
        "globalInhibition": True,
        "columnDimensions": [2048],
        "inputDimensions": [inputWidth],
        "potentialRadius": inputWidth,
        "numActiveColumnsPerInhArea": 40,
        "seed": 1956,
        "potentialPct": 0.8,
        "boostStrength": 0.0,
        "synPermActiveInc": 0.003,
        "synPermConnected": 0.2,
        "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.tm = TemporalMemory(**{
        "activationThreshold": 20,
        "cellsPerColumn": 32,
        "columnDimensions": (2048,),
        "initialPermanence": 0.24,
        "maxSegmentsPerCell": 128,
        "maxSynapsesPerSegment": 128,
        "minThreshold": 13,
        "maxNewSynapseCount": 31,
        "permanenceDecrement": 0.008,
        "permanenceIncrement": 0.04,
        "seed": 1960,
    })

    if self.useLikelihood:
      learningPeriod = math.floor(self.probationaryPeriod / 2.0)
      self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
          claLearningPeriod=learningPeriod,
          estimationSamples=self.probationaryPeriod - learningPeriod,
          reestimationPeriod=100
      )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    # Encode the input data record
    self.valueEncoder.encodeIntoArray(
        inputData["value"], self.encodedValue)
    self.timestampEncoder.encodeIntoArray(
        inputData["timestamp"], self.encodedTimestamp)

    # Run the encoded data through the spatial pooler
    self.sp.compute(np.concatenate((self.encodedTimestamp,
                                    self.encodedValue,)),
                    True, self.spOutput)
    # ... remainder of the code omitted ...
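The resolution heuristic in initialize above deserves a closer look: it pads the observed input range by 20% on each side, divides the padded range into a fixed number of buckets, and never lets the result drop below 0.001. A standalone sketch with made-up min/max values (computeResolution is a hypothetical helper; the numbers are illustrative, not from any real dataset):

def computeResolution(inputMin, inputMax, numBuckets=130.0):
  # Pad the range by 20% so values slightly outside [inputMin, inputMax]
  # still land in distinct buckets instead of all being clipped.
  rangePadding = abs(inputMax - inputMin) * 0.2
  minVal = inputMin - rangePadding
  maxVal = (inputMax + rangePadding
            if inputMin != inputMax
            else inputMin + 1)
  # Keep the resolution from collapsing to zero on constant streams.
  return max(0.001, (maxVal - minVal) / numBuckets)

print computeResolution(10.0, 90.0)  # (106 - (-6)) / 130 ~= 0.862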
Example 8: runHotgym
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
  enParams = modelParams["sensorParams"]["encoders"]
  spParams = modelParams["spParams"]
  tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
      timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
      weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
      enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
      inputDimensions=(encodingWidth,),
      columnDimensions=(spParams["columnCount"],),
      potentialPct=spParams["potentialPct"],
      potentialRadius=encodingWidth,
      globalInhibition=spParams["globalInhibition"],
      localAreaDensity=spParams["localAreaDensity"],
      numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
      synPermInactiveDec=spParams["synPermInactiveDec"],
      synPermActiveInc=spParams["synPermActiveInc"],
      synPermConnected=spParams["synPermConnected"],
      boostStrength=spParams["boostStrength"],
      seed=spParams["seed"],
      wrapAround=True
  )

  tm = TemporalMemory(
      columnDimensions=(tmParams["columnCount"],),
      cellsPerColumn=tmParams["cellsPerColumn"],
      activationThreshold=tmParams["activationThreshold"],
      initialPermanence=tmParams["initialPerm"],
      connectedPermanence=spParams["synPermConnected"],
      minThreshold=tmParams["minThreshold"],
      maxNewSynapseCount=tmParams["newSynapseCount"],
      permanenceIncrement=tmParams["permanenceInc"],
      permanenceDecrement=tmParams["permanenceDec"],
      predictedSegmentDecrement=0.0,
      maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
      maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
      seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords:
        break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the
      # encoders to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      encoding = numpy.concatenate(
          [timeOfDayBits, weekendBits, consumptionBits]
      )

      # Create an array to represent active columns, all initially zero. This
      # will be populated by the compute method below. It must have the same
      # dimensions as the Spatial Pooler.
      activeColumns = numpy.zeros(spParams["columnCount"])

      # Execute the Spatial Pooling algorithm over the input space.
      sp.compute(encoding, True, activeColumns)
      activeColumnIndices = numpy.nonzero(activeColumns)[0]

      # Execute the Temporal Memory algorithm over the active mini-columns.
      tm.compute(activeColumnIndices, learn=True)
      activeCells = tm.getActiveCells()

      # Get the bucket info for this input value for classification.
      bucketIdx = scalarEncoder.getBucketIndices(consumption)[0]
      # ... remainder of the code omitted ...
Example 9: DistalTimestamps1CellPerColumnDetector
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
class DistalTimestamps1CellPerColumnDetector(AnomalyDetector):
  """The 'numenta' detector, with the following changes:

  - Use pure Temporal Memory, not the classic TP that uses backtracking.
  - Don't spatial pool the timestamp. Pass it in as distal input.
  - 1 cell per column.
  - Use w=41 in the scalar encoding, rather than w=21, to make up for the
    lost timestamp input to the spatial pooler.
  """

  def __init__(self, *args, **kwargs):
    super(DistalTimestamps1CellPerColumnDetector, self).__init__(*args,
                                                                 **kwargs)
    self.valueEncoder = None
    self.encodedValue = None
    self.timestampEncoder = None
    self.encodedTimestamp = None
    self.activeExternalCells = []
    self.prevActiveExternalCells = []
    self.sp = None
    self.spOutput = None
    self.etm = None
    self.anomalyLikelihood = None

  def getAdditionalHeaders(self):
    """Returns a list of strings."""
    return ["raw_score"]

  def initialize(self):
    rangePadding = abs(self.inputMax - self.inputMin) * 0.2
    minVal = self.inputMin - rangePadding
    maxVal = (self.inputMax + rangePadding
              if self.inputMin != self.inputMax
              else self.inputMin + 1)
    numBuckets = 130.0
    resolution = max(0.001, (maxVal - minVal) / numBuckets)
    self.valueEncoder = RandomDistributedScalarEncoder(resolution,
                                                       w=41,
                                                       seed=42)
    self.encodedValue = np.zeros(self.valueEncoder.getWidth(),
                                 dtype=np.uint32)

    self.timestampEncoder = DateEncoder(timeOfDay=(21, 9.49,))
    self.encodedTimestamp = np.zeros(self.timestampEncoder.getWidth(),
                                     dtype=np.uint32)

    inputWidth = self.valueEncoder.getWidth()
    self.sp = SpatialPooler(**{
        "globalInhibition": True,
        "columnDimensions": [2048],
        "inputDimensions": [inputWidth],
        "potentialRadius": inputWidth,
        "numActiveColumnsPerInhArea": 40,
        "seed": 1956,
        "potentialPct": 0.8,
        "boostStrength": 0.0,
        "synPermActiveInc": 0.003,
        "synPermConnected": 0.2,
        "synPermInactiveDec": 0.0005,
    })
    self.spOutput = np.zeros(2048, dtype=np.float32)

    self.etm = ExtendedTemporalMemory(**{
        "activationThreshold": 13,
        "cellsPerColumn": 1,
        "columnDimensions": (2048,),
        "basalInputDimensions": (self.timestampEncoder.getWidth(),),
        "initialPermanence": 0.21,
        "maxSegmentsPerCell": 128,
        "maxSynapsesPerSegment": 32,
        "minThreshold": 10,
        "maxNewSynapseCount": 20,
        "permanenceDecrement": 0.1,
        "permanenceIncrement": 0.1,
        "seed": 1960,
        "checkInputs": False,
    })

    learningPeriod = math.floor(self.probationaryPeriod / 2.0)
    self.anomalyLikelihood = anomaly_likelihood.AnomalyLikelihood(
        claLearningPeriod=learningPeriod,
        estimationSamples=self.probationaryPeriod - learningPeriod,
        reestimationPeriod=100
    )

  def handleRecord(self, inputData):
    """Returns a tuple (anomalyScore, rawScore)."""
    self.valueEncoder.encodeIntoArray(inputData["value"],
                                      self.encodedValue)
    self.timestampEncoder.encodeIntoArray(inputData["timestamp"],
                                          self.encodedTimestamp)
    self.prevActiveExternalCells = self.activeExternalCells
    self.activeExternalCells = self.encodedTimestamp.nonzero()[0]
    # ... remainder of the code omitted ...
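A side note on the w=41 choice called out in the docstring above: w is the number of active bits per encoding, so doubling it roughly doubles the overlap budget the spatial pooler sees from the scalar input. A quick check (illustrative sketch; n is passed explicitly since the RDSE requires n to comfortably exceed w):

from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder

enc21 = RandomDistributedScalarEncoder(resolution=1.0, w=21, seed=42)
enc41 = RandomDistributedScalarEncoder(resolution=1.0, w=41, n=400, seed=42)
print enc21.encode(5.0).sum()  # 21 active bits
print enc41.encode(5.0).sum()  # 41 active bits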
Example 10: runHotgym
# Required import: from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder [as alias]
# Or: from nupic.encoders.random_distributed_scalar.RandomDistributedScalarEncoder import getWidth [as alias]
def runHotgym(numRecords):
  with open(_PARAMS_PATH, "r") as f:
    modelParams = yaml.safe_load(f)["modelParams"]
  enParams = modelParams["sensorParams"]["encoders"]
  spParams = modelParams["spParams"]
  tmParams = modelParams["tmParams"]

  timeOfDayEncoder = DateEncoder(
      timeOfDay=enParams["timestamp_timeOfDay"]["timeOfDay"])
  weekendEncoder = DateEncoder(
      weekend=enParams["timestamp_weekend"]["weekend"])
  scalarEncoder = RandomDistributedScalarEncoder(
      enParams["consumption"]["resolution"])

  encodingWidth = (timeOfDayEncoder.getWidth()
                   + weekendEncoder.getWidth()
                   + scalarEncoder.getWidth())

  sp = SpatialPooler(
      # How large the input encoding will be.
      inputDimensions=(encodingWidth,),
      # How many mini-columns will be in the Spatial Pooler.
      columnDimensions=(spParams["columnCount"],),
      # What percent of the columns' receptive field is available for
      # potential synapses?
      potentialPct=spParams["potentialPct"],
      # This means that the input space has no topology.
      globalInhibition=spParams["globalInhibition"],
      localAreaDensity=spParams["localAreaDensity"],
      # Roughly 2%, given that there is only one inhibition area because we
      # have turned on globalInhibition (40 / 2048 = 0.0195).
      numActiveColumnsPerInhArea=spParams["numActiveColumnsPerInhArea"],
      # How quickly synapses grow and degrade.
      synPermInactiveDec=spParams["synPermInactiveDec"],
      synPermActiveInc=spParams["synPermActiveInc"],
      synPermConnected=spParams["synPermConnected"],
      # boostStrength controls the strength of boosting. Boosting encourages
      # efficient usage of SP columns.
      boostStrength=spParams["boostStrength"],
      # Random number generator seed.
      seed=spParams["seed"],
      # TODO: is this useful?
      # Determines if inputs at the beginning and end of an input dimension
      # should be considered neighbors when mapping columns to inputs.
      wrapAround=False
  )

  tm = TemporalMemory(
      # Must be the same dimensions as the SP.
      columnDimensions=(tmParams["columnCount"],),
      # How many cells in each mini-column.
      cellsPerColumn=tmParams["cellsPerColumn"],
      # A segment is active if it has >= activationThreshold connected
      # synapses that are active due to infActiveState.
      activationThreshold=tmParams["activationThreshold"],
      initialPermanence=tmParams["initialPerm"],
      # TODO: This comes from the SP params; is this normal?
      connectedPermanence=spParams["synPermConnected"],
      # Minimum number of active synapses for a segment to be considered
      # during the search for the best-matching segments.
      minThreshold=tmParams["minThreshold"],
      # The max number of synapses added to a segment during learning.
      maxNewSynapseCount=tmParams["newSynapseCount"],
      permanenceIncrement=tmParams["permanenceInc"],
      permanenceDecrement=tmParams["permanenceDec"],
      predictedSegmentDecrement=0.0,
      maxSegmentsPerCell=tmParams["maxSegmentsPerCell"],
      maxSynapsesPerSegment=tmParams["maxSynapsesPerSegment"],
      seed=tmParams["seed"]
  )

  classifier = SDRClassifierFactory.create()
  results = []
  with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = reader.next()
    reader.next()
    reader.next()

    for count, record in enumerate(reader):
      if count >= numRecords:
        break

      # Convert the date string into a Python datetime object.
      dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
      # Convert the data value string into a float.
      consumption = float(record[1])

      # To encode, we need to provide zero-filled numpy arrays for the
      # encoders to populate.
      timeOfDayBits = numpy.zeros(timeOfDayEncoder.getWidth())
      weekendBits = numpy.zeros(weekendEncoder.getWidth())
      consumptionBits = numpy.zeros(scalarEncoder.getWidth())

      # Now we call the encoders to create bit representations for each value.
      timeOfDayEncoder.encodeIntoArray(dateString, timeOfDayBits)
      weekendEncoder.encodeIntoArray(dateString, weekendBits)
      scalarEncoder.encodeIntoArray(consumption, consumptionBits)

      # Concatenate all these encodings into one large encoding for Spatial
      # Pooling.
      # ... remainder of the code omitted ...