This page collects typical usage examples of the Python method network.Network.connectLayers. If you are unsure how to use Network.connectLayers, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the class network.Network, to which this method belongs.
The following shows 7 code examples of Network.connectLayers, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site surface better Python code examples.
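All seven examples share the same basic pattern: construct a Network, add extra layers with createLayer, and wire them together with connectLayers. A distilled fragment of that pattern, taken from Example 1 below, looks like this:

from network import Network  # the module all of the examples import

network = Network(2, 1)                              # 2 input neurons, 1 output neuron (as in Example 1)
hiddenLayer = network.createLayer(1, name="hidden")  # an extra layer between input and output
network.connectLayers(network.inputLayer, hiddenLayer, interconnected=True)
network.connectLayers(hiddenLayer, network.outputLayer, interconnected=True)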
Example 1: greaterThanTest
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def greaterThanTest(debug=False):
    """
    Tests to see if a network can determine if x is greater than y.
      (output)
         |
      (hidden)
       |    \
    (in1)  (in2)
    """
    network = Network(2, 1)
    hiddenLayer = network.createLayer(1, name="hidden", learningRate=0.1)
    network.connectLayers(network.inputLayer, hiddenLayer, interconnected=True)
    network.connectLayers(hiddenLayer, network.outputLayer, interconnected=True)
    printLayer(network.inputLayer)
    printLayer(hiddenLayer)
    printLayer(network.outputLayer)

    def genData():
        return [[random.random(), random.random()]]

    def genExpected(x):
        return [[1.0] if y[0] > y[1] else [0.0] for y in x]

    def calcDiff(x, y):
        return sum([abs(a[0] - b[0]) for a, b in zip(x, y)])

    runTest(network, genData, genExpected, calcDiff, 100, 20, 100, debug=debug)
    printLayer(network.inputLayer)
    printLayer(hiddenLayer)
    printLayer(network.outputLayer)
    print("=" * 80)
Example 2: filterTest
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def filterTest(debug=False):
    """
    Tests to see if a network can filter negative numbers using a gating mechanism.
     (output)
        |
        x - (gate)
        |  /
      (in)
    """
    network = Network(1, 1)
    gateLayer = network.createLayer(1, name="gate")
    network.connectLayers(network.inputLayer, network.outputLayer, interconnected=True)
    network.connectLayers(network.inputLayer, gateLayer, interconnected=True)
    network.gateIncomingConnections(network.outputLayer, gateLayer)
    printLayer(network.inputLayer)
    printLayer(gateLayer)
    printLayer(network.outputLayer)

    def genData():
        return [[2 * (random.random() - 0.5)] for _ in xrange(20)]

    def genExpected(x):
        return [val if val[0] > 0 else [0.0] for val in x]

    def calcDiff(x, y):
        return sum([abs(a[0] - b[0]) for a, b in zip(x, y)])

    runTest(network, genData, genExpected, calcDiff, 100, 10, 200, debug=debug)
    printLayer(network.inputLayer)
    printLayer(gateLayer)
    printLayer(network.outputLayer)
    print("=" * 80)
Example 3: simpleRecall
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def simpleRecall():
    network = Network(5, 4)
    memoryLayer = network.createLayer(4, name="memory")
    inputGateLayer = network.createLayer(4, name="inputGate", bias=True)
    forgetGateLayer = network.createLayer(4, name="forgetGate", bias=True)
    outputGateLayer = network.createLayer(4, name="outputGate", bias=True)
    network.connectLayers(network.inputLayer, memoryLayer, interconnected=True)
    network.connectLayers(network.inputLayer, inputGateLayer, interconnected=True)
    network.connectLayers(network.inputLayer, forgetGateLayer, interconnected=True)
    network.connectLayers(network.inputLayer, outputGateLayer, interconnected=True)
    network.connectLayers(memoryLayer, inputGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, forgetGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, outputGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, network.outputLayer, interconnected=False)
    network.addSelfRecurrence(memoryLayer, forgetGateLayer)
    network.gateIncomingConnections(network.outputLayer, outputGateLayer)
    network.gateIncomingConnections(memoryLayer, inputGateLayer)
    printLayer(network.inputLayer)
    printLayer(inputGateLayer)
    printLayer(memoryLayer)
    printLayer(outputGateLayer)
    printLayer(network.outputLayer)
    inputSet = []
    # Create the set of input sequences.
    for _ in xrange(200):
        length = 15
        # Fill `length` blank vectors.
        inputSequence = [[0.0 for _ in xrange(5)] for __ in xrange(length)]
        # Add one relevant vector.
        relevant = [0.0, 0.0, 0.0, 0.0, 0.0]
        relevant[random.randint(0, 3)] = 1.0
        inputSequence.append(relevant)
        random.shuffle(inputSequence)
        # Add the prompt.
        inputSequence.append([0.0, 0.0, 0.0, 0.0, 1.0])
        # Create the output sequence.
        outputSequence = [[0.0] * 4] * (length + 1)
        #outputSequence = [None] * 11
        expected = [0.0, 0.0, 0.0, 0.0]
        expected[relevant.index(1.0)] = 1.0
        outputSequence.append(expected)
        inputSet.append((inputSequence, outputSequence))
    testSet = inputSet[:20]
    inputSet = inputSet[20:]

    def calcDiff(x, y):
        diff = 0.0
        zipped = zip(x, y)
        for actual, expected in zipped[:-1]:
            for a in actual:
                if a > 0.5: diff += a
        actual, expected = zipped[-1]
        for a, e in zip(actual, expected):
            if e == 1.0 and a < 0.5:
                #print(a, " != 1")
                diff += 1.0 - a
            elif e == 0.0 and a > 0.5:
                #print(a, " != 0")
                diff += a
        return diff

    beforeDiff = calculateDiff(network, testSet, calcDiff, printValues=False)
    print("Before diff: %f" % beforeDiff)
    #for _ in xrange(100):
    iterations = 0
    while True:
        print("Iteration %d" % iterations)
        iterations += 1
        for inputSequence, outputSequence in inputSet:
            #print(".")
            output = network.learn(inputSequence, outputSequence)
        diff = calculateDiff(network, testSet, calcDiff, printValues=False)
        print(diff)
        if diff < 1: break
    afterDiff = calculateDiff(network, testSet, calcDiff, printValues=False)
    print("Before diff: %f" % beforeDiff)
    print("After diff: %f" % afterDiff)
Example 4: memoryTest
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def memoryTest(debug=False):
    """
    Tests to see if a network can remember a value.
    Input sequences contain one value and one signal.
    The signal indicates that the value should be let into the
    memory cell and remembered.
       (output)
           |
       (memory) )
           |
           x ---- (gate)
           |         |
       (input)   (input2)
    """
    network = Network(2, 1)
    gateLayer = network.createLayer(1, name="gate", activationFunction=activation.TanH)
    memoryLayer = network.createLayer(1, name="memory", activationFunction=activation.Identity)
    network.connectLayers(network.inputLayer[:1], memoryLayer)
    network.connectLayers(network.inputLayer[1:2], gateLayer)
    network.connectLayers(memoryLayer, network.outputLayer)
    network.gateIncomingConnections(memoryLayer, gateLayer)
    network.addSelfRecurrence(memoryLayer)
    printLayer(network.inputLayer)
    printLayer(memoryLayer)
    printLayer(gateLayer)
    printLayer(network.outputLayer)

    def genData():
        sequence = [[random.random() - 0.5, 0.0] for _ in xrange(20)]
        if random.random() < 0.8:
            sequence.append([random.random() - 0.5, 1.0])
        else:
            sequence.append([random.random() - 0.5, 0.0])
        random.shuffle(sequence)
        return sequence

    def genExpected(sequence):
        val = None
        index = None
        for i, s in enumerate(sequence):
            if s[1] == 1.0:
                val = s[0]
                index = i
        expected = [[0.0] for _ in xrange(len(sequence))]
        if val is not None:
            for i in xrange(len(sequence) - index):
                expected[i + index] = [val]
        return expected

    def calcDiff(x, y):
        return sum([abs(a[0] - b[0]) if b is not None else 0.0 for a, b in zip(x, y)])

    runTest(network, genData, genExpected, calcDiff, 100, 10, 100, debug=debug)
    printLayer(network.inputLayer)
    printLayer(memoryLayer)
    printLayer(gateLayer)
    printLayer(network.outputLayer)
Example 5: forgetTest
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def forgetTest(debug=False):
    """
    Tests to see if a network can remember a value and then forget it.
    Input sequences contain one value and two signals.
    The first signal indicates that the value should be let into the
    memory cell and remembered.
    The second signal indicates that the previously remembered value
    should be forgotten.
       (output)
           |
       (memory) ) -- (forget)
           |              |
           x ---- (gate)  |
           |        |     |
       (input) (input2) (input3)
    """
    network = Network(3, 1)
    gateLayer = network.createLayer(1, name="gate", activationFunction=activation.TanH)
    forgetLayer = network.createLayer(1, name="forget", bias=True)
    memoryLayer = network.createLayer(1, name="memory", activationFunction=activation.Identity)
    network.connectLayers(network.inputLayer[:1], memoryLayer)
    network.connectLayers(network.inputLayer[1:2], gateLayer)
    network.connectLayers(network.inputLayer[2:3], forgetLayer)
    network.connectLayers(memoryLayer, network.outputLayer)
    network.gateIncomingConnections(memoryLayer, gateLayer)
    network.addSelfRecurrence(memoryLayer, forgetLayer)
    printLayer(network.inputLayer)
    printLayer(memoryLayer)
    printLayer(gateLayer)
    printLayer(forgetLayer)
    printLayer(network.outputLayer)

    def genData():
        length = 50
        start = random.randint(0, length - 2)
        end = 0
        while end <= start: end = random.randint(1, length - 1)
        sequence = [[random.random() - 0.5,
                     1.0 if _ == start else 0.0,
                     1.0 if _ == end else 0.0] for _ in xrange(length)]
        return sequence

    def genExpected(sequence):
        expected = []
        toRemember = 0.0
        for val, remember, forget in sequence:
            if remember == 1.0: toRemember = val
            if forget == 1.0: toRemember = 0.0
            expected.append([toRemember])
        return expected

    def calcDiff(x, y):
        return sum([abs(a[0] - b[0]) for a, b in zip(x, y)])

    runTest(network, genData, genExpected, calcDiff, 100, 10, 100, debug=debug)
    printLayer(network.inputLayer)
    printLayer(memoryLayer)
    printLayer(gateLayer)
    printLayer(forgetLayer)
    printLayer(network.outputLayer)
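The gate, forget, and self-recurrence wiring above is essentially a one-cell LSTM-style memory. Purely as an illustration of the target behavior (not code from the network module), the state forgetTest() wants the memory neuron to track can be written in plain Python; for any sequence produced by genData() above, it reproduces genExpected(sequence).

# Illustration only: the idealized cell update forgetTest() is trying to learn.
def idealMemoryTrace(sequence):
    state = 0.0
    trace = []
    for val, remember, forget in sequence:
        gate = 1.0 if remember == 1.0 else 0.0   # input gate opens on the remember signal
        keep = 0.0 if forget == 1.0 else 1.0     # forget signal clears the self-recurrence
        state = keep * state + gate * val        # LSTM-style memory cell update
        trace.append([state])
    return trace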
Example 6: shortTermTest
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def shortTermTest():
    """
    Tests forward time recurrent connections.
    This network will project the input value forward in time.
    The output neuron is responsible for comparing the current value to the
    previous value.
       (output)
          |
     ---->(squash) --->
     |       |        |
     (input)-----------
    """
    network = Network(1, 1)
    squashLayer = network.createLayer(1, "squash", activationFunction=activation.Sigmoid)
    network.connectLayers(network.inputLayer, squashLayer)
    network.connectLayers(network.inputLayer, squashLayer, recurrent=True)
    network.connectLayers(squashLayer, network.outputLayer)
    printLayer(network.inputLayer)
    printLayer(squashLayer)
    printLayer(network.outputLayer)
    training = []
    test = []

    def genData():
        sequence = []
        for _ in xrange(20):
            sequence.append([random.random()])
        return sequence

    def genExpected(sequence):
        expected = []
        previous = 0.0
        for x in sequence:
            if x[0] > previous:
                expected.append([1.0])
            else:
                expected.append([0.0])
            previous = x[0]
        return expected

    training, test = createSequences(genData, genExpected, 1000, 1000)
    trainNetwork(network, training, 100)
    passed = 0
    totalDiff = 0.0
    for input, expected in test:
        output = network.runSequence(input)
        diff = 0.0
        for actual, exp in zip(output, expected):
            for a, e in zip(actual, exp):
                if e == 1.0 and a < 0.5:
                    diff += 1.0 - a
                elif e == 0.0 and a > 0.5:
                    diff += a
        totalDiff += diff
        if diff == 0.0: passed += 1
        else:
            '''
            print(input)
            print(output)
            print(expected)
            print(diff)
            print("")
            '''
    print("Passed %d / %d" % (passed, len(test)))
    print("Diff: %f" % totalDiff)
    printLayer(network.inputLayer)
    printLayer(squashLayer)
    printLayer(network.outputLayer)
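createSequences and trainNetwork are also repository helpers that are not shown on this page. Based purely on how they are called above, plausible sketches look like the following; treat them as assumptions rather than the actual implementations.

# Hypothetical sketches of the undisclosed createSequences/trainNetwork helpers,
# inferred only from their call sites above.
def createSequencesSketch(genData, genExpected, trainCount, testCount):
    def build(count):
        return [(seq, genExpected(seq)) for seq in (genData() for _ in range(count))]
    return build(trainCount), build(testCount)

def trainNetworkSketch(network, training, iterations):
    for _ in range(iterations):
        for inputSequence, expectedSequence in training:
            network.learn(inputSequence, expectedSequence)   # learn() appears in simpleRecall()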
Example 7: distractedRecall
# Required imports: from network import Network [as alias]
# Or: from network.Network import connectLayers [as alias]
def distractedRecall():
    network = Network(10, 4)
    memoryLayer = network.createLayer(8, name="memory")
    inputGateLayer = network.createLayer(8, name="inputGate", bias=True)
    forgetGateLayer = network.createLayer(8, name="forgetGate", bias=True)
    outputGateLayer = network.createLayer(8, name="outputGate", bias=True)
    network.connectLayers(network.inputLayer, memoryLayer, interconnected=True)
    network.connectLayers(network.inputLayer, inputGateLayer, interconnected=True)
    network.connectLayers(network.inputLayer, forgetGateLayer, interconnected=True)
    network.connectLayers(network.inputLayer, outputGateLayer, interconnected=True)
    network.connectLayers(memoryLayer, inputGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, forgetGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, outputGateLayer, recurrent=True, interconnected=True)
    network.connectLayers(memoryLayer, network.outputLayer, interconnected=True)
    network.addSelfRecurrence(memoryLayer, forgetGateLayer)
    network.gateOutgoingConnections(memoryLayer, outputGateLayer)
    network.gateIncomingConnections(memoryLayer, inputGateLayer)
    inputSet = []
    # Create the set of input sequences.
    for _ in xrange(100):
        length = 10
        # Create the input sequence.
        inputSequence = []
        # Fill with random distractors.
        for _ in xrange(length):
            inputVector = [0.0 for _ in xrange(10)]
            inputVector[random.randint(4, 7)] = 1
            inputSequence.append(inputVector)
        # Select two random vectors and inject random target symbols.
        firstIndex = random.randint(0, length - 1)
        secondIndex = firstIndex
        while secondIndex == firstIndex: secondIndex = random.randint(0, length - 1)
        if firstIndex > secondIndex:
            temp = firstIndex
            firstIndex = secondIndex
            secondIndex = temp
        firstTarget = random.randint(0, 3)
        secondTarget = random.randint(0, 3)
        #print(firstIndex, secondIndex)
        #print(firstTarget, secondTarget)
        inputSequence[firstIndex] = [1.0 if x == firstTarget else 0.0 for x in xrange(10)]
        inputSequence[secondIndex] = [1.0 if x == secondTarget else 0.0 for x in xrange(10)]
        # Add the prompts.
        inputSequence.append([1.0 if x == 8 else 0.0 for x in xrange(10)])
        inputSequence.append([1.0 if x == 9 else 0.0 for x in xrange(10)])
        outputSequence = [[0] * 4] * length
        #outputSequence = [None] * length
        outputSequence.append([1 if x == firstTarget else 0 for x in xrange(4)])
        outputSequence.append([1 if x == secondTarget else 0 for x in xrange(4)])
        #for line in inputSequence: print(line)
        #for line in outputSequence: print(line)
        #sys.exit()
        inputSet.append((inputSequence, outputSequence))
    testSet = inputSet[:10]
    inputSet = inputSet[10:]

    def calcDiff(x, y):
        diff = 0.0
        zipped = zip(x, y)
        for actual, expected in zipped[:-1]:
            for a in actual:
                if a > 0.5: diff += a
        actual, expected = zipped[-1]
        for a, e in zip(actual, expected):
            if e == 1.0 and a < 0.5:
                print(a, " != 1")
                diff += 1.0 - a
            elif e == 0 and a > 0.5:
                print(a, " != 0")
                diff += a
        return diff

    beforeDiff = calculateDiff(network, testSet, calcDiff, printValues=False)
    print("Before diff: %f" % beforeDiff)
    #for _ in xrange(10):
    iterations = 0
    while True:
        print("Iteration %d" % iterations)
        iterations += 1
        for inputSequence, outputSequence in inputSet:
            #print(".")
            output = network.learn(inputSequence, outputSequence)
#......... the rest of this example is omitted .........