本文整理汇总了Python中nengo.spa.Vocabulary类的典型用法代码示例。如果您正苦于以下问题:Python Vocabulary类的具体用法?Python Vocabulary怎么用?Python Vocabulary使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Vocabulary类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_extend
def test_extend(rng):
    """Vocabulary.extend adds keys in order and tracks unitary pointers."""
    v = Vocabulary(16, rng=rng)
    v.parse('A+B')
    assert v.keys == ['A', 'B']
    assert not v.unitary

    # Plain extension: new keys are appended after the existing ones.
    v.extend(['C', 'D'])
    assert v.keys == ['A', 'B', 'C', 'D']

    # Extension with an explicit subset of unitary keys.
    v.extend(['E', 'F'], unitary=['E'])
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F']
    assert v.unitary == ['E']

    # A unitary pointer has Fourier coefficients of magnitude one.
    coeffs = np.fft.fft(v['E'].v)
    magnitudes = np.sqrt(coeffs.imag ** 2 + coeffs.real ** 2)
    assert np.allclose(magnitudes, np.ones(16))

    # unitary=True marks every newly added key as unitary.
    v.extend(['G', 'H'], unitary=True)
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    assert v.unitary == ['E', 'G', 'H']
示例2: test_transform
def test_transform():
    """transform_to maps pointers from one vocabulary into another."""
    v1 = Vocabulary(32, rng=np.random.RandomState(7))
    v2 = Vocabulary(64, rng=np.random.RandomState(8))
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')

    # Unrestricted transform: every key of v1 maps to its v2 counterpart,
    # and the mapping is linear (sums map to sums).
    t = v1.transform_to(v2)
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.95

    # Restricted transform: only the listed keys are guaranteed to map.
    t = v1.transform_to(v2, keys=['A', 'B'])
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    # BUG FIX: compare B against the transform of B alone; the original
    # transformed C.v + B.v here (copy/paste from the assert above).
    assert v2.parse('B').compare(np.dot(t, B.v)) > 0.95
示例3: test_prob_cleanup
def test_prob_cleanup(rng):
    """prob_cleanup stays within the expected bands for known inputs."""
    vocab = Vocabulary(64, rng=rng)
    assert 1.0 > vocab.prob_cleanup(0.7, 10000) > 0.9999
    assert 0.9999 > vocab.prob_cleanup(0.6, 10000) > 0.999
    assert 0.99 > vocab.prob_cleanup(0.5, 1000) > 0.9

    # Repeat the checks at a larger vocabulary dimensionality.
    vocab = Vocabulary(128, rng=rng)
    assert 0.999 > vocab.prob_cleanup(0.4, 1000) > 0.997
    assert 0.99 > vocab.prob_cleanup(0.4, 10000) > 0.97
    assert 0.9 > vocab.prob_cleanup(0.4, 100000) > 0.8
示例4: test_am_wta
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    def input_func(t):
        # Phase 1 (t < 0.2): mixed input in which A is the stronger term.
        if t < 0.2:
            return vocab.parse('A+0.8*B').v
        # Short zero-input gap so the memory can reset between phases.
        elif t < 0.3:
            return np.zeros(D)
        # Phase 2: mixed input in which B is the stronger term.
        else:
            return vocab.parse('0.8*A+B').v

    with nengo.Network('model', seed=seed) as m:
        # wta_output=True: only the best-matching pointer should win.
        am = AssociativeMemory(vocab, wta_output=True)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)
        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    # Windows late in each phase, where the output should have settled.
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    # Diagnostic plots: input similarities (top), output (bottom).
    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    # In each window the winner dominates and the loser is suppressed.
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) < 0.2
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) < 0.2
示例5: test_am_assoc_mem_threshold
def test_am_assoc_mem_threshold(Simulator):
    """Standard associative memory (differing input and output vocabularies).

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    # Output vocabulary with half the dimensionality of the input one.
    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        # First half: input magnitude just below the 0.5 threshold;
        # second half: just above it.
        if t < 0.5:
            return vocab.parse('0.49*A').v
        else:
            return vocab.parse('0.79*A').v

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        out_node = nengo.Node(size_in=D2, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)
        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    # Below threshold (samples 490:500) the output stays near zero ...
    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.49*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("0.79*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[out_p][490:500], vocab2.parse("0").v,
                       atol=.15, rtol=.01)
    # ... above threshold (last samples) it snaps to the clean pointer A.
    assert np.allclose(sim.data[out_p][-10:], vocab2.parse("A").v,
                       atol=.15, rtol=.01)
示例6: test_transform
def test_transform(rng):
    """transform_to maps pointers from a source into a target vocabulary."""
    src = Vocabulary(32, rng=rng)
    dst = Vocabulary(64, rng=rng)
    ptr_a = src.parse("A")
    ptr_b = src.parse("B")
    ptr_c = src.parse("C")

    # Unrestricted transform covers every key in the source vocabulary.
    transform = src.transform_to(dst)
    assert dst.parse("A").compare(np.dot(transform, ptr_a.v)) > 0.95
    assert dst.parse("C+B").compare(np.dot(transform, ptr_c.v + ptr_b.v)) > 0.9

    # Restricting to a subset of keys still maps those keys faithfully.
    transform = src.transform_to(dst, keys=["A", "B"])
    assert dst.parse("A").compare(np.dot(transform, ptr_a.v)) > 0.95
    assert dst.parse("B").compare(np.dot(transform, ptr_b.v)) > 0.95
示例7: test_include_pairs
def test_include_pairs():
    """Toggling include_pairs recomputes the key-pair list."""
    vocab = Vocabulary(10)
    for key in ("A", "B", "C"):
        vocab[key]
    # Pair tracking is off by default.
    assert vocab.key_pairs is None

    # Enabling it builds every unordered binding of the existing keys.
    vocab.include_pairs = True
    assert vocab.key_pairs == ["A*B", "A*C", "B*C"]

    # Disabling clears the list; re-enabling resumes tracking.
    vocab.include_pairs = False
    assert vocab.key_pairs is None
    vocab.include_pairs = True

    # A newly created key adds one pair per previously existing key.
    vocab["D"]
    assert vocab.key_pairs == ["A*B", "A*C", "B*C", "A*D", "B*D", "C*D"]

    # The flag can also be passed to the constructor directly.
    vocab = Vocabulary(12, include_pairs=True)
    for key in ("A", "B", "C"):
        vocab[key]
    assert vocab.key_pairs == ["A*B", "A*C", "B*C"]
示例8: test_include_pairs
def test_include_pairs(rng):
    """key_pairs lists bound pairs while include_pairs is enabled."""
    vocab = Vocabulary(10, rng=rng)
    for name in 'ABC':
        vocab[name]
    # No pair tracking until the flag is set.
    assert vocab.key_pairs is None

    # Enabling generates all unordered bindings of current keys.
    vocab.include_pairs = True
    assert vocab.key_pairs == ['A*B', 'A*C', 'B*C']

    # Disabling discards the list entirely.
    vocab.include_pairs = False
    assert vocab.key_pairs is None

    # Re-enabled: a fresh key pairs with every earlier key.
    vocab.include_pairs = True
    vocab['D']
    assert vocab.key_pairs == ['A*B', 'A*C', 'B*C', 'A*D', 'B*D', 'C*D']

    # include_pairs may also be set at construction time.
    vocab = Vocabulary(12, include_pairs=True)
    for name in 'ABC':
        vocab[name]
    assert vocab.key_pairs == ['A*B', 'A*C', 'B*C']
示例9: initialize_vis_vocab
def initialize_vis_vocab(self, vis_dim, vis_sps):
    """Build the vision vocabulary from raw semantic-pointer vectors.

    Parameters
    ----------
    vis_dim : int
        Dimensionality of the vision vocabulary to create.
    vis_sps : ndarray
        2-D array of semantic-pointer vectors; row i is registered under
        the label ``self.vis_sp_strs[i]``, so ``vis_sps.shape[0]`` must
        equal ``len(self.vis_sp_strs)``.

    Raises
    ------
    RuntimeError
        If the number of vectors does not match the number of labels.
    """
    if vis_sps.shape[0] != len(self.vis_sp_strs):
        # BUG FIX: the error message misspelled the class as 'Vocabulatory'.
        raise RuntimeError('Vocabulary.initialize_vis_vocab: ' +
                           'Mismatch in shape of raw vision SPs and ' +
                           'number of vision SP labels.')
    self.vis_dim = vis_dim
    self.vis = Vocabulary(self.vis_dim)
    # Register each raw vector under its corresponding label.
    for i, sp_str in enumerate(self.vis_sp_strs):
        self.vis.add(sp_str, vis_sps[i, :])
示例10: test_create_pointer_warning
def test_create_pointer_warning(rng):
    """Creating too many pointers for the dimensionality warns the user."""
    vocab = Vocabulary(2, rng=rng)
    # In 2 dimensions, five mutually dissimilar pointers cannot exist.
    with warns(UserWarning):
        for key in 'ABCDE':
            vocab.parse(key)
示例11: test_am_spa_interaction
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    # Output vocabulary with half the dimensionality of the input one.
    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        # Below-threshold expression first, above-threshold afterwards.
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        # Enable every optional feature at once to exercise the full
        # construction path inside an SPA network.
        m.am = AssociativeMemory(vocab, vocab2,
                                 input_keys=['A', 'B', 'C'],
                                 output_keys=['B', 'C', 'D'],
                                 default_output_key='A',
                                 threshold=0.5,
                                 inhibitable=True,
                                 wta_output=True,
                                 threshold_output=True)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
示例12: test_subset
def test_subset(rng):
    """create_subset shares pointers with, and points back to, its parent."""
    parent = Vocabulary(32, rng=rng)
    parent.parse('A+B+C+D+E+F+G')

    # A subset exposes only the requested keys, reusing the same pointers.
    sub = parent.create_subset(['A', 'C', 'E'])
    assert sub.keys == ['A', 'C', 'E']
    for key in ('A', 'C', 'E'):
        assert sub[key] == parent[key]
    assert sub.parent is parent

    # A subset of a subset is rooted at the original vocabulary.
    sub_of_sub = sub.create_subset(['C', 'E'])
    assert sub_of_sub.parent is sub.parent and sub.parent is parent

    # Pair tracking on one subset must not leak to parent or sibling.
    sub_of_sub.include_pairs = True
    assert sub_of_sub.key_pairs == ['C*E']
    assert not parent.include_pairs
    assert not sub.include_pairs

    # Between a vocabulary and its subset the transform is the identity.
    transform = parent.transform_to(sub)
    assert sub.parse('A').compare(
        np.dot(transform, parent.parse('A').v)) >= 0.99999999
示例13: test_am_spa_interaction
def test_am_spa_interaction(Simulator):
    """Standard associative memory interacting with other SPA modules.

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    # Output vocabulary with half the dimensionality of the input one.
    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        # Below-threshold expression first, above-threshold afterwards.
        if t < 0.5:
            return '0.49*A'
        else:
            return '0.79*A'

    m = nengo.spa.SPA('model', seed=123)
    with m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
示例14: test_am_defaults
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        # Constant input: the clean pointer A.
        in_node = nengo.Node(output=vocab.parse("A").v,
                             label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)
        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    # After settling, both input and output should match pointer A.
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
示例15: test_am_basic
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        # Constant input: the clean pointer A.
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        nengo.Connection(in_node, am.input)
        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.2)
    t = sim.trange()

    # Diagnostic plots: input similarities (top), output (bottom).
    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    # After 0.15 s the output should have converged to the pointer A.
    assert similarity(sim.data[in_p][t > 0.15], vocab.parse("A").v) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab.parse("A").v) > 0.8