This article collects typical usage examples of the Python method nengo.spa.Vocabulary.parse. If you are unsure what Vocabulary.parse does or how to use it, the curated code examples below may help. You can also read further about the usage of its containing class, nengo.spa.Vocabulary.
A total of 15 code examples of Vocabulary.parse are shown below, sorted by popularity by default.
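Before the examples, here is a minimal, self-contained sketch of the basic pattern they all build on: create a Vocabulary and call parse() to turn a symbolic expression into a semantic pointer. Note that the examples themselves are pytest-style tests that rely on fixtures (Simulator, plt, seed, rng) and on imports such as nengo, numpy as np, and AssociativeMemory that are not repeated in each snippet. The dimensionality, seed, and pointer names in this sketch are arbitrary choices for illustration only.

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(0)    # fixed seed so the run is repeatable
vocab = Vocabulary(64, rng=rng)   # 64-dimensional semantic pointers

# parse() returns a SemanticPointer; names it has not seen before are
# added to the vocabulary automatically.
a = vocab.parse('A')
expr = vocab.parse('A + 0.5*B')   # algebraic expressions are accepted too

print(vocab.keys)                 # ['A', 'B']
print(expr.compare(a))            # high similarity, since A dominates expr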
Example 1: test_am_basic
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.2)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][t > 0.15], vocab.parse("A").v) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab.parse("A").v) > 0.8
Example 2: test_extend
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_extend(rng):
    v = Vocabulary(16, rng=rng)
    v.parse('A+B')
    assert v.keys == ['A', 'B']
    assert not v.unitary

    # Test extending the vocabulary
    v.extend(['C', 'D'])
    assert v.keys == ['A', 'B', 'C', 'D']

    # Test extending the vocabulary with various unitary options
    v.extend(['E', 'F'], unitary=['E'])
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F']
    assert v.unitary == ['E']

    # Check if 'E' is unitary
    fft_val = np.fft.fft(v['E'].v)
    fft_imag = fft_val.imag
    fft_real = fft_val.real
    fft_norms = np.sqrt(fft_imag ** 2 + fft_real ** 2)
    assert np.allclose(fft_norms, np.ones(16))

    v.extend(['G', 'H'], unitary=True)
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    assert v.unitary == ['E', 'G', 'H']
Example 3: test_am_spa_interaction
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_spa_interaction(Simulator):
    """Standard associative memory interacting with other SPA modules.

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return '0.49*A'
        else:
            return '0.79*A'

    m = nengo.spa.SPA('model', seed=123)
    with m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example 4: test_am_spa_interaction
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        m.am = AssociativeMemory(vocab, vocab2,
                                 input_keys=['A', 'B', 'C'],
                                 output_keys=['B', 'C', 'D'],
                                 default_output_key='A',
                                 threshold=0.5,
                                 inhibitable=True,
                                 wta_output=True,
                                 threshold_output=True)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Example 5: test_subset
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)
    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
Example 6: test_am_defaults
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v,
                             label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
Example 7: test_readonly
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_readonly(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C')
    v1.readonly = True
    with pytest.raises(ValueError):
        v1.parse('D')
Example 8: test_transform
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')

    # Test transform from v1 to v2 (full vocabulary)
    # Expected: np.dot(t, A.v) ~= v2.parse('A')
    # Expected: np.dot(t, B.v) ~= v2.parse('B')
    # Expected: np.dot(t, C.v) ~= v2.parse('C')
    t = v1.transform_to(v2)
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.9

    # Test transform from v1 to v2 (only 'A' and 'B')
    t = v1.transform_to(v2, keys=['A', 'B'])
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('B').compare(np.dot(t, C.v + B.v)) > 0.95

    # Test transform_to when either vocabulary is read-only
    v1.parse('D')
    v2.parse('E')

    # When both are read-only, transform_to shouldn't add any new items to
    # either and the transform should be using keys that are the intersection
    # of both vocabularies
    v1.readonly = True
    v2.readonly = True
    t = v1.transform_to(v2)
    assert v1.keys == ['A', 'B', 'C', 'D']
    assert v2.keys == ['A', 'B', 'C', 'E']

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = False
    v2.readonly = True
    t = v1.transform_to(v2)
    assert v1.keys == ['A', 'B', 'C', 'D', 'E']
    assert v2.keys == ['A', 'B', 'C', 'E']

    # When one is read-only, transform_to should add any new items to the non
    # read-only vocabulary
    v1.readonly = True
    v2.readonly = False
    t = v1.transform_to(v2)
    assert v1.keys == ['A', 'B', 'C', 'D', 'E']
    assert v2.keys == ['A', 'B', 'C', 'E', 'D']
Example 9: test_transform
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")

    t = v1.transform_to(v2)
    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    t = v1.transform_to(v2, keys=["A", "B"])
    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, B.v)) > 0.95
Example 10: test_am_wta
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    def input_func(t):
        if t < 0.2:
            return vocab.parse('A+0.8*B').v
        elif t < 0.3:
            return np.zeros(D)
        else:
            return vocab.parse('0.8*A+B').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, wta_output=True)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) < 0.2
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) < 0.2
Example 11: test_am_spa_keys_as_expressions
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_am_spa_keys_as_expressions(Simulator, plt, seed, rng):
    """Provide semantic pointer expressions as input and output keys."""
    D = 64

    vocab_in = Vocabulary(D, rng=rng)
    vocab_out = Vocabulary(D, rng=rng)

    vocab_in.parse('A+B')
    vocab_out.parse('C+D')

    in_keys = ['A', 'A*B']
    out_keys = ['C*D', 'C+D']

    with nengo.spa.SPA(seed=seed) as model:
        model.am = AssociativeMemory(input_vocab=vocab_in,
                                     output_vocab=vocab_out,
                                     input_keys=in_keys,
                                     output_keys=out_keys)

        model.inp = Input(am=lambda t: 'A' if t < 0.1 else 'A*B')

        in_p = nengo.Probe(model.am.input)
        out_p = nengo.Probe(model.am.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # Specify t ranges
    t = sim.trange()
    t_item1 = (t > 0.075) & (t < 0.1)
    t_item2 = (t > 0.175) & (t < 0.2)

    # Modify vocabularies (for plotting purposes)
    vocab_in.add(in_keys[1], vocab_in.parse(in_keys[1]).v)
    vocab_out.add(out_keys[0], vocab_out.parse(out_keys[0]).v)

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab_in))
    plt.ylabel("Input: " + ', '.join(in_keys))
    plt.legend(vocab_in.keys, loc='best')
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab_out))
    plt.plot(t[t_item1], np.ones(t.shape)[t_item1] * 0.9, c='r', lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.91, c='g', lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.89, c='b', lw=2)
    plt.ylabel("Output: " + ', '.join(out_keys))
    plt.legend(vocab_out.keys, loc='best')

    assert np.mean(similarity(sim.data[out_p][t_item1],
                              vocab_out.parse(out_keys[0]).v,
                              normalize=True)) > 0.9
    assert np.mean(similarity(sim.data[out_p][t_item2],
                              vocab_out.parse(out_keys[1]).v,
                              normalize=True)) > 0.9
Example 12: test_text
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_text(rng):
    v = Vocabulary(64, rng=rng)
    x = v.parse("A+B+C")
    y = v.parse("-D-E-F")
    ptr = r"-?[01]\.[0-9]{2}[A-F]"
    assert re.match(";".join([ptr] * 3), v.text(x))
    assert re.match(";".join([ptr] * 2), v.text(x, maximum_count=2))
    assert re.match(ptr, v.text(x, maximum_count=1))
    assert len(v.text(x, maximum_count=10).split(";")) <= 10
    assert re.match(";".join([ptr] * 4), v.text(x, minimum_count=4))
    assert re.match(";".join([ptr.replace("F", "C")] * 3),
                    v.text(x, minimum_count=4, terms=["A", "B", "C"]))
    assert re.match(ptr, v.text(y, threshold=0.6))
    assert v.text(y, minimum_count=None, threshold=0.6) == ""
    assert v.text(x, join=",") == v.text(x).replace(";", ",")
    assert re.match(";".join([ptr] * 2), v.text(x, normalize=True))
    assert v.text([0] * 64) == "0.00F"
    assert v.text(v["D"].v) == "1.00D"
Example 13: test_text
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_text(rng):
    v = Vocabulary(64, rng=rng)
    x = v.parse('A+B+C')
    y = v.parse('-D-E-F')
    ptr = r'-?[01]\.[0-9]{2}[A-F]'
    assert re.match(';'.join([ptr] * 3), v.text(x))
    assert re.match(';'.join([ptr] * 2), v.text(x, maximum_count=2))
    assert re.match(ptr, v.text(x, maximum_count=1))
    assert len(v.text(x, maximum_count=10).split(';')) <= 10
    assert re.match(';'.join([ptr] * 4), v.text(x, minimum_count=4))
    assert re.match(';'.join([ptr.replace('F', 'C')] * 3),
                    v.text(x, minimum_count=4, terms=['A', 'B', 'C']))
    assert re.match(ptr, v.text(y, threshold=0.6))
    assert v.text(y, minimum_count=None, threshold=0.6) == ''
    assert v.text(x, join=',') == v.text(x).replace(';', ',')
    assert re.match(';'.join([ptr] * 2), v.text(x, normalize=True))
    assert v.text([0]*64) == '0.00F'
    assert v.text(v['D'].v) == '1.00D'
Example 14: test_text
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_text():
    rng = np.random.RandomState(1)
    v = Vocabulary(64, rng=rng)
    x = v.parse('A+B+C')
    y = v.parse('-D-E-F')
    assert v.text(x) == '0.99A;0.96C;0.90B'
    assert v.text(x, maximum_count=2) == '0.99A;0.96C'
    assert v.text(x, maximum_count=1) == '0.99A'
    assert v.text(x, maximum_count=10) == '0.99A;0.96C;0.90B'
    assert v.text(x, minimum_count=4) == '0.99A;0.96C;0.90B;-0.02D'
    assert v.text(y) == '0.50C;0.15B'
    assert v.text(y, threshold=0.6) == '0.50C'
    assert v.text(y, minimum_count=None, threshold=0.6) == ''
    assert (v.text(x, minimum_count=4, terms=['A', 'B', 'C']) ==
            '0.99A;0.96C;0.90B')
    assert v.text(x, join=',') == '0.99A,0.96C,0.90B'
    assert v.text(x, normalize=True) == '0.59A;0.57C;0.53B'
    assert v.text([0]*64) == '0.00F'
    assert v.text(v['D'].v) == '1.00D'
Example 15: test_create_pointer_warning
# Required import: from nengo.spa import Vocabulary [as alias]
# Alternatively: from nengo.spa.Vocabulary import parse [as alias]
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with warns(UserWarning):
        v.parse('A')
        v.parse('B')
        v.parse('C')
        v.parse('D')
        v.parse('E')