Python Vocabulary.create_subset Method Code Examples

This article collects typical usage examples of the Python method nengo.spa.Vocabulary.create_subset. If you are unsure what Vocabulary.create_subset does or how to use it, the curated code examples below should help. You can also explore further usage examples of the containing class, nengo.spa.Vocabulary.


Five code examples of the Vocabulary.create_subset method are shown below, sorted by popularity by default.
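Before the full examples, here is a minimal sketch of the basic usage, assuming the legacy nengo.spa API (nengo 2.x), where create_subset returns a read-only child vocabulary that shares its parent's semantic pointers (the vocabulary contents below are illustrative):

import numpy as np
from nengo.spa import Vocabulary

rng = np.random.RandomState(0)
vocab = Vocabulary(16, rng=rng)        # a 16-dimensional vocabulary
vocab.parse('RED + GREEN + BLUE')      # parse() creates the named pointers

colors = vocab.create_subset(['RED', 'BLUE'])  # keep only two pointers
assert colors.keys == ['RED', 'BLUE']          # keys are limited to the subset
assert colors['RED'] == vocab['RED']           # pointers are shared, not copied
assert colors.parent is vocab                  # subsets remember their parent

Note that subsets are read-only by default; Example 2 below shows how the readonly flag can be cleared when a subset needs to be extended.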

Example 1: test_subset

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import create_subset [as alias]
# This example also requires: import numpy as np
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)

    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
Author: 4n6strider, Project: nengo, Lines of code: 27, Source file: test_vocabulary.py

Example 2: make_mtr_sp

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import create_subset [as alias]
# This module-level excerpt also requires import numpy as np; the names cfg,
# num_sp_strs, mtr_sp_strs, the mtr_canon_paths arrays, and
# convert_func_2_diff_func are defined elsewhere in the source file (vocabs.py)
def make_mtr_sp(path_x, path_y):
    path_x = convert_func_2_diff_func(path_x)
    path_y = convert_func_2_diff_func(path_y)
    return np.concatenate((path_x, path_y))

mtr_vocab = Vocabulary(cfg.mtr_dim, rng=cfg.rng)
for i, sp_str in enumerate(num_sp_strs):
    mtr_sp_vec = make_mtr_sp(mtr_canon_paths_x[i, :], mtr_canon_paths_y[i, :])
    mtr_vocab.add(sp_str, mtr_sp_vec)

mtr_unk_vocab = Vocabulary(cfg.mtr_dim, rng=cfg.rng)
mtr_unk_vocab.add(mtr_sp_strs[0], make_mtr_sp(mtr_canon_paths_x[-1, :],
                                              mtr_canon_paths_y[-1, :]))

mtr_disp_vocab = mtr_vocab.create_subset(num_sp_strs)
mtr_disp_vocab.readonly = False  # Disable read-only flag for display vocab
mtr_disp_vocab.add(mtr_sp_strs[0], mtr_unk_vocab[mtr_sp_strs[0]].v)

mtr_sp_scale_factor = float(mtr_canon_paths['size_scaling_factor'])

# ##################### Sub-vocabulary definitions ############################
vis_vocab = vocab.create_subset(vis_sp_strs)
vis_vocab_nums_inds = range(len(num_sp_strs))
vis_vocab_syms_inds = range(len(num_sp_strs), len(vis_sp_strs))

pos_vocab = vocab.create_subset(pos_sp_strs)

item_vocab = vocab.create_subset(num_sp_strs)

ps_task_vocab = vocab.create_subset(ps_task_sp_strs)
Author: Stanford-BIS, Project: spaun2.0, Lines of code: 32, Source file: vocabs.py

Example 3: test_am_complex

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import create_subset [as alias]
# This test also uses import numpy as np and import nengo; AssociativeMemory
# and the similarity helper are assumed to come from the surrounding test
# module (test_assoc_mem.py)
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()
    # Input: A+0.8*B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8*A+B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')
    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Author: LittileBee, Project: nengo, Lines of code: 85, Source file: test_assoc_mem.py

Example 4: SpaunVocabulary

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import create_subset [as alias]

#......... part of the code is omitted here .........
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Visual sp str vocab check
        if not all(x in self.vis_sp_strs for x in self.num_sp_strs):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun NUM semantic pointer" +
                               " definitions.")
        if not all(x in self.vis_sp_strs for x in self.misc_vis_sp_strs):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun MISC semantic " +
                               "pointer definitions.")
        if not all(x in self.vis_sp_strs for x in self.ps_task_vis_sp_strs):
            raise RuntimeError("Vocabulator - Stimulus vocabulary does not " +
                               "contain necessary Spaun PS semantic " +
                               "pointer definitions.")

        # ################# Sub-vocabulary definitions ########################
        self.vis_main = self.main.create_subset(self.vis_sp_strs)

        self.pos = self.main.create_subset(self.pos_sp_strs)

        self.item = self.main.create_subset(self.num_sp_strs)
        self.item_1_index = self.main.create_subset(self.num_sp_strs[1:])

        self.ps_task = self.main.create_subset(self.ps_task_sp_strs)
        self.ps_state = self.main.create_subset(self.ps_state_sp_strs)
        self.ps_dec = self.main.create_subset(self.ps_dec_sp_strs)
        self.ps_cmp = self.main.create_subset(self.misc_ps_sp_strs)
        self.ps_action = self.main.create_subset(self.ps_action_sp_strs)
        self.ps_action_learn = \
            self.main.create_subset(self.ps_action_learn_sp_strs)

        self.reward = self.main.create_subset(self.reward_sp_strs)

        self.instr = self.main.create_subset(self.instr_tag_strs)

        # ############ Enumerated vocabulary definitions ######################
        # --- Enumerated vocabulary, enumerates all possible combinations of
        #     position and item vectors (for debug purposes)
        self.enum = Vocabulary(self.sp_dim, rng=rng)
        for pos in self.pos_sp_strs:
            for num in self.num_sp_strs:
                sp_str = '%s*%s' % (pos, num)
                self.enum.add(sp_str, self.main.parse(sp_str))

        self.pos1 = Vocabulary(self.sp_dim, rng=rng)
        for num in self.num_sp_strs:
            sp_str = '%s*%s' % (self.pos_sp_strs[0], num)
            self.pos1.add(sp_str, self.main.parse(sp_str))
Author: xchoo, Project: spaun2.0, Lines of code: 69, Source file: vocabulator.py

Example 5: test_am_default_output_inhibit_utilities

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import create_subset [as alias]
# This test also uses import numpy as np and import nengo; AssociativeMemory
# is assumed to come from the surrounding test module (test_assoc_mem.py)
def test_am_default_output_inhibit_utilities(Simulator):
    """Auto-associative memory (non-wta) complex test.

    Options: defaults to predefined vector if no match is found,
    threshold = 0.3, inhibitable, non-wta, outputs utilities and thresholded
    utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')

    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True, output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        out_node = nengo.Node(size_in=D, label='output')
        utils_node = nengo.Node(size_in=4, label='utils')
        utils_th_node = nengo.Node(size_in=4, label='utils_th')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)
        nengo.Connection(am.output, out_node, synapse=0.03)
        nengo.Connection(am.utilities, utils_node, synapse=0.05)
        nengo.Connection(am.thresholded_utilities, utils_th_node, synapse=0.05)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)
        utils_p = nengo.Probe(utils_node)
        utils_th_p = nengo.Probe(utils_th_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][240:250], vocab.parse("A+0.8*B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.8*A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("E").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][240:250], vocab.parse("A+B").v,
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[out_p][490:500], vocab.parse("A+B").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][740:750], vocab.parse("F").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("0").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][240:250], [1, 0.75, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][490:500], [0.75, 1, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][240:250], [1.05, 1.05, 0, 0],
                       atol=.2, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][490:500], [1.05, 1.05, 0, 0],
                       atol=.1, rtol=.05)
    assert np.allclose(sim.data[utils_th_p][740:750], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[utils_th_p][-10:], [0, 0, 0, 0],
                       atol=.1, rtol=.01)
Author: goaaron, Project: blouw-etal-2015, Lines of code: 83, Source file: test_assoc_mem.py


Note: The nengo.spa.Vocabulary.create_subset examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the programming community, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.