

Python Vocabulary.add Method Code Examples

This article collects typical usage examples of the Python method nengo.spa.Vocabulary.add. If you are wondering what Vocabulary.add does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples for the containing class, nengo.spa.Vocabulary.


Eight code examples of the Vocabulary.add method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
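Before the examples, here is a minimal sketch of the API itself, assuming the legacy nengo 2.x SPA module: Vocabulary(dimensions) creates a vocabulary, and add(key, vector) registers a named semantic pointer whose vector you supply directly. The keys and vectors below are purely illustrative.

import numpy as np
from nengo.spa import Vocabulary

# Create a 3-dimensional vocabulary and register two named semantic pointers.
vocab = Vocabulary(3)
vocab.add('A', [1, 0, 0])            # plain Python lists are accepted
vocab.add('B', np.array([0, 1, 0]))  # as are numpy arrays

print(vocab.keys)     # -> ['A', 'B']
print(vocab['A'].v)   # the underlying vector of the pointer named 'A'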

Example 1: test_am_spa_keys_as_expressions

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def test_am_spa_keys_as_expressions(Simulator, plt, seed, rng):
    """Provide semantic pointer expressions as input and output keys."""
    D = 64

    vocab_in = Vocabulary(D, rng=rng)
    vocab_out = Vocabulary(D, rng=rng)

    vocab_in.parse('A+B')
    vocab_out.parse('C+D')

    in_keys = ['A', 'A*B']
    out_keys = ['C*D', 'C+D']

    with nengo.spa.SPA(seed=seed) as model:
        model.am = AssociativeMemory(input_vocab=vocab_in,
                                     output_vocab=vocab_out,
                                     input_keys=in_keys,
                                     output_keys=out_keys)

        model.inp = Input(am=lambda t: 'A' if t < 0.1 else 'A*B')

        in_p = nengo.Probe(model.am.input)
        out_p = nengo.Probe(model.am.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # Specify t ranges
    t = sim.trange()
    t_item1 = (t > 0.075) & (t < 0.1)
    t_item2 = (t > 0.175) & (t < 0.2)

    # Modify vocabularies (for plotting purposes)
    vocab_in.add(in_keys[1], vocab_in.parse(in_keys[1]).v)
    vocab_out.add(out_keys[0], vocab_out.parse(out_keys[0]).v)

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab_in))
    plt.ylabel("Input: " + ', '.join(in_keys))
    plt.legend(vocab_in.keys, loc='best')
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab_out))
    plt.plot(t[t_item1], np.ones(t.shape)[t_item1] * 0.9, c='r', lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.91, c='g', lw=2)
    plt.plot(t[t_item2], np.ones(t.shape)[t_item2] * 0.89, c='b', lw=2)
    plt.ylabel("Output: " + ', '.join(out_keys))
    plt.legend(vocab_out.keys, loc='best')

    assert np.mean(similarity(sim.data[out_p][t_item1],
                              vocab_out.parse(out_keys[0]).v,
                              normalize=True)) > 0.9
    assert np.mean(similarity(sim.data[out_p][t_item2],
                              vocab_out.parse(out_keys[1]).v,
                              normalize=True)) > 0.9
Developer: nengo, Project: nengo, Lines: 57, Source: test_assoc_mem.py

Example 2: main

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def main():
    
    model = spa.SPA(label="Vector Storage")
    with model:
        
        # Dimensionality of each representation
        num_dimensions = 2
        sub_dimensions = 2
        
        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize = False)
        
        # Form the inputs
        stored_value_1 = [1] * num_dimensions
        stored_value_1 = [s/np.linalg.norm(stored_value_1) for s in stored_value_1]
        vocab.add("Stored_value_1", stored_value_1)
        
        stored_value_2 = [(-1) ** i for i in range(num_dimensions)]  # alternating +1/-1 across dimensions
        stored_value_2 = [s/np.linalg.norm(stored_value_2) for s in stored_value_2]
        vocab.add("Stored_value_2", stored_value_2)
                
        def first_input(t):
            if t < 10:
                return "Stored_value_2"
            else:
                return "Stored_value_1"
        
        def second_input(t):
            if t < 5:
                return "Stored_value_1"
            else:
                return "Stored_value_2"
                
        # Buffers to store the input
        model.buffer1 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        model.buffer2 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        
        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.buffer2.state.output)
        
        # Connect up the inputs
        model.input = spa.Input(buffer1 = first_input, buffer2 = second_input)
        
        # Buffer to store the output
        model.buffer3 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True)
        buffer_3_probe = nengo.Probe(model.buffer3.state.output)
        
        # Control system
        actions = spa.Actions('dot(buffer1, Stored_value_2) --> buffer3=Stored_value_2', 'dot(buffer1, Stored_value_1) --> buffer3=Stored_value_1+Stored_value_2')
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg)

        
    # Start the simulator
    sim = nengo.Simulator(model)

    # Dynamic plotting
    plt.ion() # Dynamic updating of plots
    fig = plt.figure(figsize=(15,8))
    plt.show()
    ax = fig.gca()
    ax.set_title("Vector Storage")

    while True:
        sim.run(1) # Run for an additional 1 second
        plt.clf() # Clear the figure
        plt.plot(sim.trange(), similarity(sim.data, buffer_1_probe, vocab), label = "Buffer 1 Value") # Plot the entire dataset so far
        plt.plot(sim.trange(), similarity(sim.data, buffer_2_probe, vocab), label = "Buffer 2 Value")
        plt.plot(sim.trange(), similarity(sim.data, buffer_3_probe, vocab), label = "Buffer 3 Value")
        print sim.data[buffer_1_probe][-1]
        print sim.data[buffer_2_probe][-1]
        print sim.data[buffer_3_probe][-1]
        plt.legend(vocab.keys * 3, loc = 2)
        plt.draw() # Re-draw
Developer: adammarblestone, Project: Nengo_Explorations, Lines: 78, Source: SPA_word_semantics_1.py

Example 3: Vocabulary

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)


# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
    vocab.add(pos_sp_strs[i + 1], pos_sp)

# --- Add other visual sp's ---
vocab.parse('+'.join(misc_vis_sp_strs))
vocab.parse('+'.join(ps_task_vis_sp_strs))

# --- Add production system sp's ---
vocab.parse('+'.join(ps_task_sp_strs))
Developer: Stanford-BIS, Project: spaun2.0, Lines: 33, Source: vocabs.py

Example 4: test_add

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def test_add(rng):
    v = Vocabulary(3, rng=rng)
    v.add('A', [1, 2, 3])
    v.add('B', [4, 5, 6])
    v.add('C', [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Developer: CamZHU, Project: nengo, Lines: 8, Source: test_vocabulary.py

Example 5: SpaunVocabulary

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]

#......... part of the code omitted here .........
            rng = np.random.RandomState(int(time.time()))

        # ############### Semantic pointer list definitions ###################
        # --- Position (enumerated) semantic pointers ---
        self.pos_sp_strs = ['POS%i' % (i + 1)
                            for i in range(self.max_enum_list_pos)]

        # --- Unitary semantic pointers
        self.unitary_sp_strs = [self.num_sp_strs[0], self.pos_sp_strs[0]]
        self.unitary_sp_strs.extend(self.ops_sp_strs)

        # --- Production system (action) semantic pointers ---
        self.ps_action_learn_sp_strs = ['A%d' % (i + 1) for i in
                                        range(num_learn_actions)]
        self.ps_action_misc_sp_strs = []
        self.ps_action_sp_strs = (self.ps_action_learn_sp_strs +
                                  self.ps_action_misc_sp_strs)

        # #################### Vocabulary definitions #########################
        # --- Primary vocabulary ---
        self.main = Vocabulary(self.sp_dim, unitary=self.unitary_sp_strs,
                               max_similarity=0.2, rng=rng)

        # --- Add in visual sp's ---
        self.main.parse('+'.join(self.misc_vis_sp_strs))
        self.main.parse('+'.join(self.ps_task_vis_sp_strs))
        for sp_str in list(stim_SP_labels):
            if sp_str not in self.num_sp_strs and \
               sp_str not in self.pos_sp_strs:
                self.main.parse(sp_str)

        # --- Add numerical sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[0], self.num_sp_strs[0]))
        add_sp = self.main[self.ops_sp_strs[0]]
        num_sp = self.main[self.num_sp_strs[0]].copy()
        for i in range(len(self.num_sp_strs) - 1):
            num_sp = num_sp.copy() * add_sp
            self.main.add(self.num_sp_strs[i + 1], num_sp)

        self.add_sp = add_sp

        # --- Add positional sp's ---
        self.main.parse('%s+%s' % (self.ops_sp_strs[1], self.pos_sp_strs[0]))
        inc_sp = self.main[self.ops_sp_strs[1]]
        pos_sp = self.main[self.pos_sp_strs[0]].copy()
        for i in range(len(self.pos_sp_strs) - 1):
            pos_sp = pos_sp.copy() * inc_sp
            self.main.add(self.pos_sp_strs[i + 1], pos_sp)

        self.inc_sp = inc_sp

        # --- Add production system sp's ---
        self.main.parse('+'.join(self.ps_task_sp_strs))
        self.main.parse('+'.join(self.ps_state_sp_strs))
        self.main.parse('+'.join(self.ps_dec_sp_strs))
        if len(self.ps_action_sp_strs) > 0:
            self.main.parse('+'.join(self.ps_action_sp_strs))
        self.main.parse('+'.join(self.misc_ps_sp_strs))

        # --- Add instruction processing system sp's ---
        self.main.parse('+'.join(self.instr_tag_strs))

        # ################### Visual Vocabulary definitions ###################
        self.vis_sp_strs = list(stim_SP_labels)

        # Visual sp str vocab check
Developer: xchoo, Project: spaun2.0, Lines: 70, Source: vocabulator.py

Example 6: test_add

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def test_add():
    v = Vocabulary(3)
    v.add("A", [1, 2, 3])
    v.add("B", [4, 5, 6])
    v.add("C", [7, 8, 9])
    assert np.allclose(v.vectors, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Developer: qqming113, Project: nengo, Lines: 8, Source: test_vocabulary.py

Example 7: setup_probes_generic

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def setup_probes_generic(model):
    with model:
        model.config[nengo.Probe].synapse = Lowpass(0.005)

        vocab_dict = {}
        graph_list = []
        anim_config = []

        sub_vocab1 = enum_vocab.create_subset(['POS1*ONE', 'POS2*TWO',
                                               'POS3*THR', 'POS4*FOR',
                                               'POS5*FIV'])

        sub_vocab2 = vocab.create_subset(['ADD'])
        sub_vocab2.readonly = False
        sub_vocab2.add('N_ADD', vocab.parse('~ADD'))
        sub_vocab2.add('ADD*ADD', vocab.parse('ADD*ADD'))
        sub_vocab2.add('ADD*ADD*ADD', vocab.parse('ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD', vocab.parse('ADD*ADD*ADD*ADD'))
        # sub_vocab2.add('ADD*ADD*ADD*ADD*ADD',
        #                vocab.parse('ADD*ADD*ADD*ADD*ADD'))

        sub_vocab3 = vocab.create_subset([])
        sub_vocab3.readonly = False
        # sub_vocab3.add('N_POS1*ONE', vocab.parse('~(POS1*ONE)'))
        # sub_vocab3.add('N_POS1*TWO', vocab.parse('~(POS1*TWO)'))
        # sub_vocab3.add('N_POS1*THR', vocab.parse('~(POS1*THR)'))
        # sub_vocab3.add('N_POS1*FOR', vocab.parse('~(POS1*FOR)'))
        # sub_vocab3.add('N_POS1*FIV', vocab.parse('~(POS1*FIV)'))
        sub_vocab3.add('ADD', vocab.parse('ADD'))
        sub_vocab3.add('INC', vocab.parse('INC'))

        vocab_seq_list = vocab.create_subset([])
        vocab_seq_list.readonly = False
        for sp_str in ['POS1*ONE', 'POS2*TWO', 'POS3*THR', 'POS4*FOR',
                       'POS5*FIV', 'POS6*SIX', 'POS7*SEV', 'POS8*EIG']:
            vocab_seq_list.add(sp_str, vocab.parse(sp_str))

        vocab_rpm = vocab.create_subset([])
        vocab_rpm.readonly = False
        for i in [1, 3, 8]:
            sp_str = num_sp_strs[i]
            vocab_rpm.add('A_(P1+P2+P3)*%s' % sp_str,
                          vocab.parse('POS1*%s+POS2*%s+POS3*%s' %
                                      (sp_str, sp_str, sp_str)))
            vocab_rpm.add('N_(P1+P2+P3)*%s' % sp_str,
                          vocab.parse('~(POS1*%s+POS2*%s+POS3*%s)' %
                                      (sp_str, sp_str, sp_str)))

        ####
        vocab_seq_list = vocab_rpm

        if hasattr(model, 'stim'):
            p0 = nengo.Probe(model.stim.output, synapse=None)

            add_to_anim_config(anim_config, key='vis',
                               data_func_name='generic_single',
                               data_func_params={'data': p0},
                               plot_type_name='imshow',
                               plot_type_params={'shape': (28, 28)})
        else:
            p0 = 0

        if hasattr(model, 'vis') and True:
            pvs1 = nengo.Probe(model.vis.output)
            pvs2 = nengo.Probe(model.vis.neg_attention)
            pvs3 = nengo.Probe(model.vis.am_utilities)
            pvs4 = nengo.Probe(model.vis.mb_output)
            pvs5 = nengo.Probe(model.vis.vis_out)

            # probes = gen_graph_list(['vis', p0, pvs1, pvs2, pvs3])
            # vocab_dict[idstr(pvs1)] = vis_vocab

            add_to_graph_list(graph_list, ['vis', p0, pvs1, pvs2, pvs3, 0,
                                           'vis net', pvs4, pvs5])
            add_to_vocab_dict(vocab_dict, {pvs1: vis_vocab})

        # ############ FOR DEBUGGING VIS DETECT SYSTEM ########################
        # if hasattr(model, 'vis') and True:
        #     pvsd1 = nengo.Probe(model.vis.detect_change_net.input_diff)
        #     pvsd2 = nengo.Probe(model.vis.detect_change_net.item_detect)
        #     pvsd3 = nengo.Probe(model.vis.detect_change_net.blank_detect)

        #     probes = gen_graph_list(['vis detect', p0, pvsd1, pvsd2, pvsd3])
        #     graph_list.extend(probes)

        if hasattr(model, 'ps') and True:
            pps1 = nengo.Probe(model.ps.task)
            pps2 = nengo.Probe(model.ps.state)
            pps3 = nengo.Probe(model.ps.dec)

            pps4 = nengo.Probe(model.ps.ps_task_mb.mem1.output)
            pps5 = nengo.Probe(model.ps.ps_task_mb.mem2.output)
            pps6 = nengo.Probe(model.ps.ps_task_mb.mem1.input, synapse=None)
            pps6b = nengo.Probe(model.ps.task_init.output)

            pps7 = nengo.Probe(model.ps.ps_state_mb.mem1.output)
            pps8 = nengo.Probe(model.ps.ps_state_mb.mem2.output)
            pps9 = nengo.Probe(model.ps.ps_state_mb.mem1.input, synapse=None)

            pps10 = nengo.Probe(model.ps.ps_dec_mb.mem1.output)
#......... part of the code omitted here .........
Developer: Stanford-BIS, Project: spaun2.0, Lines: 103, Source: probes.py

Example 8: main

# Required import: from nengo.spa import Vocabulary [as alias]
# Or: from nengo.spa.Vocabulary import add [as alias]
def main():
    
    print "Loading Word2Vec model..."
    word2vec_model = gensim.models.Word2Vec.load("word2vec_model_1_cleaned")
    word2vec_model.init_sims(replace=True)
    word2vec_vocab = word2vec_model.index2word
    
    import readline
    readline.parse_and_bind("tab: complete")

    def complete(text, state):
        results = [x for x in word2vec_vocab if x.startswith(text)] + [None]
        return results[state]

    readline.set_completer(complete)

    print "This program uses an SPA network in Nengo to perform vector operations on a semantically structured word-vector space *learned* from a sentence corpus."
    print "When trained on a large corpus of English sentences, for example, it should produce: Vector[king] - Vector[man] + Vector[woman] = Vector[king]"
    
    print "For now, it just does subtraction..."
    print "\nPress <tab> twice to see all your autocomplete options."
    print "_______________________________________________________"
    line1 = raw_input('\nFirst word:> ')
    line2 = raw_input('\nSecond word:> ')

    if line1 in word2vec_vocab and line2 in word2vec_vocab:
        val1 = word2vec_model[line1]
        val2 = word2vec_model[line2]
        diff = val1 - val2
        dot_products = [np.dot(word2vec_model[word2vec_model.index2word[i]], diff) for i in range(len(word2vec_model.index2word))]
        closest_word = word2vec_model.index2word[dot_products.index(max(dot_products))]
        print "\nWhat the Nengo model SHOULD return is something like: %s" % closest_word
    
    print "\nDefining SPA network..."
    model = spa.SPA(label = "Vector Storage")
    with model:
        
        # Dimensionality of each representation
        num_dimensions = 100
        sub_dimensions = 1
        
        # Create the vocabulary
        vocab = Vocabulary(num_dimensions, randomize = False)
                
        stored_value_1 = val1
        vocab.add("Stored_value_1", stored_value_1)
        
        stored_value_2 = val2
        vocab.add("Stored_value_2", stored_value_2)
        
        # Create a semantic pointer corresponding to the "correct" answer for the operation
        sum_vector = np.subtract(stored_value_1, stored_value_2)
        sum_vector = sum_vector/np.linalg.norm(sum_vector)
        vocab.add("Correct_target", sum_vector)

        # Define the control signal inputs as random vectors
        r1 = [1] * num_dimensions
        r1 = r1 / np.linalg.norm(r1)
        r2 = [(-1)**k for k in range(num_dimensions)]
        r2 = r2 / np.linalg.norm(r2)
        vocab.add("Hold_signal", r1)
        vocab.add("Start_signal", r2)

        # Control when the vector operation takes place
        def control_input(t):
            if t < 1:
                return "Hold_signal"
            else:
                return "Start_signal"
                
        # Control buffer
        model.control = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True, vocab = vocab)
        control_probe = nengo.Probe(model.control.state.output)
        
        # Inputs to the word input buffers
        def first_input(t):
            return "Stored_value_1"
        def second_input(t):
            return "Stored_value_2"
        
        # Buffers to store the inputs:
        model.word_buffer1 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True, vocab = vocab)
        model.word_buffer2 = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True, vocab = vocab)
        
        # Probe to visualize the values stored in the buffers
        buffer_1_probe = nengo.Probe(model.word_buffer1.state.output)
        buffer_2_probe = nengo.Probe(model.word_buffer2.state.output)
        
        # Buffer to hold the result:
        model.result = spa.Buffer(dimensions = num_dimensions, subdimensions = sub_dimensions, neurons_per_dimension = 200, direct = True, vocab = vocab)
        result_probe = nengo.Probe(model.result.state.output)        
        
        # Control system        
        actions = spa.Actions('dot(control, Start_signal) --> result = word_buffer1 - word_buffer2', 'dot(control, Hold_signal) --> result = Hold_signal')
        model.bg = spa.BasalGanglia(actions)
        model.thalamus = spa.Thalamus(model.bg, subdim_channel = sub_dimensions)
        
        # Connect up the inputs
        model.input = spa.Input(control = control_input, word_buffer1 = first_input, word_buffer2 = second_input)
        
#......... part of the code omitted here .........
Developer: adammarblestone, Project: Nengo_Explorations, Lines: 103, Source: SPA_word_semantics_3.py


Note: The nengo.spa.Vocabulary.add examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not republish without permission.