

Python Pattern.from_concat Method Code Examples

This article collects typical usage examples of the Python method neurokernel.pattern.Pattern.from_concat. If you are wondering how Pattern.from_concat is used in practice, the curated code examples below should help. You can also explore further usage examples of the containing class, neurokernel.pattern.Pattern.


A total of 8 code examples of the Pattern.from_concat method are shown below, sorted by popularity by default.
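
Before the examples, here is a minimal, self-contained usage sketch. It is hypothetical (the /foo and /bar selectors are invented for illustration) and relies only on the call signature visible in the examples below: two positional interface selectors plus the from_sel, to_sel, gpot_sel/spike_sel, and data keyword arguments.

# Minimal sketch of Pattern.from_concat (hypothetical selectors; not taken
# from the neurokernel sources):
from neurokernel.pattern import Pattern

# Connect ports /foo[0] and /foo[1] of interface 0 to ports /bar[0] and
# /bar[1] of interface 1, marking every port as graded potential:
p = Pattern.from_concat('/foo[0:2]', '/bar[0:2]',        # selectors of the two interfaces
                        from_sel='/foo[0:2]',            # source ports of the connections
                        to_sel='/bar[0:2]',              # destination ports
                        gpot_sel='/foo[0:2],/bar[0:2]',  # port type: graded potential
                        data=1)                          # value stored in the 'conn' column

print(p.data)            # connection DataFrame (MultiIndex of from/to ports)
print(p.interface.data)  # per-port interface, io, and type attributes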

Example 1: test_from_concat

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
    def test_from_concat(self):
        # Need to specify selectors for both interfaces in pattern:
        self.assertRaises(ValueError, Pattern.from_concat, '', '/[baz,qux]',
                          from_sel='', to_sel='/[baz,qux]', data=1)

        # Patterns with interfaces using selectors with 1 level:
        p = Pattern.from_concat('/[foo,bar]', '/[baz,qux]',
                                from_sel='/[foo,bar]', to_sel='/[baz,qux]',
                                data=1)
        df = pd.DataFrame(data=[1, 1],
                          index=pd.MultiIndex(levels=[['bar', 'foo'], ['baz', 'qux']],
                                              labels=[[1, 0], [0, 1]],
                                              names=['from_0', 'to_0'], dtype=object),
                          columns=['conn'], dtype=object)
        assert_frame_equal(p.data, df)

        # Patterns with interfaces using selectors with more than 1 level:
        p = Pattern.from_concat('/foo[0:2]', '/bar[0:2]',
                                from_sel='/foo[0:2]', to_sel='/bar[0:2]',
                                data=1)
        df = pd.DataFrame(data=[1, 1],
                          index=pd.MultiIndex(levels=[['foo'], [0, 1], ['bar'], [0, 1]],
                                              labels=[[0, 0], [0, 1], [0, 0], [0, 1]],
                                              names=['from_0', 'from_1', 'to_0', 'to_1'], 
                                              dtype=object),
                          columns=['conn'], dtype=object)
        assert_frame_equal(p.data, df)

        # Patterns where port types are specified:
        p = Pattern.from_concat('/foo[0:2]', '/bar[0:2]',
                                from_sel='/foo[0:2]', to_sel='/bar[0:2]',
                                gpot_sel='/foo[0],/bar[0]',
                                spike_sel='/foo[1:2],/bar/[1:2]',
                                data=1)
        df_int = pd.DataFrame({'interface': [0, 0, 1, 1],
                               'io': ['in', 'in', 'out', 'out'],
                               'type': ['gpot', 'spike', 'gpot', 'spike']},
                              index=pd.MultiIndex(levels=[['bar', 'foo'], [0, 1]],
                                                  labels=[[1, 1, 0, 0], [0, 1, 0, 1]],
                                                  names=[u'0', u'1'],
                                                  dtype=object),
                              dtype=object)
        df = pd.DataFrame(data=[1, 1],
                          index=pd.MultiIndex(levels=[['foo'], [0, 1], ['bar'], [0, 1]],
                                              labels=[[0, 0], [0, 1], [0, 0], [0, 1]],
                                              names=['from_0', 'from_1', 'to_0', 'to_1'], 
                                              dtype=object),
                          columns=['conn'],
                          dtype=object)
        assert_frame_equal(p.data, df)
        assert_frame_equal(p.interface.data, df_int)
Developer: CEPBEP, Project: neurokernel, Lines of code: 53, Source: test_pattern.py

Example 2: update_pattern_master_worker

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
    def update_pattern_master_worker(self, j, worker_num):
        indexes = self.get_worker_nodes(j, worker_num)
        
        master_selectors = self.get_master_selectors()
        worker_selectors = self.get_worker_selectors(j, worker_num)
        
        from_list = []
        to_list = []

        for i, ind in enumerate(indexes):
            col_m = ind // 6
            ind_m = 1 + (ind % 6)
            src = '/master/{}/buf{}'.format(col_m, ind_m)
            dest = '/ret/{}/in{}'.format(col_m, ind_m)
            from_list.append(src)
            to_list.append(dest)
            
            src = '/ret/{}/R{}'.format(col_m, ind_m)
            dest = '/master/{}/R{}'.format(col_m, ind_m)
            
            from_list.append(src)
            to_list.append(dest)

        pattern = Pattern.from_concat(','.join(master_selectors),
                                      ','.join(worker_selectors),
                                      from_sel = ','.join(from_list),
                                      to_sel = ','.join(to_list),
                                      gpot_sel = ','.join(from_list+to_list))
        return pattern
Developer: neurokernel, Project: retina, Lines of code: 31, Source: retina.py

Example 3: test_from_concat

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
    def test_from_concat(self):
        p = Pattern.from_concat('/[foo,bar]', '/[baz,qux]',
                                from_sel='/[foo,bar]', to_sel='/[baz,qux]',
                                data=1)
        df = pd.DataFrame(data=[1, 1],
                    index=pd.MultiIndex(levels=[['bar', 'foo'], ['baz', 'qux']],
                                        labels=[[1, 0], [0, 1]],
                                        names=['from_0', 'to_0'], dtype=object),
                    columns=['conn'])
        assert_frame_equal(p.data, df)

        p = Pattern.from_concat('/foo[0:2]', '/bar[0:2]',
                                from_sel='/foo[0:2]', to_sel='/bar[0:2]',
                                data=1)
        df = pd.DataFrame(data=[1, 1],
                index=pd.MultiIndex(levels=[['foo'], [0, 1], ['bar'], [0, 1]],
                                    labels=[[0, 0], [0, 1], [0, 0], [0, 1]],
                                    names=['from_0', 'from_1', 'to_0', 'to_1'], 
                                    dtype=object),
                    columns=['conn'])
        assert_frame_equal(p.data, df)
Developer: MariyaS, Project: neurokernel, Lines of code: 23, Source: test_pattern.py

Example 4: create_pattern

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
def create_pattern(n_dict_1, n_dict_2, save_as=None):
    """
    If `save_as` is not None, save the pattern in GEXF format as the specified file name.
    """

    lpu1_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_1))
    lpu1_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_1))
    lpu2_sel_in_gpot = plsel.Selector(LPU.extract_in_gpot(n_dict_2))
    lpu2_sel_out_gpot = plsel.Selector(LPU.extract_out_gpot(n_dict_2))

    lpu1_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_1))
    lpu1_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_1))
    lpu2_sel_in_spike = plsel.Selector(LPU.extract_in_spk(n_dict_2))
    lpu2_sel_out_spike = plsel.Selector(LPU.extract_out_spk(n_dict_2))

    lpu1_sel_out = plsel.Selector.union(lpu1_sel_out_gpot, lpu1_sel_out_spike)
    lpu2_sel_out = plsel.Selector.union(lpu2_sel_out_gpot, lpu2_sel_out_spike)
    lpu1_sel_in = plsel.Selector.union(lpu1_sel_in_gpot, lpu1_sel_in_spike)
    lpu2_sel_in = plsel.Selector.union(lpu2_sel_in_gpot, lpu2_sel_in_spike)

    lpu1_sel = plsel.Selector.union(lpu1_sel_out, lpu1_sel_in)
    lpu2_sel = plsel.Selector.union(lpu2_sel_out, lpu2_sel_in)

    Neuron_list_12 = ["L1", "L2", "L3", "L4", "L5", "T1"]
    Neuron_list_21 = ["C2", "C3"]

    gpot_sel = plsel.Selector.union(lpu1_sel_out_gpot, lpu1_sel_in_gpot, lpu2_sel_out_gpot, lpu2_sel_in_gpot)
    spike_sel = plsel.Selector.union(lpu1_sel_out_spike, lpu1_sel_in_spike, lpu2_sel_out_spike, lpu2_sel_in_spike)

    Neuron_str_12 = "[" + ",".join(Neuron_list_12) + "]"
    Neuron_str_21 = "[" + ",".join(Neuron_list_21) + "]"
    cart_str = "[" + ",".join(["cart%i" % i for i in range(768)]) + "]"

    from_sel_12 = "/lamina" + cart_str + Neuron_str_12
    to_sel_12 = "/medulla" + cart_str + Neuron_str_12
    from_sel_21 = "/medulla" + cart_str + Neuron_str_21
    to_sel_21 = "/lamina" + cart_str + Neuron_str_21

    from_sel = from_sel_12 + "," + from_sel_21
    to_sel = to_sel_12 + "," + to_sel_21

    pat = Pattern.from_concat(
        lpu1_sel, lpu2_sel, from_sel=from_sel, to_sel=to_sel, gpot_sel=gpot_sel, spike_sel=spike_sel, data=1
    )

    if save_as:
        nx.write_gexf(pat.to_graph(), save_as, prettyprint=True)
    return pat
Developer: neurokernel, Project: vision, Lines of code: 50, Source: vision_configuration.py

Example 5: connect_retina_lamina

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
def connect_retina_lamina(config, i, retina, lamina, manager):
    '''
        The connections between Retina and Lamina follow
        the neural superposition rule of the fly's compound eye.
        See more information in NeurokernelRFC#2.

        Retina provides an interface to make this connection easier.
        --
        config: configuration dictionary like object
        i: identifier of eye in case more than one is used
        retina: retina array object
        lamina: lamina array object
        manager: manager object to which connection pattern will be added
    '''
    retina_id = get_retina_id(i)
    lamina_id = get_lamina_id(i)
    print('Connecting {} and {}'.format(retina_id, lamina_id))

    retina_selectors = retina.get_all_selectors()
    lamina_selectors = lamina.get_all_selectors()
    with Timer('creation of Pattern object'):
        from_list = []
        to_list = []

        # account for neural superposition
        rulemap = retina.rulemap
        for ret_sel in retina_selectors:
            # format should be '/ret/<ommid>/<neuronname>'
            _, lpu, ommid, n_name = ret_sel.split('/')
            # find neighbor of neural superposition
            neighborid = rulemap.neighbor_for_photor(int(ommid), n_name)
            # format should be '/lam/<cartid>/<neuronname>'
            lam_sel = lamina.get_selector(neighborid, n_name)

            # setup connection from retina to lamina
            from_list.append(ret_sel)
            to_list.append(lam_sel)

        pattern = Pattern.from_concat(','.join(retina_selectors),
                                      ','.join(lamina_selectors),
                                      from_sel=','.join(from_list),
                                      to_sel=','.join(to_list),
                                      gpot_sel=','.join(from_list+to_list))
        nx.write_gexf(pattern.to_graph(), retina_id+'_'+lamina_id+'.gexf.gz',
                      prettyprint=True)

    with Timer('update of connections in Manager'):
        manager.connect(retina_id, lamina_id, pattern)
Developer: neurokernel, Project: retina-lamina, Lines of code: 50, Source: retlam_demo.py

Example 6: emulate

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
def emulate(n_lpu, n_spike, n_gpot, steps):
    """
    Benchmark inter-LPU communication throughput.

    Each LPU is configured to use a different local GPU.

    Parameters
    ----------
    n_lpu : int
        Number of LPUs. Must be at least 2 and no greater than the number of
        local GPUs.
    n_spike : int
        Total number of input and output spiking ports any 
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_spike*(n_lpu-1) total spiking ports.
    n_gpot : int
        Total number of input and output graded potential ports any 
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_gpot*(n_lpu-1) total graded potential ports.
    steps : int
        Number of steps to execute.

    Returns
    -------
    average_throughput, total_throughput : float
        Average per-step and total received data throughput in bytes/seconds.
    exec_time : float
        Execution time in seconds.
    """

    # Time everything starting with manager initialization:
    start_all = time.time()

    # Check whether a sufficient number of GPUs are available:
    drv.init()
    if n_lpu > drv.Device.count():
        raise RuntimeError('insufficient number of available GPUs.')

    # Set up manager and broker:
    man = Manager(get_random_port(), get_random_port(), get_random_port())
    man.add_brok()

    # Generate selectors for configuring modules and patterns:
    mod_sels, pat_sels = gen_sels(n_lpu, n_spike, n_gpot)

    # Set up modules:
    for i in xrange(n_lpu):
        lpu_i = 'lpu%s' % i
        sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
        m = MyModule(sel, sel_in, sel_out,
                     sel_gpot, sel_spike,
                     port_data=man.port_data, port_ctrl=man.port_ctrl,
                     port_time=man.port_time,
                     id=lpu_i, device=i, debug=args.debug)
        man.add_mod(m)

    # Set up connections between module pairs:
    for i, j in itertools.combinations(xrange(n_lpu), 2):
        lpu_i = 'lpu%s' % i
        lpu_j = 'lpu%s' % j
        sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
            sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = pat_sels[(lpu_i, lpu_j)]
        pat = Pattern.from_concat(sel_from, sel_to,
                                  from_sel=sel_from, to_sel=sel_to, data=1)
        pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
        pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
        pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
        pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
        pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
        pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
        pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
        pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
        man.connect(man.modules[lpu_i], man.modules[lpu_j], pat, 0, 1,
                compat_check=False)

    start_main = time.time()
    man.start(steps=steps)
    man.stop()
    stop_main = time.time()
    t = man.get_throughput()
    return t[0], (time.time()-start_all), (stop_main-start_main), t[3]
Developer: MariyaS, Project: neurokernel, Lines of code: 83, Source: timing_demo_gpu.py

Example 7: emulate

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
def emulate(conn_mat, scaling, n_gpus, steps, use_mps, cache_file='cache.db'):
    """
    Benchmark inter-LPU communication throughput.

    Each LPU is configured to use a different local GPU.

    Parameters
    ----------
    conn_mat : numpy.ndarray
        Square array containing numbers of directed spiking port connections 
        between LPUs (which correspond to the row and column indices). 
    scaling : int
        Scaling factor; multiply all connection numbers by this value.
    n_gpus : int
        Number of GPUs over which to partition the emulation.
    steps : int
        Number of steps to execute.
    use_mps : bool
        Use Multi-Process Service if True.

    Returns
    -------
    average_throughput, total_throughput : float
        Average per-step and total received data throughput in bytes/seconds.
    exec_time : float
        Execution time in seconds.
    """

    # Time everything starting with manager initialization:
    start_all = time.time()

    # Set up manager:
    man = MyManager(use_mps)

    # Generate selectors for configuring modules and patterns:
    mod_sels, pat_sels = gen_sels(conn_mat, scaling)

    # Partition nodes in connectivity matrix:
    part_map = partition(conn_mat, n_gpus)

    # Set up modules such that those in each partition use that partition's GPU:
    ranks = set([rank for rank in itertools.chain.from_iterable(part_map.values())])
    rank_to_gpu_map = {rank:gpu for gpu in part_map for rank in part_map[gpu]}
    for i in ranks:
        lpu_i = 'lpu%s' % i
        sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
        man.add(MyModule, lpu_i, sel, sel_in, sel_out, sel_gpot, sel_spike,
                None, None, ['interface', 'io', 'type'],
                CTRL_TAG, GPOT_TAG, SPIKE_TAG, device=rank_to_gpu_map[i],
                time_sync=True)

    # Set up connections between module pairs:
    env = lmdb.open(cache_file, map_size=10**10)
    with env.begin() as txn:
        data = txn.get('routing_table')
    if data is not None:
        man.log_info('loading cached routing table')
        routing_table = dill.loads(data)

        # Don't replace man.routing_table outright because its reference is
        # already in the dict of named args to transmit to the child MPI process:
        for c in routing_table.connections:
            man.routing_table[c] = routing_table[c]
    else:
        man.log_info('no cached routing table found - generating')
        for lpu_i, lpu_j in pat_sels.keys():
            sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
                sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = pat_sels[(lpu_i, lpu_j)]
            pat = Pattern.from_concat(sel_from, sel_to,
                                      from_sel=sel_from, to_sel=sel_to, data=1, validate=False)
            pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
            pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
            pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
            pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
            pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
            pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
            pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
            pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
            man.connect(lpu_i, lpu_j, pat, 0, 1, compat_check=False)
        with env.begin(write=True) as txn:
            txn.put('routing_table', dill.dumps(man.routing_table))

    man.spawn(part_map)
    start_main = time.time()
    man.start(steps)
    man.wait()
    stop_main = time.time()
    return man.average_step_sync_time, (time.time()-start_all), (stop_main-start_main), \
        (man.stop_time-man.start_time)
Developer: CEPBEP, Project: neurokernel, Lines of code: 91, Source: timing_connectome_demo_gpu.py

Example 8: emulate

# Required import: from neurokernel.pattern import Pattern [as alias]
# Or: from neurokernel.pattern.Pattern import from_concat [as alias]
def emulate(n_lpu, n_spike, n_gpot, steps):
    """
    Benchmark inter-LPU communication throughput.

    Each LPU is configured to use a different local GPU.

    Parameters
    ----------
    n_lpu : int
        Number of LPUs. Must be at least 2 and no greater than the number of
        local GPUs.
    n_spike : int
        Total number of input and output spiking ports any
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_spike*(n_lpu-1) total spiking ports.
    n_gpot : int
        Total number of input and output graded potential ports any
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_gpot*(n_lpu-1) total graded potential ports.
    steps : int
        Number of steps to execute.

    Returns
    -------
    average_throughput, total_throughput : float
        Average per-step and total received data throughput in bytes/seconds.
    exec_time : float
        Execution time in seconds.
    """

    # Time everything starting with manager initialization:
    start_all = time.time()

    # Set up manager:
    man = Manager()

    # Generate selectors for configuring modules and patterns:
    mod_sels, pat_sels = gen_sels(n_lpu, n_spike, n_gpot)

    # Set up modules:
    for i in xrange(n_lpu):
        lpu_i = 'lpu%s' % i
        sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
        man.add(MyModule, lpu_i, sel, sel_in, sel_out, sel_gpot, sel_spike,
                None, None, ['interface', 'io', 'type'],
                CTRL_TAG, GPOT_TAG, SPIKE_TAG, time_sync=True)

    # Set up connections between module pairs:
    for i, j in itertools.combinations(xrange(n_lpu), 2):
        lpu_i = 'lpu%s' % i
        lpu_j = 'lpu%s' % j
        sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
            sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = pat_sels[(lpu_i, lpu_j)]
        pat = Pattern.from_concat(sel_from, sel_to,
                                  from_sel=sel_from, to_sel=sel_to, data=1)
        pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
        pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
        pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
        pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
        pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
        pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
        pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
        pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
        man.connect(lpu_i, lpu_j, pat, 0, 1, compat_check=False)

    man.spawn()
    start_main = time.time()
    man.start(steps)
    man.wait()
    stop_main = time.time()
    return man.average_step_sync_time, (time.time()-start_all), (stop_main-start_main), \
        (man.stop_time-man.start_time)
Developer: CEPBEP, Project: neurokernel, Lines of code: 74, Source: timing_demo.py
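
Examples 6 through 8 also show an alternative to passing gpot_sel and spike_sel to from_concat: build the pattern from from_sel/to_sel only, then assign port attributes through pat.interface. The following is a condensed, hypothetical sketch of that idiom; the /a and /b selectors are invented, and the io values follow the convention seen in Example 1, where from_sel ports appear as 'in' and to_sel ports as 'out' in the pattern's interface DataFrame.

# Hypothetical sketch: annotate port attributes after construction instead of
# passing gpot_sel/spike_sel to from_concat.
from neurokernel.pattern import Pattern

pat = Pattern.from_concat('/a[0:2]', '/b[0:2]',
                          from_sel='/a[0:2]', to_sel='/b[0:2]', data=1)
pat.interface['/a[0:2]', 'interface', 'io'] = [0, 'in']       # interface 0: data enters the pattern here
pat.interface['/b[0:2]', 'interface', 'io'] = [1, 'out']      # interface 1: data leaves the pattern here
pat.interface['/a[0:2]', 'interface', 'type'] = [0, 'spike']  # declare all ports as spiking
pat.interface['/b[0:2]', 'interface', 'type'] = [1, 'spike']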


Note: The neurokernel.pattern.Pattern.from_concat examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.