

Python brian2.NeuronGroup class code examples

This article collects typical usage examples of the Python class brian2.NeuronGroup. If you are unsure how the NeuronGroup class is used in practice, the selected code examples below may help.


Fifteen code examples of the NeuronGroup class are shown below, sorted by popularity by default.
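As a quick orientation before the collected examples, here is a minimal, self-contained sketch of typical NeuronGroup usage (a leaky integrate-and-fire group with threshold and reset). The threshold value, the method choice and the variable names are illustrative assumptions, not taken from any of the examples below:

from brian2 import NeuronGroup, SpikeMonitor, run, ms
import numpy as np

# 10 leaky integrate-and-fire neurons; v decays towards 0 with a 10 ms time constant
G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1',
                threshold='v > 0.8', reset='v = 0', method='exact')
G.v = np.linspace(0, 1, 10)   # stagger the initial membrane values
spikes = SpikeMonitor(G)
run(20*ms)
print(spikes.count)           # number of spikes per neuron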

Example 1: run_simulation

def run_simulation():
    G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1',
                    reset='v=0', threshold='v>1')
    G.v = np.linspace(0, 1, 10)
    run(1*ms)
    # We return potentially problematic references to a VariableView
    return G.v
Developer: appusom, Project: brian2, Lines: 7, Source: test_network.py
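The comment in Example 1 notes that returning G.v hands back a VariableView that stays tied to the NeuronGroup. Slicing with [:] copies the values out instead; a minimal sketch of that variant (the helper name run_simulation_copy is hypothetical):

def run_simulation_copy():
    G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1',
                    reset='v=0', threshold='v>1')
    G.v = np.linspace(0, 1, 10)
    run(1*ms)
    # G.v[:] copies the values out of the VariableView, so the returned array
    # no longer references the NeuronGroup itself
    return G.v[:]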

Example 2: test_store_restore_magic

def test_store_restore_magic():
    source = NeuronGroup(10, '''dv/dt = rates : 1
                                rates : Hz''', threshold='v>1', reset='v=0')
    source.rates = 'i*100*Hz'
    target = NeuronGroup(10, 'v:1')
    synapses = Synapses(source, target, model='w:1', pre='v+=w', connect='i==j')
    synapses.w = 'i*1.0'
    synapses.delay = 'i*ms'
    state_mon = StateMonitor(target, 'v', record=True)
    spike_mon = SpikeMonitor(source)
    store()  # default time slot
    run(10*ms)
    store('second')
    run(10*ms)
    v_values = state_mon.v[:, :]
    spike_indices, spike_times = spike_mon.it_

    restore() # Go back to beginning
    assert magic_network.t == 0*ms
    run(20*ms)
    assert defaultclock.t == 20*ms
    assert_equal(v_values, state_mon.v[:, :])
    assert_equal(spike_indices, spike_mon.i[:])
    assert_equal(spike_times, spike_mon.t_[:])

    # Go back to middle
    restore('second')
    assert magic_network.t == 10*ms
    run(10*ms)
    assert defaultclock.t == 20*ms
    assert_equal(v_values, state_mon.v[:, :])
    assert_equal(spike_indices, spike_mon.i[:])
    assert_equal(spike_times, spike_mon.t_[:])
Developer: appusom, Project: brian2, Lines: 33, Source: test_network.py

Example 3: run_network

def run_network(traj):
    """Runs brian network consisting of
        200 inhibitory IF neurons"""

    eqs = '''
    dv/dt=(v0-v)/(5*ms) : volt (unless refractory)
    v0 : volt
    '''
    group = NeuronGroup(100, model=eqs, threshold='v>10 * mV',
                        reset='v = 0*mV', refractory=5*ms)
    group.v0 = traj.par.v0
    group.v = np.random.rand(100) * 10.0 * mV

    syn = Synapses(group, group, on_pre='v-=1*mV')
    syn.connect('i != j', p=0.2)

    spike_monitor = SpikeMonitor(group, variables=['v'])
    voltage_monitor = StateMonitor(group, 'v', record=True)
    pop_monitor = PopulationRateMonitor(group, name='pop' + str(traj.v_idx))

    net = Network(group, syn, spike_monitor, voltage_monitor, pop_monitor)
    net.run(0.25*second, report='text')

    traj.f_add_result(Brian2MonitorResult, 'spikes',
                      spike_monitor)
    traj.f_add_result(Brian2MonitorResult, 'v',
                      voltage_monitor)
    traj.f_add_result(Brian2MonitorResult, 'pop',
                      pop_monitor)
Developer: SmokinCaterpillar, Project: pypet, Lines: 29, Source: another_network_test.py

Example 4: test_profile_ipython_html

def test_profile_ipython_html():
    G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', threshold='v>1',
                    reset='v=0', name='profile_test')
    G.v = 1.1
    net = Network(G)
    net.run(1*ms, profile=True)
    summary = profiling_summary(net)
    assert len(summary._repr_html_())
Developer: appusom, Project: brian2, Lines: 8, Source: test_network.py

Example 5: test_get_set_states

def test_get_set_states():
    G = NeuronGroup(10, 'v:1', name='a_neurongroup')
    G.v = 'i'
    net = Network(G)
    states1 = net.get_states()
    states2 = magic_network.get_states()
    states3 = net.get_states(read_only_variables=False)
    assert set(states1.keys()) == set(states2.keys()) == set(states3.keys()) == {'a_neurongroup'}
    assert set(states1['a_neurongroup'].keys()) == set(states2['a_neurongroup'].keys()) == {'i', 'dt', 'N', 't', 'v'}
    assert set(states3['a_neurongroup']) == {'v'}

    # Try re-setting the state
    G.v = 0
    net.set_states(states3)
    assert_equal(G.v, np.arange(10))
Developer: appusom, Project: brian2, Lines: 15, Source: test_network.py

Example 6: test_profile

def test_profile():
    G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', threshold='v>1',
                    reset='v=0', name='profile_test')
    G.v = 1.1
    net = Network(G)
    net.run(1*ms, profile=True)
    # There should be four simulated CodeObjects, one for the group and one each
    # for the state update, threshold and reset
    info = net.profiling_info
    info_dict = dict(info)
    assert len(info) == 4
    assert 'profile_test' in info_dict
    assert 'profile_test_stateupdater' in info_dict
    assert 'profile_test_thresholder' in info_dict
    assert 'profile_test_resetter' in info_dict
    assert all([t>=0*second for _, t in info])
Developer: appusom, Project: brian2, Lines: 16, Source: test_network.py

Example 7: test_magic_collect

def test_magic_collect():
    '''
    Make sure all expected objects are collected in a magic network
    '''
    P = PoissonGroup(10, rates=100*Hz)
    G = NeuronGroup(10, 'v:1')
    S = Synapses(G, G, '')
    G_runner = G.custom_operation('')
    S_runner = S.custom_operation('')

    state_mon = StateMonitor(G, 'v', record=True)
    spike_mon = SpikeMonitor(G)
    rate_mon = PopulationRateMonitor(G)

    objects = collect()

    assert len(objects) == 8, ('expected %d objects, got %d' % (8, len(objects)))
Developer: msGenDev, Project: brian2, Lines: 17, Source: test_network.py

Example 8: run_network

def run_network():

    monitor_dict = {}
    defaultclock.dt = 0.01*ms

    C=281*pF
    gL=30*nS
    EL=-70.6*mV
    VT=-50.4*mV
    DeltaT=2*mV
    tauw=40*ms
    a=4*nS
    b=0.08*nA
    I=8*nA
    Vcut="vm>2*mV"# practical threshold condition
    N=10

    reset = 'vm=Vr;w+=b'

    eqs="""
    dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt
    dw/dt=(a*(vm-EL)-w)/tauw : amp
    Vr:volt
    """

    neuron=NeuronGroup(N,model=eqs,threshold=Vcut,reset=reset)
    neuron.vm=EL
    neuron.w=a*(neuron.vm-EL)
    neuron.Vr=linspace(-48.3*mV,-47.7*mV,N) # bifurcation parameter

    #run(25*msecond,report='text') # we discard the first spikes

    MSpike = SpikeMonitor(neuron, variables=['vm'])  # record vm at spike times
    MPopRate = PopulationRateMonitor(neuron)

    MMultiState = StateMonitor(neuron, ['w','vm'], record=[6,7,8,9])


    run(10*msecond,report='text')


    monitor_dict['SpikeMonitor']=MSpike
    monitor_dict['MultiState']=MMultiState
    monitor_dict['PopulationRateMonitor']=MPopRate

    return monitor_dict
Developer: SmokinCaterpillar, Project: pypet, Lines: 46, Source: run_a_brian2_network.py

Example 9: test_continuation

def test_continuation():
    defaultclock.dt = 1*ms
    G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
    G.v = 1
    mon = StateMonitor(G, 'v', record=True)
    net = Network(G, mon)
    net.run(2*ms)

    # Run the same simulation but with two runs that use sub-dt run times
    G2 = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
    G2.v = 1
    mon2 = StateMonitor(G2, 'v', record=True)
    net2 = Network(G2, mon2)
    net2.run(0.5*ms)
    net2.run(1.5*ms)

    assert_equal(mon.t[:], mon2.t[:])
    assert_equal(mon.v[:], mon2.v[:])
Developer: appusom, Project: brian2, Lines: 18, Source: test_network.py

Example 10: __init__

    def __init__(self, source, n_per_channel=1, params=None):
        params = ZhangSynapse._get_parameters(params)
        c_0, c_1 = params['c_0'], params['c_1']
        s_0, s_1 = params['s_0'], params['s_1']
        R_A = params['R_A']
        ns = dict(s_0=s_0, s_1=s_1, c_0=c_0, c_1=c_1)
        eqs =  '''
        # time-varying discharge rate, input into this model
        s : Hz
        
        # discharge-history effect (Equation 20 in differential equation form)        
        H = c_0*e_0 + c_1*e_1 : 1
        de_0/dt = -e_0/s_0    : 1 (unless refractory)
        de_1/dt = -e_1/s_1    : 1 (unless refractory)

        # final time-varying discharge rate for the Poisson process, equation 19
        R = s * (1 - H) : Hz
        '''
        
        # make sure that the s value is first updated in
        # ZhangSynapseRate, then this NeuronGroup is
        # updated by setting order+1
        @network_operation(dt=source.dt[:], when='start', order=source.order+1)
        def distribute_input():
            self.s[:] = source.s[:].repeat(n_per_channel)
        
        NeuronGroup.__init__(self, len(source) * n_per_channel,
                             model=eqs,
                             threshold='rand()<R*dt',
                             reset='''
                             e_0 = 1
                             e_1 = 1
                             ''',
                             refractory=R_A,
                             dt=source.dt[:], order=source.order+1,
                             namespace=ns,
                             method='euler',
                             )
        
        self.contained_objects.append(distribute_input)
Developer: brian-team, Project: brian2hears, Lines: 40, Source: tan_carney.py
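The scheduling comment in Example 10 relies on Brian2's when/order slots: objects in the same slot run in ascending order, so setting order+1 guarantees this group is updated after its source. A minimal, self-contained sketch of that mechanism using network_operation (the group and variable names are illustrative):

from brian2 import NeuronGroup, Network, network_operation, ms
import numpy as np

driver = NeuronGroup(5, 'x : 1')      # produces a fresh value each time step
follower = NeuronGroup(5, 'y : 1')    # should always see the freshly updated value

@network_operation(when='start', order=0)
def update_driver():
    driver.x = np.random.rand(5)

@network_operation(when='start', order=1)   # order=1 runs after order=0 in the same slot
def copy_to_follower():
    follower.y = driver.x[:]

net = Network(driver, follower, update_driver, copy_to_follower)
net.run(1*ms)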

Example 11: example_run

def example_run(debug=False, **build_options):
    '''
    Run a simple example simulation that tests whether the Brian2/Brian2GeNN/GeNN
    pipeline is working correctly.

    Parameters
    ----------
    debug : bool
        Whether to display debug information (e.g. compilation output) during
        the run. Defaults to ``False``.
    build_options : dict
        Additional options that will be forwarded to the ``set_device`` call,
        e.g. ``use_GPU=False``.
    '''
    from brian2.devices.device import set_device, reset_device
    from brian2 import ms, NeuronGroup, run
    from brian2.utils.logger import std_silent
    import numpy as np
    from numpy.testing import assert_allclose
    from tempfile import mkdtemp
    import shutil
    with std_silent(debug):
        test_dir = mkdtemp(prefix='brian2genn_test')
        set_device('genn', directory=test_dir, debug=debug, **build_options)
        N = 100
        tau = 10*ms
        eqs = '''
        dV/dt = -V/tau: 1
        '''
        G = NeuronGroup(N, eqs, threshold='V>1', reset='V=0', refractory=5 * ms,
                        method='linear')
        G.V = 'i/100.'
        run(1*ms)
        assert_allclose(G.V, np.arange(100)/100.*np.exp(-1*ms/tau))
        shutil.rmtree(test_dir, ignore_errors=True)
        reset_device()
    print('Example run was successful.')
Developer: brian-team, Project: brian2genn, Lines: 37, Source: __init__.py
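A hedged usage sketch of Example 11, based only on the docstring above: use_GPU=False is the build option the docstring itself mentions as being forwarded to set_device, and the import path follows the source note (the function lives in brian2genn/__init__.py).

from brian2genn import example_run

# run the self-test with compilation output shown, on the CPU backend
example_run(debug=True, use_GPU=False)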

Example 12: __init__

    def __init__(self, filterbank, targetvar, *args, **kwds):
        # Make sure we're not in standalone mode (which won't work)
        if not isinstance(get_device(), RuntimeDevice):
            raise RuntimeError("Cannot use standalone mode with brian2hears")

        self.targetvar = targetvar
        self.filterbank = filterbank
        filterbank.buffer_init()

        # Sanitize the clock - does it have the right dt value?
        if 'clock' in kwds:
            if int(1/kwds['clock'].dt) != int(filterbank.samplerate):
                raise ValueError('Clock should have 1/dt=samplerate')
        elif 'dt' in kwds:
            if int(1 / kwds['dt']) != int(filterbank.samplerate):
                raise ValueError('Require 1/dt=samplerate')
        else:
            kwds['dt'] = 1/filterbank.samplerate
        
        buffersize = kwds.pop('buffersize', 32)
        if not isinstance(buffersize, int):
            if not have_same_dimensions(buffersize, second):
                raise DimensionMismatchError("buffersize argument should be an integer or in seconds")
            buffersize = int(buffersize*filterbank.samplerate)

        self.buffersize = buffersize

        self.apply_filterbank = ApplyFilterbank(self, targetvar, filterbank, buffersize)

        NeuronGroup.__init__(self, filterbank.nchannels, *args, **kwds)

        if self.variables[targetvar].dim is not DIMENSIONLESS:
            raise DimensionMismatchError("Target variable must be dimensionless")

        apply_filterbank_output = NetworkOperation(self.apply_filterbank.__call__, when='start', clock=self.clock)
        self.contained_objects.append(apply_filterbank_output)
Developer: brian-team, Project: brian2hears, Lines: 36, Source: filterbankgroup.py

Example 13: _build_model

    def _build_model(self, traj, brian_list, network_dict):
        """Builds the neuron groups from `traj`.

        Adds the neuron groups to `brian_list` and `network_dict`.

        """

        model = traj.parameters.model

        # Create the equations for both models
        eqs_dict = self._build_model_eqs(traj)

        # Create inhibitory neurons
        eqs_i = eqs_dict['i']
        neurons_i = NeuronGroup(N=model.N_i,
                              model = eqs_i,
                              threshold=model.V_th,
                              reset=model.reset_func,
                              refractory=model.refractory,
                              method='Euler')

        # Create excitatory neurons
        eqs_e = eqs_dict['e']
        neurons_e = NeuronGroup(N=model.N_e,
                              model = eqs_e,
                              threshold=model.V_th,
                              reset=model.reset_func,
                              refractory=model.refractory,
                              method='Euler')


        # Set the bias terms
        neurons_e.mu = rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
        neurons_i.mu = rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min

        # Set initial membrane potentials
        neurons_e.V = rand(model.N_e)
        neurons_i.V = rand(model.N_i)

        # Add both groups to the `brian_list` and the `network_dict`
        brian_list.append(neurons_i)
        brian_list.append(neurons_e)
        network_dict['neurons_e'] = neurons_e
        network_dict['neurons_i'] = neurons_i
Developer: SmokinCaterpillar, Project: pypet, Lines: 44, Source: clusternet.py

Example 14: run_cpp_standalone

def run_cpp_standalone(params, network_objs):
    import os
    import numpy as np
    from numpy.fft import rfft, irfft
    from brian2.devices.device import CurrentDeviceProxy
    from brian2.units import Unit
    from brian2 import check_units, implementation, device, prefs, NeuronGroup, Network, second

    tempdir = os.path.join(params["program_dir"], "cpp_standalone")
    tempdir = os.path.join(tempdir, "c_" + str(params["sigma_c"]) + \
                           "_s_" + str(params["sigma_s"]))
    if not os.path.exists(tempdir):
        os.makedirs(tempdir)


    prefs.codegen.cpp.libraries += ['mkl_gf_lp64', # -Wl,--start-group
                                    'mkl_gnu_thread',
                                    'mkl_core', #  -Wl,--end-group
                                    'iomp5']


    # give extra arguments and path information to the compiler
    extra_incs = ['-I'+os.path.expanduser(s) for s in [ tempdir, "~/intel/mkl/include"]]
    prefs.codegen.cpp.extra_compile_args_gcc = ['-w', '-Ofast', '-march=native'] + extra_incs

    # give extra arguments and path information to the linker
    prefs.codegen.cpp.extra_link_args += ['-L{0}/intel/mkl/lib/intel64'.format(os.path.expanduser('~')),
                                          '-L{0}/intel/lib/intel64'.format(os.path.expanduser('~')),
                                          '-m64', '-Wl,--no-as-needed']

    # Path that the compiled and linked code needs at runtime
    os.environ["LD_LIBRARY_PATH"] = os.path.expanduser('~/intel/mkl/lib/intel64:')
    os.environ["LD_LIBRARY_PATH"] += os.path.expanduser('~/intel/lib/intel64:')

    # Variable definitions
    N = params["NI"] # this is the amount of neurons with variable synaptic strength
    Noffset = params["NE"]
    neurons = network_objs["neurons"]
    params["rho0_dt"] = params["rho_0"]/second * params["rate_interval"]
    mkl_threads = 1


    # Includes the header files in all generated files
    prefs.codegen.cpp.headers += ['<sense.h>',]
    prefs.codegen.cpp.define_macros += [('N_REAL', int(N)),
                                        ('N_CMPLX', int(N/2+1))]
    path_to_sense_hpp = os.path.join(tempdir, 'sense.h')
    path_to_sense_cpp = os.path.join(tempdir, 'sense.cpp')
    with open(path_to_sense_hpp, "w") as f:
        header_code = '''
        #ifndef SENSE_H
        #define SENSE_H
        #include <mkl_service.h>
        #include <mkl_vml.h>
        #include <mkl_dfti.h>
        #include <cstring>
        extern DFTI_DESCRIPTOR_HANDLE hand;
        extern MKL_Complex16 in_cmplx[N_CMPLX], out_cmplx[N_CMPLX], k_cmplx[N_CMPLX];
        DFTI_DESCRIPTOR_HANDLE init_dfti();
        #endif'''
        f.write(header_code)
        #MKL_Complex16 is a type (probably struct)
    with open(path_to_sense_cpp, "w") as f:
        sense_code = '''
        #include <sense.h>
        DFTI_DESCRIPTOR_HANDLE hand;
        MKL_Complex16 in_cmplx[N_CMPLX], out_cmplx[N_CMPLX], k_cmplx[N_CMPLX];
        DFTI_DESCRIPTOR_HANDLE init_dfti()
        {{
            DFTI_DESCRIPTOR_HANDLE hand = 0;
            mkl_set_num_threads({mkl_threads});
            DftiCreateDescriptor(&hand, DFTI_DOUBLE, DFTI_REAL, 1, (MKL_LONG)N_REAL); //MKL_LONG status
            DftiSetValue(hand, DFTI_PLACEMENT, DFTI_NOT_INPLACE);
            DftiSetValue(hand, DFTI_CONJUGATE_EVEN_STORAGE, DFTI_COMPLEX_COMPLEX);
            DftiSetValue(hand, DFTI_BACKWARD_SCALE, 1. / N_REAL);
            //if (0 == status) status = DftiSetValue(hand, DFTI_THREAD_LIMIT, {mkl_threads});
            DftiCommitDescriptor(hand); //if (0 != status) cout << "ERROR, status = " << status << "\\n";
            return hand;
        }} '''.format(mkl_threads=mkl_threads, )
        f.write(sense_code)

    # device_get_array_name is bound to the device's get_array_name() function,
    # which returns the string names of the arrays behind Brian objects
    device_get_array_name = CurrentDeviceProxy.__getattr__(device, 'get_array_name')
    # insert_code is a function used to insert code into the generated main()
    # function
    insert_code = CurrentDeviceProxy.__getattr__(device, 'insert_code')

    ### Computing the kernel (Owen changed it to a Gaussian kernel now)
    # Owen uses a trick here: he creates a NeuronGroup that doesn't really do
    # anything in the simulation. It is just a dummy NeuronGroup holding an
    # array that he wants to be able to access during runtime.
    if params["sigma_s"] == np.infty:
        k = np.ones(N)/N
    elif params["sigma_s"] < 1e-3:
        k = np.zeros(N)
        k[0] = 1
    else:
        intercell = params["x_NI"]
        length = intercell*N
        d = np.linspace(intercell-length/2, length/2, N)
#......... (rest of the code omitted) .........
Developer: Maltimore, Project: InhibitoryPlasticity, Lines: 101, Source: mytools.py
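A minimal sketch (runtime mode) of the "dummy NeuronGroup" trick described in the comments of Example 14: a group with a single parameter equation adds no dynamics to the simulation, it merely owns an array that other code can access at runtime. The names here are illustrative:

from brian2 import NeuronGroup, Network, ms
import numpy as np

N = 100
kernel_holder = NeuronGroup(N, 'k : 1')   # 'k : 1' is a plain parameter, no differential equation
kernel_holder.k = np.ones(N) / N          # store the kernel values in the group's array
net = Network(kernel_holder)
net.run(0*ms)                             # the group contributes no state updates of its own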

Example 15: simulate_brunel_network

def simulate_brunel_network(
        N_Excit=5000,
        N_Inhib=None,
        N_extern=N_POISSON_INPUT,
        connection_probability=CONNECTION_PROBABILITY_EPSILON,
        w0=SYNAPTIC_WEIGHT_W0,
        g=RELATIVE_INHIBITORY_STRENGTH_G,
        synaptic_delay=SYNAPTIC_DELAY,
        poisson_input_rate=POISSON_INPUT_RATE,
        w_external=None,
        v_rest=V_REST,
        v_reset=V_RESET,
        firing_threshold=FIRING_THRESHOLD,
        membrane_time_scale=MEMBRANE_TIME_SCALE,
        abs_refractory_period=ABSOLUTE_REFRACTORY_PERIOD,
        monitored_subset_size=100,
        random_vm_init=False,
        sim_time=100.*b2.ms):
    """
    Fully parametrized implementation of a sparsely connected network of LIF neurons (Brunel 2000)

    Args:
        N_Excit (int): Size of the excitatory population
        N_Inhib (int): optional. Size of the inhibitory population.
            If not set (=None), N_Inhib is set to N_excit/4.
        N_extern (int): optional. Number of presynaptic excitatory Poisson neurons. Note: if set to a value,
            this number does NOT depend on N_Excit and does NOT depend on connection_probability (this is
            different from the book and the paper). Only if N_extern is set to None is N_extern computed as
            N_Excit*connection_probability.
        connection_probability (float): probability to connect to any of the (N_Excit+N_Inhib) neurons
            CE = connection_probability*N_Excit
            CI = connection_probability*N_Inhib
            Cexternal = N_extern
        w0 (float): Synaptic strength J
        g (float): relative importance of inhibition. J_exc = w0. J_inhib = -g*w0
        synaptic_delay (Quantity): Delay between presynaptic spike and postsynaptic increase of v_m
        poisson_input_rate (Quantity): Poisson rate of the external population
        w_external (float): optional. Synaptic weight of the excitatory external poisson neurons onto all
            neurons in the network. Default is None, in that case w_external is set to w0, which is the
            standard value in the book and in the paper Brunel2000.
            The purpose of this parameter is to see the effect of external input in the
            absence of network feedback(setting w0 to 0mV and w_external>0).
        v_rest (Quantity): Resting potential
        v_reset (Quantity): Reset potential
        firing_threshold (Quantity): Spike threshold
        membrane_time_scale (Quantity): tau_m
        abs_refractory_period (Quantity): absolute refractory period, tau_ref
        monitored_subset_size (int): number of neurons for which a voltage monitor records Vm
        random_vm_init (bool): if true, the membrane voltage of each neuron is initialized with a
            random value drawn from Uniform(v_rest, firing_threshold)
        sim_time (Quantity): Simulation time

    Returns:
        (rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons)
        PopulationRateMonitor: Rate Monitor
        SpikeMonitor: SpikeMonitor for ALL (N_Excit+N_Inhib) neurons
        StateMonitor: membrane voltage for a selected subset of neurons
        list: index of monitored neurons. length = monitored_subset_size
    """
    if N_Inhib is None:
        N_Inhib = int(N_Excit/4)
    if N_extern is None:
        N_extern = int(N_Excit*connection_probability)
    if w_external is None:
        w_external = w0

    J_excit = w0
    J_inhib = -g*w0

    lif_dynamics = """
    dv/dt = -(v-v_rest) / membrane_time_scale : volt (unless refractory)"""

    network = NeuronGroup(
        N_Excit+N_Inhib, model=lif_dynamics,
        threshold="v>firing_threshold", reset="v=v_reset", refractory=abs_refractory_period,
        method="linear")
    if random_vm_init:
        network.v = random.uniform(v_rest/b2.mV, high=firing_threshold/b2.mV, size=(N_Excit+N_Inhib))*b2.mV
    else:
        network.v = v_rest
    excitatory_population = network[:N_Excit]
    inhibitory_population = network[N_Excit:]

    exc_synapses = Synapses(excitatory_population, target=network, on_pre="v += J_excit", delay=synaptic_delay)
    exc_synapses.connect(p=connection_probability)

    inhib_synapses = Synapses(inhibitory_population, target=network, on_pre="v += J_inhib", delay=synaptic_delay)
    inhib_synapses.connect(p=connection_probability)

    external_poisson_input = PoissonInput(target=network, target_var="v", N=N_extern,
                                          rate=poisson_input_rate, weight=w_external)

    # collect data of a subset of neurons:
    monitored_subset_size = min(monitored_subset_size, (N_Excit+N_Inhib))
    idx_monitored_neurons = sample(range(N_Excit+N_Inhib), monitored_subset_size)
    rate_monitor = PopulationRateMonitor(network)
    # record= some_list is not supported? :-(
    spike_monitor = SpikeMonitor(network, record=idx_monitored_neurons)
    voltage_monitor = StateMonitor(network, "v", record=idx_monitored_neurons)

#......... (rest of the code omitted) .........
Developer: EPFL-LCN, Project: neuronaldynamics-exercises, Lines: 101, Source: LIF_spiking_network.py
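A hedged usage sketch of simulate_brunel_network, based only on the signature and docstring above (the argument values are illustrative; all omitted parameters fall back to the module-level defaults referenced in the signature):

import brian2 as b2

rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons = simulate_brunel_network(
    N_Excit=2000, monitored_subset_size=50, sim_time=200.*b2.ms)

print(spike_monitor.num_spikes)   # total number of spikes recorded in the network
print(rate_monitor.rate[:5])      # first few samples of the population rate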


Note: The brian2.NeuronGroup class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.