

Python AttrDict.update Method Code Examples

This article collects typical usage examples of the attrdict.AttrDict.update method in Python. If you are wondering what AttrDict.update does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, attrdict.AttrDict.


Seven code examples of the AttrDict.update method are shown below, sorted by popularity by default.
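
Before turning to the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what AttrDict.update does: like dict.update, it merges new keys into the mapping, and because the container is an AttrDict the merged keys then become readable both as items and as attributes.

from attrdict import AttrDict

cfg = AttrDict({'rate': 0, 'mode': 'storage'})
cfg.update({'rate_iops': 100})   # same semantics as dict.update
print(cfg['rate_iops'])          # 100  (item access)
print(cfg.rate_iops)             # 100  (attribute access provided by AttrDict)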

Example 1: BuildSurprise

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
def BuildSurprise(hist_times, t_before, base_rate, maxbin, surpmax, surpmin):

    # hist_times:  array of spike times for each whisker and direction, relative to the stimulus
    # t_before:    how far the PSTH bins extend (all 1 ms bins)
    # base_rate:   rate of a 1 ms bin of the blank PSTH
    # maxbin:      number of bin sizes at which the surprise is computed, from 1 to maxbin
    # surpmax:     up to what time the surprise is computed (typically 55 ms)
    # surpmin:     time from which we start looking for a response (typically over 5 ms)
    # (t_after, blankw and nsizeth appear in the original notes but are not parameters of
    #  this function: blankw is the blank whisker number, which changes with the experiment,
    #  and nsizeth is how many consecutive responsive bins are required)

    # everything is collected in this variable as an attribute dictionary
    Surprise = dict()  # placeholder; replaced by the AttrDict built below
    #-------------------------------------
    # compute the surprise (from surpmin to surpmax, in ms)
    SurpriseW = {}  # one entry per whisker (25 whiskers)

    for w in np.arange(25):
        Surprisefixbin = {}  # one entry per bin size for this whisker

        for binsize in np.arange(maxbin) + 1:
            Surprisefixbin[binsize] = BuildSingleSurprise(hist_times[w], base_rate, binsize, surpmax, t_before)

        SurpriseW[w] = Surprisefixbin

    Surprise = AttrDict({'Data': SurpriseW})
    Surprise.update({'logic_tree_data': '[whiskers][binsizes][direction][values] = 25x20x2x' + str(surpmax-surpmin)})

    return Surprise    
Developer ID: matigoldin, Project: Expect_analysis, Lines of code: 31, Source file: surp_functions.py
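
Because BuildSurprise wraps its result in an AttrDict, the returned structure can be read with attribute access as well as item access. A short, hypothetical usage sketch (the whisker and bin-size indices are placeholders, following the shape described in 'logic_tree_data'):

Surprise = BuildSurprise(hist_times, t_before, base_rate, maxbin, surpmax, surpmin)
print(Surprise.logic_tree_data)   # '[whiskers][binsizes][direction][values] = 25x20x2x...'
per_whisker = Surprise.Data[0]    # surprise results for whisker 0, keyed by bin size
one_binsize = per_whisker[1]      # output of BuildSingleSurprise for bin size 1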

Example 2: BuildPSTH

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
def BuildPSTH(Stims, Spikes, sampling_freq, exp, meas):
    
    stimtimes = {}                  # defined but not used in this excerpt
    stim_samp = 1/.0009997575757    # defined but not used in this excerpt
    # make an 'output dict'
    # the PSTH will be built on -tbefore:tafter
    PSTH_times = {}
    
    # Loop each neuron and get the spikes.
    for neuron in list(Spikes.keys()): 
        codename = 'exp'+ str(exp) + '_' + str(meas) + '_c' + str(neuron)
        psth = AttrDict({'clusnum': neuron, 'exp': int(exp), 'meas': int(meas[1]), 'shank': int(meas[3])})
        psth.update(AttrDict({'psth_counts': [], 'psth_times': []}))

        histo = build_hist_dict()
        spikes = Spikes[neuron].spike_times * 1000  # convert spike times to ms
        
        # loop over episodes and stims per episode, and populate the histograms
        for ep in np.arange(Stims.episodes)[:]:
            if ep < 30:
                stims = 82
            else:
                stims = 28

            # NOTE: this hard-coded value overrides the per-episode count computed just above
            # (it appears to be a debugging leftover in the original source)
            stims = 8
            for se in np.arange(stims):  # np.arange(Stims.stims_ep):
                code = str(int(Stims.st_ctrl[ep][se]))
                c = str(Stims.st_logic.ctrl[code])
                                
                if code=='0':
                    t_after=500
                    start = Stims.st_times[ep][se]
                    if len(spikes[(start <= spikes) * (spikes <= start + t_after)])>0:
                        histo[c].extend(spikes[(start <= spikes) * (spikes <= start + t_after)]-start)
                        histo['Counts'][c] +=  len(spikes[(start <= spikes) * (spikes <= start + t_after)])
                else:
                    code = str(int(Stims.st_types[ep][se]))                                                
                    t = str(Stims.st_logic.types[code])
                
                    code = str(int(Stims.st_pad[ep][se]))
                    p = Stims.st_logic.pad[code]
                    
                    r = Stims.st_rep[ep][se]
                    i = Stims.st_isi[ep][se]
                    start = Stims.st_times[ep][se]
                              
                    t_after = 500*r
                    
                    if len(spikes[(start <= spikes) * (spikes <= start + t_after)])>0:
                        histo[c][t][p][r][i].extend(spikes[(start <= spikes) * (spikes <= start + t_after)]-start)
                        histo['Counts'][c][t][p][r][i]  += len((spikes[(start <= spikes) * (spikes <= start + t_after)]))
                                                       
        PSTH_times[codename] = histo  # note: the psth AttrDict built above is not stored in the output
       
    return PSTH_times
Developer ID: matigoldin, Project: Expect_analysis, Lines of code: 60, Source file: stim_loading.py

Example 3: BuildPSTH

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
def BuildPSTH(stim, stimtype, Spikes, sampling_freq, t_before, t_after, starts, stops, exp, meas):
## The first task is to find the stimulus onset times for each whisker in each sweep in each direction
    #stim, stimtype = read_stimulus()
    stim = stim[np.where(stimtype=='F')[0], :, :]
    starts = starts[np.where(stimtype=='F')[0]]
    stops = stops[np.where(stimtype=='F')[0]]
    
    stimtimes = {}
    for w in np.arange(25, dtype='int'):
        timesUP = []
        timesDOWN = []
        for i in np.arange(len(stim), dtype='int'):
            indsUP = (np.where(stim[i, w, :] == 1108.8889)[0] - 1)[::2]
            # Find all time points where the stim equals 1108.8889; each ramp hits this value twice
            # (on the way up and on the way down), so we take every other index with [::2].
            timesUP.append(indsUP)
            indsDOWN = (np.where(stim[i, w, :] == -1108.8889)[0] - 1)[::2]
            # Same for -1108.8889, i.e. the downward deflections.
            timesDOWN.append(indsDOWN)
        stimtimes[w] = timesUP, timesDOWN  # stimtimes[whisker][0] = UP, stimtimes[whisker][1] = DOWN
    
    # make an 'output dict'
    # the PSTH will be built on -tbefore:tafter
    hist_inds = {}
    PSTH = {}
    psth = dict()
    psth_times = dict()
    
    # Loop each neuron and get the spikes.
    for neuron in list(Spikes.keys()): 
        codename = 'exp'+ str(exp) + '_' + str(meas) + '_c' + str(neuron)
        
        psth = AttrDict({'clusnum': neuron,'exp' : int(exp) , 'meas': int(meas[1]) , 'shank': int(meas[3])})
        
        psth.update(AttrDict({'psth_counts': [] , 'psth_times': [] , 'psth_length': [t_before,t_after] }))
        
        psth['psth_counts'], psth['psth_times'] = PSTH_spikes(stim, stimtype, stimtimes, Spikes[neuron].spike_times, sampling_freq, t_before, t_after, starts, stops)
        
        PSTH[codename] = psth
       
    return PSTH
Developer ID: matigoldin, Project: Expect_analysis, Lines of code: 44, Source file: PSTH_functions.py
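
Each entry stored in PSTH is itself an AttrDict, so downstream code can mix key and attribute access. A small hypothetical sketch (the codename follows the 'exp<exp>_<meas>_c<neuron>' pattern built above; the concrete name is made up):

PSTH = BuildPSTH(stim, stimtype, Spikes, sampling_freq, t_before, t_after, starts, stops, exp, meas)
psth = PSTH['exp1_m1s2_c3']        # hypothetical codename
print(psth.clusnum, psth.shank)    # metadata fields set at construction
print(psth.psth_length)            # [t_before, t_after]
counts, times = psth.psth_counts, psth.psth_times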

Example 4: BuildSig

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
def BuildSig(Surprise, thresh, blankw, nconsecabove, surpmin, nsizesaboveth):

    # here computing significance
    Detection = AttrDict({})

    Sig, SigSizesList, SigTop, PW, PWstrong, SigStrength, SigStrengthNorm = IsSig(Surprise, thresh, blankw, nconsecabove, surpmin, nsizesaboveth)

    Detection.update({'Sig': Sig, 'Sig_sizes': SigSizesList, 'Sig_top': SigTop, 'PW': PW, 'PWstrong': PWstrong})
    Detection.update({'Sig_strength': SigStrength, 'Sig_strength_norm': SigStrengthNorm})

    Detection.update({'logic_tree_significants': '[whiskers][direction] = 25x2'})
    Detection.update({'logic_tree_sig_sizes': '[whiskers][binsizes][direction] = 25x20x2'})
    #-------------------------------------
    
    return Detection
Developer ID: matigoldin, Project: Expect_analysis, Lines of code: 17, Source file: surp_functions.py
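
The four consecutive update calls above could equally be written as a single call; a minimal equivalent sketch using the same keys (a stylistic alternative, not code from the project):

Detection = AttrDict({})
Detection.update({'Sig': Sig, 'Sig_sizes': SigSizesList, 'Sig_top': SigTop,
                  'PW': PW, 'PWstrong': PWstrong,
                  'Sig_strength': SigStrength, 'Sig_strength_norm': SigStrengthNorm,
                  'logic_tree_significants': '[whiskers][direction] = 25x2',
                  'logic_tree_sig_sizes': '[whiskers][binsizes][direction] = 25x20x2'})

Splitting the calls, as the original does, simply groups related keys; the resulting AttrDict is identical.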

Example 5: build_dict_stim2

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]

#......... part of the code omitted here .........
    # count number of episodes
    episodes=1
    stims_ep=0
    for time in times[:-1]:             #last line is blank, we skip it
        if time == '': 
            episodes+=1
        if episodes ==1:
            stims_ep+=1
    print('   Total episodes: ', episodes)        
    print('   Total stims per episode: ', stims_ep)        
    #------------------------------------------
    # get episode duration
    #ep_duration = get_ep_duration(epdurationfile)
    ####
    # for the formatting I have now:
    #ep_length = ep_duration[0]
    ep_length = ep_duration/30
    ep1=0
    new=0
    ###
    #------------------------------------------
    # starting times of each stimulus
    starts = np.zeros([episodes, stims_ep]) + FC_ep*3100   # each FC episode is 3.1 sec
    tot_stims = episodes*stims_ep

    st_count=0
    for t in times[:-1]:
        if t!='':
            st = st_count % stims_ep
            ep = st_count // stims_ep 
            starts[ep,st] = float(t) + (ep-new)*ep_length + ep1
            st_count+=1
        else:
            st_count = (ep+1)*stims_ep          # correct for episodes with fewer stimuli in concatenated files
            #if ep==29:
            #    ep_length = ep_duration[1]
            #    ep1= ep_duration[2]
            #    new=30
            
    #for ep in np.arange(28,46,1):
    #    plt.plot(starts[ep,:], 'o')
    #print(starts[45,20])
    #------------------------------------------
    # stims        
    st_pad = np.zeros([episodes,stims_ep],dtype=int) # 1 row, 2 arc, 3 pad
    st_types = np.zeros([episodes,stims_ep],dtype=int) # 1 hold, 2 pass, 3 release        
    st_isi = np.zeros([episodes,stims_ep],dtype=int) # 10 , 20, 50
    st_rep = np.zeros([episodes,stims_ep],dtype=int) # 2, 5, 10
    st_ctrl = np.zeros([episodes,stims_ep],dtype=int) # 0, blank, 1 ctrl1, 2 ctrl2, 3 ctrl3, 10 normal 
    # dictionary of stims
    st_logic = {} 
    st_logic['pad'] = {'1' : 'ROW' , '2' : 'ARC'  , '3': 'PAD' }
    st_logic['types'] = { '1':'hold', '2': 'pass'  , '3': 'release'}
    st_logic['ctrl'] = {'0' : 'BLANK', '1' : 'Ctrl1', '2' : 'Ctrl2', '3' : 'Ctrl3', '10' : 'Normal'}

    ep=0
    st=0
    # get stims
    for stim in stims[:-3]:           # the last three lines are not needed
        line = stim.split()
        if line and ep<episodes:

            if line[1]=='BLANK':
                st+=1
                if st%stims_ep==0:
                    st=0
                    ep+=1
            elif len(line)>3:
                if line[1][1:4]=='ROW': st_pad[ep,st]=1
                elif line[1][1:4]=='ARC': st_pad[ep,st]=2
                elif line[1][1:4]=='PAD': st_pad[ep,st]=3

                if line[1][5:-1]=='Hold': st_types[ep,st]=1
                elif line[1][5:-1]=='Pass': st_types[ep,st]=2
                elif line[1][5:-1]=='Release': st_types[ep,st]=3

                if line[3]=='REP_2': st_rep[ep,st]=2
                elif line[3]=='REP_5': st_rep[ep,st]=5
                elif line[3]=='REP_10': st_rep[ep,st]=10

                if line[5]=='ISI_10': st_isi[ep,st]=10
                elif line[5]=='ISI_20': st_isi[ep,st]=20
                #elif line[5]=='ISI_50': st_isi[ep,st]=50
                elif line[5]=='ISI_2': st_isi[ep,st]=2
                    
                if line[7]=='Normal': st_ctrl[ep,st]=10
                elif line[7][0:5]=='Ctrl1': st_ctrl[ep,st]=1
                elif line[7][0:5]=='Ctrl2': st_ctrl[ep,st]=2
                elif line[7][0:5]=='Ctrl3': st_ctrl[ep,st]=3
                elif line[7][0:5]=='Ctrl4': st_ctrl[ep,st]=4

                st+=1
                if st%stims_ep==0:
                    st=0
                    ep+=1

    Stim_dict = AttrDict({'st_logic':st_logic, 'episodes': episodes  ,'stims_ep':stims_ep  ,'st_times': starts})
    Stim_dict.update({'st_isi':st_isi,'st_rep':st_rep,'st_types':st_types,'st_ctrl': st_ctrl,'st_pad':st_pad})
    
    return Stim_dict
Developer ID: matigoldin, Project: Expect_analysis, Lines of code: 104, Source file: stim_loading.py
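
The AttrDict returned here is exactly what Example 2 consumes: because AttrDict also converts nested mappings on attribute access, expressions such as Stims.st_logic.ctrl[code] and Stims.st_times[ep][se] in BuildPSTH resolve without explicit key lookups on the outer dict. A minimal, hypothetical access sketch (toy indices; the function's arguments are omitted in the excerpt above, so they are elided here too):

Stims = build_dict_stim2(...)            # arguments as in the full source
print(Stims.episodes, Stims.stims_ep)    # scalar fields added at construction
print(Stims.st_logic.ctrl['10'])         # 'Normal', a nested dict reached by attribute access
start = Stims.st_times[0][0]             # start time of the first stimulus of the first episode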

Example 6: Process

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
class Process(object):
    """A generic parent class for all climlab process objects.
    Every process object has a set of state variables on a spatial grid.

    For more general information about `Processes` and their role in climlab,
    see the :ref:`process_architecture` section of the climlab architecture documentation.

    **Initialization parameters** \n

    An instance of ``Process`` is initialized with the following
    arguments *(for detailed information see Object attributes below)*:

    :param Field state: spatial state variable for the process.
                        Set to ``None`` if not specified.
    :param domains:     domain(s) for the process
    :type domains:      :class:`~climlab.domain.domain._Domain` or dict of
                        :class:`~climlab.domain.domain._Domain`
    :param subprocess:  subprocess(es) of the process
    :type subprocess:   :class:`~climlab.process.process.Process` or dict of
                        :class:`~climlab.process.process.Process`
    :param array lat:   latitudinal points (optional)
    :param lev:         altitudinal points (optional)
    :param int num_lat: number of latitudinal points (optional)
    :param int num_levels:
                        number of altitudinal points (optional)
    :param dict input:  collection of input quantities
    :param bool verbose: Flag to control text output during instantiation
                         of the Process [default: True]

    **Object attributes** \n

    In addition to those of the parent class :class:`~climlab.process.process.Process`,
    the following object attributes are generated during initialization:

    :ivar dict domains:     dictionary of process :class:`~climlab.domain.domain._Domain`
    :ivar dict state:       dictionary of process states
                            (of type :class:`~climlab.domain.field.Field`)
    :ivar dict param:       dictionary of model parameters which are given
                            through ``**kwargs``
    :ivar dict diagnostics: a dictionary with all diagnostic variables
    :ivar dict _input_vars: collection of input quantities like boundary conditions
                            and other gridded quantities
    :ivar str creation_date:
                            date and time when process was created
    :ivar subprocess:       dictionary of subprocesses of the process
    :vartype subprocess:    dict of :class:`~climlab.process.process.Process`

    """

    def __str__(self):
        str1 = 'climlab Process of type {0}. \n'.format(type(self))
        str1 += 'State variables and domain shapes: \n'
        for varname in list(self.state.keys()):
            str1 += '  {0}: {1} \n'.format(varname, self.domains[varname].shape)
        str1 += 'The subprocess tree: \n'
        str1 += walk.process_tree(self, name=self.name)
        return str1

    def __init__(self, name='Untitled', state=None, domains=None, subprocess=None,
                 lat=None, lev=None, num_lat=None, num_levels=None,
                 input=None, verbose=True, **kwargs):
        # verbose flag used to control text output at process creation time
        self.verbose = verbose
        self.name = name
        # dictionary of domains. Keys are the domain names
        self.domains = _make_dict(domains, _Domain)
        #  If lat is given, create a simple default domain
        if lat is not None:
            sfc = zonal_mean_surface()
            self.domains.update({'default': sfc})
        # dictionary of state variables (all of type Field)
        self.state = AttrDict()
        states = _make_dict(state, Field)
        for name, value in states.items():
            self.set_state(name, value)
        # dictionary of model parameters
        self.param = kwargs
        # dictionary of diagnostic quantities
        #self.diagnostics = AttrDict()
        #self._diag_vars = frozenset()
        self._diag_vars = []
        # dictionary of input quantities
        #self.input = _make_dict(input, Field)
        if input is None:
            #self._input_vars = frozenset()
            self._input_vars = []
        else:
            self.add_input(list(input.keys()))
            for name, var in input.items():  # attach each input quantity as an attribute
                self.__dict__[name] = var
        self.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z",
                                           time.localtime())
        # subprocess is a dictionary of any sub-processes
        self.subprocess = AttrDict()
        if subprocess is not None:
            self.add_subprocesses(subprocess)
        #if subprocess is None:
        #    #self.subprocess = {}
        #    # a dictionary whose items can be accessed as attributes
        #    self.subprocess = AttrDict()
#......... part of the code omitted here .........
Developer ID: brian-rose, Project: climlab, Lines of code: 103, Source file: process.py
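
In this class, AttrDict is what lets containers such as self.state and self.subprocess be read both as dictionaries and as attributes. A minimal sketch of that access pattern using only AttrDict itself (hypothetical variable names, not climlab API):

state = AttrDict()
state.update({'Ts': 288.0})   # e.g. a surface temperature value
print(state['Ts'])            # dict-style access
print(state.Ts)               # attribute-style access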

Example 7: KBConfig

# Required import: from attrdict import AttrDict [as alias]
# Or: from attrdict.AttrDict import update [as alias]
class KBConfig(object):

    def __init__(self):
        # The default configuration file for KloudBuster
        default_cfg = resource_string(__name__, "cfg.scale.yaml")
        # Read the configuration file
        self.config_scale = AttrDict(yaml.safe_load(default_cfg))
        self.alt_cfg = None
        self.cred_tested = None
        self.cred_testing = None
        self.server_cfg = None
        self.client_cfg = None
        self.topo_cfg = None
        self.tenants_list = None
        self.storage_mode = False
        self.multicast_mode = False

    def update_configs(self):
        # Initialize the key pair name
        if self.config_scale['public_key_file']:
            # verify the public key file exists
            if not os.path.exists(self.config_scale['public_key_file']):
                LOG.error('Error: Invalid public key file: ' + self.config_scale['public_key_file'])
                sys.exit(1)
        else:
            # pick the user's public key if there is one
            pub_key = os.path.expanduser('~/.ssh/id_rsa.pub')
            if os.path.isfile(pub_key):
                self.config_scale['public_key_file'] = pub_key
                LOG.info('Using %s as public key for all VMs' % (pub_key))
            else:
                LOG.warning('No public key is found or specified to instantiate VMs. '
                            'You will not be able to access the VMs spawned by KloudBuster.')

        if self.storage_mode:
            disk_size = self.config_scale.client.storage_stage_configs.disk_size
            io_file_size = self.config_scale.client.storage_stage_configs.io_file_size
            if not disk_size:
                LOG.error('You have to specify a disk size in order to run storage tests.')
                raise KBConfigParseException()

            if io_file_size > disk_size:
                LOG.error('io_file_size must be less than or equal to disk_size.')
                raise KBConfigParseException()

        if self.alt_cfg:
            self.config_scale = self.config_scale + AttrDict(self.alt_cfg)

        # Use the default image name for Glance
        # defaults to something like "kloudbuster_v3"
        if not self.config_scale['image_name']:
            self.config_scale['image_name'] = kb_vm_agent.get_image_name()

        # A bit of config dict surgery, extract out the client and server side
        # and transplant the remaining (common part) into the client and server dict
        self.server_cfg = AttrDict(self.config_scale.pop('server'))
        self.client_cfg = AttrDict(self.config_scale.pop('client'))
        self.server_cfg.update(self.config_scale)
        self.client_cfg.update(self.config_scale)

        # Hardcode a few client side options
        self.client_cfg.update(hardcoded_client_cfg)

        # Adjust the VMs per network on the client side to match the total
        # VMs on the server side (1:1)
        # There is an additional VM in client kloud as a proxy node
        if self.storage_mode:
            self.client_cfg['vms_per_network'] = \
                self.client_cfg.storage_stage_configs.vm_count + 1
        else:
            self.client_cfg['vms_per_network'] = \
                self.get_total_vm_count(self.server_cfg) + 1

        # If multicast mode, the number of receivers is specified in the multicast config instead.
        if self.multicast_mode:
            self.server_cfg['vms_per_network'] =\
                self.client_cfg['multicast_tool_configs']['receivers'][-1]

        self.config_scale['server'] = self.server_cfg
        self.config_scale['client'] = self.client_cfg

        # missing rate or rate_iops = 0 = no-limit
        # note we need to use key based access to modify the content
        # (self.config_scale['client'].storage_tool_configs will make a shallow copy)
        for tc in self.config_scale['client']['storage_tool_configs']:
            if 'rate' not in tc:
                tc['rate'] = '0'
            if 'rate_iops' not in tc:
                tc['rate_iops'] = 0

    def init_with_cli(self):
        self.storage_mode = CONF.storage
        self.multicast_mode = CONF.multicast
        self.get_credentials()
        self.get_configs()
        self.get_topo_cfg()
        self.get_tenants_list()
        self.update_configs()

    def init_with_rest_api(self, **kwargs):
#......... part of the code omitted here .........
Developer ID: openstack, Project: kloudbuster, Lines of code: 103, Source file: kb_config.py
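
The "config dict surgery" in update_configs (popping the server and client sections out of the common config, then updating each with the shared settings) is a reusable AttrDict pattern. A minimal, self-contained sketch with toy keys (not the actual KloudBuster configuration):

common = AttrDict({'image_name': 'kb_img',
                   'server': {'vms_per_network': 2},
                   'client': {'vms_per_network': 1}})
server_cfg = AttrDict(common.pop('server'))
client_cfg = AttrDict(common.pop('client'))
server_cfg.update(common)   # transplant the shared keys into each side
client_cfg.update(common)
print(server_cfg.image_name, server_cfg.vms_per_network)   # kb_img 2

The excerpt also merges configurations with the AttrDict + operator (self.config_scale + AttrDict(self.alt_cfg)); judging from its use here, the alternate settings are layered on top of the defaults.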


Note: The attrdict.AttrDict.update method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code belongs to those authors. Please follow the license of the corresponding project when redistributing or using the code; do not reproduce this article without permission.