

Python scipy.ravel Function Code Examples

This article collects and summarizes typical usage examples of the scipy.ravel function in Python. If you have been wondering what exactly scipy.ravel does, how to call it, or what real-world code that uses it looks like, the curated examples here should help.


Below are 15 code examples of the ravel function, drawn from open-source projects and sorted by popularity.
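Before the project examples, here is a minimal, self-contained sketch of what ravel does (the array values are made up for illustration). Note that scipy.ravel was simply a top-level re-export of numpy.ravel in older SciPy versions; recent SciPy releases deprecate or remove these NumPy aliases, so the NumPy spelling is shown alongside.

import numpy as np
import scipy as sp  # sp.ravel is available only on older SciPy versions

# A small 2-D map, analogous to the data_map arrays in the examples below.
data_map = np.zeros((3, 4), dtype=int)
data_map[1, :] = 255

# ravel flattens to 1-D in row-major (C) order and returns a view when
# possible, so writes to data_vector may show up in data_map as well.
data_vector = np.ravel(data_map)  # older SciPy: sp.ravel(data_map)
print(data_vector)        # [  0   0   0   0 255 255 255 255   0   0   0   0]
print(data_vector.shape)  # (12,)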

Example 1: test_process_data

 def test_process_data(self, data_field_class):
     r"""
     Checks that the _process_data method works for horizontal and vertical channels
     """
     #
     # creating horizontal channels
     eval_chans = EvalChannels(data_field_class())
     eval_chans.data_map = sp.zeros((eval_chans.nz, eval_chans.nx), dtype=int)
     eval_chans.data_map[2:4, :] = 255
     eval_chans.data_map[6:9, :] = 255
     eval_chans.data_vector = sp.ravel(eval_chans.data_map)
     eval_chans.args = {
         'axis': 'x',
         'thresh': 100
     }
     eval_chans._process_data()
     #
     # creating vertical channels
     eval_chans = EvalChannels(data_field_class())
     eval_chans.data_map = sp.zeros((eval_chans.nz, eval_chans.nx), dtype=int)
     eval_chans.data_map[:, 2:4] = 255
     eval_chans.data_map[:, 6:9] = 255
     eval_chans.data_vector = sp.ravel(eval_chans.data_map)
     eval_chans.args = {
         'axis': 'z',
         'thresh': 100
     }
     eval_chans._process_data()
     #
     eval_chans.args = {
         'axis': 'y',
         'thresh': 100
     }
     eval_chans._process_data()
Author: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 34 | Source: TestEvalChannels.py

Example 2: _generate_masked_mesh

 def _generate_masked_mesh(self, cell_mask=None):
     r"""
     Generates the mesh based on the cell mask provided
     """
     #
     if cell_mask is None:
         cell_mask = sp.ones(self.data_map.shape, dtype=bool)
     #
     # initializing arrays
     self._edges = sp.ones(0, dtype=str)
     self._merge_patch_pairs = sp.ones(0, dtype=str)
     self._create_blocks(cell_mask)
     #
     # building face arrays
     mapper = sp.ravel(sp.array(cell_mask, dtype=int))
     mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
     mapper = sp.reshape(mapper, (self.nz, self.nx))
     mapper[~cell_mask] = -sp.iinfo(int).max
     #
     boundary_dict = {
         'bottom':
             {'bottom': mapper[0, :][cell_mask[0, :]]},
         'top':
             {'top': mapper[-1, :][cell_mask[-1, :]]},
         'left':
             {'left': mapper[:, 0][cell_mask[:, 0]]},
         'right':
             {'right': mapper[:, -1][cell_mask[:, -1]]},
         'front':
             {'front': mapper[cell_mask]},
         'back':
             {'back': mapper[cell_mask]},
         'internal':
             {'bottom': [], 'top': [], 'left': [], 'right': []}
     }
     #
     # determining cells linked to a masked cell
     cell_mask = sp.where(~sp.ravel(cell_mask))[0]
     inds = sp.in1d(self._field._cell_interfaces, cell_mask)
     inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
     inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
     inds = (inds == 1)
     links = self._field._cell_interfaces[inds]
     #
     # adjusting order so masked cells are all on links[:, 1]
     swap = sp.in1d(links[:, 0], cell_mask)
     links[swap] = links[swap, ::-1]
     #
     # setting side based on index difference
     sides = sp.ndarray(len(links), dtype='<U6')
     sides[sp.where(links[:, 1] == links[:, 0]-self.nx)[0]] = 'bottom'
     sides[sp.where(links[:, 1] == links[:, 0]+self.nx)[0]] = 'top'
     sides[sp.where(links[:, 1] == links[:, 0]-1)[0]] = 'left'
     sides[sp.where(links[:, 1] == links[:, 0]+1)[0]] = 'right'
     #
     # adding each block to the internal face dictionary
     inds = sp.ravel(mapper)[links[:, 0]]
     for side, block_id in zip(sides, inds):
         boundary_dict['internal'][side].append(block_id)
     self.set_boundary_patches(boundary_dict, reset=True)
Author: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 60 | Source: __BlockMeshDict__.py

Example 3: process_maps

def process_maps(aper_map, data_map1, data_map2, args):
    r"""
    Subtracts the two data maps and then calculates percentiles of the result
    before outputting a final map to file.
    """
    #
    # creating resultant map from clone of aperture map
    result = aper_map.clone()
    result.data_map = data_map1 - data_map2
    result.data_vector = sp.ravel(result.data_map)
    result.infile = args.out_name
    result.outfile = args.out_name
    #
    print('Percentiles of data_map1 - data_map2')
    output_percentile_set(result, args)
    #
    # checking if data is to be normalized and/or absolute
    if args.post_abs:
        result.data_map = sp.absolute(result.data_map)
        result.data_vector = sp.absolute(result.data_vector)
    #
    if args.post_normalize:
        result.data_map = result.data_map/sp.amax(sp.absolute(result.data_map))
        result.data_vector = sp.ravel(result.data_map)
    #
    return result
Author: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 26 | Source: apm_subtract_data_maps.py

Example 4: newEpisode

    def newEpisode(self):
        if self.learning:
            params = ravel(self.explorationlayer.module.params)
            target = ravel(sum(self.history.getSequence(self.history.getNumSequences()-1)[2]) / 500)
        
            if target != 0.0:
                self.gp.addSample(params, target)
                if len(self.gp.trainx) > 20:
                    self.gp.trainx = self.gp.trainx[-20:, :]
                    self.gp.trainy = self.gp.trainy[-20:]
                    self.gp.noise = self.gp.noise[-20:]
                    
                self.gp._calculate()
                        
                # get new parameters where mean was highest
                max_cov = diag(self.gp.pred_cov).max()
                indices = where(diag(self.gp.pred_cov) == max_cov)[0]
                pick = indices[random.randint(len(indices))]
                new_param = self.gp.testx[pick]
            
                # check if that one exists already in gp training set
                if len(where(self.gp.trainx == new_param)[0]) > 0:
                    # add some normal noise to it
                    new_param += random.normal(0, 1, len(new_param))

                self.explorationlayer.module._setParameters(new_param)

            else:
                self.explorationlayer.drawRandomWeights()
        
        # don't call StateDependentAgent.newEpisode() because it randomizes the params
        LearningAgent.newEpisode(self)
Author: HKou | Project: pybrain | Lines: 32 | Source: statedependentgp.py

Example 5: __init__

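    # NOTE: this excerpt uses bare NumPy/SciPy names; it assumes the enclosing
    # module has imports along the lines of (an assumption, not verified):
    #   from numpy import size, shape, reshape, array, ravel, dot, eye, concatenate
    #   from numpy.linalg import pinv
    #   from scipy.sparse import bmat            # accepts None blocks
    #   from scipy.sparse.linalg import eigs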
    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
Author: riscy | Project: mllm | Lines: 59 | Source: system_identifier.py

Example 6: _getSequenceField

 def _getSequenceField(self, index, field):
     """Return a sequence of one single field given by `field` and indexed by
     `index`."""
     seq = ravel(self.getField('sequence_index'))
     if len(seq) == index + 1:
         # user wants to access the last sequence, return until end of data
         return self.getField(field)[seq[index]:]
     if len(seq) < index + 1:
         # sequence index beyond number of sequences. raise exception
         raise IndexError('sequence does not exist.')
     return self.getField(field)[seq[index]:seq[index + 1]]
Author: ZachPhillipsGary | Project: CS200-NLP-ANNsProject | Lines: 11 | Source: sequential.py

Example 7: plotCurves

    def plotCurves(self, showSamples=False, force2D=True):
        from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray

        if not self.calculated:
            self._calculate()

        if self.indim == 1:
            clf()
            hold(True)
            if showSamples:
                # plot samples (gray)
                for _ in range(5):
                    plot(
                        self.testx,
                        self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov),
                        color="gray",
                    )

            # plot training set
            plot(self.trainx, self.trainy, "bx")
            # plot mean (blue)
            plot(self.testx, self.pred_mean, "b", linewidth=1)
            # plot variance (as "polygon" going from left to right for upper half and back for lower half)
            fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
            filly = r_[self.pred_mean + 2 * diag(self.pred_cov), self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
            fill(fillx, filly, facecolor="gray", edgecolor="white", alpha=0.3)
            title("1D Gaussian Process with mean and variance")

        elif self.indim == 2 and not force2D:
            from matplotlib import axes3d as a3

            fig = gcf()
            fig.clear()
            ax = a3.Axes3D(fig)  # @UndefinedVariable

            # plot training set
            ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]), ravel(self.trainy), "ro")

            # plot mean
            (x, y, z) = map(
                lambda m: m.reshape(int(sqrt(len(m))), int(sqrt(len(m)))),
                (self.testx[:, 0], self.testx[:, 1], self.pred_mean),
            )
            ax.plot_wireframe(x, y, z, colors="gray")
            return ax

        elif self.indim == 2 and force2D:
            # plot mean on pcolor map
            gray()
            # (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
            m = int(floor(sqrt(len(self.pred_mean))))
            pcolor(self.pred_mean.reshape(m, m)[::-1, :])

        else:
            print("plotting only supported for indim=1 or indim=2.")
Author: avain | Project: pybrain | Lines: 54 | Source: gaussprocess.py

Example 8: integrateObservation

 def integrateObservation(self, obs):
     if len(obs) == 3:
         if self.useSpecialInfo:
             self.lastobs[-2:] = obs[:2]
         leftindex = max(0, 11 - self.inGridSize // 2)
         rightindex = min(22, 11 + self.inGridSize // 2 + 1)
         middle = obs[2][leftindex:rightindex, leftindex:rightindex]
         #boolmid = logical_not(logical_not(middle))*1.
         if self.useSpecialInfo:
             self.lastobs[:-2] = ravel(middle)
         else:
             self.lastobs[:] = ravel(middle)
Author: DioMuller | Project: ai-exercices | Lines: 12 | Source: networkagent.py

Example 9: sphere_features

def sphere_features(features, sphere_vectors):
    
    features.shape = features.shape[0], -1

    fmean, fstd = sphere_vectors
    features -= fmean        
    assert((fstd!=0).all())
    features /= fstd

    assert(not sp.isnan(sp.ravel(features)).any())
    assert(not sp.isinf(sp.ravel(features)).any())
    
    return features
Author: jaberg | Project: sclas | Lines: 13 | Source: features.py

Example 10: removeSequence

    def removeSequence(self, index):
        """Remove the `index`'th sequence from the dataset and places the
        marker to the sample following the removed sequence."""
        if index >= self.getNumSequences():
            # sequence doesn't exist, raise exception
            raise IndexError('sequence does not exist.')
        sequences = ravel(self.getField('sequence_index'))
        seqstart = sequences[index]
        if index == self.getNumSequences() - 1:
            # last sequence is going to be removed
            lastSeqDeleted = True
            seqend = self.getLength()
        else:
            lastSeqDeleted = False
            # sequence to remove is not last one (sequence_index exists)
            seqend = sequences[index + 1]

        # cut out data from all fields
        for label in self.link:
            # concatenate rows from start to seqstart and from seqend to end
            self.data[label] = r_[self.data[label][:seqstart, :], self.data[label][seqend:, :]]
            # update endmarkers of linked fields
            self.endmarker[label] -= seqend - seqstart

        # update sequence indices
        for i, val in enumerate(sequences):
            if val > seqstart:
                self.data['sequence_index'][i, :] -= seqend - seqstart

        # remove sequence index of deleted sequence and reduce its endmarker
        self.data['sequence_index'] = r_[
            self.data['sequence_index'][:index, :], self.data['sequence_index'][index + 1:, :]]
        self.endmarker['sequence_index'] -= 1

        if lastSeqDeleted:
            # last sequence was removed
            # move sequence marker to last remaining sequence
            self.currentSeq = index - 1
            # move sample marker to end of dataset
            self.index = self.getLength()
            # if there was only 1 sequence left, re-initialize sequence index
            if self.getLength() == 0:
                self.clear()
        else:
            # removed sequence was not last one (sequence_index exists)
            # move sequence marker to the new sequence at position 'index'
            self.currentSeq = index
            # move sample marker to beginning of sequence at position 'index'
            self.index = ravel(self.getField('sequence_index'))[index]
Author: firestrand | Project: pybrain-gpu | Lines: 49 | Source: sequential.py

Example 11: output_percentile_set

def output_percentile_set(data_field, args):
    r"""
    Computes four sets of percentiles and stacks them as columns: raw data,
    normalized data, absolute-value data, and normalized absolute-value data
    """
    data = {}
    #
    # outputting percentiles of initial subtraction to screen
    field = data_field.clone()
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['raw'] = pctle.processed_data
    #
    # normalizing data
    field = data_field.clone()
    field.data_map = field.data_map/sp.amax(sp.absolute(field.data_map))
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['norm'] = pctle.processed_data
    #
    # taking absolute value of data
    field = data_field.clone()
    field.data_map = sp.absolute(field.data_map)
    field.data_vector = sp.absolute(field.data_vector)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs'] = pctle.processed_data
    #
    # absolute value + normed
    field.data_map = field.data_map/sp.amax(field.data_map)
    field.data_vector = sp.ravel(field.data_map)
    pctle = Percentiles(field, percentiles=args.perc)
    pctle.process()
    data['abs+norm'] = pctle.processed_data
    #
    # outputting stacked percentiles
    fmt = '    {:>6.2f}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\t{: 0.6e}\n'
    content = 'Percentile\tRaw Data\tAbsolute\tNormalized\tNorm+abs\n'
    data = zip(args.perc, data['raw'].values(),
               data['abs'].values(),
               data['norm'].values(),
               data['abs+norm'].values())
    #
    for row in data:
        content += fmt.format(*row)
    content += '\n'
    print(content)
Author: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 48 | Source: apm_subtract_data_maps.py

Example 12: _updateWeights

 def _updateWeights(self, state, action, reward, next_state, learned_policy=None):
     """ Policy is a function that returns a probability vector for all actions, 
     given the current state(-features). """
     if learned_policy is None:
         learned_policy = self._greedyPolicy
     
     self._updateEtraces(state, action)
     
     phi = zeros((self.num_actions, self.num_features))
     phi[action] += state        
     phi_n = outer(learned_policy(next_state), next_state)
     
     self._A += outer(ravel(self._etraces), ravel(phi - self.rewardDiscount * phi_n))
     self._b += reward * ravel(self._etraces)       
     
     self._theta = dot(pinv2(self._A), self._b).reshape(self.num_actions, self.num_features)
Author: Angeliqe | Project: pybrain | Lines: 16 | Source: linearfa.py

Example 13: make_y0

def make_y0(model):
    """ Make y0 """
    def mu_ij(i, j):
        return -sp.sqrt(uij[j, i] + (model.c / (1 - model.p[j]))
                        - (1 - model.d) * ubar[j]
                        - model.d * v0[j])

    # \bar{u} : status quo payoffs
    ubar = -(model.ideals ** 2).sum(1) + model.K
    # TODO: where did plus 10 come from?
    uij = (-(model.ideals[:, 0] - model.ideals[:, 0][:, sp.newaxis])**2 +
           -(model.ideals[:, 1] - model.ideals[:, 1][:, sp.newaxis])**2 + model.K)
    # v_0
    v0 = (uij * model.p[:, sp.newaxis]).sum(1) + model.c
    # \lambda_0
    lam0 = sp.ones((5, 6)) * -sp.sqrt(model.c)
    # if m_i = i
    lam0[sp.r_[0:5], sp.r_[0:5]] = 1
    lam0 = sp.reshape(lam0, (lam0.size, ))
    # x_0
    x0 = sp.reshape(sp.repeat(model.ideals, 6, axis=0), (60, ))
    # \mu_0
    mu0 = sp.zeros((5, 6, 2))
    # For players
    for i in range(5):
        # For coalitions
        for mi in range(6):
            # for each other player in the coalition
            ii = i * 6 + mi
            mu0[i, mi, 0] = mu_ij(i, model.part1[ii])
            mu0[i, mi, 1] = mu_ij(i, model.part2[ii])
    mu0 = sp.ravel(mu0)
    # y_0
    y0 = sp.concatenate((v0, lam0, x0, mu0))
    return y0
Author: jrnold | Project: psc585 | Lines: 35 | Source: ps3.py

Example 14: learn

 def learn(self):
     """ calls the gradient calculation function and executes a step in direction
         of the gradient, scaled with a small learning rate alpha. """
     assert self.ds is not None
     assert self.module is not None
     
     # get the deltas from the dataset
     deltas = self.ds.getField('deltas')
     
     # initialize matrix D and vector R
     D = ones((self.ds.getNumSequences(), self.module.paramdim + 1))
     R = zeros((self.ds.getNumSequences(), 1))
     
     # calculate the gradient with pseudo inverse
     for seq in range(self.ds.getNumSequences()):
         _state, _action, reward = self.ds.getSequence(seq)
         D[seq,:-1] = deltas[seq,:]
         R[seq,:] = mean(reward)
     
     beta = dot(pinv(D), R)        
     gradient = ravel(beta[:-1])
     
     # update the weights
     self.original = self.gd(gradient)       
     self.module._setParameters(self.original)
        
     self.module.reset()
Author: HKou | Project: pybrain | Lines: 27 | Source: basic.py

Example 15: lossTraces

def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)
    
    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise
    
    # tracking progress by callback
    paramtraces = {'index':-1}
    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates
        
    # initialization    
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print(algo, fwrap, dim, maxsteps, end=' ')
    
    # store initial step   
    algo.callback(algo)
    algo.run(maxsteps)

    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print(median(ls[-1]))
    return ls
Author: bitfort | Project: py-optim | Lines: 35 | Source: experiments.py


Note: The scipy.ravel examples in this article were compiled by 纯净天空 from code hosted on GitHub, MSDocs, and other open-source platforms. The snippets were selected from projects contributed by the open-source community, and copyright in the source code remains with the original authors; refer to each project's license before using or redistributing the code. Do not reproduce this compilation without permission.