当前位置: 首页>>代码示例>>Python>>正文


Python numpy.asfarray函数代码示例

本文整理汇总了Python中numpy.asfarray函数的典型用法代码示例。如果您正苦于以下问题:Python asfarray函数的具体用法?Python asfarray怎么用?Python asfarray使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了asfarray函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: scaleSignal

def scaleSignal(img, fitParams=None, backgroundToZero=False, reference=None):
    '''
    Scale the image intensities.

    backgroundToZero=True -> scale between 0 (average background)
                             and 1 (maximum signal)
    backgroundToZero=False -> scale to signal +- 3 std

    fitParams -- optional fit parameters forwarded to signalRange/scaleParams
    reference -- reference image; scale this image to match its signal range

    returns:
    scaled image (float array)
    '''
    img = imread(img)
    if reference is not None:
        # Linearly map this image's signal range onto the reference's range.
        low, high = signalRange(img, fitParams)
        low2, high2 = signalRange(reference)
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
        img = np.asarray(img, dtype=float)
        ampl = (high2 - low2) / (high - low)
        img -= low
        img *= ampl
        img += low2
        return img
    else:
        offs, div = scaleParams(img, fitParams, backgroundToZero)
        img = np.asarray(img, dtype=float) - offs
        img /= div
        # py3 print function; 'div' is the divisor ('divident' was a typo)
        print('offset: %s, divisor: %s' % (offs, div))
        return img
开发者ID:Jayme-T,项目名称:imgProcessor,代码行数:28,代码来源:signal.py

示例2: SetInitialPoints

    def SetInitialPoints(self, x0, radius=0.05):
        """Set Initial Points with Guess (x0)

input::
    - x0: must be a sequence of length self.nDim
    - radius: generate random points within [-radius*x0, radius*x0]
        for i!=0 when a simplex-type initial guess in required"""
        x0 = asfarray(x0)
        rank = len(x0.shape)
        if rank is 0:
            x0 = asfarray([x0])
            rank = 1
        if not -1 < rank < 2:
            raise ValueError, "Initial guess must be a scalar or rank-1 sequence."
        if len(x0) != self.nDim:
            raise ValueError, "Initial guess must be length %s" % self.nDim

        #slightly alter initial values for solvers that depend on randomness
        min = x0*(1-radius)
        max = x0*(1+radius)
        numzeros = len(x0[x0==0])
        min[min==0] = asarray([-radius for i in range(numzeros)])
        max[max==0] = asarray([radius for i in range(numzeros)])
        self.SetRandomInitialPoints(min,max)
        #stick initial values in population[i], i=0
        self.population[0] = x0.tolist()
开发者ID:cdeil,项目名称:mystic,代码行数:26,代码来源:abstract_solver.py

示例3: testCopyConstructorBP

    def testCopyConstructorBP(self, level=1):
        """Test that a copied bandpass ESN generates the same result."""
        # set bandpass parameters
        self.net.setSimAlgorithm(SIM_BP)
        f1 = N.linspace(0.1, 1., self.net.getSize())
        f2 = N.linspace(0.0001, 0.5, self.net.getSize())
        self.net.init()
        self.net.setBPCutoff(f1, f2)

        # set output weight matrix
        trainin = N.random.rand(self.ins, self.train_size) * 2 - 1
        trainout = N.random.rand(self.outs, self.train_size) * 2 - 1
        # N.asfarray was removed in NumPy 2.0; self.dtype is always a float
        # type here, so asarray with an explicit dtype is equivalent.
        trainin = N.asarray(trainin, dtype=self.dtype)
        trainout = N.asarray(trainout, dtype=self.dtype)
        self.net.train(trainin, trainout, 1)

        # copy network
        # ATTENTION: operator= is shallow copy!
        # 'is' compared string identity, not equality -- a latent bug.
        if self.dtype == 'float32':
            netA = SingleESN(self.net)
        else:
            netA = DoubleESN(self.net)

        # simulate both networks separately and compare the results
        indata = N.random.rand(self.ins, self.sim_size)*2 - 1
        indata = N.asarray(indata, dtype=self.dtype)
        outdata = N.empty((self.outs, self.sim_size), self.dtype)
        outdataA = N.empty((self.outs, self.sim_size), self.dtype)
        self.net.simulate(indata, outdata)
        netA.simulate(indata, outdataA)
        assert_array_almost_equal(outdata, outdataA)

示例4: testPI

    def testPI(self, level=1):
        """Test TRAIN_PI with zero input and feedback."""
        # init network
        self.net.setSimAlgorithm(SIM_STD)
        self.net.setTrainAlgorithm(TRAIN_PI)
        self.net.init()

        # train network with zero input
        washout = 2
        indata = N.zeros((self.ins, self.train_size), self.dtype)
        outdata = N.random.rand(self.outs, self.train_size) * 2 - 1
        # N.asfarray was removed in NumPy 2.0; self.dtype is a float type,
        # so asarray with an explicit dtype is equivalent.
        indata = N.asarray(indata, dtype=self.dtype)
        outdata = N.asarray(outdata, dtype=self.dtype)
        self.net.train(indata, outdata, washout)
        wout_target = self.net.getWout().copy()

        # teacher forcing, collect states
        X = self._teacherForcing(indata, outdata)

        # restructure data: stack states and inputs, drop the washout
        M = N.r_[X, indata]
        M = M[:, washout:self.train_size].T
        T = outdata[:, washout:self.train_size].T

        # calc pseudo inverse: wout = pinv(M) * T
        wout = (N.dot(pinv(M), T)).T

        # normalize both results so the comparison is scale-invariant
        wout = wout / abs(wout).max()
        wout_target = wout_target / abs(wout_target).max()
        assert_array_almost_equal(wout_target, wout, 2)

示例5: numerical_partials

def numerical_partials(f, p, f0=None, pmin=None, pmax=None, prel=1.e-6,
                       pabs=1.e-9, pmask=None, args=(), kwargs=None):
    """Compute partial derivatives of f(p) wrt p by finite differences.

    f -- callable invoked as f(p, *args, **kwargs), returning an array
    p -- parameter vector at which to differentiate
    f0 -- optional precomputed f(p); computed here if None
    pmin, pmax -- optional parameter bounds; forward steps that would
        exceed pmax are flipped to backward steps
    prel, pabs -- relative and absolute step sizes (scalar or per-parameter)
    pmask -- optional boolean mask selecting which parameters to vary
    args, kwargs -- extra arguments forwarded to f

    Returns array dfdp with row i the forward-difference derivative wrt p[i].
    """
    from numpy import asarray  # asfarray was removed in NumPy 2.0
    if kwargs is None:
        kwargs = {}
    p = asarray(p, dtype=float)
    # BUG FIX: f0 was previously computed as f(p) WITHOUT *args/**kwargs,
    # inconsistent with the perturbed evaluations in the loop below.
    f0 = f(p, *args, **kwargs) if f0 is None else asarray(f0, dtype=float)
    dp = zeros_like(p)
    prel, pabs = prel + dp, pabs + dp  # broadcast scalars to p's shape
    dp = maximum(prel*absolute(p), pabs)
    pfull = p.copy()
    if pmask is not None:
        p, dp = p[pmask], dp[pmask]
    else:
        pmask = ones(p.shape, dtype=bool)
    # Assume that pmin <= p <= pmax, but check p+dp.
    if pmax is not None:
        mask = p+dp > pmax
        dp[mask] *= -1  # step backwards where a forward step would exceed pmax
        if mask.any():
            if pmin is not None and (p+dp < pmin).any():
                raise ValueError("pmin and pmax too close together")
    dfdp = []
    # dp_i renamed so the loop no longer rebinds the dp array itself.
    for dp_i, p1 in zip(dp, p+diag(dp)):
        if not dp_i:
            raise ValueError("zero step size, check prel and pabs")
        pfull[pmask] = p1
        dfdp.append((f(pfull, *args, **kwargs) - f0)/dp_i)
    return array(dfdp)
开发者ID:dhmunro,项目名称:npplus,代码行数:28,代码来源:lsqfit.py

示例6: __init__

    def __init__(self, ad):
        """Load fit parameters from a FITS header so the instance can
        evaluate the pixel-to-wavelength fitting function.

        The ['WAVECAL',1] extension header supplies (from low to high order):
          - pixrange: min/max pixel range over which the fit is valid
          - fitname: fitting function name
                     ('polynomial', 'legendre' or 'chebyshev')
          - fitorder: polynomial order used for the fit
          - fitcoeff: whitespace-separated fit coefficients
          - COEFF_A .. COEFF_F: 3D surface-fit coefficients

        :param ad: AstroData-like object exposing a ['WAVECAL',1] header.

        Example:
          ef = gfit.Pix2coord(ad)
          ef(1300)      # Evaluate wavelength at pixel 1300 in the middle row
          ef(1300,400)  # Evaluate wavelength at pixel 1300 at row 400
        """
        tbh = ad['WAVECAL',1].header
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
        pixsample = np.asarray(tbh['pixrange'].split(), dtype=float)
        self.fitname = tbh['fitname']
        self.order = tbh['fitorder']
        self.coeff = np.asarray(tbh['fitcoeff'].split(), dtype=float)
        # Set up the pix2wavelength evaluator: normalize x into [-1, 1]
        # over the valid pixel range.
        xmin, xmax = pixsample
        self.xnorm = lambda x: 2.*(x - xmin) / (xmax-xmin) - 1

        # Gather the 3D surface-fit coefficients, low to high order.
        f3dcoeff = []
        for k in ['A', 'B', 'C', 'D', 'E', 'F']:
            f3dcoeff.append(tbh['COEFF_'+k])
        self.f3dcoeff = np.asarray(f3dcoeff, dtype=float)

示例7: relu

def relu(x, deriv=False):
    """Rectified linear unit activation.

    x -- array-like input
    deriv -- if True, return the elementwise derivative instead:
             1 where x > 0, else 0 (0 at x == 0 by convention)

    Returns a float numpy array.
    """
    # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
    out = np.asarray(np.maximum(0, x), dtype=float)
    if not deriv:
        return out
    # max(0, x) is positive exactly where x > 0, so clamping those
    # entries to 1 yields the derivative.
    out[out > 0] = 1
    return out

示例8: snv_on_chromosome

def snv_on_chromosome(axis, variants, segments, genes,
                      do_trend, do_boost=False):
    """Plot variant allele frequencies (VAF) along one chromosome.

    axis -- matplotlib axis to draw on
    variants -- table with 'start' positions and 'alt_freq' values
    segments -- optional segments; if truthy, the mean VAF per segment is drawn
    genes -- currently unused (gene highlighting is a TODO below)
    do_trend -- currently unused in this function
    do_boost -- if True, plot TumorBoost-adjusted frequencies instead
    """
    # XXX only set x-limits if not already done for probes/segments
    # setup_chromosome(axis, None, segments, variants,
    #                  0.0, 1.0, "VAF")
    axis.set_ylim(0.0, 1.0)
    axis.set_ylabel("VAF")
    axis.set_xlabel("Position (Mb)")
    axis.get_yaxis().tick_left()
    axis.get_xaxis().tick_top()
    axis.tick_params(which='both', direction='out',
                     labelbottom=False, labeltop=False)

    x_mb = variants["start"] * MB
    if do_boost:
        y = variants.tumor_boost()
    else:
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
        y = np.asarray(variants["alt_freq"], dtype=float)
    axis.scatter(x_mb, y, color=POINT_COLOR, alpha=0.3)
    # TODO - highlight genes/selection
    if segments:
        # Draw average VAF within each segment
        posns = np.asarray(variants["start"], dtype=float) # * MB
        y = np.asarray(y, dtype=float)
        for v_start, v_end, v_freq in group_snvs_by_segments(posns, y,
                                                             segments):
            # ENH: color by segment gain/loss
            axis.plot([v_start * MB, v_end * MB], [v_freq, v_freq],
                      color='#C0C0C0', linewidth=2, #zorder=1,
                      solid_capstyle='round')

示例9: _det

def _det(xvert, yvert):
    """
    Compute twice the area of the triangle defined by points using the
    determinant formula.

    Parameters
    ----------
    xvert : array
        A vector of nodal x-coords.
    yvert : array
        A vector of nodal y-coords.

    Returns
    -------
    area : float
        Twice the area of the triangle defined by the points:
            area is positive if points define polygon in anticlockwise order.
            area is negative if points define polygon in clockwise order.
            area is zero if at least two of the points are concident or if
            all points are collinear.

    """
    xvert = np.asfarray(xvert)
    yvert = np.asfarray(yvert)
    x_prev = np.concatenate(([xvert[-1]], xvert[:-1]))
    y_prev = np.concatenate(([yvert[-1]], yvert[:-1]))
    return np.sum(yvert * x_prev - xvert * y_prev, axis=0)
开发者ID:Joshuaalbert,项目名称:factor,代码行数:27,代码来源:polygon.py

示例10: calcNLL

  def calcNLL(self, doc, state):
    """Calculates the negative log likelihood of the document, given the relevant information. This is the DocState object again, but this time with the entire state object as well. Probability (Expressed as negative log likelihood.) is specificly calculated using all terms that contain a variable in the document, but none that would be identical for all documents. That is, it contains the probability of the cluster, the probability of the dp given the cluster, and the probability of the samples, which factors in both the drawing of the topic and the drawing of the word. The ordering of the samples is considered irrelevant, with both the topic and word defining uniqueness. Some subtle approximation is made - see if you can spot it in the code!"""
    # NOTE(review): Python 2 code (xrange, dict.iteritems); numpy.asfarray was
    # removed in NumPy 2.0. A py3/NumPy-2 port would need range/items/asarray.
    # Accumulated into self.nll (a side effect) rather than returned.
    self.nll = 0.0

    # Probability of drawing the cluster...
    self.nll -= math.log(state.clusterUse[doc.cluster])
    self.nll += math.log(state.clusterUse.sum()+state.clusterConc)


    # Probability of drawing the documents dp from its cluster, taking into account the abnormal entrys...
    # cl is the cluster record; presumably cl[0] holds per-instance topic/count
    # pairs, cl[1] a concentration, cl[2] behaviour weights -- TODO confirm.
    cl = state.cluster[doc.cluster]
    # Log multinomial over behaviours, renormalised to those this document flags.
    logBMN = numpy.log(cl[2] / (cl[2]*numpy.asfarray(doc.behFlags)).sum())

    # Count how many dp entries use each behaviour, and (for the normal
    # behaviour 0) how many use each cluster instance.
    behInstCounts = numpy.zeros(doc.behFlags.shape[0], dtype=numpy.int32)
    instCounts = numpy.zeros(cl[0].shape[0], dtype=numpy.int32)
    for ii in xrange(doc.use.shape[0]):
      behInstCounts[doc.use[ii,0]] += 1
      if doc.use[ii,0]==0: instCounts[doc.use[ii,1]] += 1

    # Multinomial likelihood of the behaviour counts (the gammaln terms are
    # the log multinomial coefficient).
    self.nll -= (logBMN * behInstCounts).sum()
    self.nll -= scipy.special.gammaln(behInstCounts.sum() + 1.0)
    self.nll += scipy.special.gammaln(behInstCounts + 1.0).sum()

    # Multinomial likelihood of drawing each instance from the cluster.
    norm = cl[0][:,1].sum() + cl[1]
    self.nll -= (numpy.log(numpy.asfarray(cl[0][:,1])/norm)*instCounts).sum()
    self.nll -= scipy.special.gammaln(instCounts.sum() + 1.0) # Cancels with a term from the above - can optimise, but would rather have neat code.
    self.nll += scipy.special.gammaln(instCounts + 1.0).sum()


    # Count the numbers of word/topic instance pairs in the data structure - sum using a dictionary...
    samp_count = collections.defaultdict(int) # [instance,word]
    for s in xrange(doc.samples.shape[0]):
      samp_count[doc.samples[s,0],doc.samples[s,1]] += 1

    # Calculate the probability distribution of drawing each topic instance and the probability of drawing each word/topic assignment...
    inst = numpy.asfarray(doc.use[:,2])
    inst /= inst.sum() + doc.conc
    
    # Row-normalised word distributions, smoothed by the beta prior.
    topicWord = numpy.asfarray(state.topicWord) + state.beta
    topicWord = (topicWord.T/topicWord.sum(axis=1)).T

    abnormTopicWord = numpy.asfarray(state.abnormTopicWord) + state.beta
    abnormTopicWord = (abnormTopicWord.T/abnormTopicWord.sum(axis=1)).T

    instLog = numpy.log(inst)
    wordLog = numpy.log(topicWord)
    abnormLog = numpy.log(abnormTopicWord)


    # Now sum into nll the probability of drawing the samples that have been drawn - gets a tad complex as includes the probability of drawing the topic from the documents dp and then the probability of drawing the word from the topic, except I've merged them such that it doesn't look like that is what is happening...
    self.nll -= scipy.special.gammaln(doc.samples.shape[0]+1.0)
    for pair, count in samp_count.iteritems():
      # 'inst' here rebinds the name from the array above to an index --
      # intentional in the original, but worth knowing when reading.
      inst, word = pair
      beh = doc.use[inst,0]
      if beh==0:
        # Normal behaviour: look up the topic via the cluster instance table.
        topic = cl[0][doc.use[inst,1],0]
        self.nll -= count * (wordLog[topic,word] + instLog[inst])
      else:
        # Abnormal behaviour: word drawn from the behaviour's own topic.
        self.nll -= count * (abnormLog[beh,word] + instLog[inst])
      self.nll += scipy.special.gammaln(count+1.0)

示例11: find_peaks

def find_peaks(lpix, indl, indr):
    """
    Given the left and right edges of a line list, calculate each peak
    centre with a simple flux-weighted centroid algorithm.

    lpix -- 1D flux array
    indl -- sequence of left edge indices
    indr -- sequence of right edge indices (exclusive)

    Returns (centres, max_flux, widths) as three float arrays.
    """
    centres = []
    max_flux = []
    wth = []
    for i0, i1 in zip(indl, indr):
        fl = lpix[i0:i1]
        wa = np.arange(i0, i1)
        if not len(wa):
            continue
        try:
            # flux-weighted centroid over the window
            ew = len(wa)*fl
            ewtot = np.sum(ew)
            wa_ewtot = np.sum(wa * ew)
            center = wa_ewtot / ewtot
        except Exception:
            # NOTE(review): the fallback is (i1-i0)/2., the half-width rather
            # than the midpoint (i0+i1)/2. -- preserved as in the original.
            center = (i1-i0)/2.

        centres.append(center)
        # Empty windows are skipped above, so i0 == i1 should be unreachable;
        # kept from the original as a diagnostic.
        if i0 == i1:
            print('FNDPK00:', i0)
        try:
            max_flux.append(max(fl))
        except ValueError:
            # max() of an empty window
            print('FNDPK:', i0, i1)
        wth.append(abs(i1-i0))

    # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
    return (np.asarray(centres, dtype=float),
            np.asarray(max_flux, dtype=float),
            np.asarray(wth, dtype=float))

示例12: __init__

    def __init__(self, X=None, Y=None, Z=None, clipboard=False,
                 x_label=None, x_unit=None,
                 y_label=None, y_unit=None,
                 label=None, unit=None):
        """Build the surface from X/Y breakpoints and Z values.

        X, Y -- 1D breakpoint vectors (mesh1d or array-like)
        Z -- values on the X/Y grid
        clipboard -- if True, also read the data from the clipboard
        x_label/x_unit, y_label/y_unit -- axis metadata (currently unused here)
        label, unit -- metadata attached to the Z mesh

        NOTE(review): with clipboard=True the X/Y/Z conversions below still
        run first and fail on the default None arguments -- confirm whether
        callers always pass data alongside clipboard=True.
        """
        self.sp = None

        # np.asfarray(..., dtype='float64') was removed in NumPy 2.0;
        # np.asarray with the same dtype is equivalent.
        if isinstance(X, mesh1d):
            self.X = X
        else:
            self.X = mesh1d(np.asarray(X, dtype='float64'))

        if isinstance(Y, mesh1d):
            self.Y = Y
        else:
            self.Y = mesh1d(np.asarray(Y, dtype='float64'))

        self.label = label
        self.unit = unit
        self.Z = mesh1d(np.asarray(Z, dtype='float64'), self.label, self.unit)

        if clipboard is True:
            self.read_clipboard()
            self.__init_sp()
        elif self.Z.shape[0] > 1:
            # Only build the spline when there is more than one Z row.
            self.__init_sp()

示例13: __solver__

 def __solver__(self, p):
     # Solve a nonlinear least-squares problem via scipy.optimize.leastsq,
     # translating between the framework's problem object `p` and SciPy.
     # NOTE(review): `p` is presumably an OpenOpt problem instance exposing
     # x0, f, df, tolerances, and iterfcn callbacks -- confirm against caller.
     
     # Record the starting point and its objective (sum of squared residuals).
     p.xk = p.x0.copy()
     p.fk = asfarray((p.f(p.x0)) ** 2).sum().flatten()
         
     # Let the framework run its per-iteration callback; it may set a stop flag.
     p.iterfcn()
     if p.istop:
         p.xf, p.ff = p.xk, p.fk
         return 
     
     # Use the analytic Jacobian when the user supplied one; otherwise let
     # leastsq estimate it by finite differences (epsfcn = p.diffInt).
     if p.userProvided.df:
         xf, cov_x, infodict, mesg, ier = leastsq(p.f, p.x0, Dfun=p.df, xtol = p.xtol, ftol = p.ftol, maxfev = p.maxFunEvals, full_output = 1)
     else:
         xf, cov_x, infodict, mesg, ier = leastsq(p.f, p.x0, xtol = p.xtol, maxfev = p.maxFunEvals, epsfcn = p.diffInt, ftol = p.ftol, full_output = 1)
     
     # Map SciPy's integer status onto the framework's convention:
     # 1000 for success (ier == 1), -1000 otherwise.
     if ier == 1: p.istop = 1000
     else: p.istop = -1000
     p.msg = mesg
         
     # Final objective value at the solution returned by leastsq.
     ff = asfarray((p.f(xf)) ** 2).sum().flatten()
     p.xk = xf
     p.fk = ff
     
     p.xf = xf
     p.ff = ff        
     p.iterfcn()

示例14: _cdf

    def _cdf(self, xloc, left, right, cache):
        """
        Cumulative distribution function of the sum `left + right`, where at
        most one operand is a distribution.

        Example:
            >>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.5 1.  1. ]
            >>> print(chaospy.Add(chaospy.Uniform(), 1).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.  0.5 1. ]
            >>> print(chaospy.Add(1, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.  0.5 1. ]
            >>> print(chaospy.Add(1, 1).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0. 0. 0. 1.]
        """
        left = evaluation.get_forward_cache(left, cache)
        right = evaluation.get_forward_cache(right, cache)

        if isinstance(left, Dist):
            if isinstance(right, Dist):
                raise evaluation.DependencyError(
                    "under-defined distribution {} or {}".format(left, right))
        elif not isinstance(right, Dist):
            # Both operands are constants: the CDF is a step function.
            # numpy.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
            # is equivalent.
            return numpy.asarray(left+right <= xloc, dtype=float)
        else:
            # Normalize so that `left` is always the distribution.
            left, right = right, left
        # Shift the evaluation points by the constant and forward to the
        # underlying distribution.
        xloc = (xloc.T-numpy.asarray(right, dtype=float).T).T
        output = evaluation.evaluate_forward(left, xloc, cache=cache)
        assert output.shape == xloc.shape
        return output
开发者ID:hplgit,项目名称:chaospy,代码行数:29,代码来源:addition.py

示例15: get_fasta_stats

def get_fasta_stats(cnarr, fa_fname):
    """Calculate GC and RepeatMasker content of each bin in the FASTA genome.

    cnarr -- bin coordinates to extract from the genome
    fa_fname -- path to the FASTA file

    Returns (gc_values, repeatmasker_values) as two float arrays, one entry
    per bin.
    """
    logging.info("Calculating GC and RepeatMasker content in %s ...", fa_fname)
    gc_rm_vals = [calculate_gc_lo(subseq)
                  for subseq in fasta_extract_regions(fa_fname, cnarr)]
    gc_vals, rm_vals = zip(*gc_rm_vals)
    # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float) is equivalent.
    return np.asarray(gc_vals, dtype=float), np.asarray(rm_vals, dtype=float)
开发者ID:chapmanb,项目名称:cnvkit,代码行数:7,代码来源:reference.py


注:本文中的numpy.asfarray函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。