

Python numpy.append Function Code Examples

This article collects typical usage examples of Python's numpy.append function. If you are wondering what numpy.append does, how to call it, or what real-world uses look like, the curated code samples below should help.


The following shows 15 code examples of the append function, sorted by popularity by default.
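
As a quick refresher before the examples, the sketch below shows the basic behavior of numpy.append (the values are purely illustrative): it always returns a new array rather than modifying its input, it flattens both arguments when no axis is given, and it requires matching shapes along the other dimensions when an axis is specified.

import numpy as np

a = np.array([1, 2, 3])
b = np.append(a, [4, 5])               # new, flattened array: [1 2 3 4 5]
print(a)                               # [1 2 3], the original array is unchanged

m = np.array([[1, 2], [3, 4]])
rows = np.append(m, [[5, 6]], axis=0)  # with axis=0 the appended block must also be 2-D
print(rows)                            # [[1 2] [3 4] [5 6]]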

Example 1: get_mean_vmax

def get_mean_vmax():
    hostvmaxs = []
    hostvmax25s = []
    hostvmax75s = []
    twentyfifth, fifty, seventyfifth = get_percentile()
    rootdir = "/Users/catherinefielder/Documents/Research_Halos/HaloDetail"
    for subdir, dirs, files in os.walk(rootdir):
        head, tail = os.path.split(subdir)
        haloname = tail
        for file in files:
            if file.endswith("_columnsadded_final"):
                values = ascii.read(
                    os.path.join(subdir, file), format="commented_header"
                )  # Get full path and access file
                hostvmax = values[1]["host_vmax"]
                hostvmaxs = np.append(hostvmaxs, hostvmax)
    twentyfifth = np.percentile(hostvmaxs, 25)
    seventyfifth = np.percentile(hostvmaxs, 75)
    for i in range(0, len(hostvmaxs)):
        if hostvmaxs[i] >= seventyfifth:
            hostvmax75s = np.append(hostvmax75s, hostvmaxs[i])
        elif hostvmaxs[i] < twentyfifth:
            hostvmax25s = np.append(hostvmax25s, hostvmaxs[i])
        else:
            continue
    sumvmax = np.sum(hostvmaxs)
    meanvmax = np.divide(sumvmax, len(hostvmaxs))
    mean75 = np.mean(hostvmax75s)
    mean25 = np.mean(hostvmax25s)
    print "mean"
    print meanvmax
    print mean75
    print mean25
    return meanvmax, mean75, mean25
Developer: cfielder, Project: DM_haloprops, Lines of code: 34, Source file: total_mean_CVF_spin.py
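
A side note on the pattern in this example (np.append inside a loop): each call copies the whole array, so the loop is quadratic in the number of files read. A common alternative, sketched below with made-up values standing in for the host_vmax entries, is to accumulate in a Python list and convert once at the end.

import numpy as np

hostvmaxs = []
for value in (150.0, 220.0, 310.0):   # stand-ins for the host_vmax values read from each file
    hostvmaxs.append(value)           # list append is amortized O(1)
hostvmaxs = np.array(hostvmaxs)       # single conversion to an ndarray at the end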

Example 2: iir_bandstops

def iir_bandstops(fstops, fs, order=4):
    """ellip notch filter
    fstops is a list of entries of the form [frequency (Hz), df, df2]                           
    where df is the pass width and df2 is the stop width (narrower                              
    than the pass width). Use caution if passing more than one freq at a time,                  
    because the filter response might behave in ways you don't expect.
    """
    nyq = 0.5 * fs

    # Zeros zd, poles pd, and gain kd for the digital filter
    zd = np.array([])
    pd = np.array([])
    kd = 1

    # Notches
    for fstopData in fstops:
        fstop = fstopData[0]
        df = fstopData[1]
        df2 = fstopData[2]
        low = (fstop - df) / nyq
        high = (fstop + df) / nyq
        low2 = (fstop - df2) / nyq
        high2 = (fstop + df2) / nyq
        z, p, k = iirdesign([low,high], [low2,high2], gpass=1, gstop=6,
                            ftype='ellip', output='zpk')
        zd = np.append(zd,z)
        pd = np.append(pd,p)

    # Set gain to one at 100 Hz...better not notch there                                        
    bPrelim,aPrelim = zpk2tf(zd, pd, 1)
    outFreq, outg0 = freqz(bPrelim, aPrelim, 100/nyq)

    # Return the numerator and denominator of the digital filter                                
    b,a = zpk2tf(zd,pd,k)
    return b, a
Developer: idaks, Project: ligo, Lines of code: 35, Source file: GW150914_tutorial_uri.py
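
A hypothetical call to the function above, for a single notch at the 60 Hz power line with a 4096 Hz sampling rate; the [frequency, df, df2] triple follows the docstring, and the specific numbers are illustrative only.

import numpy as np
from scipy.signal import iirdesign, zpk2tf, freqz   # dependencies used inside iir_bandstops

fs = 4096.0                         # assumed sampling rate in Hz
fstops = [[60.0, 1.0, 0.1]]         # one notch at 60 Hz: 1 Hz pass width, 0.1 Hz stop width
b, a = iir_bandstops(fstops, fs)    # transfer-function coefficients of the combined notch filter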

Example 3: loadParticles

def loadParticles(filename):

    file = open(filename)

    particlePools = {}

    for line in file.readlines():
        #print line
        if line[0] == '#':
            continue

        id, x, y, z, r = line.split()

        if id not in particlePools:
            particlePools[id] = Particles()

        pool = particlePools[id]
        
        pool.pos = numpy.append(pool.pos, 
                                [[float(x), float(y), float(z)]],
                                axis=0)

        pool.radii = numpy.append(pool.radii, float(r))


    file.close()

    return particlePools
Developer: Jintram, Project: egfrd, Lines of code: 28, Source file: renderParticles.py

Example 4: append_new_point

    def append_new_point(self, y, x=None):
        self._axis_y_array = np.append(self._axis_y_array, y)
        if x is not None:
            self._axis_x_array = np.append(self._axis_x_array, x)
        else:
            self._axis_x_array = np.arange(len(self._axis_y_array))

        if self.max_plot_points:
            if self._axis_y_array.size > self.max_plot_points:
                self._axis_y_array = np.delete(self._axis_y_array, 0)
                self._axis_x_array = np.delete(self._axis_x_array, 0)

        if self.single_curve is None:
            self.single_curve, = self.axes.plot(
                self._axis_y_array, linewidth=2, marker="s"
            )
        else:
            self.axes.fill(self._axis_y_array, "r", linewidth=2)

        self._axis_y_limits[1] = (
            self._axis_y_array.max() + self._axis_y_array.max() * 0.05
        )
        self.axes.set_ylim(self._axis_y_limits)
        self.single_curve.set_xdata(self._axis_x_array)
        self.single_curve.set_ydata(self._axis_y_array)
        self.axes.relim()
        self.axes.autoscale_view()
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
        self.axes.grid(True)

        # TODO move y lims as property
        self.axes.set_ylim(
            (0, self._axis_y_array.max() + self._axis_y_array.max() * 0.05)
        )
Developer: IvarsKarpics, Project: mxcube, Lines of code: 35, Source file: matplot_widget.py

Example 5: get_data

	def get_data(self,name):
		obj = self.read_csv(name)

		date = []
		date.append(obj[0]["Date"])

		high=np.array([],dtype="float32")
		low=np.array([],dtype="float32")
		vol=np.array([],dtype="float32")
		aver=np.array([],dtype="float32")

		before_high=float(obj[0]["High"])
		before_low=float(obj[0]["Low"])
		before_vol=float(obj[0]["Volume"])

		for day in obj[1:]:

			date.append(day["Date"])
			aver=np.append(aver,(float(day["High"])+float(day["Low"]))/2)
			high=np.append(high,(float(day["High"])-before_high)/before_high)
			low=np.append(low,(float(day["Low"])-before_low)/before_low)
			vol=np.append(vol,(float(day["Volume"])-before_vol)/before_vol)

			before_high=float(day["High"])
			before_low=float(day["Low"])
			before_vol=float(day["Volume"])

		output={"start":date[0],"date":date,"high":high,"low":low,"vol":vol,"aver":aver}
		return output
Developer: Kaisuke5, Project: FinancialNetwork, Lines of code: 29, Source file: CsvHandler.py

Example 6: read_fits

def read_fits(ccd, order_frame, soldir, interp=False):
   rm, xpos, target, res, w_c, y1, y2 =  mode_setup_information(ccd.header)

   if target=='upper': 
     target=True
   else:
     target=False


   xarr=np.array([])
   farr=np.array([])
   oarr=np.array([])

   min_order = int(order_frame.data[order_frame.data>0].min())
   max_order = int(order_frame.data[order_frame.data>0].max())
    
   for n_order in np.arange(min_order, max_order):
      try:
         shift_dict, ws = pickle.load(open(soldir+'sol_%i.pkl' % n_order, 'rb'))
      except:
         continue
      x, f = xextract_order(ccd, order_frame, n_order, shift_dict, target=target, interp=interp)
      o=np.ones_like(x)*n_order
      xarr=np.append(xarr,x)
      farr=np.append(farr,f)
      oarr=np.append(oarr,o)
   return xarr,farr,oarr
Developer: saltastro, Project: pyhrs, Lines of code: 27, Source file: wavelength_2Dfit.py

Example 7: _indtosub_converter

def _indtosub_converter(dims, order='F', onebased=True):
    """Converter for changing linear indexing to subscript indexing

    See also
    --------
    Series.indtosub
    """

    _check_order(order)

    def indtosub_inline_onebased(k, dimprod):
        return tuple(int(mod(ceil(float(k)/y) - 1, x) + 1) for x, y in dimprod)

    def indtosub_inline_zerobased(k, dimprod):
        return tuple(int(mod(ceil(float(k+1)/y) - 1, x)) for x, y in dimprod)

    inline_fcn = indtosub_inline_onebased if onebased else indtosub_inline_zerobased

    if size(dims) > 1:
        if order == 'F':
            dimprod = list(zip(dims, append(1, cumprod(dims)[0:-1])))
        else:
            dimprod = list(zip(dims, append(1, cumprod(dims[::-1])[0:-1])[::-1]))
        converter = lambda k: inline_fcn(k, dimprod)
    else:
        converter = lambda k: (k,)

    return converter
Developer: Young-china, Project: thunder, Lines of code: 28, Source file: keys.py
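
For comparison, NumPy's built-in np.unravel_index performs the same linear-to-subscript conversion for the zero-based case; the dims and index below are illustrative, and the converter above (order='F', onebased=False) should produce the same tuple for this input.

import numpy as np

dims = (3, 4)
print(np.unravel_index(7, dims, order='F'))   # -> (1, 2), column-major subscripts for linear index 7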

Example 8: BED_extract

def BED_extract(path, nfft):
  list_data = numpy.array([])
  list_label = numpy.array([])
  
  """
  dic = {'W':[1,0],'L':[0,1],'E':[0,1],'A':[0,1],'F':[1,0],'T':[0,1],'N':[0.5,0.5]}
  """
  dic = {'W':[0,1],'L':[0,1],'E':[0,1],'A':[0,1],'F':[1,0],'T':[0,1],'N':[0.5,0.5]}
  

  for root, dir, files in os.walk(path):

    rootpath = os.path.join(os.path.abspath(path), root)

    for file in files:
      if os.path.splitext(file)[1].lower()=='.wav':
        filepath = os.path.join(rootpath, file)

        SR, X = wavfile.read(filepath)

        _, _, spec = mfcc(X, fs=SR, nfft=(nfft*2))

        list_data = numpy.append(list_data, numpy.mean(spec, axis=0)[:nfft]/numpy.max(spec))
        list_label = numpy.append(list_label, dic[file[5]])

  list_data = numpy.reshape(list_data, (len(list_data)//nfft, nfft))
  list_label = numpy.reshape(list_label, (len(list_label)//label_length, label_length))

  return list_data, list_label
Developer: j-pong, Project: tensorflow_test, Lines of code: 29, Source file: BEDLDC_input_distance.py

Example 9: ReadGeoPolygonLst

 def ReadGeoPolygonLst(self, polygonLst ):
     """
     Read GeoPolygon List from a txt file
     longitude latitude
     """
     f = open(polygonLst, 'r');
     NumbC=0;
     newpolygon=False;
     for lines in f.readlines():
         lines=lines.split();
         if newpolygon==True:
             lon=lines[0];
             if lon=='>':
                 newpolygon=False;
                 self.append(geopolygon)
                 continue;
             else:
                 lon=float(lines[0]);
                 lat=float(lines[1]);
                 geopolygon.lonArr=np.append(geopolygon.lonArr, lon);
                 geopolygon.latArr=np.append(geopolygon.latArr, lat);
         a=lines[0];
         b=lines[1];
         if a=='#' and b!='@P':
             continue;
         if b=='@P':
             NumbC=NumbC+1;
             newpolygon=True;
             geopolygon=GeoPolygon();
             continue;
     f.close()
     print('End of reading', NumbC, 'geological polygons')
     return
Developer: NoisyLeon, Project: SES3DPy, Lines of code: 33, Source file: GeoPolygon.py

Example 10: cap

def cap(guess_vector):
    """
    This takes the Euler equations and sets them equal to zero for an fsolve.
    Remember that keq was found by taking the derivative of the sum of the
        utility functions with respect to k in each time period, and that
        leq was the same, but because l only shows up in one period, it has a
        much smaller term.

    ### Parameters ###
    guess_vector: The first half is the initial guess for the capital, and
        the second half is the initial guess for the labor
    """
    #equations for keq
    ks = np.zeros(periods)
    ks[1:] = guess_vector[:periods-1]
    ls  = guess_vector[periods-1:]
    kk  = ks[:-1]
    kk1 = ks[1:]
    kk2 = np.zeros(periods-1)
    kk2[:-1] = ks[2:]
    lk  = ls[:-1]
    lk1 = ls[1:]
    #equation for leq
    ll = np.copy(ls)
    kl = np.copy(ks)
    kl1 = np.zeros(periods)
    kl1[:-1] = kl[1:]
    w = wage(ks, ls)
    r = rate(ks, ls)
    keq = ((lk*w+(1.+r-delta)*kk - kk1)**-gamma - (beta*(1+r-delta)*(lk1*w+(1+r-delta)*kk1-kk2)**-gamma))
    leq = ((w*(ll*w + (1+r-delta)*kl-kl1)**-gamma)-(1-ll)**-sigma)
    error = np.append(keq, leq)

    return np.append(keq, leq)
Developer: jdebacker, Project: firm_sandbox, Lines of code: 34, Source file: OLG.py
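
Since the docstring says the stacked Euler-equation residuals are meant for a root finder, a hypothetical call might look like the sketch below. The horizon and starting guesses are illustrative, and the model objects referenced by cap (periods, beta, delta, gamma, sigma, wage, rate) are assumed to be defined at module level as in the source project.

import numpy as np
from scipy.optimize import fsolve

periods = 3                                      # illustrative number of periods
guess = np.append(np.full(periods - 1, 0.1),     # initial guesses for capital k_2..k_T
                  np.full(periods, 0.9))         # initial guesses for labor l_1..l_T
solution = fsolve(cap, guess)                    # roots of the stacked keq/leq system
ks, ls = solution[:periods - 1], solution[periods - 1:]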

Example 11: Mie_ab

def Mie_ab(m,x):
#  http://pymiescatt.readthedocs.io/en/latest/forward.html#Mie_ab
  mx = m*x
  nmax = np.round(2+x+4*(x**(1/3)))
  nmx = np.round(max(nmax,np.abs(mx))+16)
  n = np.arange(1,nmax+1)
  nu = n + 0.5

  sx = np.sqrt(0.5*np.pi*x)
  px = sx*jv(nu,x)

  p1x = np.append(np.sin(x), px[0:int(nmax)-1])
  chx = -sx*yv(nu,x)

  ch1x = np.append(np.cos(x), chx[0:int(nmax)-1])
  gsx = px-(0+1j)*chx
  gs1x = p1x-(0+1j)*ch1x

  # B&H Equation 4.89
  Dn = np.zeros(int(nmx),dtype=complex)
  for i in range(int(nmx)-1,1,-1):
    Dn[i-1] = (i/mx)-(1/(Dn[i]+i/mx))

  D = Dn[1:int(nmax)+1] # Dn(mx), drop terms beyond nMax
  da = D/m+n/x
  db = m*D+n/x

  an = (da*px-p1x)/(da*gsx-gs1x)
  bn = (db*px-p1x)/(db*gsx-gs1x)

  return an, bn
Developer: dalerxli, Project: PyMieScatt, Lines of code: 31, Source file: Mie.py
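
A hypothetical call to Mie_ab, assuming (as in the linked PyMieScatt documentation) that m is the complex refractive index and x the dimensionless size parameter; the values are illustrative only.

import numpy as np
from scipy.special import jv, yv   # Bessel functions of the first and second kind used inside Mie_ab

an, bn = Mie_ab(1.5 + 0.01j, 2.0)  # m = 1.5 + 0.01i, size parameter x = 2
print(an.shape, bn.shape)          # one Mie coefficient per expansion order n = 1..nmax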

Example 12: read_power

def read_power(file, datadir='data/'):
    """ 
    29-apr-2009/dintrans: coded
    t,dat=read_power(name_power_file)
    Read a power spectra file like 'data/poweru.dat'
    """ 
    filename = path.join(datadir, file)
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
#
#  find the number of blocks (t,power) that should be read
#
    dim=read_dim(datadir=datadir)
    nblock=int(len(lines)/int(N.ceil(dim.nxgrid/2/8.)+1))
#
    with open(filename, 'r') as infile:
        t=N.zeros(1, dtype='Float32')
        data=N.zeros(1, dtype='Float32')
        for i in range(nblock):
            st=infile.readline()
            t=N.append(t, float(st))
            for ii in range(int(N.ceil(dim.nxgrid/2/8.))):
                st=infile.readline()
                data=N.append(data, N.asarray(st.split()).astype('f'))

    t=t[1:] ; data=data[1:]
    nt=len(t) ; nk=int(len(data)/nt)
    data=data.reshape(nt, nk)
    return t, data
Developer: belfhi, Project: pencil-code, Lines of code: 30, Source file: power.py

Example 13: find_offset_old

    def find_offset_old(self,datafile, nonlinmin, nonlinmax, exclude, threshold):
        '''find_offset is used to determine the systematic offset present
        in the experimental setup that causes data to not be symmetric
        about zero input angle. It reads in the output of laserBench and
        returns the offset (in degrees)'''
        
        input_a, output_a = np.loadtxt(datafile,usecols=(0,1),unpack=True)
        
        for e in exclude:
            did = np.where(input_a == e)
            output_a = np.delete(output_a, did)
            input_a = np.delete(input_a, did)

        pidx = np.where(input_a > nonlinmax)
        nidx = np.where(input_a < nonlinmin)
        
        in_a = np.append(input_a[nidx],input_a[pidx])
        out_a = np.append(-1*output_a[nidx],output_a[pidx])
        error = np.zeros(in_a.size)+1

        b = 1000.
        offset = 0.
        while abs(b) > threshold:
            m, b = ADE.fit_line(in_a,out_a,error)
            offset += b
            in_a += b

        return offset
Developer: eigenbrot, Project: snakes, Lines of code: 28, Source file: metaBench.py

Example 14: FindBigStuff

def FindBigStuff(data,xsd =3,sd_method = 'Quian'):
    
    #s = np.std(data,0) * xsd
    
    #print s
    spikelist = np.array([0,0,0])[None,...]
    m,n = data.shape
    s = np.zeros(n)
    for i in range(n):
        
        x = data[:,i]
        if sd_method == 'Quian':
            s[i] = xsd * np.median(np.abs(x)) / 0.6745
        elif sd_method == 'STD':
            s[i] = np.std(x) * xsd
        taux = np.diff(np.where(abs(x)>s[i],1,0))
        times = np.nonzero(taux==1)[0]
        times2 = np.nonzero(taux==-1)[0]
        if len(times) !=0:
            if len(times)-1 == len(times2):
                times2 = np.append(times2,m)
            elif len(times) == len(times2)-1:
                times = np.append(0,times)
            chs = np.ones(times.shape)*i
            aux = np.append(chs[...,None],times[...,None],1)   
            aux = np.append(aux,times2[...,None],1)  
            spikelist = np.append(spikelist,aux,0)
    return np.delete(spikelist, (0), axis=0),s
Developer: britodasilva, Project: pyhfo, Lines of code: 28, Source file: pyspike.py
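
The 'Quian' branch above estimates the noise level as median(|x|)/0.6745, a robust stand-in for the standard deviation that is much less sensitive to large spikes than np.std. A quick sanity check on synthetic Gaussian noise (parameters illustrative):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 2.0, 100000)         # zero-mean noise with true sigma = 2
print(np.median(np.abs(x)) / 0.6745)     # approximately 2, even if a few huge spikes are mixed in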

Example 15: arraySlidingWindow

def arraySlidingWindow(result_array, sliding_window_size, filter_ratio):
	array_length = np.size(result_array)
	buffer_array = np.zeros((1), dtype = np.int)

	for index in range(0, array_length - sliding_window_size):
		window_score = np.sum(result_array[index: index + sliding_window_size])
		if window_score > (sliding_window_size * filter_ratio):
			buffer_array = np.append(buffer_array, 1)
		else:
			buffer_array = np.append(buffer_array, 0)

	buffer_array= np.delete(buffer_array, 0)
	# print buffer_array
	length = np.size(buffer_array)
	flag_array = np.zeros((length), dtype = np.int)
	pre_value = 0
	for buffer_index , value in enumerate(buffer_array):
		if (pre_value - value) == -1:
			flag_array[buffer_index] = 1
		elif(pre_value - value) == 1:
			flag_array[buffer_index] = -1
		else:
			pass
		pre_value = value
	return flag_array
Developer: weilunzhong, Project: Intro_n_outro, Lines of code: 25, Source file: single_log_handler.py
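
A small illustrative run of arraySlidingWindow on a made-up binary detection array (window size and ratio chosen arbitrarily): the returned flag array marks where a qualifying run of detections begins (1) and where it ends (-1).

import numpy as np

result_array = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
flags = arraySlidingWindow(result_array, sliding_window_size=3, filter_ratio=0.5)
print(flags)   # e.g. [0 1 0 0 0 -1 0 0 0]; the output is shorter than the input by the window size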


Note: The numpy.append examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce this article without permission.