

Python smooth.smooth Function Code Examples

This article collects and summarizes typical usage examples of the smooth.smooth function in Python. If you are wondering exactly how the Python smooth function works, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.


The following presents 15 code examples of the smooth function, sorted by popularity by default.
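None of the snippets include the smooth function itself; they only call it with signatures like smooth(data, window_len, 'flat'), smooth(data, window_len, 'hanning') or smooth(data, window_len, 'blackman'). For reference, here is a minimal sketch of what such a function commonly looks like, modeled on the classic SciPy cookbook smoothing recipe; the actual smooth module in each project below may differ in details such as edge handling and output length.

import numpy as np

def smooth(x, window_len=11, window='hanning'):
    """Smooth a 1-D array with a moving window ('flat' for a plain moving
    average, or 'hanning', 'hamming', 'bartlett', 'blackman').  The signal is
    reflected at both ends so the window always has data to average over,
    which makes the output window_len - 1 samples longer than the input."""
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1-D arrays")
    if x.size < window_len:
        raise ValueError("input vector must be larger than the window size")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("unknown window type: %s" % window)
    # pad with mirrored copies of the ends of the signal
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    # 'flat' is a plain moving average; the other names map to NumPy window functions
    w = np.ones(window_len) if window == 'flat' else getattr(np, window)(window_len)
    return np.convolve(w / w.sum(), s, mode='valid')

Under this assumption the output has len(x) + window_len - 1 samples, which is consistent with Examples 4 and 5 below allocating smooth_window - 2 extra columns beyond the raw time window, and with Example 9 trimming the smoothed array back with [len/2:-(len/2)].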

Example 1: calc_we

def calc_we(cdata,basedir,log_target_rate):
     # WE parameters
    we_dir = os.path.join(basedir,cdata['name'],'analysis')
    we_dt = cdata['tau']
    we_nbins = cdata['nbins']
    we_target_count = cdata['target_count']

    winsize_flux = cdata['analysis']['winsize_flux']
    winsize_err = cdata['analysis']['winsize_err']
    last_n = cdata['analysis']['last_n']

    we_nframes = 0

    we_data_files = glob(os.path.join(we_dir,'*/rate.h5'))

    for fname in we_data_files:
        f = h5py.File(fname,'r')
        we_nframes = max(we_nframes,f.attrs['last_completed_iter']-2)
        f.close() 

    we_err = np.empty((len(we_data_files),2,we_nframes))
    we_err.fill(np.nan)

    for k,fname in enumerate(we_data_files):
        print 'we: {}'.format(fname)
        f = h5py.File(fname,'r')
        s = f['data'][:]
        dget = min(we_nframes,s.shape[0])
        s = s[:dget,:]

        ss = np.empty_like(s)
        for i in xrange(4):
            ss[:,i] = smooth(s[:,i],winsize_flux,'flat')

        sm = s[-last_n:,:].sum(0)

        rAB = (1.0*ss[:,1]) / (we_dt*ss[:,2])
        rBA = (1.0*ss[:,0]) / (we_dt*ss[:,3])

        rABm = (1.0*sm[1]) / (we_dt*sm[2])
        rBAm = (1.0*sm[0]) / (we_dt*sm[3])

        print 'we_{} -- kAB: {}, kBA: {}'.format(k,rABm,rBAm)

        we_err[k,0,:rAB.shape[0]] = logfunc(rAB) - log_target_rate[0]
        we_err[k,1,:rBA.shape[0]] = logfunc(rBA) - log_target_rate[1]

        f.close()

    we_err = np.abs(we_err)

    #we_err_avg = np.sqrt(np.mean(we_err**2,0))
    we_err_avg = np.sqrt(bn.nanmean(we_err**2,0))

    for i in xrange(2):
        we_err_avg[i,:] = smooth(we_err_avg[i,:],winsize_err,'flat')

    we_t = we_dt * we_nbins * we_target_count * np.arange(we_nframes)

    return we_err_avg, we_t
Developer: nrego, Project: westpa, Lines: 60, Source: error_plot.py

Example 2: kinematic_params

def kinematic_params(mech_x_l, time_list, smooth_window):
    mech_time_mat = np.matrix(time_list)
    tstep_size = .03333333333
    num_el = mech_time_mat.shape[1]
    uniform_time2 = np.cumsum(np.round((mech_time_mat[0,1:] - mech_time_mat[0,0:-1]) / tstep_size) * tstep_size)
    uniform_time2 = np.column_stack((np.matrix([0]), uniform_time2))

    mech_x_mat = np.matrix(mech_x_l)
    if uniform_time2.shape[1] != mech_x_mat.shape[1]:
        pdb.set_trace()

    mech_x_mat = np.matrix(smooth.smooth(mech_x_mat.A1, smooth_window,
                                         'blackman'))
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    vel = gradient(uniform_time2, mech_x_mat)
    uniform_time2 = uniform_time2[:,1:-1]

    vel = np.matrix(smooth.smooth(vel.A1, smooth_window, 'blackman'))
    mech_x_mat = mech_x_mat[:,smooth_window-1:-smooth_window+1]
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    acc = gradient(uniform_time2, vel)
    uniform_time2 = uniform_time2[:,1:-1]
    vel = vel[:,1:-1]
    mech_x_mat = mech_x_mat[:,2:-2]

    acc = np.matrix(smooth.smooth(acc.A1, smooth_window, 'blackman'))
    vel = vel[:,smooth_window-1:-smooth_window+1]
    mech_x_mat = mech_x_mat[:,smooth_window-1:-smooth_window+1]
    uniform_time2 = uniform_time2[:,smooth_window-1:-smooth_window+1]

    return mech_x_mat.A1.tolist(), vel.A1.tolist(), acc.A1.tolist(), uniform_time2.A1.tolist()
Developer: gt-ros-pkg, Project: hrl, Lines: 33, Source: mf_common.py

Example 3: smooth

    def smooth(self,smooth,downsample=True,**kwargs):
        """
        Smooth the spectrum by factor `smooth`.  


        Documentation from the :mod:`smooth` module:

        Parameters
        ----------
        downsample: bool
            Downsample the spectrum by the smoothing factor?
        """
        smooth = round(smooth)
        self.data = sm.smooth(self.data,smooth,downsample=downsample,**kwargs)

        if downsample:
            self.xarr = self.xarr[::smooth]
            if len(self.xarr) != len(self.data):
                raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
            if self.error is not None:
                self.error = sm.smooth(self.error,smooth,**kwargs)
            self.baseline.downsample(smooth)
            self.specfit.downsample(smooth)
    
            self._smooth_header(smooth)
Developer: migueldvb, Project: pyspeckit, Lines: 25, Source: classes.py
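A hedged usage sketch of the method above: assuming pyspeckit is installed and 'spectrum.fits' (an illustrative file name, not taken from the original snippet) is a spectrum pyspeckit can read, smoothing by an integer factor might look like this.

import pyspeckit

sp = pyspeckit.Spectrum('spectrum.fits')   # load a 1-D spectrum
sp.smooth(4)                               # smooth by a factor of 4 and downsample (the default)
assert len(sp.xarr) == len(sp.data)        # downsampling keeps the X and Y arrays aligned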

Example 4: get_matrix_for_textfile

def get_matrix_for_textfile(data, img_size_crop_x, img_size_crop_y, stim, zz, time_start, time_end,\
    f_f_flag, dff_start, dff_end,stim_start,stim_end,filename, pp):
    
    #Cropping unwanted pixels as specified by user
    if img_size_crop_x!= 0 and img_size_crop_y!=0:
        print "Cropping x and y pixels.."
        data1 = data[img_size_crop_y:-img_size_crop_y, img_size_crop_x:-img_size_crop_x]
    elif img_size_crop_x==0 and img_size_crop_y!=0:
        print "Cropping only y pixels.."
        data1 = data[img_size_crop_y:-img_size_crop_y, :]
    elif img_size_crop_x!=0 and img_size_crop_y==0:
        print "Cropping only x pixels.."
        data1 = data[:, img_size_crop_x:-img_size_crop_x]
    else:
        data1 = data
   
    print 'Creating array from stack for Stim ' + stim + ' Z='+ str(filename)
    temp_matfile_for_thunder = np.zeros([np.size(data1, axis=0)*np.size(data1, axis=1),3+(time_end-time_start+1)+smooth_window-2], dtype=np.int)

    count = 0    
    for yy in xrange(0,np.size(data1, axis=1)):
        for xx in xrange(0,np.size(data1, axis=0)): 
            temp_matfile_for_thunder[count,0] = xx+1;
            temp_matfile_for_thunder[count,1] = yy+1;
            temp_matfile_for_thunder[count,2] = zz;
            # Create delta f/f values if necessary
            if f_f_flag==0:
                temp_matfile_for_thunder[count,3:] = smooth(data1[xx,yy,time_start:time_end],smooth_window,'hanning')
            else:
                temp_matfile_for_thunder[count,3:] = smooth(((data1[xx,yy,time_start:time_end]-np.mean(data1[xx,yy,dff_start:dff_end]))/np.std(data1[xx,yy,dff_start:dff_end])),smooth_window,'hanning')
            count = count+1 
    
    #Plot heatmap for validation    
    with sns.axes_style("white"):
        A = temp_matfile_for_thunder[:,3:]    
        B = np.argsort(np.mean(A, axis=1))  
        C = A[B,:]
        if f_f_flag == 1: #Plot with correct clim if dff is true
            fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet',vmin=-5, vmax=5)
        else:
            fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet')
        plot_vertical_lines(stim_start-time_start,stim_end-time_start)
        labels, locs = plt.xticks()
        labels1 = [int(item) for item in labels]
        labels2 = [str(int(item)+time_start) for item in labels]
        plt.xticks((labels1),(labels2))
        plt.xlim(0,(time_end-time_start))
        plt.title('Sorted Heatmap Z='+str(filename))
        plt.colorbar()
        fig2 = plt.gcf()
        pp.savefig(fig2)
        plt.close()
        A = None    

    return temp_matfile_for_thunder
Developer: seethakris, Project: Thunder_for_OBData, Lines: 55, Source: create_textfile_for_thunder_stacks.py

Example 5: get_matrix_for_textfile

def get_matrix_for_textfile(data_mat, name_for_saving_files, num_z_planes, time_start,time_end, f_f_flag, dff_start, dff_end, stimulus_on_time, stimulus_off_time, smooth_window, pp):
    
    #Save as numpy array
    print 'Creating array from stack for ' + name_for_saving_files
    if smooth_window!=0:                
        temp_numpy_array_for_thunder = np.zeros([np.size(data_mat, axis=0)*np.size(data_mat, axis=1)*np.size(data_mat,axis=2),3+(time_end-time_start+1)+smooth_window-2], dtype=np.int)
    else:
        temp_numpy_array_for_thunder = np.zeros([np.size(data_mat, axis=0)*np.size(data_mat, axis=1)*np.size(data_mat,axis=2),3+(time_end-time_start)], dtype=np.int)

    print np.shape(temp_numpy_array_for_thunder)    
    count = 0  
    count1 = 0 
    for zz in xrange(0,np.size(num_z_planes,axis=0)):
        for yy in xrange(0,np.size(data_mat, axis=1)):
            for xx in xrange(0,np.size(data_mat, axis=0)): 
                temp_numpy_array_for_thunder[count,0] = xx+1;
                temp_numpy_array_for_thunder[count,1] = yy+1;
                temp_numpy_array_for_thunder[count,2] = num_z_planes[zz];
                # Create delta f/f values if necessary
                if smooth_window!=0:                
                    if f_f_flag==0:
                        temp_numpy_array_for_thunder[count,3:] = smooth(data_mat[xx,yy,zz,time_start:time_end],smooth_window,'hanning')
                    else:
                        temp_numpy_array_for_thunder[count,3:] = smooth(((data_mat[xx,yy,zz,time_start:time_end]-np.mean(data_mat[xx,yy,zz,dff_start:dff_end]))/np.std(data_mat[xx,yy,zz,dff_start:dff_end])),smooth_window,'hanning')
                else:
                    if f_f_flag==0:
                        temp_numpy_array_for_thunder[count,3:] = data_mat[xx,yy,zz,time_start:time_end]
                    else:
                        temp_numpy_array_for_thunder[count,3:] = ((data_mat[xx,yy,zz,time_start:time_end]-np.mean(data_mat[xx,yy,zz,dff_start:dff_end]))/np.std(data_mat[xx,yy,zz,dff_start:dff_end]))
                count = count+1 
                
        #Plot heatmap for validation    
        with sns.axes_style("white"):
            A = temp_numpy_array_for_thunder[count1:count-1,3:]
            count1 = count-1
            B = np.argsort(np.mean(A, axis=1))  
            C = A[B,:]
            if f_f_flag == 1: #Plot with correct clim if dff is true
                fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet',vmin=-5, vmax=5)
            else:
                fig2 = plt.imshow(C[-1000:,:],aspect='auto', cmap='jet')
            
            plot_vertical_lines_onset(stimulus_on_time)
            plot_vertical_lines_offset(stimulus_off_time)
            plt.title(name_for_saving_files +' Z='+ str(zz+1))
            plt.colorbar()
            fig2 = plt.gcf()
            pp.savefig(fig2)
            plt.close()


    return temp_numpy_array_for_thunder
Developer: seethakris, Project: Olfactory-Chip-Scripts, Lines: 52, Source: create_textfiles.py

Example 6: smooth

    def smooth(self, smooth, **kwargs):
        """
        Smooth the spectrum by factor "smooth".  Options are defined in sm.smooth

        because 'Spectra' does not have a header attribute, don't do anything to it...
        """
        smooth = round(smooth)
        self.data = sm.smooth(self.data, smooth, **kwargs)
        self.xarr = self.xarr[::smooth]
        if len(self.xarr) != len(self.data):
            raise ValueError("Convolution resulted in different X and Y array lengths.  Convmode should be 'same'.")
        self.error = sm.smooth(self.error, smooth, **kwargs)
        self.baseline.downsample(smooth)
        self.specfit.downsample(smooth)
Developer: soylentdeen, Project: pyspeckit, Lines: 14, Source: classes.py

Example 7: pop_spectra

def pop_spectra(self,row,col):
    self.laserFlashFile.switchOffHotPixTimeMask()
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    try:
        last_ind = np.where(n_inbin>5)[0][-1]
    except IndexError:
        last_ind=len(n_inbin)-1
    expTime = self.laserFlashFile.hotPixTimeMask.expTime
    self.axes.plot(phase_bins,n_inbin*1.0/expTime, 'k.',alpha=0.5,label="raw")
    self.axes.set_xlim(phase_bins[(np.where(n_inbin >= 3))[0][0]],phase_bins[last_ind])

    self.laserFlashFile.switchOnHotPixTimeMask(reasons=['laser not on','hot pixel'])
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins_1=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    intTime_1 = self.laserFlashFile.hotPixTimeMask.getEffIntTime(row,col)
    smoothed_1 = np.asarray(smooth.smooth(n_inbin, 30, 'hanning'))
    self.axes.plot(phase_bins_1,n_inbin*1.0/intTime_1, 'b.',label="laser on, hotpix masked")
    
    self.laserFlashFile.switchOnHotPixTimeMask(reasons=['laser not off','hot pixel'])
    dataDict = self.laserFlashFile.getTimedPacketList(row,col,timeSpacingCut=0.001)
    peakHeights=np.asarray(dataDict['peakHeights'])*1.0
    baselines=np.asarray(dataDict['baselines'])*1.0
    peakHeights-=baselines
    biggest_photon = int(min(peakHeights))
    n_inbin,phase_bins=np.histogram(peakHeights,bins=np.abs(biggest_photon),range=(biggest_photon,0))
    phase_bins_2=(phase_bins+(phase_bins[1]-phase_bins[0])/2.0)[:-1]
    intTime_2 = self.laserFlashFile.hotPixTimeMask.getEffIntTime(row,col)
    smoothed_2 = np.asarray(smooth.smooth(n_inbin, 30, 'hanning'))
    self.axes.plot(phase_bins_2,n_inbin*1.0/intTime_2, 'g.',label="laser off, hotpix masked")
    
    self.axes.legend(loc=2)
    
    self.axes.plot(phase_bins_1, smoothed_1*1.0/intTime_1,'b-')
    self.axes.plot(phase_bins_2,smoothed_2*1.0/intTime_2,'g-')
    
    self.axes.set_xlabel('phase [ADC/DAC units]')
    self.axes.set_ylabel('Exposure time adjusted count rate [photons/s]')
Developer: RupertDodkins, Project: ARCONS-pipeline-1, Lines: 48, Source: flashPopup.py

Example 8: loadsimdata

def loadsimdata(filename):
    # load data and apply efficiency
    itof = hh.load(filename, 'I(tof)')
    eff = hh.load('mon1-eff.h5')
    i = itof.I * eff.I
    itof.I[:] = i

    # clean up
    itof[(0.019,None)].I = 0
    
    #
    x = itof.tof
    y = itof.I

    # convert to counts/10 mus
    #  counts * 10, bins / 10
    from smooth import smooth
    y = smooth(y, window_len=10, window='flat')
    # y = y[:len(x)]
    # y *= 10.
    indexes = range(5, len(x), 10)
    x = x[indexes]
    y = y[indexes] * 10

    # convert to arcs run #5
    # according to ARCS_runinfo.xml of run #5
    # beam power 110kW
    # total run time is 22590/30 seconds
    # the mc simulated was 2MW, 60Hz
    y *= 22590/30*110e3/(2e6/60)

    # extra scaling factor, why?
    y *= 0.83
    return x,y
Developer: mcvine, Project: resources-old, Lines: 34, Source: plot3.py

Example 9: load

 def load(self,name):
     """
     Returns a two dimensional numpy array where a[:,0] is
     wavelength in Angstroms and a[:,1] is flux in 
     counts/sec/angstrom/cm^2
     
     Noisy spectra are smoothed with window_len in the .txt file.
     Ergs and AB Mag units are automatically converted to counts.
     """
     fname = self.objects[name]['dataFile']
     fullFileName = os.path.join(self.this_dir,"data",fname[0])
     if (string.count(fullFileName,"fit")):
         a = self.loadSdssSpecFits(fullFileName)
     else:
         a = numpy.loadtxt(fullFileName)
         
     len = int(self.objects[name]['window_len'][0])
     if len > 1:
         a[:,1] = smooth.smooth(a[:,1], window_len=len)[len/2:-(len/2)]
     try:
         fluxUnit = self.objects[name]['fluxUnit'][0]
         scale = float(fluxUnit.split()[0])
         a[:,1] *= scale
     except ValueError:
         print "error"
     ergs = string.count(self.objects[name]['fluxUnit'][0],"ergs")
     if ergs:
         a[:,1] *= (a[:,0] * self.k)
     mag = string.count(self.objects[name]['fluxUnit'][0],"mag")
     if mag:
         a[:,1] = (10**(-2.406/2.5))*(10**(-0.4*a[:,1]))/(a[:,0]**2) * (a[:,0] * self.k)
     return a
Developer: stoughto, Project: MKIDStd, Lines: 32, Source: MKIDStd.py

Example 10: speedBias

    def speedBias(self, bias_type='normal', debug=False):
        '''
        Calculates the unsigned speed bias quickly without having to
        calculate everything else.
        '''
        if debug: print 'Calculating bias on unsigned speed...'

        # grab important variables
        mod_u = self.Variables.struct['mod_timeseries']['ua']
        mod_v = self.Variables.struct['mod_timeseries']['va']
        mod_spd = np.sqrt(mod_u**2 + mod_v**2)
        obs_u = self.Variables.struct['obs_timeseries']['ua']
        obs_v = self.Variables.struct['obs_timeseries']['va']
        obs_spd = np.sqrt(obs_u**2 + obs_v**2)

        # change times to datetime times
        obs_time = self.Variables.struct['obs_time']
        mod_time = self.Variables.struct['mod_time']
        obs_dt, mod_dt = [], []
        for i in np.arange(obs_time.size):
            obs_dt.append(dn2dt(obs_time[i]))
        for i in np.arange(mod_time.size):
            mod_dt.append(dn2dt(mod_time[i]))

        # perform interpolation and grab bias
        (mod_sp_int, obs_sp_int, step_sp_int, start_sp_int) = \
            smooth(mod_spd, mod_dt, obs_spd, obs_dt,
                   debug=debug)
        stats = TidalStats(mod_sp_int, obs_sp_int, step_sp_int,
                           start_sp_int, type='speed', debug=debug)
        bias = stats.getBias(bias_type=bias_type)
        return bias
Developer: LaVieEnRoux, Project: PySeidon, Lines: 32, Source: validationClass.py

Example 11: powerRMSE

    def powerRMSE(self, debug=False):
        '''
        Calculates the RMSE quickly without having to calculate everything
        else.
        '''
        # grab important variables
        mod_u = self.Variables.struct['mod_timeseries']['ua']
        mod_v = self.Variables.struct['mod_timeseries']['va']
        mod_spd = np.sqrt(mod_u**2 + mod_v**2)
        mod_pow = 0.5 * rho**3 * mod_spd**3

        obs_u = self.Variables.struct['obs_timeseries']['ua']
        obs_v = self.Variables.struct['obs_timeseries']['va']
        obs_spd = np.sqrt(obs_u**2 + obs_v**2)
        obs_pow = 0.5 * rho**3 * obs_spd**3

        # change times to datetime times
        obs_time = self.Variables.struct['obs_time']
        mod_time = self.Variables.struct['mod_time']
        obs_dt, mod_dt = [], []
        for i in np.arange(obs_time.size):
            obs_dt.append(dn2dt(obs_time[i]))
        for i in np.arange(mod_time.size):
            mod_dt.append(dn2dt(mod_time[i]))

        # perform interpolation and grab RMSE
        (mod_pw_int, obs_pw_int, step_pw_int, start_pw_int) = \
            smooth(mod_pow, mod_dt, obs_pow, obs_dt,
                   debug=debug)
        stats = TidalStats(mod_pw_int, obs_pw_int, step_pw_int,
                           start_pw_int, type='power', debug=debug)
        RMSE = stats.getRMSE()
        return RMSE
Developer: LaVieEnRoux, Project: PySeidon, Lines: 33, Source: validationClass.py

Example 12: smooth_traces

def smooth_traces(browser):
    """ Smooth traces
    
    Options:
    1) Window type
    2) Window length
    """
    
    # Get options
    window = str(browser.ui.toolStackedWidget.smoothComboBox.currentText())
    window_len = float(browser.ui.toolStackedWidget.smoothLength.text())
    
    # Get data and widgets
    plotWidget = browser.ui.dataPlotsWidget
    toolsWidget = browser.ui.toolStackedWidget
    
    # Smooth data
    results = [] 
    for item in plotWidget.plotDataItems:  
        # Copy attributes and add some new ones
        attrs = item.attrs
        attrs['smooth_window_type'] = window
        attrs['smooth_window_length'] = window_len
        
        # Smooth
        traceSmooth = smooth.smooth(item.data, window_len=window_len, window=window)
        results.append([item.text(0), traceSmooth, attrs])
        
        # Plot smoothed trace
        x = np.arange(0, len(traceSmooth)*item.attrs['dt'], item.attrs['dt'])
        plotWidget.plot(x, traceSmooth, pen=pg.mkPen('#F2EF44', width=1))

    # Store results
    parentText = plotWidget.plotDataItems[0].parent().text(0) # Assumes all plotted data have the same parent
    aux.save_results(browser, parentText+'_smooth', results)           
Developer: ineuron, Project: NeuroDAQ-Analysis, Lines: 35, Source: basic.py

Example 13: edgedetect

def edgedetect(image):
	global pixelsum, pixelcount
	oldimage=image
	if image[0]=="P3":
		#converts file to grayscale and smoothes it
		import smooth
		image=smooth.smooth(image)
	file2=oldimage[:]
	xyarray=image[2].split(" ")
	width=int(xyarray[0])
	height=int(xyarray[1])
	count=0
	#loop through all non-edge pixels
	for x in range(1,width-1):
		for y in range(1,height-1):
			#calculate the horizontal gradient of the current pixel
			hg=int(-1*getpixel(x-1,y-1,image)+0*getpixel(x,y-1,image)+1*getpixel(x+1,y-1,image)-2*getpixel(x-1,y,image)+0*getpixel(x,y,image)+2*getpixel(x+1,y,image)-1*getpixel(x-1,y+1,image)+0*getpixel(x,y+1,image)+1*getpixel(x+1,y+1,image))
			#calculate the vertical gradient of the current pixel
			vg=int(1*getpixel(x-1,y-1,image)+2*getpixel(x,y-1,image)+1*getpixel(x+1,y-1,image)+0*getpixel(x-1,y,image)+0*getpixel(x,y,image)+0*getpixel(x+1,y,image)-1*getpixel(x-1,y+1,image)-2*getpixel(x,y+1,image)-1*getpixel(x+1,y+1,image))
			#if the threshold is reached, mark the pixel
			if(abs(hg)+abs(vg)>threshold):
				#count the number of pixels marked as edges
				pixelcount+=1
				if oldimage[0]=="P3": #image is color, mark as red
					file2[(width*y+x)*3+4]=255
					file2[(width*y+x)*3+5]=0
					file2[(width*y+x)*3+6]=0
				else: #image is grayscale, mark as white
					file2[width*y+x+4]=255
	pixelsum=width*height
	return file2
Developer: ComputerDruid, Project: ai2-edgedetection, Lines: 31, Source: edgedetect.py

Example 14: get_cutoff

def get_cutoff(data, w_len=50, m=30, guess=None):
    if guess is None:
        approx = get_threshold(data, m)
    else:
        approx = guess

    smoothData = smooth(data)
    return get_first_in_range(smoothData[approx - w_len:approx + w_len],
                              approx, w_len)
Developer: EdwardBetts, Project: diploma-thesis-code, Lines: 9, Source: pedestal_prop_time.py

Example 15: compareTG

def compareTG(data, plot=False, save_csv=False, debug=False, debug_plot=False):
    """
    Does a comprehensive comparison between tide gauge height data and
    modeled data.

    Input:
       - data = dictionary containing all necessary tide gauge and model data.
    Outputs
       - elev_suite = dictionary of useful statistics
    Options:
       - plot = boolean flag for plotting results
       - save_csv = boolean flag for saving statistical benchmarks in csv file
    """
    if debug: print "CompareTG..."
    # load data
    mod_elev = data['mod_timeseries']['elev']
    obs_elev = data['obs_timeseries']['elev']
    obs_datenums = data['obs_time']
    mod_datenums = data['mod_time']
    gear = data['type'] # Type of measurement gear (drifter, adcp,...)
    #TR: comment out
    #mod_harm = data['elev_mod_harmonics']

    # Save path & create folder
    name = data['name']
    save_path = name.split('/')[-1].split('.')[0]+'/'
    while exists(save_path):
        save_path = save_path[:-1] + '_bis/'
    mkdir(save_path)


    # convert times and grab values
    obs_time, mod_time = [], []
    for i, v in enumerate(obs_datenums):
        obs_time.append(dn2dt(v))
    for j, w in enumerate(mod_datenums):
        mod_time.append(dn2dt(w))

    if debug: print "...check if they line up in the time domain..."
    if (mod_time[-1] < obs_time[0] or obs_time[-1] < mod_time[0]):
        raise PyseidonError("---time periods do not match up---")

    else:

        if debug: print "...interpolate timeseries onto a common timestep..."
        (mod_elev_int, obs_elev_int, step_int, start_int) = \
            smooth(mod_elev, mod_time, obs_elev, obs_time,
                   debug=debug, debug_plot=debug_plot)

    elev_suite = tidalSuite(gear, mod_elev_int, obs_elev_int, step_int, start_int,
                            [], [], [], [], [], [],
                            kind='elevation', plot=plot, save_csv=save_csv, save_path=save_path,
                            debug=debug, debug_plot=debug_plot)

    if debug: print "...CompareTG done."

    return elev_suite
Developer: GrumpyNounours, Project: PySeidon, Lines: 57, Source: compareData.py


Note: The smooth.smooth function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.