

Python interpolate.spline Function Code Examples

This article collects typical usage examples of the Python scipy.interpolate.spline function. If you are wondering exactly what spline does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen spline code examples are shown below, sorted by popularity by default.
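Before the examples, here is a minimal, self-contained sketch of the typical call pattern. Note that the procedural scipy.interpolate.spline(xk, yk, xnew) API has been removed from recent SciPy releases; on current versions, scipy.interpolate.make_interp_spline is the usual replacement. The sample data below is purely illustrative:

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline  # replacement for the removed scipy.interpolate.spline

# A few coarse sample points (illustrative data only).
x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])

# Dense grid on which to evaluate the smooth curve.
x_new = np.linspace(x.min(), x.max(), 200)

# Legacy API (old SciPy):  y_new = spline(x, y, x_new)
# Current equivalent: build a cubic B-spline and evaluate it on the dense grid.
y_new = make_interp_spline(x, y, k=3)(x_new)

plt.plot(x, y, 'o', label='samples')
plt.plot(x_new, y_new, '-', label='cubic spline')
plt.legend()
plt.show()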

Example 1: plot_word_graph

def plot_word_graph():
	global score_true,score_false
	accuracies = []
	n_pure = []
	data_size = []
	fig, ax = plt.subplots()
	axes = [ax, ax.twinx()]
	for thr in range(1500,2500):
		print thr
		mixed = []
		pure  = []
		threshold = float(thr)/1
		for similiarity_index in score_true:
			if similiarity_index > thr:
				pure.append(1)
			else:
				mixed.append(0)
		for similiarity_index in score_false:
			if similiarity_index > thr:
				pure.append(0)
			else:
				mixed.append(1)
		accuracies.append(float(pure.count(1)*100)/len(pure))
		n_pure.append(pure.count(1))
		data_size.append(len(pure))

	base = np.array([float(x)/1 for x in range(1500,2500)])
	thr = np.linspace(base.min(),base.max(),2000)
	accuracies_smooth = spline(base,accuracies,thr)
	n_pure_smooth = spline(base,n_pure,thr)
	data_size_smooth = spline(base,data_size,thr)
	axes[1].plot(thr,accuracies_smooth,'r')
	axes[0].plot(thr,n_pure_smooth,'b')
	axes[0].plot(thr,data_size_smooth,'g')
	plt.show()
Author: kautsiitd, Project: Unsupervised-Decomposition-of-a-Multi-Author-Document, Lines: 35, Source: words_method.py

Example 2: draw_CV_on_window_center

def draw_CV_on_window_center():
    x_ecg = np.array([-0.415263158, -0.336315789, -0.257368421, -0.178421053, -0.099473684, -0.035957895, -0.020526316, -0.015784211, 0.0, 0.036094737, 0.059157895, 0.069473684, 0.103842105, 0.188842105, 0.249368421, 0.373315789, 0.442473684, 0.468421053, 0.531842105, 0.576526316, 0.584736842, 0.648421053])
    y_ecg = np.array([-1414, -1738, -333, -1846, -981, 2585, 11990, 14476, 22367, 16746, 5720, -4333, -10386, -2819, -3143, -766, 3342, 3882, 1504, -1954, -2170, -2063])
    x_ecg_new = np.linspace(np.min(x_ecg), np.max(x_ecg), len(x_ecg) * 5)
    y_ecg_new = spline(x_ecg, y_ecg, x_ecg_new)

    x_CV = np.array(np.linspace(-0.475, 0.525, 41))
    # y_CV = [0.8811, 0.8757, 0.8649, 0.7838, 0.6595, 0.5297, 0.6054, 0.6270, 0.6378, 0.6270, 0.6486, 0.6378, 0.6378, 0.6216, 0.5892, 0.6324, 0.7027, 0.7243, 0.7351, 0.7351, 0.7081, 0.7135, 0.6270, 0.6270, 0.6486, 0.6486, 0.6378, 0.6216, 0.6000, 0.6270, 0.7892, 0.8270, 0.8378, 0.8054, 0.7405, 0.7297, 0.6541, 0.6811, 0.8378, 0.8595]
    y_CV = np.array([0.7135, 0.627, 0.627, 0.6486, 0.6486, 0.6378, 0.6216, 0.6, 0.627, 0.7892, 0.827, 0.8378, 0.8054, 0.7405, 0.7297, 0.6541, 0.6811, 0.8378, 0.8595, 0.8811, 0.8757, 0.8649, 0.7838, 0.6595, 0.5297, 0.6054, 0.627, 0.6378, 0.627, 0.6486, 0.6378, 0.6378, 0.6216, 0.5892, 0.6324, 0.7027, 0.7243, 0.7351, 0.7351, 0.7081, 0.7135])
    x_CV_new = np.linspace(np.min(x_CV), np.max(x_CV), len(x_CV) * 5)
    y_CV_new = spline(x_CV, y_CV, x_CV_new)

    fig, ax1 = plt.subplots()
    ax1.plot(x_ecg / 6, y_ecg / float(0.8 * np.max(y_ecg)), 'k-', linewidth=2.)
    ax1.set_xlabel(u'Время, сек')
    ax1.set_ylabel(u'Интенсивность', color='k')
    for tl in ax1.get_yticklabels():
        tl.set_color('k')

    ax2 = ax1.twinx()
    # ax2.plot(x_CV_new / 6, y_CV_new, 'r-', linewidth=2.)
    ax2.plot(x_CV / 6, y_CV, 'r-', linewidth=2.)
    ax2.plot(x_CV / 6, y_CV, 'r^')
    plt.ylim([0.1, 1.0])
    ax2.set_ylabel(u'Оценка 5х5-fold кросс-валидации', color='r')
    for tl in ax2.get_yticklabels():
        tl.set_color('r')
    plt.grid(True)
    # plt.show()
    plt.savefig(r'..\..\logs\CV_on_window_center.png')
Author: ilya-yamschikov, Project: ECG_MIPT, Lines: 30, Source: plots_final.py

Example 3: read_adp_data

    def read_adp_data(self, data, d):
        """reads in the extra data fro the adp format"""

        self.d_data = np.zeros([self.Nelements, self.Nelements, self.nr])
        # should be non symetrical combinations of 2
        for i in range(self.Nelements):
            for j in range(i, self.Nelements):
                self.d_data[i, j] = data[d:d + self.nr]
                d += self.nr
                
        self.q_data = np.zeros([self.Nelements, self.Nelements, self.nr])
        # should be non symetrical combinations of 2
        for i in range(self.Nelements):
            for j in range(i, self.Nelements):
                self.q_data[i, j] = data[d:d + self.nr]
                d += self.nr
    
        self.d = np.zeros([self.Nelements, self.Nelements], object)
        self.d_d = np.zeros([self.Nelements, self.Nelements], object)
        self.q = np.zeros([self.Nelements, self.Nelements], object)
        self.d_q = np.zeros([self.Nelements, self.Nelements], object)
    
        for i in range(self.Nelements):
            for j in range(i, self.Nelements):
                self.d[i, j] = spline(self.r[1:], self.d_data[i, j][1:], k=3)
                self.d_d[i, j] = self.deriv(self.d[i, j])
                self.q[i, j] = spline(self.r[1:], self.q_data[i, j][1:], k=3)
                self.d_q[i, j] = self.deriv(self.q[i, j])

                # make symmetrical
                if j != i:
                    self.d[j, i] = self.d[i, j]
                    self.d_d[j, i] = self.d_d[i, j]
                    self.q[j, i] = self.q[i, j]
                    self.d_q[j, i] = self.d_q[i, j]
Author: conwayje, Project: ase-python, Lines: 35, Source: eam.py

Example 4: prepare_mf

def prepare_mf(mpart, grid, mf_kwargs):
    M = np.linspace(np.log10(mpart), np.log10(grid.max()), 2000)
    mf_obj = MassFunction(M=M, **mf_kwargs)

    mf = mf_obj.dndm
    m_outside_range = mf_obj.mltm[0] + mf_obj.mgtm[-1]

    cumfunc = cumtrapz(10 ** M * mf, M, initial=0) * np.log(10)

    cdf = spline(M, cumfunc, k=3)
    icdf = spline(cumfunc, M, k=3)

    if MAKE_PLOTS:
        plt.clf()
        plt.plot(M, cumfunc)

        plt.plot(M, cdf(M))
        plt.savefig("cumfunc.pdf")

        plt.clf()
        mcumfunc = cumtrapz(10 ** (2 * M) * mf, dx=M[1] - M[0], initial=1e-20) * np.log(10)
        plt.plot(M, mcumfunc)
        plt.savefig("mcumfunc.pdf")

        # How much mass is above 10**12.5
        minvcumfunc = cumtrapz(10 ** (2 * M[::-1]) * mf[::-1], dx=M[1] - M[0]) * np.log(10)
        minvcumfunc = np.concatenate((np.array([minvcumfunc[0]]), minvcumfunc))
        minvcumfunc /= minvcumfunc[-1]
        plt.clf()
        plt.plot(M, minvcumfunc[::-1])
        plt.yscale('log')
        plt.grid(True)
        plt.savefig("minvcumfunc.pdf")

    return cdf, icdf, M, mf, m_outside_range
Author: savila, Project: pyfast, Lines: 35, Source: fast.py
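Example 4 builds both a forward spline cdf (mass to cumulative count) and an inverse spline icdf (cumulative count to mass) simply by swapping the x and y arguments; the inverse can then be fed uniform random numbers for inverse-transform sampling. A hedged sketch of that trick on toy data (the example's own imports are not shown, so InterpolatedUnivariateSpline is assumed here):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline

# Toy, strictly increasing cumulative curve on a log10-mass grid (illustrative only).
M = np.linspace(10.0, 15.0, 200)
cumfunc = 1.0 - np.exp(-(M - 10.0))
cumfunc /= cumfunc[-1]                      # normalise so the curve ends at 1

cdf = spline(M, cumfunc, k=3)               # mass -> cumulative fraction
icdf = spline(cumfunc, M, k=3)              # cumulative fraction -> mass

u = np.random.uniform(0.0, 1.0, 1000)
masses = icdf(u)                            # inverse-transform sampled masses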

Example 5: _lower_ngtm

    def _lower_ngtm(self, M, mass_function, cut):
        ### WE CALCULATE THE MASS FUNCTION BELOW THE COMPUTED RANGE ###
        # mass_function is logged already (not log10 though)
        m_lower = np.linspace(np.log(10 ** 3), np.log(M[0]), 500)
        if cut:  #since its been cut, the best we can do is a power law
            mf_func = spline(np.log(M), mass_function, k=1)
            mf = mf_func(m_lower)
        else:
            #We try to calculate the hmf as far as we can normally
            sigma_0 = tools.mass_variance(np.exp(m_lower), self._power_0, self.lnkh, self.cosmo_params['mean_dens'])
            sigma = sigma_0 * self.growth
            dlnsdlnm = tools.dlnsdlnm(np.exp(m_lower), sigma_0, self._power_0, self.lnkh, self.cosmo_params['mean_dens'])
            n_eff = tools.n_eff(dlnsdlnm)
            fsigma = fits(m_lower, n_eff, self.mf_fit, sigma, self.cosmo_params['delta_c'],
                          self.z, self.delta_halo, self.cosmo_params, self.user_fit, cut_fit=True).nufnu()()
            #fsigma = nufnu()
            dndm = fsigma * self.cosmo_params['mean_dens'] * np.abs(dlnsdlnm) / np.exp(m_lower) ** 2
            mf = np.log(np.exp(m_lower) * dndm)

            if np.isnan(mf[0]):  #Then we couldn't go down all the way, so have to do linear ext.
                mfslice = mf[np.logical_not(np.isnan(mf))]
                m_nan = m_lower[np.isnan(mf)]
                m_true = m_lower[np.logical_not(np.isnan(mf))]
                mf_func = spline(m_true, mfslice, k=1)
                mf[:len(mfslice)] = mf_func(m_nan)
        return m_lower, mf
Author: duducosmos, Project: hmf, Lines: 26, Source: hmf.py

Example 6: _upper_ngtm

    def _upper_ngtm(self, M, mass_function, cut):
        """Calculate the mass function above given range of `M` in order to integrate"""
        ### WE CALCULATE THE MASS FUNCTION ABOVE THE COMPUTED RANGE ###
        # mass_function is logged already (not log10 though)
        m_upper = np.linspace(np.log(M[-1]), np.log(10 ** 18), 500)
        if cut:  # since its been cut, the best we can do is a power law
            mf_func = spline(np.log(M), mass_function, k=1)
            mf = mf_func(m_upper)
        else:
            # We try to calculate the hmf as far as we can normally
            new_pert = copy.deepcopy(self)
            new_pert.update(M=np.log10(np.exp(m_upper)))
            mf = np.log(np.exp(m_upper) * new_pert.dndm)

            if np.isnan(mf[-1]):  # Then we couldn't get up all the way, so have to do linear ext.
                if np.isnan(mf[1]):  # Then the whole extension is nan and we have to use the original (start at 1 because 1 val won't work either)
                    mf_func = spline(np.log(M), mass_function, k=1)
                    mf = mf_func(m_upper)
                else:
                    mfslice = mf[np.logical_not(np.isnan(mf))]
                    m_nan = m_upper[np.isnan(mf)]
                    m_true = m_upper[np.logical_not(np.isnan(mf))]
                    mf_func = spline(m_true, mfslice, k=1)
                    mf[len(mfslice):] = mf_func(m_nan)
        return m_upper, mf
Author: tbs1980, Project: hmf, Lines: 25, Source: hmf.py

Example 7: animate

def animate(cpu):
    x = range(50)
    cpuinfo_g = cpuinfo()

    y_load_l = cpuinfo_g.load_sum_l
    y_load_b = cpuinfo_g.load_sum_b
    y_freq_l = cpuinfo_g.freq_l
    y_freq_b = cpuinfo_g.freq_b

    x_sm = np.array(x)
    y_sm_ll = np.array(y_load_l)
    y_sm_lb = np.array(y_load_b)
    y_sm_fl = np.array(y_freq_l)
    y_sm_fb = np.array(y_freq_b)

    x_smooth = np.linspace(x_sm.min(), x_sm.max(), 200)
    y_smooth_load_l = spline(x, y_sm_ll, x_smooth)
    y_smooth_load_b = spline(x, y_sm_lb, x_smooth)

    y_smooth_freq_l = spline(x, y_sm_fl, x_smooth)
    y_smooth_freq_b = spline(x, y_sm_fb, x_smooth)


    cpulock.acquire()
    load_sum_l.set_data(x_smooth, y_smooth_load_l)
    load_sum_b.set_data(x_smooth, y_smooth_load_b)

    freq_l.set_data(x_smooth, y_smooth_freq_l)
    freq_b.set_data(x_smooth, y_smooth_freq_b)
    cpulock.release()

    return load_sum_l, load_sum_b, freq_l, freq_b
Author: bigzz, Project: moniter, Lines: 32, Source: cpuinfo.py

Example 8: set_splines

    def set_splines(self):
        # this section turns the file data into three functions (and
        # derivative functions) that define the potential
        self.embedded_energy = np.empty(self.Nelements, object)
        self.electron_density = np.empty(self.Nelements, object)
        self.d_embedded_energy = np.empty(self.Nelements, object)
        self.d_electron_density = np.empty(self.Nelements, object)

        for i in range(self.Nelements):
            self.embedded_energy[i] = spline(self.rho,
                                             self.embedded_data[i], k=3)
            self.electron_density[i] = spline(self.r,
                                              self.density_data[i], k=3)
            self.d_embedded_energy[i] = self.deriv(self.embedded_energy[i])
            self.d_electron_density[i] = self.deriv(self.electron_density[i])

        self.phi = np.empty([self.Nelements, self.Nelements], object)
        self.d_phi = np.empty([self.Nelements, self.Nelements], object)

        # ignore the first point of the phi data because it is forced
        # to go through zero due to the r*phi format in alloy and adp
        for i in range(self.Nelements):
            for j in range(i, self.Nelements):
                self.phi[i, j] = spline(
                    self.r[1:],
                    self.rphi_data[i, j][1:] / self.r[1:], k=3)

                self.d_phi[i, j] = self.deriv(self.phi[i, j])

                if j != i:
                    self.phi[j, i] = self.phi[i, j]
                    self.d_phi[j, i] = self.d_phi[i, j]
Author: rchiechi, Project: QuantumParse, Lines: 32, Source: eam.py

Example 9: isentrope

    def isentrope(self, S):
        if self.S == S:
            return
        self.S = S

        # Make array of v values.  To be independent variable for ode
        # and splines.
        dv = (self.v_max - self.v_min)/1000
        v = np.arange(self.v_min-dv, self.v_max+1.5*dv, dv)
        # Allocate array for dependent variables
        PE = np.empty((2,len(v)))
        
        # Find initial state variables for ode
        v_1 = 1/self.rhoCJ
        E_1 = self.E_0*np.exp((S-self.S_0)/self.cv)
        P_1 = self.Ev2P(E_1, v_1)
        i_1 = np.searchsorted(v,[v_1])[0] # v[i_1-1] < v_1 < v[i_1]

        self.ode.set_initial_value([P_1,E_1],v_1)
        for i in range(i_1,len(v)):
            self.ode.integrate(v[i])
            PE[:,i] = self.ode.y
        self.ode.set_initial_value([P_1,E_1],v_1)
        for i in range(i_1-1,-1,-1):
            self.ode.integrate(v[i])
            PE[:,i] = self.ode.y
        from scipy.interpolate import InterpolatedUnivariateSpline as spline
        self.isentrope_v2E = spline(v,PE[1],k=3,s=0)
        self.isentrope_E2v = spline(PE[1,-1::-1],v)
        self.isentrope_P2v = spline(PE[0,-1::-1],v)
        return
Author: fraserphysics, Project: metfie, Lines: 31, Source: eos.py
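Note that this example does not actually call the procedural scipy.interpolate.spline: the import shown above renames InterpolatedUnivariateSpline to spline, so spline(x, y, k=3) returns a callable spline object rather than interpolated values (several other examples on this page, such as Examples 3 and 8, follow the same convention). A minimal sketch of that object-style usage, with made-up data:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline

v = np.linspace(0.5, 2.0, 50)       # illustrative tabulated data
E = 1.0 / v

E_of_v = spline(v, E, k=3)          # cubic interpolating spline *object*
print(E_of_v(1.3))                  # evaluate the interpolant at a point
print(E_of_v.derivative()(1.3))     # the derivative is itself a spline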

Example 10: __init__

    def __init__(self, **model_parameters):
        super(Tinker08, self).__init__(**model_parameters)


        if self.delta_halo not in self.delta_virs:
            A_array = np.array([self.params["A_%s" % d] for d in self.delta_virs])
            a_array = np.array([self.params["a_%s" % d] for d in self.delta_virs])
            b_array = np.array([self.params["b_%s" % d] for d in self.delta_virs])
            c_array = np.array([self.params["c_%s" % d] for d in self.delta_virs])

            A_func = spline(self.delta_virs, A_array)
            a_func = spline(self.delta_virs, a_array)
            b_func = spline(self.delta_virs, b_array)
            c_func = spline(self.delta_virs, c_array)

            A_0 = A_func(self.delta_halo)
            a_0 = a_func(self.delta_halo)
            b_0 = b_func(self.delta_halo)
            c_0 = c_func(self.delta_halo)
        else:
            A_0 = self.params["A_%s" % (int(self.delta_halo))]
            a_0 = self.params["a_%s" % (int(self.delta_halo))]
            b_0 = self.params["b_%s" % (int(self.delta_halo))]
            c_0 = self.params["c_%s" % (int(self.delta_halo))]


        self.A = A_0 * (1 + self.z) ** (-self.params["A_exp"])
        self.a = a_0 * (1 + self.z) ** (-self.params["a_exp"])
        alpha = 10 ** (-(0.75 / np.log10(self.delta_halo / 75)) ** 1.2)
        self.b = b_0 * (1 + self.z) ** (-alpha)
        self.c = c_0
Author: inonchiu, Project: hmf, Lines: 31, Source: fitting_functions.py

Example 11: semi_supervised_test2

def semi_supervised_test2(df, labeled_points, step):
    # for i in range(6):
    total_points = 600
    feature, label = seperate_feature_label(df)
    accuracy_for_supervise = []
    accuracy_for_semi_supervise = []
    x = []
    indices = np.arange(len(feature))
    label_indices = balanced_sample_maker(feature, label, labeled_points / len(label))
    unlabeled_indices = np.delete(indices, np.array(label_indices))
    # print(unlabeled_indices.size)
    # print(unlabeled_indices.size)
    rng = np.random.RandomState(0)
    rng.shuffle(unlabeled_indices)
    indices = np.concatenate((label_indices, unlabeled_indices[:total_points]))
    # n_total_samples = len(indices)

    for i in range(80):
        x.append(total_points)
        unlabeled_index = np.arange(total_points)[labeled_points:]
        # print(unlabeled_index.size)
        X = feature.iloc[indices]
        y = label.iloc[indices]
        y_train = np.copy(y)
        y_train[unlabeled_index] = -1
        # supervised learning
        classifier = KNeighborsClassifier(n_neighbors=6)
        classifier.fit(X.iloc[:labeled_points], y.iloc[:labeled_points])
        y_pred = classifier.predict(X.iloc[labeled_points:])
        true_labels = y.iloc[unlabeled_index]
        # print(confusion_matrix(true_labels,y_pred))
        print("%d labeled & %d unlabeled (%d total)"
              % (labeled_points, total_points - labeled_points, total_points))
        accuracy_for_supervise.append(accuracy_score(true_labels, y_pred))

        lp_model = label_propagation.LabelSpreading(gamma=1, kernel='knn', max_iter=300, n_neighbors=6)
        lp_model.fit(X, y_train)
        predicted_labels = lp_model.transduction_[unlabeled_index]
        # print('Iteration %i %s' % (i, 70 * '_'))
        accuracy_for_semi_supervise.append(accuracy_score(true_labels, predicted_labels))
        print('Semi-supervised learning:', accuracy_score(true_labels, predicted_labels))

        total_points += step  # print(unlabeled_indices[(total_points-50):total_points])
        indices = np.concatenate((indices, unlabeled_indices[(total_points - step):total_points]))

    x_sm = np.array(x)
    y_sm = np.array(accuracy_for_supervise)
    y1_sm = np.array(accuracy_for_semi_supervise)
    x_smooth = np.linspace(x_sm.min(), x_sm.max(), 200)
    y_smooth = spline(x, y_sm, x_smooth)
    y1_smooth = spline(x, y1_sm, x_smooth)
    sup, = plt.plot(x_smooth, y_smooth, label='Supervised learning with kNN')
    semi_l, = plt.plot(x_smooth, y1_smooth, label='Semi-supervised learning using Label Propagation')
    # plt.legend(handles=[sup, semi_l])
    plt.xlabel('Total samples')
    plt.ylabel('Accuracy')
    plt.title('Semi-supervised learning for labeled ' + str(labeled_points) + ' samples ')
    plt.show()
    return accuracy_score(true_labels, predicted_labels)
Author: Lilywu96, Project: activity_recognition_for_sensor, Lines: 59, Source: initial_training.py

Example 12: _p

    def _p(self, K, c):
        """
        The reduced dimensionless fourier-transform of the profile

        This function should not need to be called by the user in general.

        Parameters
        ----------
        K : float or array_like
            The unit-less wavenumber k*r_s

        c : float or array_like
            The concentration

        .. note :: This should be replaced by an analytic function if possible
        """
        c = np.atleast_1d(c)
        if K.ndim < 2:
            if len(K)!=len(c):
                K = np.atleast_2d(K).T # should be len(rs) x len(k)
            else:
                K = np.atleast_2d(K)
        minsteps = 100

        # if len(c)>50:
        #     C = np.linspace(c.min(),c.max(),50)
        # else:
        #     C = c
        #
        if K.size > 100:
            kk = np.logspace(np.log10(K.min()),np.log10(K.max()),100)
        else:
            kk = np.sort(K.flatten())

        res = np.zeros_like(K)
        intermediate_res = np.zeros((len(kk),len(c)))

        for ik, kappa in enumerate(kk):
            smallest_period = np.pi / kappa
            dx = smallest_period / 5

            nsteps = max(int(np.ceil(c.max() / dx)),minsteps)

            x, dx = np.linspace(0, c.max(), nsteps, retstep=True)
            spl = spline(x, x*self._f(x)*np.sin(kappa*x)/kappa)

            intg = spl.antiderivative()

            intermediate_res[ik,:] = intg(c) - intg(0)

        for ic, cc in enumerate(c):
            #print intermediate_res.shape, kk.shape, res.shape
            # For high K, intermediate_res can be negative, so we mask that.
            mask = intermediate_res[:,ic]>0
            spl = spline(np.log(kk[mask]),np.log(intermediate_res[mask,ic]),k=1)
            res[:,ic] = np.exp(spl(np.log(K[:,ic])))

        return res
Author: steven-murray, Project: halomod, Lines: 58, Source: profiles.py
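Example 12 relies on the spline's antiderivative() method: once the sampled integrand is wrapped in a spline, intg(c) - intg(0) evaluates the cumulative integral at many upper limits at once instead of re-running a quadrature for each value of c. A minimal sketch of that pattern on a toy integrand (again assuming InterpolatedUnivariateSpline, since the example's imports are not shown):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline

x = np.linspace(0.0, 10.0, 500)
f = spline(x, np.sin(x), k=3)        # spline through sampled integrand values
F = f.antiderivative()               # new spline: indefinite integral of f

upper = np.array([1.0, 2.5, 7.0])
print(F(upper) - F(0.0))             # integral of sin(x) from 0 to each upper limit
print(1.0 - np.cos(upper))           # exact values for comparison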

Example 13: __init__

    def __init__(self, symbol):
        """Atomic Setup (Pseudopotential data)
        data read from data/*_pp.dat files.
        TODO: expand documentation & cleanup
        """
        self.lref = 1

        fname = 'data/' + symbol.lower() + '_pp.dat'                
        datafile = pkg_resources.resource_stream(__name__, fname)
        self.symbol = symbol
        self.fname = fname
        
        # python doesn't like 1D-2 formating (convert to 1E-2)
        t = maketrans('D', 'E')
        data = [l.translate(t).split() for l in datafile]
        
        line=0
        self.n = int(data[line][0])
        self.dx = float(data[line][1])
        self.lmax = int(data[line][2])
        self.q = float(data[line][3])
        
        line=1 
        self.rl = np.array([float(r) for r in data[line]]) 
        self.radius = max(self.rl)
        
        m = self.n + 3
        v = np.array([[float(x) for x in line] for line in data[2:m]]).T
        u = np.array([[float(x) for x in line] for line in data[m::]]).T
        
        x = np.arange(self.n+1) * self.dx

        # create splines
        self.v_spline = {l:spline(x, v[l+1], s=0)
                         for l in xrange(self.lmax+1)}
        self.u_spline = {l:spline(x, u[l+1], s=0)
                         for l in xrange(self.lmax+1)}

        self.vnl_spline = {}
        self.psi_spline = {}

        psi = np.zeros_like(x)
        vnl = np.zeros_like(x)
            
        for l in xrange(self.lmax+1):
            psi[1::] = u[l+1,1::]/(x[1::]**(l+1)*np.sqrt((2*l+1)/(4*np.pi)))
            vnl[1::] = (v[l+1,1::]-v[self.lref+1, 1::])*psi[1::]
            psi[0] = psi[1]
            vnl[0] = vnl[1]

            self.psi_spline[l] = spline(x,psi,s=0)
            self.vnl_spline[l] = spline(x,vnl,s=0)
            

        self.L = [l for l in xrange(self.lmax+1) if l != self.lref]        
        self.LM = list(itertools.chain.from_iterable(
                range(l**2, (l+1)**2) for l in self.L))
        self.uVu = self.calculate_uVu()
Author: yidapa, Project: lfdft, Lines: 58, Source: setups.py

Example 14: HighPassFilter

def HighPassFilter(data, vel, width=5, linearize=False):
    """
    Function to apply a high-pass filter to data.
      Data must be in an xypoint container, and have linear wavelength spacing
      vel is the width of the features you want to remove, in velocity space (in cm/s)
      width is how long it takes the filter to cut off, in units of wavenumber
    """

    if linearize:
        original_data = data.copy()
        datafcn = spline(data.x, data.y, k=3)
        errorfcn = spline(data.x, data.err, k=3)
        contfcn = spline(data.x, data.cont, k=3)
        linear = DataStructures.xypoint(data.x.size)
        linear.x = np.linspace(data.x[0], data.x[-1], linear.size())
        linear.y = datafcn(linear.x)
        linear.err = errorfcn(linear.x)
        linear.cont = contfcn(linear.x)
        data = linear

    # Figure out cutoff frequency from the velocity.
    featuresize = 2 * data.x.mean() * vel / constants.c.cgs.value  # vel MUST be given in units of cm
    dlam = data.x[1] - data.x[0]  # data.x MUST have constant x-spacing
    Npix = featuresize / dlam

    nsamples = data.size()
    sample_rate = 1.0 / dlam
    nyq_rate = sample_rate / 2.0  # The Nyquist rate of the signal.
    width /= nyq_rate
    cutoff_hz = min(1.0 / featuresize, nyq_rate - width * nyq_rate / 2.0)  # Cutoff frequency of the filter

    # The desired attenuation in the stop band, in dB.
    ripple_db = 60.0

    # Compute the order and Kaiser parameter for the FIR filter.
    N, beta = kaiserord(ripple_db, width)
    if N % 2 == 0:
        N += 1

    # Use firwin with a Kaiser window to create a lowpass FIR filter.
    taps = firwin(N, cutoff_hz / nyq_rate, window=('kaiser', beta), pass_zero=False)

    # Extend data to prevent edge effects
    y = np.r_[data.y[::-1], data.y, data.y[::-1]]

    # Use lfilter to filter data with the FIR filter.
    smoothed_y = lfilter(taps, 1.0, y)

    # The phase delay of the filtered signal.
    delay = 0.5 * (N - 1) / sample_rate
    delay_idx = np.searchsorted(data.x, data.x[0] + delay)
    smoothed_y = smoothed_y[data.size() + delay_idx:-data.size() + delay_idx]
    if linearize:
        fcn = spline(data.x, smoothed_y)
        return fcn(original_data.x)
    else:
        return smoothed_y
Author: kgullikson88, Project: General, Lines: 57, Source: HelperFunctions.py
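The linearize branch of HighPassFilter shows a common preparatory step: splines resample irregularly spaced data onto an evenly spaced grid so the FIR filter sees a constant sample rate, and a final spline maps the smoothed result back onto the original grid. A stripped-down sketch of just the resampling step on synthetic data (InterpolatedUnivariateSpline is assumed as the spline in use):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline

# Strictly increasing but unevenly spaced x grid with noisy samples (synthetic data only).
x = 500.0 + np.cumsum(np.random.uniform(0.01, 0.05, 300))
y = np.sin(x) + 0.05 * np.random.randn(x.size)

datafcn = spline(x, y, k=3)                   # spline through the irregular samples
x_lin = np.linspace(x[0], x[-1], x.size)      # evenly spaced grid, same length
y_lin = datafcn(x_lin)                        # resampled values, ready for filtering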

Example 15: projected_corr_gal

    def projected_corr_gal(self):
        """
        Projected correlation function w(r_p).

        From Beutler 2011, eq 6.

        To integrate perform a substitution y = x - r_p.
        """
        lnr = np.log(self.r)
        lnxi = np.log(self.corr_gal)

        p = np.zeros(len(self.r))

        # Calculate correlation to higher scales for better integration
        if self.proj_limit is None:
            rlim = max(80.0, 5 * self.rmax)
        else:
            rlim = self.proj_limit

        print "RLIM, RMAX", rlim, self.rmax
        if rlim > self.rmax:
            upper_h = deepcopy(self)
            dr = self.r[1] / self.r[0]
            upper_h.update(rmin=self.rmax * dr, rmax=rlim, rnum=20)

            fit = spline(np.concatenate((self.r, upper_h.r)),
                         np.concatenate((self.corr_gal, upper_h.corr_gal)), k=3)  # [self.corr_gal > 0] maybe?
            print "FIT: ", fit(0.1)
        else:
            fit = spline(self.r, self.corr_gal, k=3)  # [self.corr_gal > 0] maybe?
            print "fit: ", fit(0.1)
        f_peak = 0.01
        a = 0

        for i, rp in enumerate(self.r):
            if a != 1.3 and i < len(self.r) - 1:
                # Get slope at rp (== index of power law at rp)
                ydiff = (lnxi[i + 1] - lnxi[i]) / (lnr[i + 1] - lnr[i])
                # if the slope is flatter than 1.3, it will converge faster, but to make sure, we cut at 1.3
                a = max(1.3, -ydiff)
                theta = self._get_theta(a)

            min_y = theta * f_peak ** 2 * rp

            # Get the upper limit for this rp
            lim = np.log10(rlim - rp)

            # Set the y vector for this rp
            y = np.logspace(np.log10(min_y), lim, 1000)

            # Integrate
            integ_corr = fit(y + rp)
            integrand = integ_corr / np.sqrt((y + 2 * rp) * y)
            p[i] = intg.simps(integrand, y) * 2

        return p
Author: prollejazz, Project: halomod, Lines: 56, Source: halo_model.py


Note: The scipy.interpolate.spline examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult the corresponding project's license before redistributing or reusing the code. Do not reproduce without permission.