

Python UnivariateSpline.antiderivative Method Code Examples

This article collects typical usage examples of the scipy.interpolate.UnivariateSpline.antiderivative method in Python. If you are wondering what UnivariateSpline.antiderivative does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the containing class, scipy.interpolate.UnivariateSpline.


Five code examples of the UnivariateSpline.antiderivative method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
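
Before looking at the community examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what antiderivative() returns: a new spline object whose derivative is the original spline, so differences of its values approximate definite integrals.

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0, np.pi, 50)
spl = UnivariateSpline(x, np.sin(x), s=0)   # s=0 -> interpolating spline

F = spl.antiderivative()                    # new spline with F' == spl
print(F(np.pi) - F(0))                      # ~2.0, the integral of sin(x) over [0, pi]
print(spl.integral(0, np.pi))               # same value from the built-in definite integral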

Example 1: integral

# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Or: from scipy.interpolate.UnivariateSpline import antiderivative [as alias]
def integral(x, y, I, k=10):
    """
    Integrate y = f(x) for x = 0 to a such that the integral = I
    I can be an array
    """
    I = np.atleast_1d(I)

    f = UnivariateSpline(x, y, s=k)

    # Integrate as a function of x
    F = f.antiderivative()
    Y = F(x)

    a = []
    for intval in I:
        F2 = UnivariateSpline(x, Y/Y[-1] - intval, s=0)
        a.append(F2.roots())

    return np.hstack(a)
Developer: kgullikson88, Project: General, Lines of code: 21, Source file: HelperFunctions.py
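
A possible way to exercise the helper above, assuming the integral function from Example 1 is defined in the same module (the data and the chosen fractions here are invented for illustration):

import numpy as np

x = np.linspace(0, 10, 500)
y = np.exp(-x)                                 # toy profile to integrate

# positions a where the normalized cumulative integral reaches 25%, 50% and 90%
# (k is forwarded to UnivariateSpline as the smoothing factor s; k=0 interpolates exactly)
a = integral(x, y, I=[0.25, 0.5, 0.9], k=0)
print(a)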

Example 2: TablePSF

# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Or: from scipy.interpolate.UnivariateSpline import antiderivative [as alias]

#......... part of the code omitted here .........

        self._dp_dr /= integral

        # Don't divide by 0
        EPS = 1e-6
        rad = np.clip(self._rad.radian, EPS, None)
        rad = Quantity(rad, 'radian')
        self._dp_domega = self._dp_dr / (2 * np.pi * rad)
        self._compute_splines(self._spline_kwargs)

    def broaden(self, factor, normalize=True):
        r"""Broaden PSF by scaling the offset array.

        For a broadening factor :math:`f` and the offset
        array :math:`r`, the offset array is scaled
        in the following way:

        .. math::
            r_{new} = f \times r_{old}
            \frac{dP}{dr}(r_{new}) = \frac{dP}{dr}(r_{old})

        Parameters
        ----------
        factor : float
            Broadening factor
        normalize : bool
            Normalize PSF after broadening
        """
        self._rad *= factor
        # We define broadening such that self._dp_domega remains the same
        # so we only have to re-compute self._dp_dr and the splines here.
        self._dp_dr = (2 * np.pi * self._rad * self._dp_domega).to('radian^-1')
        self._compute_splines(self._spline_kwargs)

        if normalize:
            self.normalize()

    def plot_psf_vs_rad(self, ax=None, quantity='dp_domega', **kwargs):
        """Plot PSF vs radius.

        TODO: describe PSF ``quantity`` argument in a central place and link to it from here.
        """
        import matplotlib.pyplot as plt
        ax = plt.gca() if ax is None else ax

        x = self._rad.to('deg')
        y = self.evaluate(self._rad, quantity)

        ax.plot(x.value, y.value, **kwargs)
        ax.loglog()
        ax.set_xlabel('Radius ({})'.format(x.unit))
        ax.set_ylabel('PSF ({})'.format(y.unit))

    def _compute_splines(self, spline_kwargs=DEFAULT_PSF_SPLINE_KWARGS):
        """Compute two splines representing the PSF.

        * `_dp_domega_spline` is used to evaluate the 2D PSF.
        * `_dp_dr_spline` is not really needed for most applications,
          but is available via `eval`.
        * `_cdf_spline` is used to compute integral and for normalisation.
        * `_ppf_spline` is used to compute containment radii.
        """
        from scipy.interpolate import UnivariateSpline

        # Compute spline and normalize.
        x, y = self._rad.value, self._dp_domega.value
        self._dp_domega_spline = UnivariateSpline(x, y, **spline_kwargs)

        x, y = self._rad.value, self._dp_dr.value
        self._dp_dr_spline = UnivariateSpline(x, y, **spline_kwargs)

        # We use the terminology for scipy.stats distributions
        # http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#common-methods

        # cdf = "cumulative distribution function"
        self._cdf_spline = self._dp_dr_spline.antiderivative()

        # ppf = "percent point function" (inverse of cdf)
        # Here's a discussion on methods to compute the ppf
        # http://mail.scipy.org/pipermail/scipy-user/2010-May/025237.html
        y = self._rad.value
        x = self.integral(Angle(0, 'rad'), self._rad)

        # Since scipy 1.0 the UnivariateSpline requires that x is strictly increasing
        # So only keep nodes where this is the case (and always keep the first one):
        x, idx = np.unique(x, return_index=True)
        y = y[idx]

        # Dummy values, for cases where one really doesn't have a valid PSF.
        if len(x) < 4:
            x = [0, 1, 2, 3]
            y = [0, 0, 0, 0]

        self._ppf_spline = UnivariateSpline(x, y, **spline_kwargs)

    def _rad_clip(self, rad):
        """Clip to radius support range, because spline extrapolation is unstable."""
        rad = Angle(rad, 'radian').radian
        rad = np.clip(rad, 0, self._rad[-1].radian)
        return rad
Developer: cdeil, Project: gammapy, Lines of code: 104, Source file: psf_table.py
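
The core idea of the TablePSF code above, stripped of the class machinery, is to treat the antiderivative of dP/dr as a cumulative distribution function (CDF) and a spline fitted the other way around as the percent point function (PPF). A rough standalone sketch of that pattern, with a toy Gaussian radial profile and names invented here:

import numpy as np
from scipy.interpolate import UnivariateSpline

rad = np.linspace(0, 1, 200)                       # offset in radian (toy grid)
dp_dr = rad * np.exp(-rad**2 / (2 * 0.1**2))       # unnormalized radial profile

dp_dr_spline = UnivariateSpline(rad, dp_dr, k=1, s=0)
cdf_spline = dp_dr_spline.antiderivative()         # cumulative containment vs radius

cdf = cdf_spline(rad) / cdf_spline(rad[-1])        # normalize so the last node is 1

# invert the CDF: keep strictly increasing nodes, then fit radius as a function of containment
x, idx = np.unique(cdf, return_index=True)
ppf_spline = UnivariateSpline(x, rad[idx], k=1, s=0)

print(ppf_spline(0.68))                            # approximate 68% containment radius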

Example 3: TablePSF

# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Or: from scipy.interpolate.UnivariateSpline import antiderivative [as alias]

#......... part of the code omitted here .........
        """
        radius = self._ppf_spline(fraction)
        return Angle(radius, 'radian').to('deg')

    def normalize(self):
        """Normalize PSF to unit integral.

        Computes the total PSF integral via the :math:`dP / d\theta` spline
        and then divides the :math:`dP / d\theta` array.
        """
        integral = self.integral()

        self._dp_dtheta /= integral

        # Don't divide by 0
        EPS = 1e-6
        offset = np.clip(self._offset.radian, EPS, None)
        offset = Quantity(offset, 'radian')
        self._dp_domega = self._dp_dtheta / (2 * np.pi * offset)
        self._compute_splines(self._spline_kwargs)

    def broaden(self, factor, normalize=True):
        r"""Broaden PSF by scaling the offset array.

        For a broadening factor :math:`f` and the offset
        array :math:`\theta`, the offset array is scaled
        in the following way:

        .. math::
            \theta_{new} = f \times \theta_{old}
            \frac{dP}{d\theta}(\theta_{new}) = \frac{dP}{d\theta}(\theta_{old})

        Parameters
        ----------
        factor : float
            Broadening factor
        normalize : bool
            Normalize PSF after broadening
        """
        self._offset *= factor
        # We define broadening such that self._dp_domega remains the same
        # so we only have to re-compute self._dp_dtheta and the splines here.
        self._dp_dtheta = (2 * np.pi * self._offset * self._dp_domega).to('radian^-1')
        self._compute_splines(self._spline_kwargs)

        if normalize:
            self.normalize()

    def plot_psf_vs_theta(self, quantity='dp_domega'):
        """Plot PSF vs offset.

        TODO: describe PSF ``quantity`` argument in a central place and link to it from here.
        """
        import matplotlib.pyplot as plt

        x = self._offset.to('deg')
        y = self.evaluate(self._offset, quantity)

        plt.plot(x.value, y.value, lw=2)
        plt.semilogy()
        plt.loglog()
        plt.xlabel('Offset ({0})'.format(x.unit))
        plt.ylabel('PSF ({0})'.format(y.unit))

    def _compute_splines(self, spline_kwargs=DEFAULT_PSF_SPLINE_KWARGS):
        """Compute two splines representing the PSF.

        * `_dp_domega_spline` is used to evaluate the 2D PSF.
        * `_dp_dtheta_spline` is not really needed for most applications,
          but is available via `eval`.
        * `_cdf_spline` is used to compute integral and for normalisation.
        * `_ppf_spline` is used to compute containment radii.
        """
        from scipy.interpolate import UnivariateSpline

        # Compute spline and normalize.
        x, y = self._offset.value, self._dp_domega.value
        self._dp_domega_spline = UnivariateSpline(x, y, **spline_kwargs)

        x, y = self._offset.value, self._dp_dtheta.value
        self._dp_dtheta_spline = UnivariateSpline(x, y, **spline_kwargs)

        # We use the terminology for scipy.stats distributions
        # http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#common-methods

        # cdf = "cumulative distribution function"
        self._cdf_spline = self._dp_dtheta_spline.antiderivative()

        # ppf = "percent point function" (inverse of cdf)
        # Here's a discussion on methods to compute the ppf
        # http://mail.scipy.org/pipermail/scipy-user/2010-May/025237.html
        x = self._offset.value
        y = self._cdf_spline(x)
        self._ppf_spline = UnivariateSpline(y, x, **spline_kwargs)

    def _offset_clip(self, offset):
        """Clip to offset support range, because spline extrapolation is unstable."""
        offset = Angle(offset, 'radian').radian
        offset = np.clip(offset, 0, self._offset[-1].radian)
        return offset
Developer: tibaldo, Project: gammapy, Lines of code: 104, Source file: psf_table.py
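
Example 3 is an older revision of the same gammapy class; the main difference from Example 2 is that the PPF spline is built directly from (cdf, offset) pairs without first dropping duplicate CDF values. A small toy illustration of why the newer code filters with np.unique (the data is made up, and the exact error depends on the scipy version and smoothing settings):

import numpy as np
from scipy.interpolate import UnivariateSpline

offset = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
cdf = np.array([0.0, 0.6, 0.9, 1.0, 1.0])      # flat tail -> not strictly increasing

try:
    UnivariateSpline(cdf, offset, k=1, s=0)    # the Example 3 approach
except ValueError as err:
    print("direct inversion failed:", err)

# the Example 2 workaround: drop duplicate CDF values first
u, idx = np.unique(cdf, return_index=True)
ppf = UnivariateSpline(u, offset[idx], k=1, s=0)
print(ppf(0.75))                               # ~1.5 with linear interpolation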

Example 4: len

# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Or: from scipy.interpolate.UnivariateSpline import antiderivative [as alias]
    if(len(curr_data) <= 3):
        curr_data = np.concatenate([curr_data, np.zeros((3,num_params))])

    time = np.arange(0, len(curr_data), 1) # the sample 'times' (0 to number of samples)

    acc_X = curr_data[:,0]
    acc_Y = curr_data[:,1]
    acc_Z = curr_data[:,2]

    # fit splines and take the 1st and 2nd antiderivatives

    # the interpolation representation
    tck_X = UnivariateSpline(time, acc_X, s=0)

    # integrals
    tck_X.integral = tck_X.antiderivative()
    tck_X.integral_2 = tck_X.antiderivative(2)

    # the interpolation representation
    tck_Y = UnivariateSpline(time, acc_Y, s=0)

    # integrals
    tck_Y.integral = tck_Y.antiderivative()
    tck_Y.integral_2 = tck_Y.antiderivative(2)

    # the interpolation representation
    tck_Z = UnivariateSpline(time, acc_Z, s=0)

    # integrals
    tck_Z.integral = tck_Z.antiderivative()
    tck_Z.integral_2 = tck_Z.antiderivative(2)
Developer: galenwilkerson, Project: Handwriting-Recognition-using-acceleration-data, Lines of code: 33, Source file: draw_letters.py
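
Example 4 attaches the first and second antiderivatives of each acceleration axis to the spline objects; integrating acceleration once gives a velocity-like curve and twice a displacement-like curve (both only up to integration constants). A compact stand-alone version of the same pattern for a single axis, with synthetic data:

import numpy as np
from scipy.interpolate import UnivariateSpline

t = np.arange(0, 50, 1.0)                  # sample 'times', as in the snippet above
acc_x = np.sin(0.3 * t)                    # synthetic acceleration for one axis

tck_X = UnivariateSpline(t, acc_x, s=0)    # interpolating spline through the samples

vel_x = tck_X.antiderivative()(t)          # first integral  ~ velocity
pos_x = tck_X.antiderivative(2)(t)         # second integral ~ position

print(vel_x[-1], pos_x[-1])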

Example 5: preprocess

# Required import: from scipy.interpolate import UnivariateSpline [as alias]
# Or: from scipy.interpolate.UnivariateSpline import antiderivative [as alias]
def preprocess(filename, num_resamplings=25):

    # read data
    #filename = "../data/MarieTherese_jul31_and_Aug07_all.pkl"

    pkl_file = open(filename, 'rb')
    data1 = cPickle.load(pkl_file)
    num_strokes = len(data1)

    # get the unique stroke labels, map to class labels (ints) for later using dictionary
    stroke_dict = dict()
    value_index = 0
    for i in range(0, num_strokes):
        current_key = data1[i][0]
        if current_key not in stroke_dict:
            stroke_dict[current_key] = value_index
            value_index = value_index + 1

    # save the dictionary to file, for later use
    dict_filename = "../data/stroke_label_mapping.pkl"
    dict_file = open(dict_filename, 'wb')
    pickle.dump(stroke_dict, dict_file)

    # - smooth data
    #   for each stroke, get the vector of data, smooth/interpolate it over time,
    #   store sampling from smoothed signal in vector
    # - sample at regular intervals (1/30 of total time, etc.) -> input vector X

    num_params = len(data1[0][1][0])  # accelx, accely, etc.
    #num_params = 16  # accelx, accely, etc.

    # re-sample the interpolated spline this many times (25 or so seems ok, since most letters have this many points)

    # build an output array large enough to hold the vectors for each stroke and the (unicode -> int) stroke value (1 elt)
    #output_array = np.zeros((num_strokes, (num_resamplings_2 + num_resamplings) * num_params + 1))
    output_array = np.zeros((num_strokes, (5 * num_resamplings) * num_params + 1))
    print output_array.size

    print filename
    print num_params
    print num_resamplings_2
    print

    for i in range(0, num_strokes):

        # how far?
        if (i % 100 == 0):
            print float(i)/num_strokes

        # the array to store in (using original data and 2 derivs, 2 integrals)
        X_matrix = np.zeros((num_params, num_resamplings * 5))

        # the array to store reshaped resampled vector in
        X_2_vector_scaled = np.zeros((num_params, num_resamplings_2))

        # the array to store the above 2 concatenated
        #concatenated_X_X_2 = np.zeros((num_params, num_resamplings_2 + num_resamplings))
        concatenated_X_X_2 = np.zeros((num_params, num_resamplings * 5))

        # for each parameter (accelX, accelY, ...)

        # map the unicode character to int
        curr_stroke_val = stroke_dict[data1[i][0]]

        #print(len(curr_stroke))
        #print(curr_stroke[0])
        #print(curr_stroke[1])

        curr_data = data1[i][1]

        # fix if too short for interpolation - pad current data with 3 zeros
        if(len(curr_data) <= 3):
            curr_data = np.concatenate([curr_data, np.zeros((3, num_params))])

        time = np.arange(0, len(curr_data), 1)  # the sample 'times' (0 to number of samples)
        time_new = np.arange(0, len(curr_data), float(len(curr_data))/num_resamplings)  # the resampled time points

        for j in range(0, num_params):  # iterate through parameters

            signal = curr_data[:, j]  # one signal (accelx, etc.) to interpolate
            # interpolate the signal using a spline or so, so that arbitrary points can be used
            # (~30 seems reasonable based on data, for example)

            #tck = interpolate.splrep(time, signal, s=0)  # the interpolation representation
            tck = UnivariateSpline(time, signal, s=0)

            # sample the interpolation num_resamplings times to get values
            #resampled_data = interpolate.splev(time_new, tck, der=0)  # the resampled data
            resampled_data = tck(time_new)

            # scale data (center, norm)
            resampled_data = preprocessing.scale(resampled_data)

            # first integral
            tck.integral = tck.antiderivative()
            resampled_data_integral = tck.integral(time_new)

            # scale data (center, norm)
            resampled_data_integral = preprocessing.scale(resampled_data_integral)

#......... part of the code omitted here .........
Developer: galenwilkerson, Project: Handwriting-Recognition-using-acceleration-data, Lines of code: 103, Source file: preprocess_w_arc_lengths.py
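
The inner loop of Example 5 boils down to: fit an interpolating spline per signal, resample it on a fixed grid, standardize the samples, and repeat for the first antiderivative. A condensed sketch of that per-signal pipeline, assuming preprocessing refers to scikit-learn's sklearn.preprocessing (as the calls above suggest) and using a synthetic signal:

import numpy as np
from scipy.interpolate import UnivariateSpline
from sklearn import preprocessing

num_resamplings = 25
signal = np.random.randn(40)                   # one raw channel (e.g. accelX) of a stroke
time = np.arange(len(signal), dtype=float)
time_new = np.arange(0, len(signal), len(signal) / float(num_resamplings))

tck = UnivariateSpline(time, signal, s=0)      # the interpolation representation

resampled = preprocessing.scale(tck(time_new))                             # centered, unit-variance samples
resampled_integral = preprocessing.scale(tck.antiderivative()(time_new))   # same for the first integral

features = np.concatenate([resampled, resampled_integral])   # feature block for this channel
print(features.shape)                                        # (2 * num_resamplings,)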


Note: The scipy.interpolate.UnivariateSpline.antiderivative method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.