This article collects typical usage examples of the numpy.gradient function in Python. If you are unsure how to call gradient, how it behaves, or what real code that uses it looks like, the curated examples below should help.
The following presents 15 code examples of the gradient function, sorted by popularity by default.
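Before the examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the numpy.gradient call patterns they rely on: a scalar spacing for uniformly sampled 1D data, a coordinate array for non-uniform samples (numpy >= 1.13), and one output array per axis for 2D input.

import numpy as np

# 1D, uniform sampling: the second argument is the scalar spacing
x = np.linspace(0.0, 2.0 * np.pi, 100)
y = np.sin(x)
dydx = np.gradient(y, x[1] - x[0])     # central differences inside, one-sided at the ends

# 1D, non-uniform sampling: pass the coordinate array itself (numpy >= 1.13)
dydx_nonuniform = np.gradient(y, x)

# 2D: one gradient array per axis, ordered axis 0 (rows) first, then axis 1 (columns)
z = np.add.outer(np.arange(5.0), np.arange(7.0)) ** 2
gy, gx = np.gradient(z)

Several of the examples below unpack the per-axis outputs of the 2D form, and a few pass a spacing or coordinate argument as the second positional argument.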
Example 1: test_1d_gaussian_slope_error
def test_1d_gaussian_slope_error(self):
    mirror_length = 200.0
    step = 1.0
    rms_slopes = 1.3e-7
    rms_heights = 1e-7
    correlation_length = 10.0

    x, f = simulate_profile_1D_gaussian(step=step,
                                        mirror_length=mirror_length,
                                        rms_heights=rms_heights,
                                        correlation_length=correlation_length,
                                        renormalize_to_slopes_sd=None)
    slopes = numpy.gradient(f, x[1] - x[0])
    print("test_1d_gaussian: test function: %s, Stdev (not normalized): HEIGHTS=%g.SLOPES=%g" % ("test_1d_gaussian_slope_error", f.std(), slopes.std()))

    x, f = simulate_profile_1D_gaussian(step=step,
                                        mirror_length=mirror_length,
                                        rms_heights=rms_heights,
                                        correlation_length=correlation_length,
                                        renormalize_to_slopes_sd=rms_slopes)
    slopes = numpy.gradient(f, x[1] - x[0])
    print("test_1d_gaussian: test function: %s, SLOPES Stdev (normalized to %g)=%g" % ("test_1d_gaussian_slope_error", rms_slopes, slopes.std()))

    assert numpy.abs(rms_slopes - slopes.std()) < 0.01 * numpy.abs(rms_slopes)

    if do_plot:
        from srxraylib.plot.gol import plot
        plot(x, slopes, title="test_1d_gaussian_slope_error", xtitle="Y", ytitle="slopes Z'")
Example 2: spline_do
def spline_do(x, y, ax, k=3, s=7, n=1, diff=True, plot=True, error=False):
    '''
    Fits a spline with degree k and smoothing factor s to x and y.
    Returns xs, ys, d_ys, dd_ys: the spline x grid, the spline values, and the
    first and second derivatives.
    '''
    # xs = np.array(x).astype(float)
    spl = UnivariateSpline(x, y, k=k, s=s)  # keep the spline separate from the smoothing factor s
    xs = np.linspace(x.values.min(), x.values.max(), len(x))
    ys = spl(xs)

    if plot:
        if not error:
            ax.plot(x, norm(y), '.', label='Data')
        ax.plot(xs, norm(ys), label='Spline fit')
        if error:
            ax.errorbar(x, norm(y), yerr=np.sqrt(norm(y)))

    d_ys = np.gradient(ys)
    dd_ys = np.gradient(d_ys)

    if diff:
        if plot:
            ax.plot(xs, norm(d_ys), '-.', label='1st derivative')
            # ax.plot(xs, norm(dd_ys), '--', label='2nd derivative')
            ax.plot(xs, norm(smooth(dd_ys, 81)),
                    '--', label='2nd derivative (smoothed)')
    if plot:
        ax.legend(loc=0)
    return xs, ys, d_ys, dd_ys
Example 3: get_beta_deff
def get_beta_deff(savepath, filename):
    filepath = os.path.join(savepath, filename)
    with open(filepath, 'r') as f1:
        # col1 = []  # typically mean x-position
        # col2 = []  # typically mean (x-position)^2
        col3 = []  # typically MSD-x as calculated from the above quantities
        for line in f1:
            ls = line.split()
            # Analytical data output is currently set at 3 cols.
            # Make sure you are using the correct data.
            # col1 += [float(ls[0])]
            # col2 += [float(ls[1])]
            col3 += [float(ls[2])]

    times = np.array(range(1, len(col3) + 1))  # create an array of times
    msd_x = np.array(col3)                     # create an array of msd_x

    # For effective diffusion:
    deff = np.true_divide(msd_x, 2 * times)

    # For the beta(t) plot: beta = d(log10 MSD) / d(log10 t),
    # so pass the log-time coordinates to np.gradient (numpy >= 1.13).
    log_times = np.log10(times)
    log_msd_x = np.log10(msd_x)
    dd = np.gradient(log_msd_x, log_times)

    return times, deff, log_times, dd, msd_x
Example 4: vorticity
def vorticity(x, y, u, v, coord_type='geographic'):
    """
    USAGE
    -----
    zeta = vorticity(x, y, u, v, coord_type='geographic')

    Calculates the vertical component 'zeta' (dv/dx - du/dy, in 1/s) of the
    relative vorticity vector from the 'u' and 'v' velocity arrays (in m/s)
    specified in spherical coordinates by the 'x' and 'y' 2D meshgrid-type
    arrays (in degrees).
    """
    x, y, u, v = map(np.array, (x, y, u, v))

    if coord_type == 'geographic':
        dx, dy = deg2m_dist(x, y)
    elif coord_type == 'cartesian':
        dy, _ = np.gradient(y)
        _, dx = np.gradient(x)
    elif coord_type == 'dxdy':
        dx, dy = x, y

    duy, _ = np.gradient(u)
    _, dvx = np.gradient(v)

    dvdx = dvx / dx
    dudy = duy / dy

    vrt = dvdx - dudy  # [1/s]
    return vrt
Example 5: airfoil
def airfoil(self):
    """Biconvex airfoil"""
    L = self.L
    N = self.N
    R = 210
    theta_arc = np.arcsin(L / 2 / R) * 2
    pi = np.pi

    theta_up = np.linspace((pi + theta_arc) / 2,
                           (pi - theta_arc) / 2, N)
    # theta_up = theta[::-1]
    theta_down = np.linspace((3 * pi - theta_arc) / 2,
                             (3 * pi + theta_arc) / 2, N)

    X_up = R * np.cos(theta_up)
    Y_up = R * np.sin(theta_up)
    X_down = R * np.cos(theta_down)
    Y_down = R * np.sin(theta_down)

    shift_r = X_up[0]
    X_up -= shift_r
    X_down -= shift_r
    shift_up = Y_up[0]
    shift_down = Y_down[0]
    Y_up = Y_up - shift_up
    Y_down = Y_down - shift_down

    X = np.concatenate((X_up, X_down))
    Y = np.concatenate((Y_up, Y_down))

    slope_up = np.gradient(Y_up, 1) / np.gradient(X_up, 1)
    slope_down = np.gradient(Y_down, 1) / np.gradient(X_down, 1)
    angle = np.arctan(np.concatenate((slope_up, slope_down)))

    return X, Y, angle
Example 6: extractAllDescriptors
def extractAllDescriptors(signal):
    """
    Extracts the descriptors expected for the analysis of a given audio file.
    """
    described = {}
    described['Silence'] = _silence = silence(signal)
    # Keep only the voiced part and normalize so that all signals are comparable.
    signal = signal[config.hopSize * _silence[0]:config.hopSize * _silence[1]] / np.max(signal)
    described['mfcc'] = mfccs(signal)
    described['Inharmonicity'] = inharmonicity_tesis(signal)
    described['Energy'] = energy(signal)
    described['LogAttackTime'] = log_attack_time(signal)
    described['Standard-Dev'] = standard_dev(signal)
    described['Variance'] = variance(signal)
    described['Skewness'] = skewness(signal)
    described['kurtosis'] = kurtosis(signal)
    # described['mfcc-1st'] = np.gradient(described['mfcc'])[1]
    # described['mfcc-2nd'] = np.gradient(described['mfcc-1st'])[1]
    described['Inharmonicity-1st'] = np.gradient(described['Inharmonicity'])
    described['Inharmonicity-2nd'] = np.gradient(described['Inharmonicity-1st'])
    described['mfcc-Std-f'], described['mfcc-Var-f'], described['mfcc-Skew-f'], described['mfcc-Kurt-f'] \
        = mfcc_std_frequency(described)
    return described
Example 7: read_HRRS_data
def read_HRRS_data(ff):
    """
    Read in a .dat file from SPARC high-res radiosonde data.
    Input ff is a string pointing to the full path of the desired file.
    """
    # here is a dict that gives bad values for different columns
    # alert: this is still incomplete
    badvals = {'Temp': ['999.0'], 'Alt': ['99.0', '99999.0'], 'Lat': ['999.000'], 'Lon': ['9999.000']}
    D = pd.read_csv(ff, skiprows=13, error_bad_lines=False, delim_whitespace=True, na_values=badvals)
    colnames = list(D.columns.values)

    # kick out the first two rows - they hold units and symbols
    D.drop(D.index[[0, 1]], inplace=True)

    # also make sure that lat, lon, pressure, altitude, and temp are numeric
    vars_to_float = ['Press', 'Temp', 'Lat', 'Lon', 'Alt']
    D[vars_to_float] = D[vars_to_float].astype(float)

    # compute the vertical gradient of potential temperature and, from that, buoyancy frequency
    P0 = 1000.0
    Rd = 286.9968933   # gas constant for dry air, J/(kg K)
    g = 9.80616        # acceleration due to gravity, m/s^2
    cp = 1005.0        # heat capacity at constant pressure, m^2/(s^2 K)
    theta = (D['Temp'] + 273.15) * (P0 / D['Press'])**(Rd / cp)  # includes Celsius-to-Kelvin conversion
    # d(theta)/dz on the (non-uniform) altitude grid (numpy >= 1.13 coordinate-array form)
    dthetadZ = np.gradient(theta, D['Alt'].values)
    D["N2"] = (g / theta) * dthetadZ

    return D
Example 8: exportGradientImage
def exportGradientImage(sigma=3.0):
    x, y = 5000, 3500
    size = 2000
    topleft = (x, y)
    bottomright = (x + size, y + size)
    image = getImageByName(imagefilename='../resources/images/registered-to-2008-07-24-09_55.tif',
                           topleft=topleft,
                           bottomright=bottomright)
    smooth = vigra.filters.gaussianSmoothing(image, sigma)
    smoothswap = smooth.swapaxes(0, 1)
    m, n = vigra.Image((2000, 2000)), vigra.Image((2000, 2000))

    # row-wise gradients of the smoothed image
    for i in range(size):
        grad = np.gradient(smooth[i])
        for j in range(len(grad)):
            m[i][j] = grad[j]

    # column-wise gradients, computed on the axis-swapped image
    for i in range(size):
        grad = np.gradient(smoothswap[i])
        for j in range(len(grad)):
            n[j][i] = grad[j]

    out = m + n
    vigra.impex.writeImage(vigra.colors.linearRangeMapping(out), '/home/max/Desktop/out.png')
    vigra.impex.writeImage(vigra.colors.linearRangeMapping(m), '/home/max/Desktop/m.png')
    vigra.impex.writeImage(vigra.colors.linearRangeMapping(n), '/home/max/Desktop/n.png')
    return smooth
Example 9: _threshold_gradient
def _threshold_gradient(im):
    """Indicate pixel locations with gradient below the bottom 10th percentile.

    Parameters
    ----------
    im : array
        The mean intensity images for each channel.
        Size: (num_channels, num_rows, num_columns).

    Returns
    -------
    array
        Binary values indicating whether the magnitude of the gradient is below
        the 10th percentile. Same size as im.
    """
    if im.shape[0] > 1:
        # Calculate directional relative derivatives
        _, g_x, g_y = np.gradient(np.log(im))
    else:
        # Calculate directional relative derivatives
        g_x, g_y = np.gradient(np.log(im[0]))
        g_x = g_x.reshape([1, g_x.shape[0], g_x.shape[1]])
        g_y = g_y.reshape([1, g_y.shape[0], g_y.shape[1]])

    gradient_magnitudes = np.sqrt((g_x ** 2) + (g_y ** 2))
    below_threshold = []
    for chan in gradient_magnitudes:
        threshold = mquantiles(chan[np.isfinite(chan)].flatten(), [0.1])[0]
        below_threshold.append(chan < threshold)
    return np.array(below_threshold)
Example 10: rect_guess
def rect_guess(self, data, x=None, **kwargs):
    if x is None:
        return
    ymin, ymax = min(data), max(data)
    xmin, xmax = min(x), max(x)
    ntest = min(2, len(data) // 5)
    step_up = (data[:ntest].mean() > data[-ntest:].mean())
    dydx = savitzky_golay(np.gradient(data) / np.gradient(x), 5, 2)
    cen1 = x[np.where(dydx == dydx.max())][0]
    cen2 = x[np.where(dydx == dydx.min())][0]
    if step_up:
        center1 = cen1  # + (xmax+xmin)/4.0)/2.
        center2 = cen2  # + 3*(xmax+xmin)/4.0)/2.
    else:
        center1 = cen2  # + (xmax+xmin)/4.0)/2.0
        center2 = cen1  # + 3*(xmax+xmin)/4.0)/2.0
    pars = self.make_params(amplitude=(ymax - ymin),
                            center1=center1, center2=center2)
    pars['%ssigma1' % self.prefix].set(value=(xmax - xmin) / 5.0, min=0.0)
    pars['%ssigma2' % self.prefix].set(value=(xmax - xmin) / 5.0, min=0.0)
    return update_param_vals(pars, self.prefix, **kwargs)
Example 11: _optimize_num_overlap_pixels
def _optimize_num_overlap_pixels(data):
    """
    """
    num_projection, num_slices, num_pixels = data.shape
    if num_projection % 2 != 0:  # if odd
        img_first_half = np.squeeze(data[1:num_projection // 2 + 1, num_slices // 2, :])
        img_second_half = np.squeeze(data[num_projection // 2:num_projection - 1, num_slices // 2, :])
    else:
        img_first_half = np.squeeze(data[1:num_projection // 2 + 1, num_slices // 2, :])
        img_second_half = np.squeeze(data[num_projection // 2:num_projection, num_slices // 2, :])

    # flip the second half horizontally
    ind = range(0, num_pixels)[::-1]
    img_second_half = img_second_half[:, ind]

    img_first_half = ndimage.filters.gaussian_filter(img_first_half, sigma=2)
    img_second_half = ndimage.filters.gaussian_filter(img_second_half, sigma=2)

    # squared gradient magnitude of each half
    gx1, gy1 = np.gradient(img_first_half)
    gx2, gy2 = np.gradient(img_second_half)
    img_first_half = np.power(gx1, 2) + np.power(gy1, 2)
    img_second_half = np.power(gx2, 2) + np.power(gy2, 2)

    # cross-correlate the two halves via FFT and pick the best-matching shift
    img1 = np.fft.fft(img_first_half)
    img2 = np.fft.fft(img_second_half)
    tmp = np.real(np.fft.ifft(np.multiply(np.conj(img2), img1)))
    return np.argmax(np.sum(np.abs(tmp), axis=0))
Example 12: propagate_tie
def propagate_tie(mu, delta, pixel_size, dist):
    """
    Propagate emitting x-ray wave based on Transport of Intensity.

    Parameters
    ----------
    mu : ndarray, optional
        3D tomographic data for attenuation.
    delta : ndarray
        3D tomographic data for refractive index.
    pixel_size : float
        Detector pixel size in cm.
    dist : float
        Propagation distance of the wavefront in cm.

    Returns
    -------
    ndarray
        3D propagated tomographic intensity.
    """
    i1 = np.exp(-mu)
    i2 = np.zeros(delta.shape)
    for m in range(delta.shape[0]):
        dx, dy = np.gradient(delta[m], pixel_size)
        d2x, _ = np.gradient(i1[m] * dx, pixel_size)
        _, d2y = np.gradient(i1[m] * dy, pixel_size)
        i2[m] = i1[m] + dist * (d2x + d2y)
    return i2
Example 13: deblurImage
def deblurImage(blurImg, iteration, mode=0):
    niter = 0
    edge = getEdge(blurImg * 255, mode)
    stDesv = np.std(edge)
    grady, gradx = np.gradient(blurImg * 255)
    deblurImg = np.copy(blurImg)
    normalizar = False
    # print "Gradient before", np.sum(gradx + grady)
    desv = np.std(deblurImg)
    extraGain = 1.0
    while niter < iteration:
        desv = np.std(deblurImg)
        # print "Standard deviation of the blurred image", desv
        for j in range(deblurImg.shape[0] - 1):
            for k in range(deblurImg.shape[1] - 1):
                gain = gradx[j, k] * stDesv + grady[j, k] * stDesv
                if gain < extraGain:
                    extraGain = gain
                deblurImg[j, k] = deblurImg[j, k] + gain
                if deblurImg[j, k] < 0.0 or deblurImg[j, k] > 255.0:
                    normalizar = True
        deblurImg = extraGain / 10.0 + deblurImg
        if normalizar:
            normalize(deblurImg)
        edge = getEdge(deblurImg, mode)
        stDesv = np.std(edge)
        niter = niter + 1
        gradx2, grady2 = np.gradient(deblurImg)
    return deblurImg
Example 14: get_quantum_driving_parameters
def get_quantum_driving_parameters(self):
    """Return the adapted parameters (eps_prime, delta, theta_prime) to
    obtain adiabatic dynamics for arbitrary length.
    """
    eps, delta = self.get_cycle_parameters()
    eps_dot, delta_dot = [np.gradient(x, self.dt) for x in (eps, delta)]

    mixing_angle_dot = 2. * np.abs(self.B0) * (delta * eps_dot - delta_dot * eps)
    mixing_angle_dot /= (delta**2 + 4. * np.abs(self.B0)**2 * eps**2)
    self.mixing_angle_dot = mixing_angle_dot
    self.mixing_angle = np.arctan(2. * np.abs(self.B0) * eps / delta)
    self.mixing_angle_dot_alt = np.gradient(self.mixing_angle, self.dt)

    theta_prime = -2. * np.arctan2(mixing_angle_dot, (2 * np.abs(self.B0) * eps))
    B_prime = (-1j * (np.exp(1j * theta_prime) + 1.) * np.pi**2 /
               self.W**3 / np.sqrt(self.k0 * self.k1))
    eps_prime = np.sqrt(4. * np.abs(self.B0)**2 * eps**2 + mixing_angle_dot**2)
    eps_prime /= 2. * np.abs(B_prime)

    # avoid divergencies
    for n in (0, -1):
        eps_prime[n] = 0.0

    self.eps_prime = eps_prime
    self.delta_prime = delta
    self.theta_prime = theta_prime

    return eps_prime, delta, theta_prime
Example 15: march
def march(x, u_e, nu):
    dx = numpy.diff(x)
    # du_e/dx on the (possibly non-uniform) x grid (numpy >= 1.13 coordinate-array form)
    du_e = numpy.gradient(u_e, x)
    delta = numpy.full_like(x, 0.)
    lam = numpy.full_like(x, lam0)

    # Initial conditions must be a stagnation point. If u_e[0]>0
    # assume stagnation is at x=0 and integrate from x=0..x[0].
    if u_e[0] < 0.01:    # stagnation point
        delta[0] = numpy.sqrt(lam0 * nu / du_e[0])
    elif x[0] > 0:       # just downstream
        delta[0] = numpy.sqrt(lam0 * nu * x[0] / u_e[0])
        delta[0] += 0.5 * x[0] * g_pohl(delta[0], 0, u_e, du_e, nu)
        lam[0] = delta[0]**2 * du_e[0] / nu
    else:
        raise ValueError('x=0 must be stagnation point')

    # march!
    for i in range(len(x) - 1):
        delta[i+1] = heun(g_pohl, delta[i], i, dx[i],
                          u_e, du_e, nu)   # ...additional arguments
        lam[i+1] = delta[i+1]**2 * du_e[i+1] / nu
        if lam[i+1] < -12:                 # separation condition
            i -= 1
            break
    return delta, lam, i + 1               # return with separation index