This page collects typical usage examples of the numpy.diff function in Python. If you have been wondering what numpy.diff does, how to call it, and what real-world uses look like, the hand-picked examples below may help.
The following shows 15 code examples of the diff function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
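As a quick refresher before the examples: numpy.diff returns the discrete difference out[i] = a[i+1] - a[i] along a chosen axis, so the result is one element shorter along that axis. A minimal illustrative sketch (not taken from any example below):
import numpy as np

a = np.array([1, 4, 9, 16, 25])
print(np.diff(a))          # [3 5 7 9]  first differences
print(np.diff(a, n=2))     # [2 2 2]    differencing applied twice
m = np.array([[1, 3, 6], [0, 5, 6]])
print(np.diff(m, axis=0))  # difference between the rows: [[-1  2  0]]
print(np.diff(m, axis=1))  # differences along each row: [[2 3], [5 1]]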
Example 1: draw
def draw(self, event):
    """Draw the widget

    Parameters
    ----------
    event : instance of Event
        The draw event.
    """
    super(Console, self).draw(event)
    if event is None:
        raise RuntimeError('Event cannot be None')
    xform = event.get_full_transform()
    tr = (event.document_to_framebuffer *
          event.framebuffer_to_render)
    logical_scale = np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2]
    tr = event.document_to_framebuffer
    log_to_phy = np.mean(np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2])
    n_pix = (self.font_size / 72.) * 92.  # num of pixels tall
    # The -2 here is because the char_height has a gap built in
    font_scale = max(n_pix / float((self._char_height - 2)), 1)
    self._resize_buffers(font_scale)
    self._do_pending_writes()
    self._program['u_origin'] = xform.map((0, 0, 0, 1))
    self._program.prepare()
    self._program['u_logical_scale'] = font_scale * logical_scale
    self._program['u_color'] = self.text_color.rgba
    self._program['u_physical_scale'] = font_scale * log_to_phy
    self._program['a_position'] = self._position
    self._program['a_bytes_012'] = VertexBuffer(self._bytes_012)
    self._program['a_bytes_345'] = VertexBuffer(self._bytes_345)
    set_state(depth_test=False, blend=True,
              blend_func=('src_alpha', 'one_minus_src_alpha'))
    self._program.draw('points')
Example 2: getOmega
def getOmega(dels):
    # for k in range(1,dels.delta_d.shape[0])
    N = dels.delta_d.shape[1]
    delta_t = dels.delta_t
    delta_d = dels.delta_d

    a_t = np.diff(delta_t)
    a_t = a_t[:, 0:-1]
    a_d = np.diff(delta_t[:, ::-1])
    a_d = a_d[:, ::-1]
    a_d = a_d[:, 1:]

    b_t = np.diff(delta_d)
    b_t = b_t[:, 0:-1]
    b_d = np.diff(delta_d[:, ::-1])
    b_d = b_d[:, ::-1]
    b_d = b_d[:, 1:]

    c_t = 0.25 * (np.abs(a_t) + np.abs(b_t)) * np.sign(a_t) * np.sign(b_t) * (np.sign(a_t) * np.sign(b_t) - 1)
    c_d = 0.25 * (np.abs(a_d) + np.abs(b_d)) * np.sign(a_d) * np.sign(b_d) * (np.sign(a_d) * np.sign(b_d) - 1)
    Omega = 1.0 / (2 * N) * (c_t.mean(axis=0) + c_d.mean(axis=0))
    return Omega
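The reverse-diff-reverse pattern here is worth noting: reversing an array along an axis, differencing, and reversing back yields the forward differences with the opposite sign, so a_d and b_d are sign-flipped differences offset by one sample relative to a_t and b_t. A small sketch of the identity on hypothetical data (not the dels structure used above):
import numpy as np

delta = np.array([[1.0, 3.0, 2.0, 5.0]])
fwd = np.diff(delta)                    # delta[:, i+1] - delta[:, i]
rev = np.diff(delta[:, ::-1])[:, ::-1]  # reverse, diff, reverse back
print(fwd)                              # [[ 2. -1.  3.]]
print(rev)                              # [[-2.  1. -3.]]
assert np.allclose(rev, -fwd)           # same magnitudes, opposite sign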
Example 3: compute_metric
def compute_metric(self):
    gfloprate = 0
    if self.ts.pmc_type == 'amd64':
        gfloprate += self.arc(self.ts.data[0])
    if self.ts.pmc_type == 'intel_hsw':
        # print "Haswell chips do not have FLOP counters"
        return
    if self.ts.pmc_type == 'intel_snb':
        schema = self.ts.j.get_schema('intel_snb')
        if 'ERROR' in schema:
            return
        data = self.ts.j.aggregate_stats('intel_snb')
        try:
            flops = numpy.diff(data[0][:, schema['SSE_DOUBLE_SCALAR'].index] +
                               2 * data[0][:, schema['SSE_DOUBLE_PACKED'].index] +
                               4 * data[0][:, schema['SIMD_DOUBLE_256'].index]) / numpy.diff(self.ts.t)
        except:
            flops = numpy.diff(data[0][:, schema['SSE_D_ALL'].index] +
                               4 * data[0][:, schema['SIMD_D_256'].index]) / numpy.diff(self.ts.t)
        flops = flops / data[1]
        self.metric = tmean(flops) / 1.0e9
    return
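The numpy.diff idiom at the heart of compute_metric is converting cumulative hardware counters into rates: difference the counter, difference the timestamps, divide. A self-contained sketch with made-up counter values rather than the ts/schema objects used above:
import numpy as np

t = np.array([0.0, 10.0, 20.0, 30.0])                 # sample times in seconds
flop_counter = np.array([0.0, 5e9, 1.2e10, 1.8e10])   # cumulative FLOP count
gflops = np.diff(flop_counter) / np.diff(t) / 1e9     # rate over each interval
print(gflops)   # [0.5 0.7 0.6]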
Example 4: sample_line_segment_mm_s
def sample_line_segment_mm_s(start_xy_mm, end_xy_mm, dt_s, mW=None, max_mm=5.0):
    """ Given a line segment in mm space, map it to galvo space.
    To make the line straight in mm space, samples may be added to
    more closely approximate a straight line.
    Returns: An array of shape nx3 (if mW is None) or nx4 (if mW is not None)
        of points and time deltas in mm and seconds,
        excluding start_xy_mm and including end_xy_mm,
        possibly including samples along the way.
    """
    import FLP
    from numpy.linalg import norm
    dist_mm = norm(np.asarray(end_xy_mm) - start_xy_mm)
    if dist_mm <= max_mm:
        if mW is None:
            return np.array((tuple(end_xy_mm) + (dt_s,),))  # Just the end sample.
        else:
            return np.array((tuple(end_xy_mm) + (dt_s, mW),))  # Just the end sample.
    # np.linspace expects an integer sample count.
    samples_s = np.linspace(0, dt_s, int(np.ceil(dist_mm / max_mm)) + 1)
    timeRange_s = (0, dt_s)
    if mW is None:
        return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                             np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                             np.diff(samples_s)])
    else:
        return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                             np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                             np.diff(samples_s),
                             mW * np.ones_like(samples_s[1:])])
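np.diff(samples_s) supplies the per-sample time deltas; because samples_s comes from np.linspace, the deltas are uniform and sum to dt_s. A quick sketch of that property with arbitrary numbers:
import numpy as np

dt_s = 0.5
n_segments = 4
samples_s = np.linspace(0, dt_s, n_segments + 1)
print(np.diff(samples_s))        # [0.125 0.125 0.125 0.125]
print(np.diff(samples_s).sum())  # 0.5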
Example 5: test_mean_std_12bit
def test_mean_std_12bit(self):
    # Input 12-bit, with an 8-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(256)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint8))

    np.testing.assert_array_equal(luts[0], luts[1])
    np.testing.assert_array_equal(luts[1], luts[2])

    lut = luts[0]
    assert np.all(lut[:8] == 0)
    assert np.all(lut[-8:] == 4096)
    assert np.diff(lut[8:-8]).min() == 1
    assert np.diff(lut[8:-8]).max() == 2

    # Input 12-bit, with a 12-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint16))

    # Should be a 1-to-1 look-up table...
    np.testing.assert_array_equal(luts[0], np.arange(4097))
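The assertions use np.diff to confirm that the interior of the look-up table increases monotonically in steps of 1 or 2. The same idiom works as a general monotonicity check; a tiny sketch on a hypothetical LUT:
import numpy as np

lut = np.array([0, 1, 3, 4, 6, 7, 9])
steps = np.diff(lut)
print(steps.min(), steps.max())  # 1 2
assert np.all(steps > 0)         # strictly increasing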
Example 6: __init__
def __init__(self, turn, elem, single, name, s, x, xp, y, yp, pc, de, tau, **args):
    apc = float(pc[0]) * 1e9
    ade = float(de[0])
    self.m0 = self.pmass
    en = np.sqrt(apc**2 + self.pmass**2)
    self.e0 = en - ade
    self.p0c = np.sqrt(self.e0**2 - self.m0**2)
    # structure
    self.elem = np.array(elem, dtype=int)
    self.turn = np.array(turn, dtype=int)
    d0 = np.where(np.diff(self.elem) != 0)[0][0] + 1
    # Integer division so the reshape below receives integer dimensions.
    d1 = (np.where(np.diff(self.turn) != 0)[0][0] + 1) // d0
    d2 = len(self.turn) // d1 // d0
    self.single = np.array(single, dtype=int)
    self.name = np.array(name, dtype=str)
    self.s = np.array(s, dtype=float)
    self.x = np.array(x, dtype=float)
    self.y = np.array(y, dtype=float)
    self.tau = -np.array(tau, dtype=float) * self.clight
    opd = np.array(pc, dtype=float) * (1e9 / self.p0c)
    self.delta = opd - 1
    self.pt = np.array(de, dtype=float) / self.p0c
    self.px = np.array(xp, dtype=float) * opd
    self.py = np.array(yp, dtype=float) * opd
    for nn, vv in self.__dict__.items():
        if hasattr(vv, '__len__') and len(vv) == d0 * d1 * d2:
            setattr(self, nn, vv.reshape(d2, d1, d0))
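The d0/d1/d2 computation uses np.diff to detect where a repeating index column first changes value, which gives the block sizes needed to reshape the flat arrays. A sketch of the trick on a hypothetical layout of 2 turns x 3 elements x 2 particles:
import numpy as np

elem = np.array([0, 0, 1, 1, 2, 2] * 2)               # element index per record
turn = np.array([1] * 6 + [2] * 6)                     # turn index per record
d0 = np.where(np.diff(elem) != 0)[0][0] + 1            # records per element block
d1 = (np.where(np.diff(turn) != 0)[0][0] + 1) // d0    # elements per turn
d2 = len(turn) // d1 // d0                             # number of turns
print(d0, d1, d2)   # 2 3 2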
Example 7: get_resampling_matrix
def get_resampling_matrix(global_grid, local_grid):
    """Build the rectangular matrix that linearly resamples from the global grid to a local grid.

    The local grid range must be contained within the global grid range.

    Args:
        global_grid(numpy.ndarray): Sorted array of n global grid wavelengths.
        local_grid(numpy.ndarray): Sorted array of m local grid wavelengths.

    Returns:
        numpy.ndarray: Array of (m,n) matrix elements that perform the linear resampling.
    """
    assert np.all(np.diff(global_grid) > 0), 'Global grid is not strictly increasing.'
    assert np.all(np.diff(local_grid) > 0), 'Local grid is not strictly increasing.'
    # Locate each local wavelength in the global grid.
    global_index = np.searchsorted(global_grid, local_grid)
    assert local_grid[0] >= global_grid[0], 'Local grid extends below global grid.'
    assert local_grid[-1] <= global_grid[-1], 'Local grid extends above global grid.'
    # Lookup the global-grid bracketing interval (xlo,xhi) for each local grid point.
    # Note that this gives xlo = global_grid[-1] if local_grid[0] == global_grid[0]
    # but this is fine since the coefficient of xlo will be zero.
    global_xhi = global_grid[global_index]
    global_xlo = global_grid[global_index - 1]
    # Create the rectangular interpolation matrix to return.
    alpha = (local_grid - global_xlo) / (global_xhi - global_xlo)
    local_index = np.arange(len(local_grid), dtype=int)
    matrix = np.zeros((len(local_grid), len(global_grid)))
    matrix[local_index, global_index] = alpha
    matrix[local_index, global_index - 1] = 1 - alpha
    return matrix
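Since get_resampling_matrix encodes ordinary linear interpolation, applying the returned matrix to values sampled on the global grid should agree with np.interp on the local grid. A short usage sketch under that assumption, with the function from Example 7 in scope and made-up wavelength grids:
import numpy as np

global_grid = np.linspace(4000.0, 5000.0, 11)
local_grid = np.array([4100.0, 4250.0, 4800.0])
flux = np.sqrt(global_grid)                    # any smooth test function
matrix = get_resampling_matrix(global_grid, local_grid)
resampled = matrix.dot(flux)
expected = np.interp(local_grid, global_grid, flux)
assert np.allclose(resampled, expected)        # matrix reproduces linear interpolation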
Example 8: create_grid_polygons
def create_grid_polygons(x, y):
    """
    Creates a list of grid polygons (rectangles) in well-known text (WKT) format from evenly spaced x and y vectors.

    Args:
        x (1d numpy array): vector of x-values
        y (1d numpy array): vector of y-values

    Returns:
        list: grid polygons in WKT format
    """
    import numpy as np
    import pdb
    xdiff = np.diff(x)
    if np.std(xdiff) > 1e-10:
        raise ValueError('Uneven longitude spacing.')
    dx = np.mean(xdiff)
    ydiff = np.diff(y)
    if np.std(ydiff) > 1e-10:
        raise ValueError('Uneven latitude spacing.')
    dy = np.mean(ydiff)
    logger.debug('Spacing is ({},{})'.format(dx, dy))
    xmatr, ymatr = np.meshgrid(x, y)
    rows = []
    for (i, j), x_ij in np.ndenumerate(xmatr):
        y_ij = ymatr[i, j]
        x1, y1 = x_ij - dx / 2., y_ij - dy / 2.
        x2, y2 = x_ij + dx / 2., y_ij + dy / 2.
        rows.append((i, j, x_ij, y_ij,
                     'POLYGON(({x1} {y1},{x1} {y2},{x2} {y2},{x2} {y1},{x1} {y1}))'.format(
                         x1=x1, y1=y1, x2=x2, y2=y2)))
    return rows
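The np.std(np.diff(...)) test at the top is a compact check that a coordinate vector is evenly spaced before its mean step is used as the cell size. The check in isolation, on an illustrative longitude vector:
import numpy as np

x = np.arange(-180.0, 180.0, 2.5)              # evenly spaced longitudes
xdiff = np.diff(x)
print(np.std(xdiff) < 1e-10, np.mean(xdiff))   # True 2.5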
Example 9: test_power
def test_power():
    a = 5.  # shape
    samples = 10000
    s1 = np.random.power(a, samples)
    s2 = common.rand_pow_array(a, samples)

    plt.figure('power test')
    count1, bins1, ignored1 = plt.hist(s1,
                                       bins=30,
                                       label='numpy',
                                       histtype='step')
    x = np.linspace(0, 1, 100)
    y = a * x**(a - 1.0)
    normed_y1 = samples * np.diff(bins1)[0] * y
    plt.plot(x, normed_y1, label='numpy.random.power fit')

    count2, bins2, ignored2 = plt.hist(s2,
                                       bins=30,
                                       label='joinmarket',
                                       histtype='step')
    normed_y2 = samples * np.diff(bins2)[0] * y
    plt.plot(x, normed_y2, label='common.rand_pow_array fit')

    plt.title('testing power distribution')
    plt.legend(loc='upper left')
    plt.show()
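np.diff(bins1)[0] is simply the histogram bin width, used to scale the analytic density y to raw-count units. The same normalization can be sketched without matplotlib using np.histogram (illustrative only, not part of the original test):
import numpy as np

a, samples = 5.0, 10000
s = np.random.power(a, samples)
counts, bins = np.histogram(s, bins=30)
width = np.diff(bins)[0]                        # uniform bin width
x = 0.5 * (bins[:-1] + bins[1:])                # bin centers
expected = samples * width * a * x**(a - 1.0)   # counts predicted by the pdf
print(counts.sum(), round(expected.sum()))      # both close to 10000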
Example 10: xCoordinates
def xCoordinates(sobel_img):
    num_rows = float(len(sobel_img))  # get number of x values
    # sum along y axis
    vert_sum = np.sum(sobel_img, axis=0)
    # make it an average value (divide by # of x values)
    vert_sum = np.divide(vert_sum, num_rows)
    x = np.arange(0, len(vert_sum))          # for graphing
    xnew = np.arange(0, len(vert_sum), 50)   # for smoothing
    # smooth
    y_smooth = spline(x, vert_sum, xnew)
    # make a sin curve 1/3 of the width of image
    img_width, img_height = sobel_img.shape
    z = np.arange(0, int(img_width / 3), 1)

    def f(x):
        return np.sin(x / 90) * -15 + 25

    f = [f(i) for i in z]  # make sine into an array
    # convolve sine and the vertical sum
    y_conv = np.convolve(vert_sum, f, 'same')
    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1
    return mins
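The last line is a compact second-difference test: a local minimum is where the sign of the first difference flips from negative to positive. A minimal sketch of the idiom on a toy signal:
import numpy as np

y = np.array([3.0, 1.0, 2.0, 4.0, 0.0, 5.0])
mins = (np.diff(np.sign(np.diff(y))) > 0).nonzero()[0] + 1
print(mins)   # [1 4] -- indices of the local minima (values 1.0 and 0.0)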
Example 11: yCoordinates
def yCoordinates(sobel_img):
    num_col = float(len(sobel_img[0]))  # number of y values
    # sum along x axis
    horiz_sum = np.sum(sobel_img, axis=1)
    # average value
    horiz_sum = np.divide(horiz_sum, num_col)
    y = np.arange(0, len(horiz_sum))
    ynew = np.arange(0, len(horiz_sum))
    x_smooth = spline(y, horiz_sum, ynew)
    # make a sin curve 1/3 of the height
    img_width, img_height = sobel_img.shape
    z = np.arange(0, int(img_height / 3), 1)

    def f(x):
        return np.sin(x / 90) * -15 + 25

    f = [f(i) for i in z]  # make sine into an array
    # convolve sine and the horizontal sum
    y_conv = np.convolve(horiz_sum, f, 'same')
    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1
    return mins
Example 12: check_obs_scheme
def check_obs_scheme(self):
    " Checks the internal validity of provided observation schemes "
    # check sub_pops
    idx_union = np.sort(self._sub_pops[0])
    i = 1
    while idx_union.size < self._p and i < len(self._sub_pops):
        idx_union = np.union1d(idx_union, self._sub_pops[i])
        i += 1
    if idx_union.size != self._p or np.any(idx_union != np.arange(self._p)):
        raise Exception(('all subpopulations together have to cover '
                         'exactly all included observed variables y_i in y. '
                         'This is not the case. Change the definition of '
                         'subpopulations in variable sub_pops or reduce '
                         'the number of observed variables p. '
                         'The union of indices of all subpopulations is'),
                        idx_union)

    # check obs_time
    if not self._obs_time[-1] == self._T:
        raise Exception(('Entries of obs_time give the respective ends of '
                         'the periods of observation for any '
                         'subpopulation. Hence the last entry of obs_time '
                         'has to be the full recording length. The last '
                         'entry of obs_time before is '), self._obs_time[-1])
    if np.any(np.diff(self._obs_time) < 1):
        raise Exception(('lengths of observation have to be at least 1. '
                         'Minimal observation time for a subpopulation: '),
                        np.min(np.diff(self._obs_time)))

    # check obs_pops
    if not self._obs_time.size == self._obs_pops.size:
        raise Exception(('each entry of obs_pops gives the index of the '
                         'subpopulation observed up to the respective '
                         'time given in obs_time. Thus the sizes of the '
                         'two arrays have to match. They do not. '
                         'no. of subpop. switch points and no. of '
                         'subpopulations observed up to switch points '
                         'are '), (self._obs_time.size, self._obs_pops.size))

    idx_pops = np.sort(np.unique(self._obs_pops))
    if not np.min(idx_pops) == 0:
        raise Exception(('first subpopulation has to have index 0, but '
                         'is given the index '), np.min(idx_pops))
    elif not idx_pops.size == len(self._sub_pops):
        raise Exception(('number of specified subpopulations in variable '
                         'sub_pops does not meet the number of '
                         'subpopulations indexed in variable obs_pops. '
                         'Delete subpopulations that are never observed, '
                         'or change the observed subpopulations in '
                         'variable obs_pops accordingly. The number of '
                         'indexed subpopulations is '),
                        len(self._sub_pops))
    elif not np.all(np.diff(idx_pops) == 1):
        raise Exception(('subpopulation indices have to be consecutive '
                         'integers from 0 to the total number of '
                         'subpopulations. This is not the case. '
                         'Given subpopulation indices are '),
                        idx_pops)
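Two of the checks above reduce to standard np.diff tests: every observation period has length at least 1 iff np.diff(obs_time) is never below 1, and subpopulation indices are consecutive iff the diff of the sorted unique indices is all ones. A brief sketch with made-up values:
import numpy as np

obs_time = np.array([10, 25, 40, 60])           # hypothetical period end points
print(np.any(np.diff(obs_time) < 1))            # False -> every period lasts >= 1 step
idx_pops = np.sort(np.unique([0, 2, 1, 1, 0]))  # observed subpopulation indices
print(np.all(np.diff(idx_pops) == 1))           # True -> indices 0..2 are consecutive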
Example 13: _buildInterp
def _buildInterp(self, x, y, z, pot):
    """ Private function to build interpolation arrays using potential
    array `pot`. Assumes that only the positive part of z is in the array,
    so reflects the array in the (x, y) plane.
    """
    self.xmin = x[0]
    self.xmax = x[-1]
    self.ymin = y[0]
    self.ymax = y[-1]
    self.zmin = -z[-1]
    self.zmax = z[-1]

    # Field in negative z direction. Reverse the order in this axis.
    potNeg = pot[..., -1:0:-1]
    # Concatenate positive and negative z direction arrays.
    _z = np.hstack((-z[-1:0:-1], z))
    _pot = np.dstack((potNeg, pot))
    self.bInterpolator = Interpolator((x, y, _z), _pot)

    # Build difference derivative arrays
    self.dx = x[1] - x[0]
    self.dy = y[1] - y[0]
    self.dz = z[1] - z[0]
    dbdx = np.diff(_pot, axis=0) / self.dx
    dbdy = np.diff(_pot, axis=1) / self.dy
    dbdz = np.diff(_pot, axis=2) / self.dz
    x_dbdx = x[:-1] + self.dx / 2
    y_dbdy = y[:-1] + self.dy / 2
    z_dbdz = _z[:-1] + self.dz / 2
    self.dBdxInterp = Interpolator((x_dbdx, y, _z), dbdx)
    self.dBdyInterp = Interpolator((x, y_dbdy, _z), dbdy)
    self.dBdzInterp = Interpolator((x, y, z_dbdz), dbdz)
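np.diff along each axis gives forward differences, so dividing by the uniform grid step yields derivative estimates located at the midpoints between grid points; that is why the derivative interpolators use the shifted x_dbdx, y_dbdy and z_dbdz coordinates. A 1-D sketch of the construction:
import numpy as np

x = np.linspace(0.0, 1.0, 11)
pot = x**2
dx = x[1] - x[0]
dpdx = np.diff(pot) / dx             # forward differences
x_mid = x[:-1] + dx / 2              # derivative samples live at midpoints
print(np.allclose(dpdx, 2 * x_mid))  # True: d/dx x^2 = 2x, exact at midpoints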
Example 14: get_blotter_pnl
def get_blotter_pnl(order_qty, filled_qty, filled_price, cum_position, data, drawdown):
    #import pdb; pdb.set_trace()
    mid = midpoint(data)
    cash = np.sum(filled_qty * filled_price) * (-1.0)
    open_cash = cum_position[-1] * mid[-1]
    pnl = cash + open_cash
    pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
    spread = np.cumsum((mid - filled_price) * filled_qty)
    pnl_t = spread[1:] + pnl_t
    assert abs(pnl - pnl_t[-1]) < 0.01

    running_max = np.maximum.accumulate(pnl_t)
    idx = np.where(pnl_t - running_max < drawdown)[0]
    if len(idx) > 0:
        stop_idx = np.min(idx)
        cum_position[(stop_idx + 1):] = 0.0
        pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
        order_qty[(stop_idx + 1):] = 0.0
        filled_qty[(stop_idx + 1):] = 0.0
        spread = np.cumsum((mid - filled_price) * filled_qty)
        pnl_t = spread[1:] + pnl_t

    order_volume = np.sum(np.abs(order_qty))
    trade_volume = np.sum(np.abs(filled_qty))
    result = np.array([(pnl_t[-1], np.min(pnl_t), np.max(pnl_t),
                        np.min(cum_position), np.max(cum_position), trade_volume,
                        order_volume, trade_volume * 1.0 / order_volume)],
                      dtype=[('total_pnl', 'f'), ('min_pnl', 'f'),
                             ('max_pnl', 'f'), ('min_position', int),
                             ('max_position', int), ('volume', int),
                             ('order_volume', int), ('fill_ratio', float)])
    return result
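The running P&L line is the central numpy.diff usage: holding cum_position[i] over the price move mid[i+1] - mid[i] earns cum_position[i] * np.diff(mid)[i], and the cumulative sum tracks mark-to-market P&L through time. A toy sketch with hypothetical positions and midprices:
import numpy as np

mid = np.array([100.0, 101.0, 100.5, 102.0])   # midprice path
cum_position = np.array([0, 2, 2, 1])          # position held after each tick
pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
print(pnl_t)   # [ 0. -1.  2.]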
Example 15: zero_crossings
def zero_crossings(y_axis, window=11):
    """
    Algorithm to find zero crossings. Smooths the curve and finds the
    zero-crossings by looking for a sign change.

    keyword arguments:
    y_axis -- A list containing the signal over which to find zero-crossings
    window -- the dimension of the smoothing window; should be an odd integer
        (default: 11)

    return -- the index for each zero-crossing
    """
    # smooth the curve
    length = len(y_axis)
    x_axis = np.asarray(range(length), int)
    # discard tail of smoothed signal
    y_axis = _smooth(y_axis, window)[:length]
    zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]
    indices = [x_axis[index] for index in zero_crossings]

    # check if zero-crossings are valid
    diff = np.diff(indices)
    if diff.std() / diff.mean() > 0.2:
        print(diff.std() / diff.mean())
        print(np.diff(indices))
        raise ValueError(
            "False zero-crossings found, indicates problem {0} or {1}".format(
                "with smoothing window", "problem with offset"))
    # check if any zero crossings were found
    if len(zero_crossings) < 1:
        raise ValueError("No zero crossings found")
    return indices
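The detection itself is the one-liner np.where(np.diff(np.sign(y)))[0]: wherever the sign of the signal changes between consecutive samples, the difference of the signs is nonzero. A minimal sketch:
import numpy as np

y = np.array([3.0, 1.0, -2.0, -1.0, 4.0, 2.0])
crossings = np.where(np.diff(np.sign(y)))[0]
print(crossings)   # [1 3] -- crossings occur between indices 1-2 and 3-4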