This article collects typical usage examples of the scipy.average function in Python. If you have been wondering what exactly average does, how to call it, and what real code that uses it looks like, the curated examples below should help.
The following 15 code examples of the average function are shown, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
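A note before the examples: scipy.average is simply numpy.average re-exported through the top-level scipy namespace, and newer SciPy releases deprecate and drop these NumPy re-exports. The examples below therefore run as-is only on older SciPy versions; on a current install, substitute numpy.average. A minimal sketch of the signature (data invented for illustration):

import scipy  # older SciPy; on modern installs use numpy.average instead

data = [1.0, 2.0, 3.0, 4.0]
print(scipy.average(data))                        # unweighted mean: 2.5
print(scipy.average(data, weights=[4, 3, 2, 1]))  # weighted mean: 2.0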
Example 1: get_cluster_distribution
def get_cluster_distribution(g, method='average'):
    """
    The clustering-coefficient distribution grouped by degree. Like a
    histogram, it shows each possible degree k together with the
    average/median clustering coefficient of the nodes of degree k in g.

    Parameters:
    -----------
    g: NetworkX Graph
    method: str, ('average', 'median'), (default = 'average')

    Returns:
    --------
    xdata, ydata: a 2-tuple of arrays, (k, avg_cc(V_k)), where V_k are
    the nodes with degree k
    """
    g = to_undirected(g)
    k = nx.clustering(g)
    d = g.degree()
    ck = defaultdict(list)
    for n in g.nodes_iter():
        ck[d[n]].append(k[n])
    xdata, ydata = list(), list()
    if method == 'average':
        for x, y in ifilter(lambda x: x[0] > 1 and average(x[1]) > 0, ck.iteritems()):
            xdata.append(x)
            ydata.append(average(y))
    elif method == 'median':
        for x, y in ifilter(lambda x: x[0] > 1 and median(x[1]) > 0, ck.iteritems()):
            xdata.append(x)
            ydata.append(median(y))
    else:
        raise ValueError("method should be 'average' or 'median'")
    xdata = array(xdata)
    ydata = array(ydata)
    return xdata, ydata
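Example 1 targets Python 2 and NetworkX 1.x (ifilter, iteritems, nodes_iter). A self-contained sketch of the same degree-grouped averaging with current NetworkX and numpy.average might look like this (the test graph and its parameters are arbitrary):

import networkx as nx
import numpy as np
from collections import defaultdict

g = nx.barabasi_albert_graph(200, 3, seed=0)   # arbitrary test graph
cc = nx.clustering(g)
by_degree = defaultdict(list)
for node, deg in g.degree():                   # NetworkX 2.x: degree() yields (node, degree) pairs
    by_degree[deg].append(cc[node])
ks = sorted(k for k, v in by_degree.items() if k > 1 and np.average(v) > 0)
avg_cc = [np.average(by_degree[k]) for k in ks]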
Example 2: find_homog_trans
def find_homog_trans(points_a, points_b, err_threshold=0, rot_0=None):
    """Finds a homogeneous transformation matrix that, when applied to
    the points in points_a, minimizes the squared Euclidean distance
    between the transformed points and the corresponding points in
    points_b. Both points_a and points_b are (n, 3) arrays.
    """
    # Align the centroids of the two point clouds
    cent_a = sp.average(points_a, axis=0)
    cent_b = sp.average(points_b, axis=0)
    points_a = points_a - cent_a
    points_b = points_b - cent_b
    # Define the error as a function of a rotation vector in R^3
    # (leastsq squares and sums these residuals internally)
    rot_cost = lambda rot: (sp.dot(vec_to_rot(rot), points_a.T).T
                            - points_b).flatten()
    # Run the optimization
    if rot_0 is None:
        rot_0 = sp.zeros(3)
    rot = opt.leastsq(rot_cost, rot_0)[0]
    # Compute the final homogeneous transformation matrix
    homog_1 = sp.eye(4)
    homog_1[0:3, 3] = -cent_a
    homog_2 = sp.eye(4)
    homog_2[0:3, 0:3] = vec_to_rot(rot)
    homog_3 = sp.eye(4)
    homog_3[0:3, 3] = cent_b
    homog = sp.dot(homog_3, sp.dot(homog_2, homog_1))
    return homog, rot
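The scipy.average usage to note here is axis=0, which averages down the rows of an (n, 3) array and yields the point cloud's centroid. A tiny standalone check (points invented):

import numpy as sp   # stands in for the old scipy namespace used above

points = sp.array([[0., 0., 0.],
                   [2., 0., 0.],
                   [1., 3., 0.]])
centroid = sp.average(points, axis=0)   # array([1., 1., 0.])
centered = points - centroid            # broadcasting recenters every point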
Example 3: calc_velocity
def calc_velocity(vol_flow, side):
    r"""Calculates the velocity field for a rate BC"""
    #
    x_vel = 0.0
    z_vel = 0.0
    avg_fact = namespace.sim_params['avg_fact']
    #
    if side == 'top':
        avg_b = sp.average(map_data_field.data_map[-1, :])
        axis_len = avg_fact * len(map_data_field.data_map[-1, :])
        z_vel = vol_flow / (avg_b * axis_len)
    elif side == 'bottom':
        vol_flow = -vol_flow
        avg_b = sp.average(map_data_field.data_map[0, :])
        axis_len = avg_fact * len(map_data_field.data_map[0, :])
        z_vel = vol_flow / (avg_b * axis_len)
    elif side == 'left':
        vol_flow = -vol_flow
        avg_b = sp.average(map_data_field.data_map[:, 0])
        axis_len = avg_fact * len(map_data_field.data_map[:, 0])
        x_vel = vol_flow / (avg_b * axis_len)
    elif side == 'right':
        avg_b = sp.average(map_data_field.data_map[:, -1])
        axis_len = avg_fact * len(map_data_field.data_map[:, -1])
        x_vel = vol_flow / (avg_b * axis_len)
    else:
        raise ValueError('Invalid side given: ' + side)
    #
    return 'uniform ({} 0.0 {})'.format(x_vel, z_vel)
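Each branch differs only in which boundary slice of the 2-D data map it averages: scipy.average collapses that row or column into a mean aperture avg_b, and the velocity follows as vol_flow / (avg_b * axis_len). The 'top' branch in isolation, with invented numbers:

import numpy as sp   # stands in for the old scipy namespace

data_map = sp.full((4, 5), 2.0e-6)      # hypothetical aperture field
avg_fact = 1.0e-5                       # hypothetical scale factor
vol_flow = 1.0e-12
avg_b = sp.average(data_map[-1, :])     # mean aperture of the top row
axis_len = avg_fact * data_map.shape[1]
z_vel = vol_flow / (avg_b * axis_len)   # 0.01 in this made-up case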
Example 4: standardize_vacuum_quadratures
def standardize_vacuum_quadratures(args, h5):
    vacuum_quadratures = h5["vacuum_quadratures"][:]
    corrected_vacuum = correct_intrastep_drift(vacuum_quadratures)
    create_dataset(args, h5,
                   "corrected_vacuum_quadratures", data=corrected_vacuum)
    mean = average(corrected_vacuum, axis=1)
    centered_vacuum = corrected_vacuum - mean[:, None]
    create_dataset(args, h5,
                   "centered_vacuum_quadratures", data=centered_vacuum)
    return average(std(centered_vacuum, axis=1))
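Two reductions do the work here: average(..., axis=1) produces one mean per quadrature row, the [:, None] reshape lets broadcasting subtract that mean from every column, and the last line averages the per-row standard deviations into a single noise scale. The same steps on random stand-in data:

import numpy as np
from numpy import average, std   # the old scipy names are these same functions

quads = np.random.randn(3, 1000)         # 3 phase steps x 1000 pulses, fake data
mean = average(quads, axis=1)            # shape (3,): one mean per row
centered = quads - mean[:, None]         # row-wise centering via broadcasting
scale = average(std(centered, axis=1))   # single scalar noise scale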
Example 5: update
def update(self):
    """Former set_image_data.

    Has to check: is average? is dFF? flag to show? only one?
    Average behaviour: take all channels that are active, average and overlay.
    dFF behaviour: if multiple channels are active, the dFF images are
    overlaid and colored according to their channel; if only one channel
    is active, raw is in grayscale and dFF is in the glow color map.
    """
    ### for implementation of global lut mod
    # current_lut = self.LUTwidgets.currentIndex()
    for n in range(self.data.nFiles):
        if not self.Options.view['show_flags'][n]:  # hide inactive
            self.ImageItems[n].hide()
            self.ImageItems_dFF[n].hide()
        else:  # work only on those that are active
            if self.Options.view['show_dFF']:  # when showing dFF
                if self.Options.view['show_monochrome']:  # when in mono glow mode
                    self.ImageItems[n].show()
                else:
                    self.ImageItems[n].hide()
                if self.Options.view['show_avg']:  # when showing avg
                    self.ImageItems_dFF[n].setImage(sp.average(self.data.dFF[:, :, :, n], axis=2))
                    self.ImageItems[n].setImage(sp.average(self.data.raw[:, :, :, n], axis=2))
                else:
                    self.ImageItems_dFF[n].setImage(self.data.dFF[:, :, self.frame, n])
                    self.ImageItems[n].setImage(self.data.raw[:, :, self.frame, n])
                self.ImageItems_dFF[n].show()
            else:  # when showing raw
                self.ImageItems_dFF[n].hide()  # no dFF
                if self.Options.view['show_avg']:
                    self.ImageItems[n].setImage(sp.average(self.data.raw[:, :, :, n], axis=2))
                else:
                    self.ImageItems[n].setImage(self.data.raw[:, :, self.frame, n])
                self.ImageItems[n].show()
            self.ImageItems[n].setLevels(self.Data_Display.LUT_Controlers.raw_levels[n])
            self.ImageItems_dFF[n].setLevels(self.Data_Display.LUT_Controlers.dFF_levels[n])
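Examples 5 and 6 share one idiom: sp.average(stack, axis=2) collapses the time axis of an (x, y, t, channel) array into a single average frame, while indexing a fixed frame skips the averaging. Reduced to its essence (shapes invented):

import numpy as np   # np.average is what the snippets call sp.average

raw = np.random.rand(64, 64, 120, 2)              # x, y, time, channel
avg_frame = np.average(raw[:, :, :, 0], axis=2)   # (64, 64) time-averaged image
one_frame = raw[:, :, 42, 0]                      # a single frame, no averaging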
Example 6: update_frame
def update_frame(self):
    for ind in self.active_inds:
        if self.Main.Options.view['show_avg']:
            self.ImageItems_dFF[ind].setImage(sp.average(self.Main.Data.dFF[:, :, :, ind], axis=2))
            self.ImageItems[ind].setImage(sp.average(self.Main.Data.raw[:, :, :, ind], axis=2))
        else:
            self.ImageItems_dFF[ind].setImage(self.Main.Data.dFF[:, :, self.frame, ind])
            self.ImageItems[ind].setImage(self.Main.Data.raw[:, :, self.frame, ind])
    self.update_levels()
Example 7: against_the_field
def against_the_field(self):
    wins = scipy.zeros((len(self.realTeams), len(self.weekly['OP'])))
    for (i, t) in enumerate(self.realTeams):
        for (j, w) in enumerate(self.weekly['PTS FOR']):
            for t2 in [el for el in self.realTeams if el != t]:
                wins[i, j] += int(self.dataDic[t][w] > self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
                wins[i, j] += .5 * int(self.dataDic[t][w] == self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
    losses = 11. - wins
    return scipy.average(wins, axis=1), scipy.std(wins, axis=1), scipy.average(losses, axis=1), scipy.std(losses, axis=1)
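scipy.average(wins, axis=1) reduces the (teams x weeks) win matrix to each team's expected wins per week, and scipy.std measures the week-to-week spread; example 15 below applies the identical reduction to playoff weeks only. Stripped down, with invented numbers:

import numpy as np   # scipy.average/scipy.std above are these NumPy reductions

wins = np.array([[8., 9., 7.],    # team 0: wins against the field, per week
                 [3., 5., 4.]])   # team 1
avg_wins = np.average(wins, axis=1)   # array([8., 4.])
std_wins = np.std(wins, axis=1)       # per-team week-to-week variability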
Example 8: get_network_reading
def get_network_reading(self):
    # Update the readings for all nodes
    self.update_all_readings()
    # Get the current readings from all nodes
    node_readings = []
    for node_name in self.nodes:
        node_readings.append(self.nodes[node_name].stable_field_prediction)
    #node_readings = np.array(node_readings)
    network_map = np.full((25, 25), 0.0)  # float, so averages are not truncated
    network_confidence = np.zeros((25, 25))
    # Go through each cell and get values from node predictions
    for x_index in range(25):
        for y_index in range(25):
            cell_vals = []
            index = (x_index, y_index)
            for plane in node_readings:
                # Get the value
                val = plane[index]
                if not np.isnan(val):
                    cell_vals.append(val)
            #if x_index == 13 and y_index == 13:
            #    print cell_vals
            # average only the cells that had at least one finite reading
            if cell_vals:
                network_map[index] = scipy.average(np.array(cell_vals))
                network_confidence[index] = scipy.std(np.array(cell_vals))
            else:
                network_map[index] = 0
                network_confidence[index] = 0
    # Get the average
    #network_avg = scipy.average(node_readings)
    # Get the standard deviation
    #network_std = scipy.std(node_readings)
    return network_map, network_confidence
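The per-cell loop exists mainly to drop NaNs before calling scipy.average. If the node predictions are stacked into one array, NumPy's nan-aware reductions express the same map in a few lines; a hedged alternative sketch, with random stand-in predictions:

import numpy as np

node_readings = [np.random.rand(25, 25) for _ in range(4)]   # fake node predictions
stack = np.stack(node_readings)                              # (n_nodes, 25, 25)
network_map = np.nan_to_num(np.nanmean(stack, axis=0))       # all-NaN cells become 0
network_confidence = np.nan_to_num(np.nanstd(stack, axis=0))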
Example 9: react_xy
def react_xy(self, rolling_av=False, toprint=True):
    if rolling_av:
        weights = scipy.exp((-1. * (scipy.arange(self.rolling, 0, -1.) / self.rolling)**2) / 2.)
        xd = scipy.average(self.xdrift[(-1 * self.rolling):], weights=weights)
        yd = scipy.average(self.ydrift[(-1 * self.rolling):], weights=weights)
    else:
        xd = self.xdrift[-1]
        yd = self.ydrift[-1]
    if len(self.xdrift) > 1:
        last_slope_1_x = self.xdrift[-1] - self.xdrift[-2]
        last_slope_1_y = self.ydrift[-1] - self.ydrift[-2]
    integrated_diff_x = scipy.sum(self.xdrift)
    integrated_diff_y = scipy.sum(self.ydrift)
    move_x = xd * self.micronperpixel_x
    move_y = -1 * yd * self.micronperpixel_y
    if not self.use_marz:
        last_x = self.piezo.getPosition(1)
        last_y = self.piezo.getPosition(2)
    if (not self.movedLastTime[-1]) or (not self.move_every_other):
        if (abs(xd) > self.xythreshold_pixels) and (not self.no_xy):
            if self.use_marz:
                if toprint:
                    print "Moving x:", move_x
                self.xystage.goRelative(move_x, 0)
                self.movedx.append(move_x)
            else:
                if toprint:
                    print "Moving x:", move_x
                self.piezo.moveTo(1, last_x + move_x, waitForConvergence=False)
                self.movedx.append(move_x)
        else:
            self.movedx.append(0)
        if (abs(yd) > self.xythreshold_pixels) and (not self.no_xy):
            if self.use_marz:
                if toprint:
                    print "Moving y:", move_y
                self.xystage.goRelative(0, move_y)
                self.movedy.append(move_y)
            else:
                if toprint:
                    print "Moving y:", move_y
                self.piezo.moveTo(2, last_y + move_y, waitForConvergence=False)
                self.movedy.append(move_y)
        else:
            self.movedy.append(0)
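This is the one example that exercises the weights argument: the newest drift samples get the largest Gaussian weights, so recent motion dominates the rolling average. The weighting in isolation (window length and drift history invented):

import numpy as np   # scipy.average/exp/arange above are these NumPy functions

rolling = 5
weights = np.exp((-1. * (np.arange(rolling, 0, -1.) / rolling)**2) / 2.)  # rises toward the last sample
drift = np.array([0.1, 0.3, 0.2, 0.4, 0.5])
xd = np.average(drift[-rolling:], weights=weights)   # sum(w * x) / sum(w)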
Example 10: locate
def locate(self, P1, P2, C):
    pointlist = []
    for i, testfunc in enumerate(self.testfuncs):
        if self.flagfuncs[i] == iszero:
            for ind in range(testfunc.m):
                X, V = testfunc.findzero(P1, P2, ind)
                pointlist.append((X, V))
    # average the candidate points componentwise (axis=0), not as one flat scalar
    X = array(average([point[0] for point in pointlist], axis=0))
    V = array(average([point[1] for point in pointlist], axis=0))
    C.Corrector(X, V)
    return X, V
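When the items being averaged are themselves arrays, axis=0 keeps the average componentwise; without it, average flattens the whole input and returns one scalar, which is why the axis matters when combining candidate points. For instance:

import numpy as np

candidates = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
np.average(candidates, axis=0)   # array([2., 3.]): componentwise mean
np.average(candidates)           # 2.5: flattened to a single scalar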
Example 11: main
def main(argv=None):
    global args
    parser = argparse.ArgumentParser(description="Compute various statistics related to sequence sets or individual sequences, either in the provided fasta files or for the sequences piped in")
    # parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument('-p', dest="pretty", action="store_true", help="Pretty print using PrettyTable module")
    parser.add_argument('-i', dest="individual", action="store_true", help="Display statistics for each individual sequence")
    parser.add_argument('-d', dest="delimiter", help="Column separator for output, defaults to whitespace", default=" ")
    parser.add_argument('-t', dest="min_length", help="Minimum length threshold to filter fasta file", default=0, type=int)
    parser.add_argument('-r', dest="reference_length", help="(Not yet implemented) Reference length used to compute corrected Nx values", default=0)
    parser.add_argument('-o', nargs='?', type=argparse.FileType('w'), default=sys.stdout, dest="outfile")
    parser.add_argument('FASTAFILE', action='append', nargs="+", help='List of fasta files to keep. Use "*" to keep them all')
    args = parser.parse_args()
    all_records = []
    FASTAFILE = args.FASTAFILE[0]
    if args.pretty:
        import prettytable
    for f in FASTAFILE:
        for record in SeqIO.parse(f, "fasta", generic_dna):
            if len(record.seq) <= args.min_length:
                continue
            all_records.append(SequenceStat(f, record))
    if args.individual:
        process_individual_sequences(all_records)
        return 0
    # Display summary statistics per file
    sequences_per_files = collections.defaultdict(list)
    for s in all_records:
        sequences_per_files[s.file].append(s)
    if args.pretty:
        table = prettytable.PrettyTable(["File", "#Seqs", "Avg GC", "Avg Length(kb)", "Quant", "Sum Length(kb)", "N50(kb)", "L50"])
        table.align["File"] = "l"
        for file, seqs in sequences_per_files.items():
            lengths = [x.length for x in seqs]
            table.add_row([file, len(seqs), round(scipy.average([x.gc for x in seqs]), 2),
                           round(scipy.average(lengths) / 1000, 2), mquantiles(lengths),
                           round(sum(lengths) / 1000, 2), round(N50.N50(lengths) / 1000, 2), N50.L50(lengths)])
        print >>args.outfile, table.get_string(sortby="N50(kb)")
    else:
        for file, seqs in sequences_per_files.items():
            lengths = [x.length for x in seqs]
            print >>args.outfile, " ".join(map(str, [
                file, len(seqs), scipy.average([x.gc for x in seqs]),
                scipy.average(lengths), sum(lengths), N50.N50(lengths), N50.L50(lengths)]))
Example 12: suppressFire_callback
def suppressFire_callback(channel):
    x, y = float('nan'), float('nan')
    while np.isnan(x) or np.isnan(y):
        FireImage = abs(average(ImQueue[-1], -1) - average(ImQueue[0], -1))
        x, y = findFire(FireImage)
    fo = '-'.join(map(str, datetime.now().timetuple()[:6]))
    misc.imsave('fire' + fo + '.bmp', FireImage)
    xdivtmp, ydivtmp = xdivs[:], ydivs[:]
    bisect.insort(xdivtmp, x)  # Insert the fire coordinates into the protection grid
    bisect.insort(ydivtmp, y)
    xzone = xdivtmp.index(x) - 1  # Find the grid coordinates
    yzone = ydivtmp.index(y) - 1
    del xdivtmp, ydivtmp
    firePorts((xzone, yzone))
    print 'Fire at (%.2f,%.2f) in zone %d,%d\nFiring ports %d & %d' % ((x, y, xzone, yzone) + fireDict[(xzone, yzone)])
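average(frame, -1) averages over the last axis, collapsing an (h, w, 3) RGB frame to an (h, w) grayscale image; the absolute difference of the newest and oldest queued frames then highlights what changed. A minimal standalone version (frames invented):

import numpy as np
from numpy import average   # the snippet's scipy import resolves to this function

old_frame = np.random.rand(48, 64, 3)   # fake RGB frames
new_frame = np.random.rand(48, 64, 3)
change_map = abs(average(new_frame, -1) - average(old_frame, -1))   # (48, 64)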
Example 13: center_on_cos
def center_on_cos(raw_quadratures, phi0=None, omega=None, snap_omega=False):
    mean = scipy.average(raw_quadratures, axis=1)
    no_angles, no_pulses = raw_quadratures.shape
    model = Model(cos_model)
    offset, amplitude, phi0, omega = guess_initial_parameters(mean, phi0, omega)
    model.set_param_hint("offset", value=offset)
    model.set_param_hint("amplitude", min=0., value=amplitude)
    model.set_param_hint("phi0", value=phi0)
    model.set_param_hint("omega", min=0., value=omega)
    model.make_params(verbose=False)
    steps = scipy.arange(no_angles)
    res = model.fit(mean, x=steps, verbose=False)
    omega_param = res.params["omega"]
    if snap_omega:
        appx_omega = float(omega_param)
        no_pi_intervals = int(round(pi / appx_omega))
        omega = pi / no_pi_intervals
        omega_param.set(omega, vary=False)
        res.fit(mean, x=steps, verbose=False)
    d_value, p_value_ks = kstest(res.residual, 'norm')
    mean_fit = res.eval(x=steps)
    offset = mean - mean_fit
    aligned_quadratures = raw_quadratures - offset[:, None]
    centered_quadratures = aligned_quadratures - float(res.params["offset"])
    return (centered_quadratures,
            float(omega_param), float(res.params["phi0"]), p_value_ks)
Example 14: print_all_stats
def print_all_stats(ctx, series):
    ftime = get_ftime(series)
    start = 0
    end = ctx.interval
    print('start-time, samples, min, avg, median, 90%, 95%, 99%, max')
    while (start < ftime):  # for each time interval
        end = ftime if ftime < end else end
        sample_arrays = [s.get_samples(start, end) for s in series]
        samplevalue_arrays = []
        for sample_array in sample_arrays:
            samplevalue_arrays.append(
                [sample.value for sample in sample_array])
        #print('samplevalue_arrays len: %d' % len(samplevalue_arrays))
        #print('samplevalue_arrays elements len: ' +
        #      str(map(lambda l: len(l), samplevalue_arrays)))
        # collapse list of lists of sample values into list of sample values
        samplevalues = reduce(array_collapser, samplevalue_arrays, [])
        #print('samplevalues: ' + str(sorted(samplevalues)))
        # compute all stats and print them
        myarray = scipy.fromiter(samplevalues, float)
        mymin = scipy.amin(myarray)
        myavg = scipy.average(myarray)
        mymedian = scipy.median(myarray)
        my90th = scipy.percentile(myarray, 90)
        my95th = scipy.percentile(myarray, 95)
        my99th = scipy.percentile(myarray, 99)
        mymax = scipy.amax(myarray)
        print('%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
            start, len(samplevalues),
            mymin, myavg, mymedian, my90th, my95th, my99th, mymax))
        # advance to next interval
        start += ctx.interval
        end += ctx.interval
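Every statistic here is an old scipy-namespace alias for a NumPy reduction: fromiter, amin, average, median, percentile, amax. The reporting core with the fio log plumbing replaced by a plain list of invented samples:

import numpy as np   # modern spelling of the scipy.* aliases above

samplevalues = [3.1, 4.7, 2.2, 9.8, 5.0]   # invented latency samples
arr = np.fromiter(samplevalues, float)
print('%f, %f, %f, %f, %f, %f, %f' % (
    np.amin(arr), np.average(arr), np.median(arr),
    np.percentile(arr, 90), np.percentile(arr, 95),
    np.percentile(arr, 99), np.amax(arr)))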
Example 15: clutch_performance
def clutch_performance(self):
    """Record against the field in the playoffs only"""
    playoffs = [el for el in self.weekly['OP'] if 'PLAYOFFS' in el]
    wins = scipy.zeros((len(self.realTeams), len(playoffs)))
    for (i, t) in enumerate(self.realTeams):
        for (j, w) in enumerate(playoffs):
            for t2 in [el for el in self.realTeams if el != t]:
                wins[i, j] += int(self.dataDic[t][w] > self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
                wins[i, j] += .5 * int(self.dataDic[t][w] == self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
    losses = 11. - wins
    return (scipy.average(wins, axis=1),
            scipy.std(wins, axis=1),
            scipy.average(losses, axis=1),
            scipy.std(losses, axis=1))