This article collects typical, real-world usage examples of Python's numpy.average function. If you have been wondering how numpy.average is used in practice and what actual calls to it look like, the hand-picked examples below should help.
Fifteen code examples of the average function are shown below, sorted by popularity by default.
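Before the collected examples, a minimal sketch of the function itself; the arrays below are made up for illustration:

import numpy as np

data = np.array([[1.0, 2.0],
                 [3.0, 4.0]])

print(np.average(data))            # grand mean: 2.5
print(np.average(data, axis=0))    # column means: [2. 3.]
print(np.average(data, axis=1))    # row means: [1.5 3.5]
print(np.average([1.0, 2.0, 3.0], weights=[3, 1, 1]))  # weighted mean: 1.6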
Example 1: mkhist

import gzip
import os
import numpy as np

def mkhist(fname, xmin, xmax, delta, ihist):
    # nbin, nblock, hist and nb_data are module-level globals in the original source
    xdata = []
    if os.path.exists(fname + ".gz"):
        fp = gzip.open(fname + ".gz", "rt")  # text mode so lines decode to str
    else:
        fp = open(fname)
    for line in fp:
        time, x = map(float, line.strip().split()[:2])
        xdata.append(x)
    x = np.array(xdata)
    xbins = [xmin + i * delta for i in range(nbin + 1)]
    hist[ihist], edges = np.histogram(x, bins=xbins, range=(xmin, xmax))
    nb_data[ihist] = int(np.sum(hist[ihist, :]))
    print("statistics for timeseries #", ihist)
    print("minx:", "%8.3f" % np.min(x), "maxx:", "%8.3f" % np.max(x))
    print("average x", "%8.3f" % np.average(x), "rms x", "%8.3f" % np.std(x))
    print("statistics for histogram #", ihist)
    print(int(np.sum(hist[ihist, :])), "points in the histogram")
    print("average x", "%8.3f" % (
        np.sum([hist[ihist, i] * (edges[i] + edges[i + 1]) / 2 for i in range(nbin)]) / np.sum(hist[ihist])
    ))
    print()
    # Block-average estimate of the variance of the mean. Integer division keeps
    # the slice bounds ints; the original sliced x[k : ...], which looks like a
    # typo for the k-th block.
    nb = len(x) // nblock
    var = (
        1.0
        / (nblock * (nblock - 1))
        * np.sum([np.average((x[k * nb : (k + 1) * nb] - np.average(x)) ** 2) for k in range(nblock)])
    )
    return var
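The closing lines of mkhist estimate the variance of the mean by block averaging: cut the series into nblock chunks and compare the chunk statistics against the global mean. For reference, a textbook version of that estimator, assuming equal-sized blocks (the function name is mine):

import numpy as np

def block_variance_of_mean(x, nblock):
    # Split x into nblock equal blocks (discarding any remainder), then
    # estimate Var(mean of x) from the spread of the block means.
    nb = len(x) // nblock
    block_means = np.average(x[:nb * nblock].reshape(nblock, nb), axis=1)
    return np.sum((block_means - np.average(x)) ** 2) / (nblock * (nblock - 1))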
Example 2: tabular_td_n_online

import numpy as np

def tabular_td_n_online(states, actions, generator_class, generator_args, n, alpha):
    gamma = 1
    rms_error = np.zeros(100)
    for i in range(100):
        values = {state: 0 for state in states}
        policies = {state: {action: 1.0 / len(actions) for action in actions} for state in states}
        errors = []
        for j in range(10):
            episode_states = []
            rewards = []
            generator = generator_class(*generator_args)
            current_state = generator.state
            while True:
                action, next_state, reward = generator.step(policies, current_state)
                episode_states.append(current_state)
                rewards.append(reward)
                if next_state is None:
                    break
                current_state = next_state
            # online n-step returns (the bootstrap term is left undiscounted,
            # which is only correct because gamma == 1 here)
            for t, state in enumerate(episode_states):
                returns = 0
                for t_s in range(n):
                    if t + t_s < len(episode_states):
                        returns += gamma ** t_s * rewards[t + t_s]
                if t + n < len(episode_states):
                    last_episode_value = values[episode_states[t + n]]
                else:
                    last_episode_value = 0
                values[state] += alpha * (returns + last_episode_value - values[state])
            errors.append(np.average([(values[state] - (state + 1) / 10.0 + 1) ** 2 for state in states]) ** 0.5)
        rms_error[i] = np.average(errors)
    return np.average(rms_error)
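Both this example and Example 15 below assume a generator object exposing a state attribute and a step(policies, state) method returning (action, next_state, reward), with next_state set to None on termination. The error term (values[state] - (state + 1)/10.0 + 1) matches the 19-state random walk, whose true values are (state + 1)/10 - 1. A hypothetical generator along those lines, for context only:

import random

class RandomWalkGenerator:
    # Hypothetical 19-state random walk matching the interface used above.
    def __init__(self, n_states=19):
        self.n_states = n_states
        self.state = n_states // 2        # start in the middle

    def step(self, policies, current_state):
        move = random.choice([-1, 1])
        next_state = current_state + move
        if next_state < 0:
            return move, None, -1         # terminate with reward -1 on the left
        if next_state >= self.n_states:
            return move, None, 1          # terminate with reward +1 on the right
        return move, next_state, 0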
Example 3: testEncodeUnrelatedAreas

def testEncodeUnrelatedAreas(self):
    """
    assert unrelated areas don't share bits
    (outside of chance collisions)
    """
    # overlapsForUnrelatedAreas is a helper defined elsewhere in the test module
    avgThreshold = 0.3

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 5)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.17
    overlaps = overlapsForUnrelatedAreas(999, 25, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.25
    overlaps = overlapsForUnrelatedAreas(499, 13, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)
Example 4: average_data

import math
import numpy

def average_data(data):
    """
    Find mean and std. deviation of data returned by ``simulate``.
    """
    numnodes = data['nodes']
    its = data['its']
    its_mean = numpy.average(its)
    its_std = math.sqrt(numpy.var(its))
    dead = data['dead']
    dead_mean = 100.0 * numpy.average(dead) / numnodes
    dead_std = 100.0 * math.sqrt(numpy.var(dead)) / numnodes
    immune = data['immune']
    immune_mean = 100.0 * numpy.average(immune) / numnodes
    immune_std = 100.0 * math.sqrt(numpy.var(immune)) / numnodes
    max_contam = data['max_contam']
    max_contam_mean = 100.0 * numpy.average(max_contam) / numnodes
    max_contam_std = 100.0 * math.sqrt(numpy.var(max_contam)) / numnodes
    normal = data['normal']
    normal_mean = 100.0 * numpy.average(normal) / numnodes
    normal_std = 100.0 * math.sqrt(numpy.var(normal)) / numnodes
    return {'its': (its_mean, its_std),
            'nodes': numnodes,
            'dead': (dead_mean, dead_std),
            'immune': (immune_mean, immune_std),
            'max_contam': (max_contam_mean, max_contam_std),
            'normal': (normal_mean, normal_std)}
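A side note on the pattern above: math.sqrt(numpy.var(values)) is exactly numpy.std(values), so each mean/std pair could come from one small helper (the helper below is mine, not part of the original):

import numpy

def mean_and_pct_std(values, numnodes):
    # (mean, std) of values, both scaled to percent of the node count
    return (100.0 * numpy.average(values) / numnodes,
            100.0 * numpy.std(values) / numnodes)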
Example 5: get_reference_pt

def get_reference_pt(self):
    # Reference point for a compound object is the average of all
    # its constituents' reference points
    points = numpy.array([obj.get_reference_pt() for obj in self.objects])
    t_ = points.T
    x, y = numpy.average(t_[0]), numpy.average(t_[1])
    return (x, y)
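The transpose is avoidable here: averaging an (N, 2) array of points along axis 0 yields the same centroid in one call. A quick check of the equivalence:

import numpy

points = numpy.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]])
x, y = numpy.average(points.T[0]), numpy.average(points.T[1])  # (1.0, 1.0)
cx, cy = numpy.average(points, axis=0)                         # same centroid
assert (x, y) == (cx, cy)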
Example 6: assign_nearest_nbh

import numpy as np
import scipy as sp
import scipy.spatial.distance  # makes sp.spatial.distance resolvable below

def assign_nearest_nbh(self, query_doc):
    block_id, query_words, doc_words = query_doc
    query_vector = self.vectorize(query_words)
    doc_vector = self.vectorize(doc_words)
    #distance = emd(query_vector, doc_vector, self.distance_matrix)
    #return block_id, distance
    doc_indices = np.nonzero(doc_vector)[0]
    query_indices = np.nonzero(query_vector)[0]
    query_weights = [self.word_level_idf.get(q_i, 0) for q_i in query_indices]
    doc_weights = [self.word_level_idf.get(d_i, 0) for d_i in doc_indices]
    # idf-weighted centroids of the document and query embeddings
    doc_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in doc_indices],
                              axis=0, weights=doc_weights)
    query_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in query_indices],
                                axis=0, weights=query_weights)
    # sklearn's euclidean distances may not form an exactly symmetric matrix,
    # so take the average of the two entries
    dist_arr = np.array([[(self.distance_matrix[w_i, q_j] + self.distance_matrix[q_j, w_i]) / 2
                          for w_i in doc_indices] for q_j in query_indices])
    label_assignment = np.argmin(dist_arr, axis=1)
    label_assignment = [(index, l) for index, l in enumerate(label_assignment)]
    distances = [dist_arr[(i, e)] * self.word_level_idf.get(query_indices[i], 1) for i, e in label_assignment]
    distance = (1 - self.alpha) * np.sum(distances) + \
               self.alpha * sp.spatial.distance.cosine(doc_centroid, query_centroid)
    return block_id, distance
Example 7: calc_precision_recall_fmeasure

import numpy as np
from sklearn.metrics import (average_precision_score, precision_recall_curve,
                             precision_recall_fscore_support)

def calc_precision_recall_fmeasure(self):
    """ Computes Precision, Recall, F-measure and Support """
    # precision, recall, F-measure and support for each class at the given thresholds
    # (prediction_to_binary is a helper defined elsewhere in the original module)
    for threshold in [10, 30, 50]:
        result = precision_recall_fscore_support(self.y_true, prediction_to_binary(self.y_pred, threshold))
        self.scores['Precision ' + str(threshold) + '%'] = result[0]
        self.scores['Recall ' + str(threshold) + '%'] = result[1]
        self.scores['F-score ' + str(threshold) + '%'] = result[2]
        self.scores['Support'] = result[3]
    # Compute precision-recall pairs for different probability thresholds
    self.precision, self.recall, self.thresholds = precision_recall_curve(self.y_true, self.y_pred)
    # Compute the area under the precision-recall curve (average precision from prediction scores)
    self.scores['Precision-Recall AUC'] = average_precision_score(self.y_true, self.y_pred)
    # Average precision weighted by support (the number of true instances for each label)
    self.scores['Weighted Precision'] = average_precision_score(self.y_true, self.y_pred, average='weighted')
    self.scores['Average Recall'] = np.average(self.recall)
    self.scores['Average Threshold'] = np.average(self.thresholds)
    return
Example 8: direction_var

def direction_var(values, weights):
    import numpy
    from scitbx import matrix
    weights = numpy.array(weights)
    valx = numpy.array([x for x, y, z in values])
    valy = numpy.array([y for x, y, z in values])
    valz = numpy.array([z for x, y, z in values])
    # Calculate average x, y, z
    avrx = numpy.average(valx, weights=weights)
    avry = numpy.average(valy, weights=weights)
    avrz = numpy.average(valz, weights=weights)
    # Calculate mean direction vector
    s1m = matrix.col((avrx, avry, avrz)).normalize()
    # Calculate angles between each vector and the mean direction
    angles = []
    for s in values:
        angles.append(s1m.angle(s))
    # Calculate weighted variance of the angles
    angles = numpy.array(angles)
    var = numpy.dot(weights, angles ** 2) / numpy.sum(weights)
    return var
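The last line is itself a weighted mean, so it can be written with the function this page covers: numpy.average(angles ** 2, weights=weights) computes the same value. A quick check with made-up numbers:

import numpy

angles = numpy.array([0.1, 0.2, 0.3])
weights = numpy.array([1.0, 2.0, 1.0])
a = numpy.dot(weights, angles ** 2) / numpy.sum(weights)
b = numpy.average(angles ** 2, weights=weights)
assert abs(a - b) < 1e-12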
Example 9: kMeans

import numpy as np

def kMeans(k, centres, data, error, return_cost=False):
    # centres (kx2)
    # data (Nx2)
    # error: epsilon for the convergence test
    m = np.array(centres)
    while True:
        sets = [[] for i in range(k)]
        for point in data:
            # Squared distance from this point to every centre
            dist_sq = np.sum((point - m) ** 2, axis=1)
            # Assign the point to its nearest centre
            sets[np.argmin(dist_sq)].append(point)
        # Copy explicitly: slicing a NumPy array yields a view, not a copy
        temp_m = np.copy(m)
        for i in range(len(sets)):
            if sets[i] != []:
                temp_m[i] = np.mean(sets[i], axis=0)  # centroid of cluster i
        # abs() so that large negative shifts also count as changes
        changes = np.abs(temp_m - m)
        m = temp_m
        if (changes < error).all():
            break
    if return_cost:
        # Average distance from each centre to the points assigned to it
        costs = []
        for i in range(len(sets)):
            costs.append(np.average(np.sqrt(np.sum((m[i] - sets[i]) ** 2, axis=1))))
        cost = np.average(costs)
        return m, cost
    else:
        return m
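A minimal driver for the function above; the two Gaussian blobs and starting centres are made up:

import numpy as np

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0.0, 0.5, (50, 2)),
                  rng.normal(5.0, 0.5, (50, 2))])
centres = np.array([[0.0, 0.0], [1.0, 1.0]])
m, cost = kMeans(2, centres, data, error=1e-6, return_cost=True)
print(m)      # final centres, near (0, 0) and (5, 5)
print(cost)   # average within-cluster distance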
Example 10: linearRegression

import numpy as np
import pandas as pd
import statsmodels.formula.api as sm  # the original aliases the formula API as sm
from sklearn.metrics import r2_score

def linearRegression(segmentedValues):
    print("Linear regression")
    linRegress = dict()
    for key in segmentedValues.keys():
        x = [v[0] for v in segmentedValues[key]]
        y = [v[1] for v in segmentedValues[key]]
        mean = [float(np.average(x)), float(np.average(y))]
        valuesDict = dict()
        valuesDict['x'] = x
        valuesDict['y'] = y
        valuesFrame = pd.DataFrame(valuesDict)
        try:
            rlmRes = sm.rlm(formula='y ~ x', data=valuesFrame).fit()
        except ZeroDivisionError:
            # Cause unclear -- appears to be a statsmodels issue; return None
            print("divide by zero :( ")
            return None
        # Calculate r2_score (unfortunately, rlm does not give this to us)
        x = np.array(x)
        y = np.array(y)
        # Get the predicted values of y
        y_pred = x * rlmRes.params.x + rlmRes.params.Intercept
        score = r2_score(y, y_pred)
        # These should both be positive -- take abs anyway
        slopeConfInterval = abs(float(rlmRes.params.x) - float(rlmRes.conf_int(.005)[0].x))
        intConfInterval = abs(float(rlmRes.params.Intercept) - float(rlmRes.conf_int(.005)[0].Intercept))
        # Slope, intercept, R^2, number of values, confidence intervals, mean of cluster
        linRegress[key] = [rlmRes.params.x, rlmRes.params.Intercept, score, len(x),
                           [slopeConfInterval, intConfInterval], mean]
        print("Key: " + str(key) + " Slope: " + str(rlmRes.params.x) +
              " Intercept: " + str(rlmRes.params.Intercept) +
              " R2 Score: " + str(score) + " Num vals: " + str(len(x)) +
              " confidence: " + str(slopeConfInterval) + ", " + str(intConfInterval) +
              " mean: " + str(mean))
    return linRegress
Example 11: randomized_auto_const_bg

from random import randint
import numpy as np

def randomized_auto_const_bg(self, amount):
    """ Automatically determine background. Only consider a randomly
    chosen subset of the image.

    Parameters
    ----------
    amount : int
        Size of random sample that is considered for calculation of
        the background.
    """
    cols = [randint(0, self.shape[1] - 1) for _ in range(amount)]
    # pylint: disable=E1101,E1103
    # to_signed is a helper from the surrounding module
    data = self.astype(to_signed(self.dtype))
    # Subtract average value from every frequency channel.
    tmp = (data - np.average(self, 1).reshape(self.shape[0], 1))
    # Get standard deviation at every point of time.
    # Need to convert because otherwise this class's __getitem__
    # is used, which assumes two-dimensionality.
    tmp = tmp[:, cols]
    sdevs = np.asarray(np.std(tmp, 0))
    # Get indices of the columns with the lowest standard deviation.
    cand = sorted(range(amount), key=lambda y: sdevs[y])
    # Only consider the best 5 %.
    realcand = cand[:max(1, int(0.05 * len(cand)))]
    # Average the best 5 %.
    bg = np.average(self[:, [cols[r] for r in realcand]], 1)
    return bg.reshape(self.shape[0], 1)
Example 12: genstats

import numpy as np

def genstats():
    # Returns a list of dictionaries, each holding the averages for one
    # (reporter, util) pair, e.g.:
    # {
    #     "reporter": ..., "util": ...,
    #     "time_stddev": ..., "time_avg": ...,
    #     "vertices_avg": ..., "edges_avg": ...,
    # }
    global db  # db is a module-level results dict
    averages = []
    for (reporter, util), value in db.items():
        # Drop False/None entries; list() is needed because filter() is lazy in Python 3
        value = {k: list(filter(lambda x: not (x is False or x is None), v))
                 for k, v in value.items()}
        averages.append(
            {
                "reporter": reporter,
                "util": util,
                "time_stddev": np.std(value["time"], dtype=np.float64),
                "time_avg": np.average(value["time"]),
                "vertices_avg": np.average(value["vertices"]) if reporter != "none" else 0,
                # the original misplaced the closing parenthesis on the next line,
                # averaging a scalar 0 whenever reporter == "none"
                "edges_avg": np.average(value["edges"]) if reporter != "none" else 0,
                "timedout_count": sum(value["timedout"]),
            })
    return averages
Example 13: processLanes

import numpy as np

def processLanes(lane_ids_as_string, row, junction, isIncomingLane):
    # Append empty values if there are no lanes at this junction
    if lane_ids_as_string == "":
        appendEmptyValuesToRow(row)
        return
    edge_prios = []
    edge_types = []
    lane_lengths = []
    lane_speeds = []
    lane_id_list = lane_ids_as_string.split(" ")
    for l_id in lane_id_list:
        try:
            # lane_table maps lane ids to XML elements (defined elsewhere)
            lane = lane_table[l_id]
            edge = lane.getparent()
            if isIncomingLane:
                edge_types.append(edge.get("type"))
                edge_prios.append(float(edge.get("priority")))
            lane_lengths.append(float(lane.get("length")))
            lane_speeds.append(float(lane.get("speed")))
        except Exception:
            print("error with lane_ids: '{}', l_id:'{}' junction_id:'{}'".format(
                lane_ids_as_string, l_id, row[0]))
            raise
    row.append(np.average(lane_speeds))
    row.append(np.std(lane_speeds))
    row.append(np.average(lane_lengths))
    row.append(np.std(lane_lengths))
    if isIncomingLane:
        row.append(edge_types)
        row.append(np.average(edge_prios))
    else:
        row.append(None)
        row.append(-1)
    row.append(len(lane_id_list))
Example 14: AverageBar

import fnmatch
import os
import numpy as np

def AverageBar(indir='/Volumes/Documents/colbrydi/Documents/DirksWork/chamview/ChamB/'):
    tot = 0
    R = np.array([0, 0, 0])
    G = np.array([0, 0, 0])
    B = np.array([0, 0, 0])
    for root, dirs, filenames in os.walk(indir):
        filenames.sort()
        for f in filenames:
            if fnmatch.fnmatch(f, '0*.jpeg'):
                im = readim(os.path.join(root, f))  # readim: image loader defined elsewhere
                sz = im.shape[0]
                # Average each colour channel across the image columns
                r = np.zeros((sz, 1))
                g = np.zeros((sz, 1))
                b = np.zeros((sz, 1))
                r[:, 0] = np.average(im[:, :, 0], 1)
                g[:, 0] = np.average(im[:, :, 1], 1)
                b[:, 0] = np.average(im[:, :, 2], 1)
                if tot == 0:
                    R = r
                    G = g
                    B = b
                else:
                    R = np.append(R, r, axis=1)
                    G = np.append(G, g, axis=1)
                    B = np.append(B, b, axis=1)
                tot = tot + 1
    if tot == 0:
        print('ERROR - No files found in ' + indir)
        return ''
    im3 = np.zeros((R.shape[0], R.shape[1], 3))
    im3[:, :, 0] = R
    im3[:, :, 1] = G
    im3[:, :, 2] = B
    return im3
Example 15: tabular_td_lambda_offline

import numpy as np

def tabular_td_lambda_offline(states, actions, generator_class, generator_args, l, alpha):
    gamma = 1
    rms_error = np.zeros(100)
    for i in range(100):
        values = {state: 0 for state in states}
        policies = {state: {action: 1.0 / len(actions) for action in actions} for state in states}
        errors = []
        for j in range(10):
            episode_states = []
            rewards = []
            generator = generator_class(*generator_args)
            current_state = generator.state
            while True:
                action, next_state, reward = generator.step(policies, current_state)
                episode_states.append(current_state)
                rewards.append(reward)
                if next_state is None:
                    break
                current_state = next_state
            # offline TD(lambda) updates with accumulating eligibility traces
            new_values = {state: values[state] for state in states}
            z = {state: 0 for state in states}
            for t, state in enumerate(episode_states):
                z[state] += 1
                if t < len(episode_states) - 1:
                    delta = rewards[t] + gamma * values[episode_states[t + 1]] - values[state]
                else:
                    delta = rewards[t] - values[state]
                for s in states:  # renamed from 'state' to avoid shadowing the loop variable
                    new_values[s] += alpha * delta * z[s]
                    z[s] *= (gamma * l)
            values = new_values
            errors.append(np.average([(values[state] - (state + 1) / 10.0 + 1) ** 2 for state in states]) ** 0.5)
        rms_error[i] = np.average(errors)
    return np.average(rms_error)