This article collects typical usage examples of the statistics.stdev method in Python. If you have been wondering what exactly statistics.stdev does, how to call it, or how it is used in real code, the curated examples below should help. You can also read further about the statistics module that provides the method.
The following shows 15 code examples of the statistics.stdev method, sorted by popularity by default.
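Before the examples, a quick reminder of what the method computes: statistics.stdev returns the sample standard deviation of its input and raises statistics.StatisticsError if given fewer than two values. A minimal sketch (the data values are arbitrary):

import statistics

data = [1.5, 2.5, 2.5, 2.75, 3.25, 4.75]
print(statistics.stdev(data))  # sample standard deviation: 1.0810874155219827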
Example 1: evaluate_and_update_max_score
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def evaluate_and_update_max_score(self, t, episodes):
    eval_stats = eval_performance(
        self.env, self.agent, self.n_steps, self.n_episodes,
        max_episode_len=self.max_episode_len,
        logger=self.logger)
    elapsed = time.time() - self.start_time
    custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
    mean = eval_stats['mean']
    values = (t,
              episodes,
              elapsed,
              mean,
              eval_stats['median'],
              eval_stats['stdev'],
              eval_stats['max'],
              eval_stats['min']) + custom_values
    record_stats(self.outdir, values)
    if mean > self.max_score:
        self.logger.info('The best score is updated %s -> %s',
                         self.max_score, mean)
        self.max_score = mean
        if self.save_best_so_far_agent:
            save_agent(self.agent, "best", self.outdir, self.logger)
    return mean
Example 2: calc_disagreement
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def calc_disagreement(evaluations):
    """Return the disagreement level for evaluations, or None if no evaluations.

    Calculated as the max disagreement of (1) N/A and non-N/A responses and
    (2) non-N/A evaluations.

    :param evaluations: an iterable of Eval
    """
    if evaluations:
        na_it, rated_it = partition(lambda x: x is not Eval.not_applicable, evaluations)
        na_votes = list(na_it)
        rated_votes = list(rated_it)
        # Here we use the sample standard deviation because we consider the
        # evaluations to be a sample of all the evaluations that could be given.
        # It is not clear what the best way is to make the N/A disagreement
        # comparable to the rated-evaluation disagreement calculation.
        na_disagreement = (
            statistics.stdev(([0] * len(na_votes)) + ([1] * len(rated_votes)))
            if len(na_votes) + len(rated_votes) > 1
            else 0.0)
        rated_disagreement = (
            statistics.stdev([v.value for v in rated_votes])
            if len(rated_votes) > 1
            else 0.0)
        return max(na_disagreement, rated_disagreement)
    else:
        return None
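To see why the 0/1 construction above measures N/A disagreement, note that the sample standard deviation of a binary vector peaks when the two kinds of votes are evenly split. A minimal sketch, independent of the Eval and partition helpers used above:

import statistics

# 0 marks an N/A vote, 1 a rated vote; disagreement is highest at an even split.
print(statistics.stdev([0, 0, 0, 1, 1, 1]))  # ~0.548 (even split)
print(statistics.stdev([0, 1, 1, 1, 1, 1]))  # ~0.408 (lopsided split)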
Example 3: _random
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def _random(self, env, min_mean, max_mean, min_stdev, max_stdev,
            test_count=3, sample_count=300):
    mmin, smin, mmax, smax = 100, 100, 0, 0
    for i in range(test_count):
        values = [env(0) for _ in range(sample_count)]
        mean, stdev = statistics.mean(values), statistics.stdev(values)
        mmax = max(mmax, mean)
        mmin = min(mmin, mean)
        smax = max(smax, stdev)
        smin = min(smin, stdev)
    self.assertGreater(mmin, min_mean)
    self.assertLess(mmax, max_mean)
    self.assertGreater(smin, min_stdev)
    self.assertLess(smax, max_stdev)
    return mmin, mmax, smin, smax
Example 4: eval_performance
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def eval_performance(process_idx, make_env, model, phi, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        model.reset_state()
        env = make_env(process_idx, test=True)
        obs = env.reset()
        done = False
        test_r = 0
        while not done:
            s = chainer.Variable(np.expand_dims(phi(obs), 0))
            pout, _ = model.pi_and_v(s)
            a = pout.action_indices[0]
            obs, r, done, info = env.step(a)
            test_r += r
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev
Example 5: eval_performance
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def eval_performance(rom, p_func, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        env = ale.ALE(rom, treat_life_lost_as_terminal=False)
        test_r = 0
        while not env.is_terminal:
            s = chainer.Variable(np.expand_dims(dqn_phi(env.state), 0))
            pout = p_func(s)
            a = pout.action_indices[0]
            test_r += env.receive_action(a)
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev
Example 6: to_dict
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def to_dict(self):
    return {
        "LB_min": round(self.latency.min, 2),
        "LB_avg": round(self.latency.avg, 2),
        "LB_max": round(self.latency.max, 2),
        "L_stdev": round(self.latency.stdev, 2),
        "L_stdev[%]": round(self.latency.stdev / self.latency.avg * 100, 2),
        # ---
        "TB_min": round(self.throughput.min, 2),
        "TB_avg": round(self.throughput.avg, 2),
        "TB_max": round(self.throughput.max, 2),
        "T_stdev": round(self.throughput.stdev, 2),
        "T_stdev[%]": round(self.throughput.stdev / self.throughput.avg * 100, 2),
        # ---
        "B_count": self.count,
        "app": self.name
    }
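The "stdev[%]" entries above express the standard deviation as a percentage of the mean, i.e. the coefficient of variation. A stand-alone sketch of that calculation, with made-up latency samples:

import statistics

latencies_ms = [12.1, 11.8, 12.6, 13.0, 11.9]  # hypothetical measurements
avg = statistics.mean(latencies_ms)
sd = statistics.stdev(latencies_ms)
print(round(sd / avg * 100, 2))  # stdev as a percentage of the mean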
Example 7: get_partitions_info_str
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def get_partitions_info_str(j):
    partitions = j['components']['partition_counts']['counts']
    partitions_info = {
        'Partitions': len(partitions),
        'Rows': sum(partitions),
        'Empty partitions': len([p for p in partitions if p == 0])
    }
    if partitions_info['Partitions'] > 1:
        partitions_info.update({
            'Min(rows/partition)': min(partitions),
            'Max(rows/partition)': max(partitions),
            'Median(rows/partition)': median(partitions),
            'Mean(rows/partition)': int(mean(partitions)),
            'StdDev(rows/partition)': int(stdev(partitions))
        })
    return "\n{}".format(IDENT).join(['{}: {}'.format(k, v) for k, v in partitions_info.items()])
Example 8: status
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def status(self):
    import collections, statistics
    from datetime import datetime
    return {
        "starttime": datetime.utcfromtimestamp(self.stats["start_time"]).strftime('%Y-%m-%d %H:%M:%S'),
        "pass": self.numPass(),
        "fail": self.numFails(),
        "failures": self.failures,
        "speed": self.testsPerSecond(),
        "mean": statistics.mean(self.traceLengths) if self.traceLengths else "NA",
        "stdev": statistics.stdev(self.traceLengths) if len(self.traceLengths) > 2 else "NA",
        "numZero": self.traceLengths.count(0) if self.traceLengths else "NA",
        "max": max(self.traceLengths) if self.traceLengths else "NA",
        "maxDepth": max(self.traceDepths) if self.traceDepths else "NA",
        "numConst": statistics.mean(self.traceConstantinopleOps) if self.traceConstantinopleOps else "NA",
        "activeSockets": self.stats["num_active_sockets"],
        "activeTests": self.stats["num_active_tests"],
    }
Example 9: evaluate_and_update_max_score
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def evaluate_and_update_max_score(self, t, episodes):
    eval_stats = eval_performance(
        self.env, self.agent, self.n_runs,
        max_episode_len=self.max_episode_len, explorer=self.explorer,
        logger=self.logger)
    elapsed = time.time() - self.start_time
    custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
    mean = eval_stats['mean']
    values = (t,
              episodes,
              elapsed,
              mean,
              eval_stats['median'],
              eval_stats['stdev'],
              eval_stats['max'],
              eval_stats['min']) + custom_values
    record_stats(self.outdir, values)
    if mean > self.max_score:
        self.logger.info('The best score is updated %s -> %s',
                         self.max_score, mean)
        self.max_score = mean
        if self.save_best_so_far_agent:
            save_agent(self.agent, t, self.outdir, self.logger)
    return mean
Example 10: within_stdev_percent
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def within_stdev_percent(values, x_stdev, percent_threshold, min_values=100):
    '''Return True if percent_threshold of values are within x_stdev of the mean.'''
    if len(values) < min_values:
        return True
    mean = statistics.mean(values)
    stdev = statistics.stdev(values)
    found = []
    for v in values:
        diff = abs(mean - v)
        if diff <= (stdev * x_stdev):
            found.append(v)
    percent_found = len(found) / len(values)
    result = percent_found > percent_threshold
    log.debug(f"Within {x_stdev} sigma check was {result}. {percent_found:.2%}/{percent_threshold:.2%} within stdev*{x_stdev}. "
              f"Mean: {mean:.2f}. Stdev: {stdev:.2f}. Acceptable range was: {mean - stdev * x_stdev:.2f} - {mean + stdev * x_stdev:.2f}")
    return result
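For intuition: roughly 95% of normally distributed samples fall within two standard deviations of the mean, so a call like the following should return True. A hypothetical usage sketch, assuming the module-level log used above is configured:

import random

values = [random.gauss(0, 1) for _ in range(1000)]
# Expect ~95% of samples within 2 sigma, comfortably above a 0.9 threshold.
print(within_stdev_percent(values, x_stdev=2, percent_threshold=0.9))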
Example 11: test_ci95
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def test_ci95(self):
    for length in [2, 3, 5, 10, 100, 1000]:
        numbers = [random.random() for _ in range(length)]
        ci = plot.ci95(numbers)
        mu = mean(numbers)
        std = stdev(numbers, xbar=mu)
        lower = mu - 2.0 * std / math.sqrt(length)
        upper = mu + 2.0 * std / math.sqrt(length)
        self.assertTrue(ci[0] - lower <= 1e-6)
        self.assertTrue(ci[1] - upper <= 1e-6)
    # Test the documentation example
    smoothed = []
    for replay in range(10):
        rewards = [random.random() for _ in range(100)]
        y_smoothed = plot.smooth(rewards)
        smoothed.append(y_smoothed)
    means = [mean(r) for r in zip(*smoothed)]
    confidences = [plot.ci95(r) for r in zip(*smoothed)]
    lower_bounds = [conf[0] for conf in confidences]
    upper_bounds = [conf[1] for conf in confidences]
    for lb, ub, m in zip(lower_bounds, upper_bounds, means):
        self.assertTrue(lb <= m)
        self.assertTrue(ub >= m)
Example 12: cohens
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def cohens(dataset, predictions, combined_data: CombinedData):
    xs = combined_data.get_explanatory_variables()
    ys = combined_data.get_explained_variables()
    x = xs[0]
    y = ys[0]
    cat = [k for k, v in x.metadata[categories].items()]
    data = []
    pred = None
    if predictions:
        pred = predictions[0][0]
    lhs = None
    rhs = None
    for c in cat:
        cat_data = dataset.select(y.metadata[name], where=[f"{x.metadata[name]} == '{c}'"])
        if c == pred.lhs.value:
            lhs = cat_data
        if c == pred.rhs.value:
            rhs = cat_data
        data.append(cat_data)
    cohens_d = (mean(lhs) - mean(rhs)) / (sqrt((stdev(lhs) ** 2 + stdev(rhs) ** 2) / 2))
    return cohens_d
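The final line above is Cohen's d with a pooled standard deviation: d = (mean_1 - mean_2) / sqrt((s_1^2 + s_2^2) / 2). A self-contained check of that formula on two made-up samples:

from math import sqrt
from statistics import mean, stdev

group_a = [5.1, 4.9, 5.4, 5.0, 5.2]  # hypothetical measurements
group_b = [4.2, 4.5, 4.1, 4.4, 4.3]
pooled_sd = sqrt((stdev(group_a) ** 2 + stdev(group_b) ** 2) / 2)
d = (mean(group_a) - mean(group_b)) / pooled_sd
print(round(d, 2))  # effect size in pooled-stdev units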
Example 13: _count_resource
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def _count_resource(self, attr_name, aggr_function=None) -> Tuple[float, float]:
    """
    Calculate resources from exec reports.

    :param attr_name: name of the attribute of execreport to count resource.
    :param aggr_function: function to process value of execreport.
    :return: (mean_value, standard_deviation)
    """
    if not aggr_function:
        aggr_function = lambda x: x  # noqa: E731
    values = [aggr_function(getattr(i, attr_name)) for i in self.exec_reports]
    mean_value = mean(values)
    std_dev = stdev(values) if len(values) > 1 else 0
    return (mean_value, std_dev)
Example 14: evaluate
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def evaluate():
    model.eval()  # Turn on the evaluation mode
    with torch.no_grad():
        # evaluating
        node_embeddings = model.ss.weight
        graph_embeddings = torch.spmm(graph_pool, node_embeddings).data.cpu().numpy()
        acc_10folds = []
        for fold_idx in range(10):
            train_idx, test_idx = separate_data_idx(graphs, fold_idx)
            train_graph_embeddings = graph_embeddings[train_idx]
            test_graph_embeddings = graph_embeddings[test_idx]
            train_labels = graph_labels[train_idx]
            test_labels = graph_labels[test_idx]
            cls = LogisticRegression(solver="liblinear", tol=0.001)
            cls.fit(train_graph_embeddings, train_labels)
            ACC = cls.score(test_graph_embeddings, test_labels)
            acc_10folds.append(ACC)
            print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)
        mean_10folds = statistics.mean(acc_10folds)
        std_10folds = statistics.stdev(acc_10folds)
        # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))
        return mean_10folds, std_10folds
Example 15: eval_performance
# Required import: import statistics [as alias]
# Or: from statistics import stdev [as alias]
def eval_performance(env, agent, n_steps, n_episodes, max_episode_len=None,
                     logger=None):
    """Run multiple evaluation episodes and return statistics.

    Args:
        env (Environment): Environment used for evaluation
        agent (Agent): Agent to evaluate.
        n_steps (int): Number of timesteps to evaluate for.
        n_episodes (int): Number of evaluation episodes.
        max_episode_len (int or None): If specified, episodes longer than this
            value will be truncated.
        logger (Logger or None): If specified, the given Logger object will be
            used for logging results. If not specified, the default logger of
            this module will be used.

    Returns:
        Dict of statistics.
    """
    assert (n_steps is None) != (n_episodes is None)
    if isinstance(env, chainerrl.env.VectorEnv):
        scores = batch_run_evaluation_episodes(
            env, agent, n_steps, n_episodes,
            max_episode_len=max_episode_len,
            logger=logger)
    else:
        scores = run_evaluation_episodes(
            env, agent, n_steps, n_episodes,
            max_episode_len=max_episode_len,
            logger=logger)
    stats = dict(
        episodes=len(scores),
        mean=statistics.mean(scores),
        median=statistics.median(scores),
        stdev=statistics.stdev(scores) if len(scores) >= 2 else 0.0,
        max=np.max(scores),
        min=np.min(scores))
    return stats
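The len(scores) >= 2 guard above matters because statistics.stdev raises StatisticsError when given fewer than two data points; a minimal demonstration:

import statistics

try:
    statistics.stdev([42])
except statistics.StatisticsError as err:
    print(err)  # requires at least two data points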