本文整理汇总了Python中statistics.pstdev函数的典型用法代码示例。如果您正苦于以下问题:Python pstdev函数的具体用法?Python pstdev怎么用?Python pstdev使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pstdev函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _addOne
def _addOne(self, _data_struct: DataStruct):
    """Consume one incoming row and append a volatility band row to self.data.

    Warm-up (history not yet longer than `period`): emit a row of Nones and,
    exactly when the period is reached, seed `prev_std`.  Steady state:
    rescale the adaptive window `dynamic_n` by the ratio of the current
    rolling population stdev to the previous one, clamp it to
    [min_n, max_n], and emit mean +/- rate * stdev bands over that window.
    """
    row_index = _data_struct.index()[0]
    self.buf.append(_data_struct.getColumn(self.use_key)[0])

    if len(self.data) <= self.period:
        # Not enough history yet: remember the stdev the first time the
        # data spans a full period, then emit an empty band row.
        if len(self.data) == self.period:
            self.prev_std = statistics.pstdev(self.buf)
        self.data.addRow([row_index, None, None, None], self.keys)
        return

    window_std = statistics.pstdev(self.buf[-self.period:])
    # Grow/shrink the adaptive window with volatility, clamped to bounds.
    self.dynamic_n = min(
        self.max_n,
        max(self.min_n, self.dynamic_n * window_std / self.prev_std))
    span = int(round(self.dynamic_n))
    window = self.buf[-span:]
    center = statistics.mean(window)
    spread = statistics.pstdev(window)
    self.data.addRow(
        [row_index, center + self.rate * spread,
         center, center - self.rate * spread],
        self.keys)
    self.prev_std = window_std
示例2: output
def output(nums, filesize, filetypec, filetypes, filetime):
    """Print a directory-scan summary: entry counts, size statistics,
    modification-time statistics, and top-5 file-type breakdowns by count
    and by size.

    nums      -- counts aligned with (files, directories, links,
                 mount points, errors)
    filesize  -- list of file sizes in bytes
    filetypec -- Counter of file types by number of files
    filetypes -- Counter of file types by total size
    filetime  -- list of modification timestamps (POSIX seconds)
    """
    def stamp(ts):
        # Render a POSIX timestamp in the local timezone.
        return time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(ts))

    def breakdown(counter):
        # Top five entries as percentages, plus an aggregate "Others" bucket.
        top = counter.most_common(5)
        total = sum(counter.values())
        print('\n'.join(' % 6s: %.2f%%' % (k or '<N/A>', v / total * 100) for k, v in top))
        print('  Others: %.2f%%' % ((total - sum(v for k, v in top)) / total * 100))

    fsum = sum(filesize)
    labels = ('files', 'directories', 'links', 'mount points', 'errors')
    counted = ', '.join('%d %s' % pair for pair in filter(_ig1, zip(nums, labels)))
    print(('%s. %s' % (counted, '%s data.' % sizeof_fmt(fsum))).lstrip('. '))
    if not filesize:
        return
    if len(filesize) > 2:
        favg = fsum / len(filesize)
        stdev = statistics.pstdev(filesize, favg)
        print('File size: max %s, mean %s, median %s, stdev %s' % (
            sizeof_fmt(max(filesize)), sizeof_fmt(favg),
            sizeof_fmt(statistics.median(filesize)), sizeof_fmt(stdev)))
        print(' µ+σ (68%): ' + sizeof_fmt(favg + stdev) +
              ', µ+2σ (95%): ' + sizeof_fmt(favg + stdev * 2))
        print('Modification time:')
        print('  min ' + stamp(min(filetime)))
        print('  max ' + stamp(max(filetime)))
        tavg = statistics.mean(filetime)
        print('  mean ' + stamp(tavg))
        print('  median ' + stamp(statistics.median(filetime)))
        print('  stdev ' + timestring(statistics.pstdev(filetime, tavg)))
    print('File type by number:')
    breakdown(filetypec)
    print('File type by size:')
    breakdown(filetypes)
示例3: insertNormalizedModelInDB
def insertNormalizedModelInDB(idUser, idString, keystroke, isTest = False):
    """Insert a keystroke row for (idUser, idString), then fill it with the
    normalized press/latency statistics.

    The row is created first, then the mean and population stdev of the
    key-press durations and inter-key latencies extracted from `keystroke`
    are computed and written back to the same row.  `isTest` routes the SQL
    to the test tables via replaceIfIsTest.
    """
    insert_sql = replaceIfIsTest("INSERT INTO `mdl_user#isTest_keystroke_normalized`(`id_user`, `id_string`) VALUES (%s, %s)", isTest)
    update_sql = replaceIfIsTest("UPDATE `mdl_user#isTest_keystroke_normalized` ", isTest)
    executeSqlInDB(insert_sql, (idUser, idString))

    # Extract the timing dimensions from the raw keystroke data.
    extractor = KeystrokeDimensionsExtractor(keystroke)
    press_times = extractor.getTimePressed()
    latencies = extractor.getLatencies()

    # Note: 'latency_avarage' [sic] matches the existing DB column name.
    model = {
        'id_user': idUser,
        'id_string': idString,
        'press_average': statistics.mean(press_times),
        'latency_avarage': statistics.mean(latencies),
        'press_standard_deviation': statistics.pstdev(press_times),
        'latency_standard_deviation': statistics.pstdev(latencies),
    }
    # Complete the UPDATE built above and run it against the new row.
    update_sql += (" SET `press_average`= %(press_average)s,`latency_avarage`= %(latency_avarage)s, `press_standard_deviation`= %(press_standard_deviation)s,`latency_standard_deviation`= %(latency_standard_deviation)s "
                   " WHERE `id_user`= %(id_user)s AND `id_string`= %(id_string)s")
    executeSqlInDB(update_sql, model)
示例4: nutritionfacts
def nutritionfacts(self):
    """Render the legend ("nutrition facts") panel for every labelled series.

    For each entry of self.points that carries a label, draws three sample
    dots in the series colour plus the label, the sample count n, and the
    per-axis mean/population-stdev (an em dash for an empty series), then
    hands the assembled SVG fragment to self._frostbyte.
    """
    svg = ""
    panel_x = self.width * self.bins + 100 - 90
    panel_y = (self.graphheight + 700) // 2 + 25 - self.graphheight
    labelled = [series for series in self.points if series[2]]
    for row, series in enumerate(labelled):
        pts, color, label = series[0], series[1], series[2]
        mu = "μ = —"
        sigma = "σ = —"
        if pts:
            xs = [pt[0] for pt in pts]
            ys = [pt[1] for pt in pts]
            x_mean = stat.mean(xs)
            y_mean = stat.mean(ys)
            # Population stdev anchored on the precomputed per-axis means.
            x_sigma = stat.pstdev(xs, x_mean)
            y_sigma = stat.pstdev(ys, y_mean)
            mu = "μ = (" + str(round(x_mean, 4)) + ", " + str(round(y_mean, 4)) + ")"
            sigma = "σ = (" + str(round(x_sigma, 4)) + ", " + str(round(y_sigma, 4)) + ")"
        line_y = panel_y + row * 65
        svg += circle(panel_x - 4, line_y + 3, 2, color)
        svg += circle(panel_x + 4, line_y + 4, 2, color)
        svg += circle(panel_x - 1, line_y + 10, 2, color)
        svg += text(panel_x + 20, line_y + 10, label, align=-1, color=color, font="Neue Frutiger 65")
        svg += text(panel_x + 28, line_y + 25, "n = " + str(len(pts)), align=-1, color=color)
        svg += text(panel_x + 28, line_y + 40, mu, align=-1, color=color)
        svg += text(panel_x + 28, line_y + 55, sigma, align=-1, color=color)
    self._frostbyte(svg)
示例5: sd_extreme_ex
def sd_extreme_ex(a, b, c, d, e, steps=11, step=0.1):
    """Grid-search how far the population stdev of (a, b, c, d, e) can move
    when each value is independently lowered by k*step for k in range(steps).

    Prints max_sd - normal_sd and min_sd - normal_sd (preserving the original
    behaviour) and additionally returns them as a tuple.

    a..e  -- the five sample values
    steps -- number of grid points per variable (default 11, as originally
             hard-coded)
    step  -- grid spacing (default 0.1, as originally hard-coded)
    """
    from itertools import product

    normal_sd = pstdev([a, b, c, d, e])
    max_sd = 0
    min_sd = 1000000000
    # itertools.product replaces the original five nested for-loops.
    for i, j, k, p, q in product(range(steps), repeat=5):
        sd = pstdev([a - i * step, b - j * step, c - k * step,
                     d - p * step, e - q * step])
        if sd >= max_sd:
            max_sd = sd
        if sd <= min_sd:
            min_sd = sd
    print(max_sd - normal_sd)
    print(min_sd - normal_sd)
    return max_sd - normal_sd, min_sd - normal_sd
示例6: normalize_geojson
def normalize_geojson(js_data):
    """Rescale every entry's average polarity/subjectivity into [0, 1].

    Each attribute is min-max normalised over the whole collection, centred,
    stretched by 1.25, shifted by half the population stdev, re-centred at
    0.5, and finally clamped to [0, 1].  Mutates the entries in place and
    returns the same list.
    """
    pols = [js.get_avg_polarity() for js in js_data]
    subs = [js.get_avg_subjectivity() for js in js_data]
    pol_lo, pol_span = min(pols), max(pols) - min(pols)
    sub_lo, sub_span = min(subs), max(subs) - min(subs)
    pol_shift = stats.pstdev(pols) / 2
    sub_shift = stats.pstdev(subs) / 2

    def squeeze(value, lo, span, shift):
        # Min-max normalise, stretch around the centre, bias by the stdev
        # shift, then clamp into the unit interval.
        scaled = ((value - lo) / span - .5) * 1.25 + shift + .5
        return min(1, max(0, scaled))

    for js in js_data:
        js.mAvgSubjectivity = squeeze(js.mAvgSubjectivity, sub_lo, sub_span, sub_shift)
        js.mAvgPolarity = squeeze(js.mAvgPolarity, pol_lo, pol_span, pol_shift)
    return js_data
示例7: morpheme_stdev
def morpheme_stdev(trie: MorphemeTrie, reverse_trie: MorphemeTrie) -> (float, float, float):
    """Population stdev of morphemes-per-word for each trie individually and
    for both tries pooled together.

    Returns (stdev of `trie`, stdev of `reverse_trie`, stdev of the
    concatenated samples).
    """
    forward_counts = list(trie.morphemes_per_word())
    backward_counts = list(reverse_trie.morphemes_per_word())
    return (pstdev(forward_counts),
            pstdev(backward_counts),
            pstdev(forward_counts + backward_counts))
示例8: pearson
def pearson(A, B):
    """Pearson correlation coefficient of two equal-length numeric sequences.

    Computed with population statistics:
    r = (E[A*B] - E[A]*E[B]) / (sigma_A * sigma_B).

    Raises AssertionError if the lengths differ and ZeroDivisionError if
    either sequence has zero variance (both behaviours preserved from the
    original).
    """
    M = len(A)
    assert M == len(B)
    A_mean = statistics.mean(A)
    A_stdev = statistics.pstdev(A)
    B_mean = statistics.mean(B)
    B_stdev = statistics.pstdev(B)
    # zip() replaces the original C-style index loop over range(M).
    cross_mean = sum(a * b for a, b in zip(A, B)) / M
    return (cross_mean - A_mean * B_mean) / (A_stdev * B_stdev)
示例9: calculate_mean
def calculate_mean(TE_program):
    """Aggregate per-family TPR/FDR means and population stdevs for one TE
    detection program across all FAMILY_TFPN_ALL result files found in the
    directory given as sys.argv[1], writing one line per family to OUT.

    NOTE(review): depends on module-level names `all_families` (family dict)
    and `OUT` (open output file handle) defined elsewhere in this file --
    confirm before reuse.  The original source used Python 2 print
    statements and its indentation was mangled; this is a best-effort
    Python 3 reconstruction.
    """
    my_directory = sys.argv[1]
    files = ''
    for results_file in os.listdir(my_directory):
        match = re.findall("(FAMILY_TFPN_ALL*)", results_file)
        if len(match) > 0:
            print("yes")
            print(results_file)
            files += str(" {results_file}".format(**locals()))
    TPR_fam = defaultdict(list)
    FDR_fam = defaultdict(list)
    fam_found = {}
    # get rid of leading space...next tiem append space after file:
    files = files[1:]
    files_to_test = files.split(' ')
    for sim_file in files_to_test:
        with open(sim_file, "r") as OPEN_SIM_FILE:  # was opened, never closed
            for line in OPEN_SIM_FILE:
                if re.search(TE_program, line):
                    line = line.rstrip('\n')
                    items = re.split("[\t]", line)  # WILL NEED TO CHANGE THESE
                    M1 = items[0]
                    fam = items[1]
                    TPR = items[5]
                    FDR = items[6]
                    if TPR != "NA":
                        TPR_fam[fam].append(TPR)
                        fam_found[fam] = 0
                        FDR_fam[fam].append(FDR)
    for key in FDR_fam:
        print(key)
        print(FDR_fam[key])
    for key in sorted(all_families.keys()):
        if key in fam_found:
            # BUG FIX: list(...) is required on Python 3 because map() is a
            # one-shot iterator and the values are consumed twice
            # (mean, then pstdev).
            TPR_fam[key] = list(map(float, TPR_fam[key]))
            mean_TPR = statistics.mean(TPR_fam[key])
            standard_deviation_TPR = statistics.pstdev(TPR_fam[key])
        else:
            mean_TPR = "NA"
            standard_deviation_TPR = "NA"
        # NOTE(review): keys absent from the data hit mean([]) here and raise
        # StatisticsError -- this mirrors the flat layout of the original
        # source; confirm the intended indentation.
        FDR_fam[key] = list(map(float, FDR_fam[key]))
        print(key)
        print(FDR_fam[key])
        mean_FDR = statistics.mean(FDR_fam[key])
        standard_deviation_FDR = statistics.pstdev(FDR_fam[key])
        print("The mean_TPR is {mean_TPR}".format(**locals()))
        print("The standard deviation TPR is {standard_deviation_TPR}".format(**locals()))
        OUT.write("{M1}\t{key}\t{mean_TPR}\t{mean_FDR}\t{standard_deviation_TPR}\t{standard_deviation_FDR}\n".format(**locals()))
示例10: printOverTime
def printOverTime(label, this_acc_over_time, this_conf_over_time):
print('\n\n' + str(label))
for numEvents in this_acc_over_time:
accMean = st.mean(this_acc_over_time[numEvents])
accStd = st.pstdev(this_acc_over_time[numEvents])
confMean = st.mean(this_conf_over_time[numEvents])
confStd = st.pstdev(this_conf_over_time[numEvents])
print(str(numEvents) + '\t' + str(accMean) + '\t' + str(accStd) + '\t' + str(confMean) + '\t' + str(confStd))
示例11: eliminate_sd
def eliminate_sd(alphabet, v):
    """Filter `alphabet` down to the entries whose stdev window overlaps the
    target interval.

    Each entry's first element is a sample list; the entry is kept when
    [pstdev - 0.5, pstdev + 0.5] intersects [v*0.5 - 0.5, v*0.5]
    (overlap test delegated to `intersect`).  Returns the filtered list.
    """
    target_lo = v * 0.5 - 0.5
    target_hi = v * 0.5
    new_alphabet = []
    for entry in alphabet:
        # Hoisted: the original computed pstdev(entry[0]) twice per entry.
        sd = pstdev(entry[0])
        if intersect(target_lo, target_hi, sd - 0.5, sd + 0.5):
            new_alphabet.append(entry)
    return new_alphabet
示例12: get_meanCV
def get_meanCV(file):
    """Compute the mean percent CV (pstdev/mean * 100) of replicate qPCR Ct
    values read from `file` (a CSV export with the well ID in column 0 and
    Ct in column 3), and append per-replicate [mean, stdev] rows to
    ./output/mean_SD.csv.

    NOTE(review): `mean_SD` is not defined in this function -- presumably a
    module-level list elsewhere in this file; confirm.  The original source
    had mangled indentation; the structure below is a best-effort
    reconstruction.
    """
    CSV_file = pandas.read_csv(file)
    expt_samples = len(CSV_file)
    DNAs = 7
    # BUG FIX: was `expt_samples/DNAs` -- true division yields a float on
    # Python 3 and breaks the range() calls below; use floor division.
    replicates = expt_samples // DNAs
    if "A13" in CSV_file["Well"].values:
        plate_map = Container(None, _CONTAINER_TYPES['384-pcr'])
    else:
        plate_map = Container(None, _CONTAINER_TYPES['96-pcr'])
    start = 0
    replicate_locs = []
    # NOTE(review): the range stops at DNAs-1, so only 6 of the 7 DNAs get a
    # location block -- looks like an off-by-one; preserved pending
    # confirmation.
    for i in range(0, DNAs - 1):
        loc = [plate_map.humanize(s) for s in range(start, start + replicates)]
        replicate_locs.append(loc)
        start += replicates
    DNA_Ct = []
    for h in replicate_locs:
        # NOTE(review): the inner loop variable is unused, so the file is
        # re-scanned once per well and each replicate list is collected
        # len(h) times -- preserved from the original; confirm intent.
        for x in h:
            Replicate_Ct_DNA = []
            with open(file) as data_source:  # was opened, never closed
                for line in data_source:
                    split_line = line.split(',')
                    wellID = split_line[0]
                    Ct = split_line[3]
                    for w in h:
                        if w == wellID:
                            try:
                                Replicate_Ct_DNA.append(float(Ct))
                            except ValueError:  # was a bare except
                                Replicate_Ct_DNA.append(0.0)
            DNA_Ct.append(Replicate_Ct_DNA)
    percentageCV = []
    for ct_values in DNA_Ct:
        try:
            percentageCV.append((stats.pstdev(ct_values) / stats.mean(ct_values)) * 100)
        except ZeroDivisionError:
            percentageCV.append(0.0)
    meanCV = stats.mean(percentageCV)
    for ct_values in DNA_Ct:
        mean_SD.append([stats.mean(ct_values), stats.pstdev(ct_values)])
    with open('./output/mean_SD.csv', 'w') as out_fh:  # was never closed
        csv.writer(out_fh).writerows(mean_SD)
    return meanCV
示例13: __init__
def __init__(self, data_list):
    """Summarise a list of numeric strings where blanks ("") are allowed.

    Stores on self: mean / standard_deviation (blanks excluded, 0 when all
    blank), total_mean / total_standard_deviation (blanks counted as 0),
    total_filled / total_not_filled counts, and five quintile buckets
    (quintilesX labels, quintilesY counts) spanning [min, max] of the
    non-blank values.  All stored statistics are rounded to 2 decimals.
    """
    # Values with blanks counted as 0.
    with_blanks_as_zero = []
    # Values with blanks excluded.  (Renamed from `list`, which shadowed
    # the builtin.)
    values = []
    self.total_filled = 0
    self.total_not_filled = 0
    self.quintilesX = []
    self.quintilesY = []
    for raw in data_list:
        if raw != "":
            with_blanks_as_zero.append(int(raw))
            values.append(int(raw))
            self.total_filled += 1
        else:
            with_blanks_as_zero.append(0)
            self.total_not_filled += 1
    if values != []:
        exact_mean = mean(values)
        self.mean = round(exact_mean, 2)
        # BUG FIX: pstdev was given the *rounded* mean as mu, slightly
        # biasing the stdev; pass the exact mean instead.
        self.standard_deviation = round(pstdev(values, exact_mean), 2)
        minimum = min(values)
        maximum = max(values)
        quintile_length = math.floor((maximum - minimum + 1) / 5)
        # First 4 quintiles: half-open buckets [first, second).
        first = minimum
        for i in range(1, 5):
            second = first + quintile_length
            self.quintilesX.append("[" + str(first) + ", " + str(second) + ")")
            self.quintilesY.append(sum(1 for num in values if first <= num < second))
            first = second
        # Last quintile: closed bucket [first, maximum].
        self.quintilesX.append("[" + str(first) + ", " + str(maximum) + "]")
        self.quintilesY.append(sum(1 for num in values if first <= num <= maximum))
    else:
        self.mean = 0
        self.standard_deviation = 0
    exact_total_mean = mean(with_blanks_as_zero)
    self.total_mean = round(exact_total_mean, 2)
    # Same fix as above: exact (unrounded) mean passed as mu.
    self.total_standard_deviation = round(pstdev(with_blanks_as_zero, exact_total_mean), 2)
示例14: print_stats
def print_stats(times, nodes):
print("Nodes:")
for node in nodes:
print(" {}:{}:{}".format(node['address'], node['port'], node['job_slots']))
pprint(times)
stats = dict((key, dict(mean=statistics.mean(data), stdev=statistics.pstdev(data))) for key, data in times.items())
pprint(stats)
示例15: calculate_feature_statistics
def calculate_feature_statistics(self, series_key='series'):
    """Populate self.feature_statistics with min/max/mean/population-stdev
    per feature and return it.

    Feature names come from the first sound file's analysis; values are
    pooled across all sound files, flattening one level of nesting when a
    feature's series contains lists (via Standardizer.join_lists).  Features
    with no data keep all-None statistics.
    """
    for feature in self.sound_files[0].analysis[series_key]:
        self.feature_statistics[feature] = {
            'min': None,
            'max': None,
            'mean': None,
            'standard_deviation': None
        }
    for feature in self.feature_statistics:
        pooled = []
        for sound_file in self.sound_files:
            data = sound_file.analysis[series_key][feature]
            if isinstance(data[0], list):
                pooled += Standardizer.join_lists(data)
            else:
                pooled += data
        if not pooled:
            continue
        entry = self.feature_statistics[feature]
        entry['min'] = min(pooled)
        entry['max'] = max(pooled)
        entry['mean'] = statistics.mean(pooled)
        entry['standard_deviation'] = statistics.pstdev(pooled)
    return self.feature_statistics