This article collects typical usage examples of the Python method pyaid.number.NumericUtils.NumericUtils.getMeanAndDeviation. If you have been wondering how NumericUtils.getMeanAndDeviation is used in practice, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of its containing class, pyaid.number.NumericUtils.NumericUtils.
A total of 8 code examples of the NumericUtils.getMeanAndDeviation method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
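Before the examples, here is a minimal usage sketch inferred purely from how the method is called below (the sample data is hypothetical, and the attribute descriptions are read off the usages rather than library documentation): getMeanAndDeviation takes a sequence of numbers and returns a value-with-uncertainty object whose label renders the mean together with its deviation.

from pyaid.number.NumericUtils import NumericUtils

samples = [1.2, 1.4, 1.1, 1.6, 1.3]   # hypothetical measurements

result = NumericUtils.getMeanAndDeviation(samples)

# The examples below read these attributes from the returned object:
print(result.value, result.uncertainty)    # mean and its deviation-based uncertainty
print(result.raw, result.rawUncertainty)   # unrounded counterparts used for arithmetic
print('Mean %s' % result.label)            # preformatted string, logged directly below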
Example 1: _postAnalyze
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _postAnalyze(self):
    """_postAnalyze doc..."""
    self._csv.save()

    meanDiff = NumericUtils.getMeanAndDeviation(self._diffs)
    self.logger.write('Rotation %s' % meanDiff.label)

    self._paths.append(self._makePlot(
        label='Rotation Differences',
        data=self._diffs,
        histRange=[-180, 180]))

    self._paths.append(self._makePlot(
        label='Rotation Differences',
        data=self._diffs,
        histRange=[-180, 180],
        isLog=True))

    circs = []
    circsUnc = []
    diffs = []
    diffsUnc = []

    entries = self.owner.getStage('lengthWidth').entries
    for entry in entries:
        track = entry['track']

        if track.uid not in self.deviations:
            # Skip those tracks with no deviation value (solo tracks)
            continue

        diffDeg = self.deviations[track.uid]
        diffs.append(abs(diffDeg.value))
        diffsUnc.append(diffDeg.uncertainty)

        # Compute the circularity of the track from its aspect ratio. If
        # the aspect is less than or equal to 1.0 use the aspect value
        # directly. However, if the value is greater than one, take the
        # reciprocal so that large and small aspect ratios can be compared
        # equally.
        aspect = entry['aspect']
        if aspect.value > 1.0:
            a = 1.0/aspect.raw
            aspect = NumericUtils.toValueUncertainty(
                a, a*(aspect.rawUncertainty/aspect.raw))

        circs.append(abs(aspect.value - 1.0))
        circsUnc.append(aspect.uncertainty)

    pl = self.plot
    self.owner.createFigure('circular')
    pl.errorbar(x=circs, y=diffs, xerr=circsUnc, yerr=diffsUnc, fmt='.')
    pl.xlabel('Aspect Circularity')
    pl.ylabel('Rotation Deviation')
    pl.title('Rotation Deviation and Aspect Circularity')
    self._paths.append(self.owner.saveFigure('circular'))

    self.mergePdfs(self._paths)
    self._paths = []
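The circularity normalization in the loop above can be isolated into a small standalone sketch. The helper name and its plain-tuple return are hypothetical; the real code works with NumericUtils value-uncertainty objects, but the arithmetic is the same: aspect ratios above 1 are replaced by their reciprocal (carrying over the relative uncertainty) so that wide and narrow tracks are directly comparable, and circularity is then the distance from a perfect ratio of 1.

# Hypothetical standalone version of the normalization used in Example 1.
def circularity(aspect, aspect_unc):
    """Return (|aspect - 1|, uncertainty) with aspect folded into the 0..1 range."""
    if aspect > 1.0:
        # The reciprocal keeps wide (a > 1) and narrow (a < 1) tracks comparable;
        # the relative uncertainty is preserved under 1/a.
        aspect, aspect_unc = 1.0/aspect, (aspect_unc/aspect)*(1.0/aspect)
    return abs(aspect - 1.0), aspect_unc

print(circularity(2.0, 0.1))    # -> (0.5, 0.025)
print(circularity(0.5, 0.025))  # -> (0.5, 0.025)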
Example 2: _processAspectRatios
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _processAspectRatios(self):
    """_processAspectRatios doc..."""
    aspects = []
    pesAspects = []
    manAspects = []

    for entry in self.entries:
        value = entry['aspect'].value
        aspects.append(value)
        if entry['track'].pes:
            pesAspects.append(value)
        else:
            manAspects.append(value)

    self.logger.write('%s\nASPECT RATIO' % ('='*80))
    self.logger.write('Total: %s' %
        NumericUtils.getMeanAndDeviation(aspects).label)
    self.logger.write('Pes: %s' %
        NumericUtils.getMeanAndDeviation(pesAspects).label)
    self.logger.write('Manus: %s' %
        NumericUtils.getMeanAndDeviation(manAspects).label)

    h = Histogram(data=aspects, color='green')
    h.title = 'Aspect Ratios'
    h.yLabel = 'Count'
    h.xLabel = 'Aspect Ratio (width/length)'
    self._paths.append(h.save(self.getTempFilePath(extension='pdf')))

    h = Histogram(data=pesAspects, color='green')
    h.title = 'Aspect Ratios (Pes)'
    h.yLabel = 'Count'
    h.xLabel = 'Aspect Ratio (width/length)'
    self._paths.append(h.save(self.getTempFilePath(extension='pdf')))

    h = Histogram(data=manAspects, color='green')
    h.title = 'Aspect Ratios (Manus)'
    h.yLabel = 'Count'
    h.xLabel = 'Aspect Ratio (width/length)'
    self._paths.append(h.save(self.getTempFilePath(extension='pdf')))
Example 3: _process
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _process(self):
    """_processDeviations doc..."""
    errors = []

    for entry in self.entries:
        if 'fractional' in entry:
            errors.append(entry['fractional'])

    res = NumericUtils.getMeanAndDeviation(errors)
    self.logger.write('Fractional Stride Error %s' % res.label)

    label = 'Fractional Stride Errors'
    self._paths.append(self._makePlot(
        label=label,
        data=errors,
        histRange=(-1.0, 1.0) ))
    self._paths.append(self._makePlot(
        label=label,
        data=errors,
        isLog=True,
        histRange=(-1.0, 1.0) ))

    # noinspection PyUnresolvedReferences
    d = np.absolute(np.array(errors))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d, histRange=(0.0, 1.0) ))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        isLog=True,
        histRange=(0.0, 1.0) ))

    highDeviationCount = 0
    for entry in self.entries:
        if 'measured' not in entry:
            # Skip tracks that have no measured stride value for comparison
            continue

        if entry['deviation'] > 2.0:
            highDeviationCount += 1

        track = entry['track']
        delta = NumericUtils.roundToSigFigs(100.0*abs(entry['delta']), 3)
        self._csv.addRow({
            'fingerprint':track.fingerprint,
            'uid':track.uid,
            'measured':entry['measured'].label,
            'entered':entry['entered'].label,
            'dev':entry['deviation'],
            'delta':delta})

    if not self._csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._csv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(highDeviationCount)/float(len(self.entries)), -2)
    self.logger.write(
        '%s significant %s (%s%%)' % (
            highDeviationCount,
            label.lower(),
            percentage))

    if percentage > (100.0 - 95.45):
        self.logger.write(
            '[WARNING]: Large deviation count exceeds normal ' +
            'distribution expectations.')
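The warning threshold at the end of this example is the two-sigma rule: a normal distribution keeps about 95.45% of its values within two standard deviations of the mean, so if clearly more than the remaining ~4.55% of entries show a deviation above 2.0, the stride errors are unlikely to be normally distributed. A quick check of that constant:

import math

# Fraction of a normal distribution expected within +/- 2 standard deviations
two_sigma_coverage = 100.0 * math.erf(2.0/math.sqrt(2.0))
print(round(two_sigma_coverage, 2))   # 95.45

# So more than roughly (100 - 95.45) = 4.55% of entries beyond 2 sigma is the
# warning condition used in the example above.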
Example 4: _addQuartileEntry
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _addQuartileEntry(self, label, trackway, data):
    if not data or len(data) < 4:
        return

    if label not in self._quartileStats:
        csv = CsvWriter()
        csv.path = self.getPath(
            '%s-Quartiles.csv' % label.replace(' ', '-'),
            isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('name', 'Name'),
            ('normality', 'Normality'),
            ('unweightedNormality', 'Unweighted Normality'),
            ('unweightedLowerBound', 'Unweighted Lower Bound'),
            ('unweightedLowerQuart', 'Unweighted Lower Quartile'),
            ('unweightedMedian', 'Unweighted Median'),
            ('unweightedUpperQuart', 'Unweighted Upper Quartile'),
            ('unweightedUpperBound', 'Unweighted Upper Bound'),
            ('lowerBound', 'Lower Bound'),
            ('lowerQuart', 'Lower Quartile'),
            ('median', 'Median'),
            ('upperQuart', 'Upper Quartile'),
            ('upperBound', 'Upper Bound'),
            ('diffLowerBound', 'Diff Lower Bound'),
            ('diffLowerQuart', 'Diff Lower Quartile'),
            ('diffMedian', 'Diff Median'),
            ('diffUpperQuart', 'Diff Upper Quartile'),
            ('diffUpperBound', 'Diff Upper Bound') )
        self._quartileStats[label] = csv

    csv = self._quartileStats[label]

    dd = mstats.density.Distribution(data)
    unweighted = mstats.density.boundaries.unweighted_two(dd)
    weighted = mstats.density.boundaries.weighted_two(dd)

    #-----------------------------------------------------------------------
    # PLOT DENSITY
    #    Create a density plot for each value
    p = MultiScatterPlot(
        title='%s %s Density Distribution' % (trackway.name, label),
        xLabel=label,
        yLabel='Probability (AU)')

    x_values = mstats.density.ops.adaptive_range(dd, 10.0)
    y_values = dd.probabilities_at(x_values=x_values)
    p.addPlotSeries(
        line=True,
        markers=False,
        label='Weighted',
        color='blue',
        data=zip(x_values, y_values)
    )
    temp = mstats.density.create_distribution(
        dd.naked_measurement_values(raw=True)
    )
    # Sample the unweighted (raw) distribution for the second series
    x_values = mstats.density.ops.adaptive_range(temp, 10.0)
    y_values = temp.probabilities_at(x_values=x_values)
    p.addPlotSeries(
        line=True,
        markers=False,
        label='Unweighted',
        color='red',
        data=zip(x_values, y_values)
    )
    if label not in self._densityPlots:
        self._densityPlots[label] = []
    self._densityPlots[label].append(
        p.save(self.getTempFilePath(extension='pdf')))

    #-----------------------------------------------------------------------
    # NORMALITY
    #    Calculate the normality of the weighted and unweighted
    #    distributions as a test against how well they conform to
    #    the Normal distribution calculated from the unweighted data.
    #
    #    The unweighted Normality test uses a basic bandwidth detection
    #    algorithm to create a uniform Gaussian kernel to populate the
    #    DensityDistribution. It is effectively a density kernel
    #    estimation, but is aggressive in selecting the bandwidth to
    #    prevent over-smoothing multi-modal distributions.
    if len(data) < 8:
        normality = -1.0
        unweightedNormality = -1.0
    else:
        result = NumericUtils.getMeanAndDeviation(data)
        mean = result.raw
        std = result.rawUncertainty

        normality = mstats.density.ops.overlap(
            dd,
            mstats.density.create_distribution([mean], [std])
        )
#......... (remaining code omitted) .........
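The normality score above is described as an overlap between the measured density distribution and a Normal distribution built from the sample mean and deviation. As a rough illustration only (this is not the mstats implementation, and the fixed-bandwidth KDE below is simpler than the adaptive bandwidth the comment describes), the idea corresponds to the overlapping coefficient of the two densities:

import numpy as np

def overlap_with_normal(data, grid_points=512):
    """Overlapping coefficient between a KDE of `data` and a fitted Normal."""
    data = np.asarray(data, dtype=float)
    mean, std = data.mean(), data.std()

    xs = np.linspace(mean - 5.0*std, mean + 5.0*std, grid_points)

    # Normal density built from the sample mean and deviation
    normal = np.exp(-0.5*((xs - mean)/std)**2)/(std*np.sqrt(2.0*np.pi))

    # Fixed-bandwidth Gaussian kernel density estimate of the data
    bw = 1.06*std*len(data)**(-0.2)
    kde = np.exp(-0.5*((xs[:, None] - data[None, :])/bw)**2).sum(axis=1)
    kde /= len(data)*bw*np.sqrt(2.0*np.pi)

    # Shared area under the two curves: values near 1.0 mean "looks normal"
    return float(np.trapz(np.minimum(normal, kde), xs))

print(overlap_with_normal(np.random.normal(0.0, 1.0, 200)))   # close to 1 for normal data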
Example 5: _process
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _process(
        self, label, widthKey, lengthKey, trackDeviations,
        absoluteOnly =False
):
    """_process doc..."""
    pl = self.plot

    ws = []
    ls = []
    w2D = []
    l2D = []

    for entry in self.entries:
        if widthKey in entry:
            ws.append(entry[widthKey])
            if lengthKey in entry:
                w2D.append(entry[widthKey])
        if lengthKey in entry:
            ls.append(entry[lengthKey])
            if widthKey in entry:
                l2D.append(entry[lengthKey])

    plotList = [
        ('widths', ws, 'Width', 'b'),
        ('lengths', ls, 'Length', 'r')]

    wRes = NumericUtils.getMeanAndDeviation(ws)
    self.logger.write('Width %ss' % wRes.label)

    lRes = NumericUtils.getMeanAndDeviation(ls)
    self.logger.write('Length %ss' % lRes.label)

    for data in plotList:
        if not absoluteOnly:
            d = data[1]
            self._paths.append(
                self._makePlot(
                    label, d, data,
                    histRange=(-1.0, 1.0)))
            self._paths.append(
                self._makePlot(
                    label, d, data,
                    isLog=True,
                    histRange=(-1.0, 1.0)))

        # noinspection PyUnresolvedReferences
        d = np.absolute(np.array(data[1]))
        self._paths.append(
            self._makePlot(
                'Absolute ' + label, d, data,
                histRange=(0.0, 1.0)))
        self._paths.append(
            self._makePlot(
                'Absolute ' + label, d, data,
                isLog=True,
                histRange=(0.0, 1.0)))

    self.owner.createFigure('twoD')
    pl.hist2d(w2D, l2D, bins=20, range=([-1, 1], [-1, 1]))
    pl.title('2D %s Distribution' % label)
    pl.xlabel('Width %s' % label)
    pl.ylabel('Length %s' % label)
    pl.xlim(-1.0, 1.0)
    pl.ylim(-1.0, 1.0)
    path = self.getTempPath(
        '%s.pdf' % StringUtils.getRandomString(16),
        isFile=True)
    self.owner.saveFigure('twoD', path)
    self._paths.append(path)

    csv = CsvWriter()
    csv.path = self.getPath(
        '%s-Deviations.csv' % label.replace(' ', '-'),
        isFile=True)
    csv.addFields(
        ('uid', 'UID'),
        ('fingerprint', 'Fingerprint'),
        ('wSigma', 'Width Deviation'),
        ('lSigma', 'Length Deviation') )

    count = 0
    for entry in self.entries:
        widthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(widthKey, 0.0)/wRes.uncertainty), -2)
        lengthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(lengthKey, 0.0)/lRes.uncertainty), -1)
        if widthDevSigma > 2.0 or lengthDevSigma > 2.0:
            count += 1
            track = entry['track']
            data = dict(
                wSigma=widthDevSigma,
                lSigma=lengthDevSigma)

            if trackDeviations is not None:
                trackDeviations[track.uid] = data

            csv.createRow(
                uid=track.uid,
                fingerprint=track.fingerprint,
                **data)
#......... (remaining code omitted) .........
Example 6: _postAnalyze
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _postAnalyze(self):
    h = Histogram(
        data=self._uncs,
        binCount=80,
        xLimits=(0, max(*self._uncs)),
        color="r",
        title="Distribution of Rotational Uncertainties",
        xLabel="Uncertainty Value (degrees)",
        yLabel="Frequency",
    )
    p1 = h.save(self.getTempFilePath(extension="pdf"))
    h.isLog = True
    h.title += " (log)"
    p2 = h.save(self.getTempFilePath(extension="pdf"))
    self.mergePdfs([p1, p2], self.getPath("Rotational-Uncertainty-Distribution.pdf"))

    average = NumericUtils.getMeanAndDeviation(self._uncs)
    self.logger.write("Average rotational uncertainty: %s" % average.label)

    # ---------------------------------------------------------------------
    # FIND LARGE UNCERTAINTY TRACKS
    largeUncertaintyCount = 0
    drawing = None
    sitemap = None

    # If track uncertainty is 2x average, add that track to the spreadsheet and map overlay
    for t in self._tracks:

        # if the tracksite has changed, save previous map and make a new one
        if sitemap != t.trackSeries.trackway.sitemap:
            # save the last site map drawing (if there was one)
            if drawing:
                drawing.save()

            # then start a new drawing for this new site map
            sitemap = t.trackSeries.trackway.sitemap

            fileName = "%s-%s-ROTATION_UNC.svg" % (sitemap.name, sitemap.level)
            path = self.getPath(self.DRAWING_FOLDER_NAME, fileName, isFile=True)
            drawing = CadenceDrawing(path, sitemap)

            # create a group to be instanced for the spreadsheet values
            drawing.createGroup("rect1")
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId="rect1")

            # create another group to be instanced for the mapped values
            drawing.createGroup("rect2")
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId="rect2")

            # and place a grid and the federal coordinates in the drawing file
            drawing.grid()
            drawing.federalCoordinates()

        # now examine the positional uncertainties for this track
        rotation = t.rotationAngle.valueDegrees

        if rotation.uncertainty <= 2.0 * average.uncertainty:
            # then just indicate that this track has low uncertainty
            self._drawLowUncertaintyMarker(drawing, t)
            # label this track green
            # drawing.text(
            #     t.name,
            #     (t.x - 20, t.z),
            #     scene=True,
            #     stroke='green',
            #     stroke_width='0.25',
            #     font_size='8px',
            #     font_family='Arial')
            continue

        # else, since the uncertainty is high, first write that track in the spreadsheet
        largeUncertaintyCount += 1
        self._largeUncCsv.createRow(uid=t.uid, fingerprint=t.fingerprint, r=rotation.label)

        # if either the measured width or length is 0, mark with a yellow disk with red outline
        if t.rotationMeasured == 0:
            drawing.circle(
                (t.x, t.z),
                100 * (t.widthUncertainty + t.lengthUncertainty) / 2.0,
                scene=True,
                fill="yellow",
                stroke="red",
            )
            # drawing.text(
            #     t.name,
            #     (t.x - 20, t.z),
            #     scene=True,
            #     stroke='black',
            #     stroke_width='0.25',
            #     font_size='6px',
            #     font_family='Arial')
            continue
#......... (remaining code omitted) .........
Example 7: _postAnalyze
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _postAnalyze(self):
    h = Histogram(
        data=self._uncs,
        binCount=80,
        xLimits=(0, max(*self._uncs)),
        color='r',
        title='Distribution of Spatial (X, Z) Uncertainties',
        xLabel='Uncertainty Value (m)',
        yLabel='Frequency')

    p1 = h.save(self.getTempFilePath(extension='pdf'))
    h.isLog = True
    h.title += ' (log)'
    p2 = h.save(self.getTempFilePath(extension='pdf'))
    self.mergePdfs([p1, p2], self.getPath('Spatial-Uncertainty-Distribution.pdf'))

    average = NumericUtils.getMeanAndDeviation(self._uncs)
    self.logger.write('Average spatial uncertainty: %s' % average.label)

    #---------------------------------------------------------------------
    # FIND LARGE UNCERTAINTY TRACKS
    largeUncertaintyCount = 0
    drawing = None
    sitemap = None

    # If track uncertainty is 2x average, add that track to the spreadsheet and map overlay
    for t in self._tracks:

        # if the tracksite has changed, save previous map and make a new one
        if sitemap != t.trackSeries.trackway.sitemap:
            # save the last site map drawing (if there was one)
            if drawing:
                drawing.save()

            # then start a new drawing for this new site map
            sitemap = t.trackSeries.trackway.sitemap
            fileName = sitemap.name + "_" + sitemap.level + '_uncertainty.svg'
            path = self.getPath(self.DRAWING_FOLDER_NAME, fileName, isFile=True)
            drawing = CadenceDrawing(path, sitemap)

            # create a group to be instanced for the spreadsheet values
            drawing.createGroup('rect1')
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId='rect1')

            # create another group to be instanced for the mapped values
            drawing.createGroup('rect2')
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId='rect2')

            # and place a grid and the federal coordinates in the drawing file
            drawing.grid()
            drawing.federalCoordinates()

        # now examine the positional uncertainties for this track
        x = t.xValue
        z = t.zValue

        if x.uncertainty > 0.15 or z.uncertainty > 0.15:
            # s = '%s%s %s%s: %s %s'% (
            #     t.site, t.level, t.trackwayType, t.trackwayNumber, t.name, t.uid)
            # print('%s: (%s and %s)' % (s, x.uncertainty, z.uncertainty))
            print('%s\t%s' % (t.uid, t.fingerprint))

        if max(x.uncertainty, z.uncertainty) <= 2.0*average.uncertainty:
            # then just indicate that this track has low uncertainty
            self._drawLowUncertaintyMarker(drawing, t)
            # label this track with green
            drawing.text(
                t.name,
                (t.x - 20, t.z),
                scene=True,
                stroke='green',
                stroke_width='0.25',
                font_size='8px',
                font_family='Arial')
            continue

        # else, since the uncertainty is high, first write that track in the spreadsheet
        largeUncertaintyCount += 1
        self._largeUncCsv.createRow(
            uid=t.uid,
            fingerprint=t.fingerprint,
            x=x.label,
            z=z.label)

        # if either the measured width or length is 0, mark with a yellow disk with red outline
        if t.widthMeasured == 0 or t.lengthMeasured == 0:
            drawing.circle(
                (t.x, t.z),
                100*(t.widthUncertainty + t.lengthUncertainty)/2.0,
                scene=True,
                fill='yellow',
                stroke='red')
            drawing.text(
                t.name,
                (t.x - 20, t.z),
#......... (remaining code omitted) .........
Example 8: _process
# Required import: from pyaid.number.NumericUtils import NumericUtils [as alias]
# Or: from pyaid.number.NumericUtils.NumericUtils import getMeanAndDeviation [as alias]
def _process(self):
    """_processDeviations doc..."""
    errors = []

    for entry in self.entries:
        if 'fractional' in entry:
            errors.append(entry['fractional'])

    res = NumericUtils.getMeanAndDeviation(errors)
    self.logger.write('Fractional Pace Error %s' % res.label)

    label = 'Fractional Pace Errors'
    d = errors
    self._paths.append(self._makePlot(
        label=label,
        data=d,
        histRange=(-1.0, 1.0)))
    self._paths.append(self._makePlot(
        label=label,
        data=d,
        isLog=True,
        histRange=(-1.0, 1.0)))

    # noinspection PyUnresolvedReferences
    d = np.absolute(np.array(d))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        histRange=(0.0, 1.0) ))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        isLog=True,
        histRange=(0.0, 1.0) ))

    highDeviationCount = 0
    for entry in self.entries:
        if 'measured' not in entry:
            # entry['drawFunc']('purple')
            continue

        if entry['deviation'] > 2.0:
            entry['drawFunc']('red')
            highDeviationCount += 1
        else:
            entry['drawFunc'](
                'black' if abs(entry['deviation']) < 2.0 else '#FFAAAA')

        track = entry['track']
        delta = NumericUtils.roundToSigFigs(100.0*abs(entry['delta']), 3)

        pairTrack = entry.get('pairTrack')
        if pairTrack:
            pairedFingerprint = pairTrack.fingerprint
            pairedUid = pairTrack.uid
        else:
            pairedFingerprint = ''
            pairedUid = ''

        self._csv.addRow({
            'fingerprint':track.fingerprint,
            'uid':track.uid,
            'measured':entry['measured'].label,
            'entered':entry['entered'].label,
            'dev':entry['deviation'],
            'delta':delta,
            'pairedUid':pairedUid,
            'pairedFingerprint':pairedFingerprint})

    for sitemap in self.owner.getSitemaps():
        # Remove drawing from the sitemap cache and save the drawing file
        try:
            sitemap.cache.extract('drawing').save()
        except Exception:
            self.logger.write('[WARNING]: No sitemap saved for %s-%s' % (
                sitemap.name, sitemap.level))

    if not self._csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._csv.path)

    if not self._errorCsv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._errorCsv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(highDeviationCount)/float(len(self.entries)), -2)
    self.logger.write('%s significant %s (%s%%)' % (
        highDeviationCount,
        label.lower(),
        percentage))

    if percentage > (100.0 - 95.45):
        self.logger.write(
            '[WARNING]: Large deviation count exceeds normal ' +
            'distribution expectations.')