This page collects typical usage examples of the Python function thunder.utils.common.checkParams. If you are wondering what checkParams does, how to call it, or what real uses look like, the curated code examples below may help.
The following presents 15 code examples of the checkParams function, drawn from the Thunder codebase and sorted by popularity by default.
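The body of checkParams itself is not shown on this page; judging from how it is called in the examples below, it simply verifies that a string argument is one of a set of allowed options. A minimal sketch of such a helper (the error type, message, and case handling are assumptions for illustration, not Thunder's actual implementation):

def checkParams(param, validParams):
    """Raise an error if `param` is not one of `validParams` (case-insensitive)."""
    if param.lower() not in [p.lower() for p in validParams]:
        raise ValueError("Option must be one of %s, got '%s'" % (str(validParams), param))

# Hypothetical usage, mirroring the calls in the examples below
checkParams('euclidean', ['euclidean', 'l1'])   # passes silently
# checkParams('cosine', ['euclidean', 'l1'])    # would raise ValueError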
Example 1: similarity
def similarity(self, other, metric='distance', thresh=5):
    """
    Estimate similarity between sources in self and other.

    Will compute the fraction of sources in self that are found
    in other, based on a given distance metric and a threshold.
    The fraction is estimated as the number of sources in self
    found in other, divided by the total number of sources in self.

    Parameters
    ----------
    other : SourceModel
        The sources to compare to

    metric : str, optional, default = 'distance'
        Metric to use when computing distances

    thresh : scalar, optional, default = 5
        The distance below which a source is considered found
    """
    checkParams(metric, ['distance'])

    if metric == 'distance':
        vals = self.distance(other, minDistance=thresh)
        vals[isnan(vals)] = inf
    else:
        raise Exception("Metric not recognized")

    hits = sum(vals < thresh) / float(len(self.sources))

    return hits
Example 2: distance
def distance(self, other, method='euclidean'):
    """
    Distance between the center of this source and another.

    Parameters
    ----------
    other : Source, or array-like
        Either another source, or the center coordinates of another source

    method : str
        Specify a distance measure to use for spatial distance between source
        centers. Current options include Euclidean distance ('euclidean') and
        L1-norm ('l1').
    """
    from numpy.linalg import norm

    checkParams(method, ['euclidean', 'l1'])

    if method == 'l1':
        order = 1
    else:
        order = 2

    if isinstance(other, Source):
        return norm(self.center - other.center, ord=order)
    elif isinstance(other, list) or isinstance(other, ndarray):
        return norm(self.center - asarray(other), ord=order)
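The norm call above is plain NumPy, so the two supported metrics can be checked on made-up center coordinates outside of Thunder:

from numpy import asarray
from numpy.linalg import norm

centerA = asarray([10.0, 12.0, 3.0])
centerB = asarray([13.0, 8.0, 3.0])

print(norm(centerA - centerB, ord=2))  # Euclidean distance: 5.0
print(norm(centerA - centerB, ord=1))  # L1 distance: 7.0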
Example 3: detrend
def detrend(self, method='linear', **kwargs):
    """
    Detrend time series data with linear or nonlinear detrending.
    Preserve intercept so that subsequent steps can adjust the baseline.

    Parameters
    ----------
    method : str, optional, default = 'linear'
        Detrending method

    order : int, optional, default = 5
        Order of polynomial, for non-linear detrending only
    """
    checkParams(method, ['linear', 'nonlinear'])

    if method.lower() == 'linear':
        order = 1
    else:
        if 'order' in kwargs:
            order = kwargs['order']
        else:
            order = 5

    def func(y):
        x = arange(len(y))
        p = polyfit(x, y, order)
        p[-1] = 0
        yy = polyval(p, x)
        return y - yy

    return self.applyValues(func, keepIndex=True)
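The inner func above is plain NumPy, so the detrending step can be tried on a synthetic trace without Thunder. A small standalone sketch (the example signal is made up):

from numpy import arange, polyfit, polyval, linspace, sin

def detrend_trace(y, order=1):
    """Remove a polynomial trend of the given order, keeping the intercept."""
    x = arange(len(y))
    p = polyfit(x, y, order)
    p[-1] = 0                 # zero the constant term so the baseline is not subtracted
    return y - polyval(p, x)

t = linspace(0, 10, 100)
signal = sin(t) + 0.5 * t + 2.0          # oscillation + linear drift + baseline
flat = detrend_trace(signal, order=1)    # drift removed; baseline (intercept) retained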
Example 4: makeExample
def makeExample(self, dataset=None, **opts):
    """
    Make an example data set for testing analyses.

    Options include 'pca', 'factor', 'kmeans', 'ica', 'sources'.
    See thunder.utils.datasets for detailed options.

    Parameters
    ----------
    dataset : str
        Which dataset to generate

    Returns
    -------
    data : RDD of (tuple, array) pairs
        Generated dataset
    """
    from thunder.utils.datasets import DATASET_MAKERS

    if dataset is None:
        return sorted(DATASET_MAKERS.keys())

    checkParams(dataset, DATASET_MAKERS.keys())

    return DataSets.make(self._sc, dataset, **opts)
Example 5: loadSeries
def loadSeries(self, dataPath, nkeys=None, nvalues=None, inputFormat='binary', minPartitions=None,
               confFilename='conf.json', keyType=None, valueType=None):
    """
    Loads a Series object from data stored as text or binary files.

    Supports single files or multiple files stored on a local file system, a networked file system
    (mounted and available on all cluster nodes), Amazon S3, or HDFS.

    Parameters
    ----------
    dataPath : string
        Path to data files or directory, specified as either a local filesystem path or in a URI-like
        format, including scheme. A dataPath argument may include a single '*' wildcard character in
        the filename. Examples of valid dataPaths include 'a/local/relative/directory/*.stack',
        's3n:///my-s3-bucket/data/mydatafile.tif', '/mnt/my/absolute/data/directory/', or
        'file:///mnt/another/data/directory/'.

    nkeys : int, optional (but required if `inputFormat` is 'text')
        Dimensionality of data keys. (For instance, (x,y,z) keyed data for 3-dimensional image
        timeseries data.) For text data, the number of keys must be specified in this parameter;
        for binary data, the number of keys must be specified either in this parameter or in a
        configuration file named by the `confFilename` argument if this parameter is not set.

    nvalues : int, optional (but required if `inputFormat` is 'text')
        Number of values expected to be read. For binary data, nvalues must be specified either in
        this parameter or in a configuration file named by the `confFilename` argument if this
        parameter is not set.

    inputFormat : {'text', 'binary'}, optional, default 'binary'
        Format of data to be read.

    minPartitions : int, optional
        Explicitly specify minimum number of Spark partitions to be generated from this data. Used
        only for text data. Default is to use the minParallelism attribute of the Spark context object.

    confFilename : string, optional, default 'conf.json'
        Path to JSON file with configuration options including 'nkeys', 'nvalues', 'keytype', and
        'valuetype'. If a file is not found at the given path, then the base directory given in
        `dataPath` will also be checked. Parameters `nkeys` or `nvalues` that are specified as
        explicit arguments to this method will take priority over those found in the configuration
        file if both are present.

    Returns
    -------
    data : thunder.rdds.Series
        A newly-created Series object, wrapping an RDD of series data. This RDD will have as keys an
        n-tuple of int, with n given by `nkeys` or the configuration passed in `confFilename`. RDD
        values will be a numpy array of length `nvalues` (or as specified in the passed configuration
        file).
    """
    checkParams(inputFormat, ['text', 'binary'])

    from thunder.rdds.fileio.seriesloader import SeriesLoader
    loader = SeriesLoader(self._sc, minPartitions=minPartitions)

    if inputFormat.lower() == 'text':
        data = loader.fromText(dataPath, nkeys=nkeys)
    else:
        # checkParams guarantees the format is 'binary' here
        data = loader.fromBinary(dataPath, confFilename=confFilename, nkeys=nkeys, nvalues=nvalues,
                                 keyType=keyType, valueType=valueType)

    return data
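The docstring's rule that explicit nkeys/nvalues arguments take priority over values in the conf.json file can be illustrated without Spark. A minimal, self-contained sketch of that merge logic (resolve_series_params and the file layout are assumptions for illustration, not Thunder's loader code):

import json

def resolve_series_params(confPath, nkeys=None, nvalues=None):
    """Merge explicit arguments with a conf.json file; explicit arguments win."""
    try:
        with open(confPath) as f:
            conf = json.load(f)
    except IOError:
        conf = {}
    return {
        'nkeys': nkeys if nkeys is not None else conf.get('nkeys'),
        'nvalues': nvalues if nvalues is not None else conf.get('nvalues'),
    }

# Given a conf.json containing {"nkeys": 3, "nvalues": 240}:
# resolve_series_params('conf.json')              -> {'nkeys': 3, 'nvalues': 240}
# resolve_series_params('conf.json', nvalues=100) -> {'nkeys': 3, 'nvalues': 100}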
Example 6: overlap
def overlap(self, other, method='support', counts=False, symmetric=True):
    """
    Compute the overlap between this source and other, in terms
    of either support or similarity of coefficients.

    Support computes the number of overlapping pixels relative
    to the union of both sources. Correlation computes the similarity
    of the weights (not defined for binary masks).

    Parameters
    ----------
    other : Source
        The source to compute overlap with.

    method : str
        Compare either support of source coefficients ('support'), or the
        source spatial filters (not yet implemented).

    counts : boolean, optional, default = False
        Whether to return raw counts when computing support, otherwise
        return a fraction.
    """
    checkParams(method, ['support', 'corr'])

    coordsSelf = aslist(self.coordinates)
    coordsOther = aslist(other.coordinates)

    intersection = [a for a in coordsSelf if a in coordsOther]
    complementLeft = [a for a in coordsSelf if a not in intersection]
    complementRight = [a for a in coordsOther if a not in intersection]
    hits = len(intersection)

    if symmetric is True:
        misses = len(complementLeft + complementRight)
    else:
        misses = len(complementLeft)

    if method == 'support':
        if counts:
            return hits, misses
        else:
            return hits / float(hits + misses)

    if method == 'corr':
        from scipy.stats import spearmanr

        if not (hasattr(self, 'values') and hasattr(other, 'values')):
            raise Exception('Sources must have values to compute correlation')
        else:
            valuesSelf = aslist(self.values)
            valuesOther = aslist(other.values)

        if len(intersection) > 0:
            # match values by coordinate membership; intersection holds coordinates, not indices
            left = [v for v, c in zip(valuesSelf, coordsSelf) if c in coordsOther]
            right = [v for v, c in zip(valuesOther, coordsOther) if c in coordsSelf]
            rho, _ = spearmanr(left, right)
        else:
            rho = 0.0

        return (rho * hits) / float(hits + misses)
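The support calculation is just set arithmetic over pixel coordinates; a self-contained sketch with made-up coordinate lists:

coordsA = [(0, 0), (0, 1), (1, 0), (1, 1)]
coordsB = [(1, 0), (1, 1), (2, 1)]

intersection = [c for c in coordsA if c in coordsB]   # [(1, 0), (1, 1)]
union = set(coordsA) | set(coordsB)                   # 5 distinct pixels

hits = len(intersection)
misses = len(union) - hits
print(hits / float(hits + misses))                    # symmetric support: 2/5 = 0.4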
Example 7: __new__
def __new__(cls, method, **kwargs):
    from thunder.extraction.block.methods.nmf import BlockNMF
    from thunder.extraction.feature.methods.localmax import LocalMax

    EXTRACTION_METHODS = {
        'nmf': BlockNMF,
        'localmax': LocalMax
    }

    checkParams(method, EXTRACTION_METHODS.keys())

    return EXTRACTION_METHODS[method](**kwargs)
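Examples 7 and 9 use checkParams to guard a factory that dispatches on a method name. The same pattern in a self-contained form (LinearFit, SplineFit, and make_fitter are stand-ins for illustration, not Thunder classes):

class LinearFit(object):
    def __init__(self, order=1):
        self.order = order

class SplineFit(object):
    def __init__(self, knots=10):
        self.knots = knots

FITTERS = {'linear': LinearFit, 'spline': SplineFit}

def make_fitter(method, **kwargs):
    """Validate the name, then construct the matching class with keyword options."""
    if method not in FITTERS:   # what checkParams would enforce
        raise ValueError("method must be one of %s" % list(FITTERS.keys()))
    return FITTERS[method](**kwargs)

fitter = make_fitter('spline', knots=5)   # SplineFit instance with knots=5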
Example 8: normalize
def normalize(self, baseline='percentile', window=None, perc=20, offset=0.1):
    """
    Normalize each time series by subtracting and dividing by a baseline.

    Baseline can be derived from a global mean or percentile,
    or a smoothed percentile estimated within a rolling window.

    Parameters
    ----------
    baseline : str, optional, default = 'percentile'
        Quantity to use as the baseline, options are 'mean', 'percentile', 'window', or 'window-fast'

    window : int, optional, default = None
        Size of window for baseline estimation, for 'window' and 'window-fast' baseline only

    perc : int, optional, default = 20
        Percentile value to use, for 'percentile', 'window', or 'window-fast' baseline only

    offset : float, optional, default = 0.1
        Scalar added to baseline during division
    """
    checkParams(baseline, ['mean', 'percentile', 'window', 'window-fast'])
    method = baseline.lower()

    from warnings import warn
    if not (method == 'window' or method == 'window-fast') and window is not None:
        warn('Setting window without using method "window" has no effect')

    if method == 'mean':
        baseFunc = mean

    if method == 'percentile':
        baseFunc = lambda x: percentile(x, perc)

    if method == 'window':
        if window & 0x1:
            left, right = (ceil(window/2), ceil(window/2) + 1)
        else:
            left, right = (window/2, window/2)

        n = len(self.index)
        baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc)
                                      for ix in arange(0, n)])

    if method == 'window-fast':
        from scipy.ndimage.filters import percentile_filter
        baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest')

    def get(y):
        b = baseFunc(y)
        return (y - b) / (b + offset)

    return self.applyValues(get, keepIndex=True)
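The 'percentile' baseline is easy to check by hand on a single trace. A self-contained sketch with a made-up signal:

from numpy import array, percentile

y = array([2.0, 2.0, 2.0, 2.0, 10.0, 2.0])
b = percentile(y, 20)            # baseline = 2.0 for this trace
offset = 0.1

normalized = (y - b) / (b + offset)
print(normalized)                # quiet samples near 0; the transient becomes (10 - 2) / 2.1 ≈ 3.81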
Example 9: __new__
def __new__(cls, method, **kwargs):
    from thunder.registration.methods.crosscorr import CrossCorr, PlanarCrossCorr

    REGMETHODS = {
        'crosscorr': CrossCorr,
        'planarcrosscorr': PlanarCrossCorr
    }

    checkParams(method, REGMETHODS.keys())

    return REGMETHODS[method](kwargs)
Example 10: export
def export(data, outputDirPath, outputFilename, outputFormat, sorting=False):
    """
    Export data to a variety of local formats.

    Can export local arrays or a Series. If passed a Series,
    it will first be packed into one or more local arrays.

    Parameters
    ----------
    data : Series, or numpy array
        The data to export

    outputDirPath : str
        Output directory

    outputFilename : str
        Output filename

    outputFormat : str
        Output format ("matlab", "npy", or "text")
    """
    from thunder.rdds.series import Series
    from scipy.io import savemat

    checkParams(outputFormat, ['matlab', 'npy', 'text'])

    if not os.path.exists(outputDirPath):
        os.makedirs(outputDirPath)

    filename = os.path.join(outputDirPath, outputFilename)

    def write(array, file, format, varname=None):
        if format == 'matlab':
            savemat(file+".mat", mdict={varname: array}, oned_as='column', do_compression='true')
        if format == 'npy':
            save(file, array)
        if format == 'text':
            savetxt(file+".txt", array, fmt="%.6f")

    if isinstance(data, Series):
        # force calculation of dimensions
        _tmp = data.dims
        if size(data.index) > 1:
            for ix in data.index:
                result = data.select(ix).pack(sorting=sorting)
                write(result, filename+"_"+str(ix), outputFormat, varname=outputFilename+"_"+str(ix))
        else:
            result = data.pack(sorting=sorting)
            write(result, filename, outputFormat, varname=outputFilename+"_"+str(data.index))
    else:
        write(data, filename, outputFormat, varname=outputFilename)
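The write helper above wraps three standard calls; saving a local array in the same three formats, outside Thunder, looks like this (the file names are made up):

from numpy import save, savetxt, arange
from scipy.io import savemat

arr = arange(12).reshape(3, 4).astype(float)

savemat('result.mat', mdict={'result': arr}, oned_as='column', do_compression=True)
save('result.npy', arr)                 # numpy appends the .npy extension if it is missing
savetxt('result.txt', arr, fmt='%.6f')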
Example 11: overlap
def overlap(self, other, method="fraction"):
    """
    Compute the overlap between this source and other.

    Options are a symmetric measure of overlap based on the fraction
    of intersecting pixels relative to the union ('fraction'), an asymmetric
    measure of overlap that expresses detected intersecting pixels
    (relative to this source) using precision and recall rates ('rates'), or
    a correlation coefficient of the weights within the intersection
    (not defined for binary weights) ('correlation').

    Parameters
    ----------
    other : Source
        The source to compute overlap with.

    method : str
        Which estimate of overlap to compute, options are
        'fraction' (symmetric), 'rates' (asymmetric), or 'correlation'
    """
    checkParams(method, ["fraction", "rates", "correlation"])

    coordsSelf = aslist(self.coordinates)
    coordsOther = aslist(other.coordinates)

    intersection = [a for a in coordsSelf if a in coordsOther]
    nhit = float(len(intersection))
    ntotal = float(len(set([tuple(x) for x in coordsSelf] + [tuple(x) for x in coordsOther])))

    if method == "rates":
        recall = nhit / len(coordsSelf)
        precision = nhit / len(coordsOther)
        return recall, precision

    if method == "fraction":
        return nhit / float(ntotal)

    if method == "correlation":
        from scipy.stats import spearmanr
        if not (hasattr(self, "values") and hasattr(other, "values")):
            raise ValueError("Sources must have values to compute correlation")
        else:
            valuesSelf = aslist(self.values)
            valuesOther = aslist(other.values)

        if len(intersection) > 0:
            left = [v for v, c in zip(valuesSelf, coordsSelf) if c in coordsOther]
            right = [v for v, c in zip(valuesOther, coordsOther) if c in coordsSelf]
            rho, _ = spearmanr(left, right)
        else:
            rho = 0.0

        return rho
Example 12: similarity
def similarity(self, other, metric="distance", thresh=5, minDistance=inf):
    """
    Estimate similarity to another set of sources using recall and precision.

    Will compute the number of sources in self that are also
    in other, based on a given distance metric and a threshold.
    The recall rate is the number of matches divided by the number in self,
    and the precision rate is the number of matches divided by the number in other.
    Typically self is ground truth and other is an estimate.
    The F score is defined as 2 * (recall * precision) / (recall + precision).

    Before computing metrics, all sources in self are matched to other,
    and a minimum distance can be set to control matching.

    Parameters
    ----------
    other : SourceModel
        The sources to compare to.

    metric : str, optional, default = 'distance'
        Metric to use when computing distances,
        options include 'distance' and 'overlap'

    thresh : scalar, optional, default = 5
        The distance below which a source is considered found.

    minDistance : scalar, optional, default = inf
        Minimum distance to use when matching indices.
    """
    checkParams(metric, ["distance", "overlap"])

    if metric == "distance":
        # when evaluating distances,
        # minimum distance should be the threshold
        if minDistance == inf:
            minDistance = thresh
        vals = self.distance(other, minDistance=minDistance)
        vals[isnan(vals)] = inf
        compare = lambda x: x < thresh
    elif metric == "overlap":
        vals = self.overlap(other, method="fraction", minDistance=minDistance)
        vals[isnan(vals)] = 0
        compare = lambda x: x > thresh
    else:
        raise Exception("Metric not recognized")

    recall = sum(map(compare, vals)) / float(self.count)
    precision = sum(map(compare, vals)) / float(other.count)
    score = 2 * (recall * precision) / (recall + precision)

    return recall, precision, score
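The recall, precision, and F-score arithmetic is easy to verify with made-up counts:

matches = 8          # sources in self matched to a source in other
n_self = 10          # ground-truth sources
n_other = 16         # estimated sources

recall = matches / float(n_self)                           # 0.8
precision = matches / float(n_other)                       # 0.5
f_score = 2 * (recall * precision) / (recall + precision)  # 0.8 / 1.3 ≈ 0.615
print(recall, precision, f_score)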
Example 13: export
def export(self, data, filename, outputFormat=None, overwrite=False, varname=None):
    """
    Export local array data to a variety of formats.

    Can write to a local file system or S3 (destination inferred from filename schema).
    S3 writing is useful for persisting arrays when working in an environment without
    accessible local storage.

    Parameters
    ----------
    data : array-like
        The data to export

    filename : str
        Output location (path/to/file.ext)

    outputFormat : str, optional, default = None
        Output format ("npy", "mat", or "txt"), if not provided will
        try to infer from file extension.

    overwrite : boolean, optional, default = False
        Whether to overwrite if directory or file already exists

    varname : str, optional, default = None
        Variable name for writing "mat" formatted files
    """
    from numpy import save, savetxt, asarray
    from scipy.io import savemat
    from StringIO import StringIO
    from thunder.rdds.fileio.writers import getFileWriterForPath

    path, file, outputFormat = handleFormat(filename, outputFormat)
    checkParams(outputFormat, ["npy", "mat", "txt"])

    clazz = getFileWriterForPath(filename)
    writer = clazz(path, file, overwrite=overwrite, awsCredentialsOverride=self._credentials)

    stream = StringIO()

    if outputFormat == "mat":
        varname = os.path.splitext(file)[0] if varname is None else varname
        savemat(stream, mdict={varname: data}, oned_as='column', do_compression='true')
    if outputFormat == "npy":
        save(stream, data)
    if outputFormat == "txt":
        if asarray(data).ndim > 2:
            raise Exception("Cannot write data with more than two dimensions to text")
        savetxt(stream, data)

    stream.seek(0)
    writer.writeFile(stream.buf)
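Serializing an array into an in-memory buffer before handing it to a writer, as above, can be reproduced with the standard library alone. A small sketch (using io.BytesIO, which works in both Python 2 and 3, rather than Thunder's writer classes):

from io import BytesIO
from numpy import save, load, arange

arr = arange(6).reshape(2, 3)

buf = BytesIO()
save(buf, arr)               # serialize the array into the buffer instead of a file
buf.seek(0)

raw = buf.getvalue()         # bytes that could be handed to any file or S3 writer
roundtrip = load(BytesIO(raw))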
Example 14: similarity
def similarity(self, other, metric='distance', thresh=5, minDistance=inf):
    """
    Estimate similarity between sources in self and other.

    Will compute the fraction of sources in self that are found
    in other, based on a given distance metric and a threshold.
    The fraction is estimated as the number of sources in self
    found in other, divided by the total number of sources in self.

    Before computing metrics, all sources in self are matched to other,
    and a minimum distance can be set to control matching.

    Parameters
    ----------
    other : SourceModel
        The sources to compare to

    metric : str, optional, default = 'distance'
        Metric to use when computing distances,
        options include 'distance' and 'overlap'

    thresh : scalar, optional, default = 5
        The distance below which a source is considered found

    minDistance : scalar, optional, default = inf
        Minimum distance to use when matching indices
    """
    checkParams(metric, ['distance', 'overlap'])

    if metric == 'distance':
        # when evaluating distances,
        # minimum distance should be the threshold
        if minDistance == inf:
            minDistance = thresh
        vals = self.distance(other, minDistance=minDistance)
        vals[isnan(vals)] = inf
        compare = lambda x: x < thresh
    elif metric == 'overlap':
        vals = self.overlap(other, method='support', minDistance=minDistance)
        vals[isnan(vals)] = 0
        compare = lambda x: x > thresh
    else:
        raise Exception("Metric not recognized")

    hits = sum(map(compare, vals)) / float(len(self.sources))

    return hits
Example 15: loadExampleS3
def loadExampleS3(self, dataset=None):
    """
    Load an example data set from S3.

    Info on the included datasets can be found at the CodeNeuro data repository
    (http://datasets.codeneuro.org/). If called with None, will return
    a list of available datasets.

    Parameters
    ----------
    dataset : str
        Which dataset to load

    Returns
    -------
    data : a Data object (usually a Series or Images)
        The dataset as one of Thunder's data objects

    params : dict
        Parameters or metadata for dataset
    """
    DATASETS = {
        'ahrens.lab/direction.selectivity': 'ahrens.lab/direction.selectivity/1/',
        'ahrens.lab/optomotor.response': 'ahrens.lab/optomotor.response/1/',
        'svoboda.lab/tactile.navigation': 'svoboda.lab/tactile.navigation/1/'
    }

    if dataset is None:
        return DATASETS.keys()

    if 'local' in self._sc.master:
        raise Exception("Must be running on an EC2 cluster to load this example data set")

    checkParams(dataset, DATASETS.keys())

    basePath = 's3n://neuro.datasets/'
    dataPath = DATASETS[dataset]

    data = self.loadSeries(basePath + dataPath + 'series')
    params = self.loadParams(basePath + dataPath + 'params/covariates.json')

    return data, params