This page collects typical usage examples of the Python class thunder.ThunderContext. If you have been wondering what ThunderContext does, how to use it, or simply want to see it in working code, the curated class examples below should help.
Shown below are 15 code examples of the ThunderContext class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
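Before diving in: the examples below obtain a ThunderContext in one of two ways, either by wrapping an existing SparkContext or by letting thunder start one itself. As a minimal sketch (assuming thunder 0.x with a local Spark installation):

from pyspark import SparkContext
from thunder import ThunderContext

# Option 1: wrap an existing SparkContext, as the test fixtures below do
# (only one SparkContext may exist per process)
sc = SparkContext(master="local", appName="example")
tsc = ThunderContext(sc)

# Option 2: let thunder create and wrap the SparkContext itself,
# as the standalone scripts below do:
#     tsc = ThunderContext.start(master="local", appName="example")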
Example 1: test_sima
def test_sima(self):
    """
    (BlockMethod) with SIMA strategy
    """
    # NOTE: this test was brittle and failed non-deterministically with any
    # more than one source
    import sima.segment

    # construct the SIMA strategy
    simaStrategy = sima.segment.STICA(components=1)
    simaStrategy.append(sima.segment.SparseROIsFromMasks(min_size=20))
    simaStrategy.append(sima.segment.SmoothROIBoundaries())
    simaStrategy.append(sima.segment.MergeOverlapping(threshold=0.5))

    tsc = ThunderContext(self.sc)
    data = tsc.makeExample('sources', dims=(60, 60), centers=[[20, 15]], noise=0.5, seed=42)

    # create and fit the thunder extraction strategy
    strategy = SourceExtraction('sima', simaStrategy=simaStrategy)
    model = strategy.fit(data, size=(30, 30))

    assert(model.count == 1)

    # check that the one center is recovered
    ep = 1.5
    assert(model[0].distance([20, 15]) < ep)
Example 2: test_local_max
def test_local_max(self):
    """
    (FeatureMethod) localmax with defaults
    """
    tsc = ThunderContext(self.sc)
    data = tsc.makeExample('sources', dims=[60, 60], centers=[[10, 10], [40, 40]], noise=0.0, seed=42)
    model = SourceExtraction('localmax').fit(data)

    # order is irrelevant, but one of these must be true
    cond1 = (model[0].distance([10, 10]) == 0) and (model[1].distance([40, 40]) == 0)
    cond2 = (model[0].distance([40, 40]) == 0) and (model[1].distance([10, 10]) == 0)
    assert(cond1 or cond2)
Example 3: test_nmf
def test_nmf(self):
    """
    (BlockMethod) nmf with defaults
    """
    tsc = ThunderContext(self.sc)
    data = tsc.makeExample('sources', dims=(60, 60), centers=[[20, 20], [40, 40]], noise=0.1, seed=42)
    model = SourceExtraction('nmf', componentsPerBlock=1).fit(data, size=(30, 30))

    # order is irrelevant, but one of these must be true
    ep = 0.50
    cond1 = (model[0].distance([20, 20]) < ep) and (model[1].distance([40, 40]) < ep)
    cond2 = (model[0].distance([40, 40]) < ep) and (model[1].distance([20, 20]) < ep)
    assert(cond1 or cond2)
Example 4: test_sima
def test_sima(self):
    """
    (BlockMethod) with SIMA strategy
    """
    import sima.segment

    # construct the SIMA strategy
    simaStrategy = sima.segment.STICA(components=2)
    simaStrategy.append(sima.segment.SparseROIsFromMasks(min_size=20))
    simaStrategy.append(sima.segment.SmoothROIBoundaries())
    simaStrategy.append(sima.segment.MergeOverlapping(threshold=0.5))

    tsc = ThunderContext(self.sc)
    data = tsc.makeExample('sources', dims=(60, 60), centers=[[20, 15], [40, 45]], noise=0.1, seed=42)

    # create and fit the thunder extraction strategy
    strategy = SourceExtraction('sima', simaStrategy=simaStrategy)
    model = strategy.fit(data, size=(30, 30))

    # order is irrelevant, but one of these must be true
    ep = 1.5
    cond1 = (model[0].distance([20, 15]) < ep) and (model[1].distance([40, 45]) < ep)
    cond2 = (model[1].distance([20, 15]) < ep) and (model[0].distance([40, 45]) < ep)
    assert(cond1 or cond2)
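Examples 2 through 4 all repeat the same pair of cond1/cond2 checks because the extraction may return the sources in either order. A hypothetical helper like the one below (not part of thunder; just a sketch built on the same model[i].distance API seen above) expresses the same order-insensitive assertion for any number of expected centers:

from itertools import permutations

def matchesCenters(model, centers, ep):
    # true if some one-to-one assignment of extracted sources to expected
    # centers puts every source strictly within ep of its center
    return any(all(model[i].distance(c) < ep for i, c in zip(perm, centers))
               for perm in permutations(range(len(centers))))

With it, the two-condition assertion in Example 4 reduces to assert(matchesCenters(model, [[20, 15], [40, 45]], 1.5)).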
Example 5: execute
def execute(self, lock, pipe):
    """
    Execute this pull request
    """
    lock.acquire()
    base, module = self.clone()

    with open(base + 'info.json', 'r') as f:
        info = json.loads(f.read())

    printer.status("Executing pull request %s from user %s"
                   % (self.id, self.login))
    printer.status("Branch name: %s" % self.branch)
    printer.status("Algorithm name: %s" % info['algorithm'])

    sys.path.append(module)
    run = importlib.import_module('run', module)

    spark_home = os.getenv('SPARK_HOME')
    if spark_home is None or spark_home == '':
        raise Exception('must assign the environment variable SPARK_HOME with the location of Spark')
    sys.path.append(os.path.join(spark_home, 'python'))
    sys.path.append(os.path.join(spark_home, 'python/lib/py4j-0.8.2.1-src.zip'))

    with quiet():
        from thunder import ThunderContext
        from thunder.utils.launch import findThunderEgg
        tsc = ThunderContext.start(master=self.get_master(), appName="neurofinder")
        tsc.addPyFile(findThunderEgg())
        log4j = tsc._sc._jvm.org.apache.log4j
        log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR)
        time.sleep(5)

    base_path = 'neuro.datasets.private/challenges/neurofinder.test'
    datasets = ['00.00.test', '00.01.test', '01.00.test', '01.01.test',
                '02.00.test', '02.01.test', '03.00.test']
    metrics = {'score': [], 'recall': [], 'precision': [], 'overlap': [], 'exactness': []}

    try:
        for ii, name in enumerate(datasets):
            printer.status("Processing data set %s" % name)
            data_path = 's3n://' + base_path + '/' + name
            data_info = self.load_info(base_path, name)
            data = tsc.loadImages(data_path + '/images/', recursive=True,
                                  npartitions=600)
            truth = tsc.loadSources(data_path + '/sources/sources.json')
            sources = run.run(data, info=data_info)

            threshold = 6.0 / data_info['pixels-per-micron']

            recall, precision, score = truth.similarity(sources, metric='distance', minDistance=threshold)
            stats = truth.overlap(sources, method='rates', minDistance=threshold)
            if sum(~isnan(stats)) > 0:
                overlap, exactness = tuple(nanmean(stats, axis=0))
            else:
                overlap, exactness = 0.0, 1.0

            contributors = str(", ".join(data_info["contributors"]))
            animal = data_info["animal"]
            region = data_info["region"]
            lab = data_info["lab"]

            base = {"dataset": name, "contributors": contributors,
                    "lab": lab, "region": region, "animal": animal}

            m = {"value": score}
            m.update(base)
            metrics['score'].append(m)
            m = {"value": recall}
            m.update(base)
            metrics['recall'].append(m)
            m = {"value": precision}
            m.update(base)
            metrics['precision'].append(m)
            m = {"value": overlap}
            m.update(base)
            metrics['overlap'].append(m)
            m = {"value": exactness}
            m.update(base)
            metrics['exactness'].append(m)

            base = data.mean()
            im = sources.masks(outline=True, base=base.clip(0, percentile(base, 99.9)))
            self.post_image(im, name)

        for k in metrics.keys():
            overall = mean([v['value'] for v in metrics[k]])
            metrics[k].append({"dataset": "overall", "value": overall,
                               "contributors": "", "region": "", "animal": ""})

        msg = "Execution successful"
# ... the rest of this method is omitted here ...
Example 6: setUp
def setUp(self):
    super(TestContextLoading, self).setUp()
    self.tsc = ThunderContext(self.sc)
Example 7: SparkConf
# Load thunder
from pyspark import SparkContext, SparkConf
from thunder import Colorize, ThunderContext
image = Colorize.image
import os

# Load scikit-image
from skimage.viewer import ImageViewer as skImageViewer

# Create the Spark context
conf = SparkConf() \
    .setAppName("Display face") \
    .set("spark.executor.memory", "5g")
sc = SparkContext(conf=conf)

# Wrap it in a Thunder context
tsc = ThunderContext(sc)

# Load the image using thunder
data = tsc.loadImages(os.path.dirname(os.path.realpath(__file__)) + '/mush.png', inputFormat='png')
img = data.first()[1]

# Display the first channel using scikit-image
viewer = skImageViewer(img[:, :, 0])
viewer.show()
Example 8: execute
def execute(self):
    """
    Execute this pull request
    """
    printer.status("Executing pull request %s from user %s" % (self.id, self.login))
    base, module = self.clone()

    with open(base + 'info.json', 'r') as f:
        info = json.loads(f.read())

    sys.path.append(module)
    run = importlib.import_module('run')

    spark = os.getenv('SPARK_HOME')
    if spark is None or spark == '':
        raise Exception('must assign the environment variable SPARK_HOME with the location of Spark')
    sys.path.append(os.path.join(spark, 'python'))
    sys.path.append(os.path.join(spark, 'python/lib/py4j-0.8.2.1-src.zip'))

    from thunder import ThunderContext
    tsc = ThunderContext.start(master="local", appName="neurofinder")

    datasets = ['data-0', 'data-1', 'data-2', 'data-3', 'data-4', 'data-5']
    centers = [5, 7, 9, 11, 13, 15]
    metrics = {'accuracy': [], 'overlap': [], 'distance': [], 'count': [], 'area': []}

    try:
        for ii, name in enumerate(datasets):
            data, ts, truth = tsc.makeExample('sources', dims=(200, 200),
                                              centers=centers[ii], noise=1.0, returnParams=True)
            sources = run.run(data)

            accuracy = truth.similarity(sources, metric='distance', thresh=10, minDistance=10)
            overlap = truth.overlap(sources, minDistance=10)
            distance = truth.distance(sources, minDistance=10)
            count = sources.count
            area = mean(sources.areas)

            metrics['accuracy'].append({"dataset": name, "value": accuracy})
            metrics['overlap'].append({"dataset": name, "value": nanmean(overlap)})
            metrics['distance'].append({"dataset": name, "value": nanmean(distance)})
            metrics['count'].append({"dataset": name, "value": count})
            metrics['area'].append({"dataset": name, "value": area})

            im = sources.masks(base=data.mean())
            self.post_image(im, name)

        for k in metrics.keys():
            overall = mean([v['value'] for v in metrics[k]])
            metrics[k].append({"dataset": "overall", "value": overall})

        msg = "Execution successful"
        printer.success()
        self.update_status("executed")
    except Exception:
        metrics = None
        msg = "Execution failed"
        printer.error("failed, returning error")
        print(traceback.format_exc())

    self.send_message(msg)
    return metrics, info
Example 9: TestContextLoading
class TestContextLoading(PySparkTestCaseWithOutputDir):
    def setUp(self):
        super(TestContextLoading, self).setUp()
        self.tsc = ThunderContext(self.sc)

    @staticmethod
    def _findTestResourcesDir(resourcesdirname="resources"):
        testdirpath = os.path.dirname(os.path.realpath(__file__))
        testresourcesdirpath = os.path.join(testdirpath, resourcesdirname)
        if not os.path.isdir(testresourcesdirpath):
            raise IOError("Test resources directory " + testresourcesdirpath + " not found")
        return testresourcesdirpath

    def __run_loadStacksAsSeries(self, shuffle):
        rangeary = np.arange(64*128, dtype=np.dtype('int16'))
        filepath = os.path.join(self.outputdir, "rangeary.stack")
        rangeary.tofile(filepath)
        expectedary = rangeary.reshape((128, 64), order='F')

        range_series = self.tsc.loadImagesAsSeries(filepath, dims=(128, 64), shuffle=shuffle)
        range_series_ary = range_series.pack()

        assert_equals((128, 64), range_series.dims.count)
        assert_equals((128, 64), range_series_ary.shape)
        assert_true(np.array_equal(expectedary, range_series_ary))

    def test_loadStacksAsSeriesNoShuffle(self):
        self.__run_loadStacksAsSeries(False)

    def test_loadStacksAsSeriesWithShuffle(self):
        self.__run_loadStacksAsSeries(True)

    def __run_load3dStackAsSeries(self, shuffle):
        rangeary = np.arange(32*64*4, dtype=np.dtype('int16'))
        filepath = os.path.join(self.outputdir, "rangeary.stack")
        rangeary.tofile(filepath)
        expectedary = rangeary.reshape((32, 64, 4), order='F')

        range_series_noshuffle = self.tsc.loadImagesAsSeries(filepath, dims=(32, 64, 4), shuffle=shuffle)
        range_series_noshuffle_ary = range_series_noshuffle.pack()

        assert_equals((32, 64, 4), range_series_noshuffle.dims.count)
        assert_equals((32, 64, 4), range_series_noshuffle_ary.shape)
        assert_true(np.array_equal(expectedary, range_series_noshuffle_ary))

    def test_load3dStackAsSeriesNoShuffle(self):
        self.__run_load3dStackAsSeries(False)

    def test_load3dStackAsSeriesWithShuffle(self):
        self.__run_load3dStackAsSeries(True)

    def __run_loadMultipleStacksAsSeries(self, shuffle):
        rangeary = np.arange(64*128, dtype=np.dtype('int16'))
        filepath = os.path.join(self.outputdir, "rangeary01.stack")
        rangeary.tofile(filepath)
        expectedary = rangeary.reshape((128, 64), order='F')

        rangeary2 = np.arange(64*128, 2*64*128, dtype=np.dtype('int16'))
        filepath = os.path.join(self.outputdir, "rangeary02.stack")
        rangeary2.tofile(filepath)
        expectedary2 = rangeary2.reshape((128, 64), order='F')

        range_series = self.tsc.loadImagesAsSeries(self.outputdir, dims=(128, 64), shuffle=shuffle)
        range_series_ary = range_series.pack()
        range_series_ary_xpose = range_series.pack(transpose=True)

        assert_equals((128, 64), range_series.dims.count)
        assert_equals((2, 128, 64), range_series_ary.shape)
        assert_equals((2, 64, 128), range_series_ary_xpose.shape)
        assert_true(np.array_equal(expectedary, range_series_ary[0]))
        assert_true(np.array_equal(expectedary2, range_series_ary[1]))
        assert_true(np.array_equal(expectedary.T, range_series_ary_xpose[0]))
        assert_true(np.array_equal(expectedary2.T, range_series_ary_xpose[1]))

    def test_loadMultipleStacksAsSeriesNoShuffle(self):
        self.__run_loadMultipleStacksAsSeries(False)

    def test_loadMultipleStacksAsSeriesWithShuffle(self):
        self.__run_loadMultipleStacksAsSeries(True)

    def __run_loadTifAsSeries(self, shuffle):
        tmpary = np.arange(60*120, dtype=np.dtype('uint16'))
        rangeary = np.mod(tmpary, 255).astype('uint8').reshape((60, 120))
        pilimg = Image.fromarray(rangeary)
        filepath = os.path.join(self.outputdir, "rangetif01.tif")
        pilimg.save(filepath)
        del pilimg, tmpary

        range_series = self.tsc.loadImagesAsSeries(self.outputdir, inputformat="tif-stack", shuffle=shuffle)
        range_series_ary = range_series.pack()

        assert_equals((60, 120, 1), range_series.dims.count)
        assert_equals((60, 120), range_series_ary.shape)
        assert_true(np.array_equal(rangeary, range_series_ary))

    @unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
    def test_loadTifAsSeriesNoShuffle(self):
        self.__run_loadTifAsSeries(False)

    @unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
    def test_loadTifAsSeriesWithShuffle(self):
# ... the rest of this class is omitted here ...
Example 10: open
if use_existing_parameters == 1:
    with open(Exp_Folder + filename_save_prefix_forICA + '_save_ICA_variables') as f:
        ICA_components_ind, num_ICA_colors_ind, color_map_ind,\
            ICA_components_eachexp, num_ICA_colors_eachexp, color_map_eachexp,\
            ICA_components_allexp, num_ICA_colors_allexp, color_map_allexp, colors_ica = pickle.load(f)

# Go into the main function that does ICA for individual trials
from ica_thunder_analysis import run_analysis_individualexps
from ica_thunder_analysis import run_analysis_eachexp
from ica_thunder_analysis import run_analysis_allexp

from thunder import ThunderContext

print 'Starting Thunder Now. Check console for details'
tsc = ThunderContext.start(appName="thunderICA")

if files_to_do_ICA[0] == 1:
    run_analysis_individualexps(Exp_Folder, filename_save_prefix_forICA, filename_save_prefix_for_textfile,
                                ICA_components_ind, PCA_components_ind, num_ICA_colors_ind, color_map_ind,
                                tsc, redo_ICA, num_fish_used, stimulus_pulse, stimulus_on_time,
                                stimulus_off_time, color_mat, time_baseline, colors_ica)
if files_to_do_ICA[1] == 1:
    run_analysis_eachexp(Exp_Folder, filename_save_prefix_forICA, filename_save_prefix_for_textfile,
                         ICA_components_eachexp, PCA_components_eachexp, num_ICA_colors_eachexp, color_map_eachexp,
                         tsc, redo_ICA, num_fish_used, stimulus_pulse, stimulus_on_time,
                         stimulus_off_time, color_mat, time_baseline, colors_ica)
if files_to_do_ICA[2] == 1:
    run_analysis_allexp(Exp_Folder, filename_save_prefix_forICA, filename_save_prefix_for_textfile,
                        ICA_components_allexp, PCA_components_allexp, num_ICA_colors_allexp, color_map_allexp,
                        tsc, redo_ICA, num_fish_used, stimulus_pulse, stimulus_on_time,
                        stimulus_off_time, color_mat, time_baseline, colors_ica)

############# Save all input parameters
with open(Exp_Folder + filename_save_prefix_forICA + '_save_ICA_variables', 'w') as f:
Example 11: Exception
import optparse
from thunder import ThunderContext, RegressionModel, PCA

if __name__ == "__main__":
    parser = optparse.OptionParser(description="fit a regression model",
                                   usage="%prog datafile modelfile outputdir [options]")
    parser.add_option("--regressmode", choices=("mean", "linear", "bilinear"), help="form of regression")
    parser.add_option("--k", type=int, default=2)

    opts, args = parser.parse_args()
    try:
        datafile = args[0]
        modelfile = args[1]
        outputdir = args[2]
    except IndexError:
        parser.print_usage()
        raise Exception("too few arguments")

    tsc = ThunderContext.start(appName="regresswithpca")

    data = tsc.loadSeries(datafile)
    model = RegressionModel.load(modelfile, opts.regressmode)  # do regression
    betas, stats, resid = model.fit(data)
    pca = PCA(opts.k).fit(betas)  # do PCA
    traj = model.fit(data, pca.comps)  # get trajectories

    outputdir += "-regress"
    tsc.export(pca.comps, outputdir, "comps", "matlab")
    tsc.export(pca.latent, outputdir, "latent", "matlab")
    tsc.export(pca.scores, outputdir, "scores", "matlab")
    tsc.export(traj, outputdir, "traj", "matlab")
Example 12: open
stimulus_pulse = 1
if stimulus_pulse == 1:
    stimulus_on_time = [10, 28, 47, 65, 83, 101]
    stimulus_off_time = [14, 32, 51, 69, 87, 105]
    color_mat = ['#00FFFF', '#0000A0', '#800080', '#FF00FF', '#800000', '#A52A2A']

# Go into the main function that does PCA for individual trials
from pca_thunder_analysis import run_analysis_individualodors
from pca_thunder_analysis import run_analysis_eachodor
from pca_thunder_analysis import run_analysis_allodor

from thunder import ThunderContext

print 'Starting Thunder Now. Check console for details'
tsc = ThunderContext.start(appName="thunderpca")

if files_to_do_PCA[0] == 1:
    run_analysis_individualodors(Exp_Folder, filename_save_prefix_forPCA, filename_save_prefix_for_textfile,
                                 pca_components_ind, num_pca_colors_ind, num_samples_ind, thresh_pca_ind, color_map_ind,
                                 tsc, redo_pca, reconstruct_pca, stimulus_on_time, stimulus_off_time,
                                 color_mat, required_pcs, time_baseline)
if files_to_do_PCA[1] == 1:
    run_analysis_eachodor(Exp_Folder, filename_save_prefix_forPCA, filename_save_prefix_for_textfile,
                          pca_components_eachodor, num_pca_colors_eachodor, num_samples_eachodor, thresh_pca_eachodor, color_map_eachodor,
                          tsc, redo_pca, reconstruct_pca, stimulus_on_time, stimulus_off_time,
                          color_mat, required_pcs, time_baseline)
if files_to_do_PCA[2] == 1:
    run_analysis_allodor(Exp_Folder, filename_save_prefix_forPCA, filename_save_prefix_for_textfile,
                         pca_components_allodor, num_pca_colors_allodor, num_samples_allodor, thresh_pca_allodor, color_map_allodor,
                         tsc, redo_pca, reconstruct_pca, stimulus_on_time, stimulus_off_time,
                         color_mat, required_pcs, time_baseline)

############# Save all input parameters
with open(Exp_Folder + filename_save_prefix_forPCA + '_save_pca_variables', 'w') as f:
    pickle.dump([pca_components_ind, num_pca_colors_ind, num_samples_ind, thresh_pca_ind, color_map_ind,\
Example 13: SparkConf
    )
    print "Found {0} faces!".format(len(faces))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), -1)
    return img

# Load images using thunder and pass them to the OpenCV Haar cascade one by one
if __name__ == "__main__":
    # Define the Spark and Thunder contexts
    conf = SparkConf().setAppName("Collaborative Filter").set("spark.executor.memory", "5g")
    sc = SparkContext(conf=conf)
    tsc = ThunderContext(sc)

    # Load all images in the data directory
    data = tsc.loadImages("/home/vj/Desktop/CS-Project/data", inputFormat="png")

    # Convert each image to grayscale
    grayImages = data.apply(lambda (k, v): (k, convertToGray(v)))

    # Run face detection on each grayscale image
    FaceImages = grayImages.apply(lambda (k, v): (k, detectFaces(v)))

    print (data.dims)
    print (data.nrecords)

    cv2.imshow("image1", grayImages[0])
    cv2.imshow("Face detected1", FaceImages[0])
    cv2.imshow("image2", grayImages[1])
    cv2.imshow("Face detected2", FaceImages[1])
Example 14:
Exp_Folder = '/Users/seetha/Desktop/Ruey_Habenula/Habenula/Short_Stimulus/Fish104_Block2_Blue&UV1c/'
filename_save_prefix = 'Test1'

from thunder import ThunderContext

print 'Starting Thunder Now. Check console for details'
tsc = ThunderContext.start(appName="thunderNMF")

import os
filesep = os.path.sep
import matplotlib.pyplot as plt
import numpy as np

from thunder_NMF import run_NMF
from thunder_NMF import make_NMF_maps
from thunder_NMF_plots import plot_NMF_maps
from thunder import Colorize
image = Colorize.image

Stimulus_Directories = [f for f in os.listdir(Exp_Folder)
                        if os.path.isdir(os.path.join(Exp_Folder, f)) and f.find('Figures') < 0]
#Stimulus_Directories
ii = 0

Trial_Directories = [f for f in os.listdir(os.path.join(Exp_Folder, Stimulus_Directories[ii]))
                     if os.path.isdir(os.path.join(Exp_Folder, Stimulus_Directories[ii], f)) and f.find('Figures') < 0]
Trial_Directories
jj = 0

stim_start = 10  # Stimulus starting time point
stim_end = 14    # Stimulus ending time point
Example 15: Exception
"""
Example standalone app for calculating series statistics
"""
import optparse
from thunder import ThunderContext
if __name__ == "__main__":
parser = optparse.OptionParser(description="compute summary statistics on time series data",
usage="%prog datafile outputdir mode [options]")
parser.add_option("--preprocess", action="store_true", default=False)
opts, args = parser.parse_args()
try:
datafile = args[0]
outputdir = args[1]
mode = args[2]
except IndexError:
parser.print_usage()
raise Exception("too few arguments")
tsc = ThunderContext.start(appName="stats")
data = tsc.loadSeries(datafile).cache()
vals = data.seriesStat(mode)
outputdir += "-stats"
tsc.export(vals, outputdir, "stats_" + mode, "matlab")