This article collects typical usage examples of the Python joblib.Memory.cache method. If you are unsure what Memory.cache does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about the enclosing class, joblib.Memory.
The following presents 15 code examples of the Memory.cache method, drawn from open-source projects and ordered by popularity.
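Before the project-specific examples, here is a minimal sketch of the pattern they all share: create a Memory object pointing at a cache directory, then wrap a function with memory.cache so repeated calls reload the stored result from disk. The directory name and the toy function are illustrative only; recent joblib releases name the first argument location, while several of the examples below still pass the older cachedir keyword.

from joblib import Memory

# persist results on disk under ./joblib_cache (illustrative path)
memory = Memory('./joblib_cache', verbose=0)

def expensive_square(x):
    # stands in for any slow, deterministic computation
    return x ** 2

# wrap the function: the first call computes and stores the result,
# later calls with the same arguments read it back from the cache
cached_square = memory.cache(expensive_square)
print(cached_square(4))  # computed and cached
print(cached_square(4))  # loaded from the cache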
Example 1: load_adni_longitudinal_rs_fmri
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def load_adni_longitudinal_rs_fmri(dirname='ADNI_longitudinal_rs_fmri',
                                   prefix='wr*.nii'):
    """ Returns paths of ADNI rs-fMRI
    """
    # get file paths and description
    images, subject_paths, description = _get_subjects_and_description(
        base_dir=dirname, prefix='I[0-9]*')
    images = np.array(images)
    # get func files
    func_files = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + prefix, first_img=True),
        subject_paths))
    func_files = np.array(func_files)
    # get motion files
    # motions = None
    motions = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + 'rp_*.txt', first_img=True), subject_paths))
    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    df = df.sort_values(by='Image_ID')
    dx_group = np.array(df['DX_Group'])
    subjects = np.array(df['Subject_ID'])
    exams = np.array(df['EXAM_DATE'])
    exams = [date(int(e[:4]), int(e[5:7]), int(e[8:])) for e in exams]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridsfmri(subjects):
        return [_ptid_to_rid(s, roster) for s in subjects]
    rids = np.array(memory.cache(_get_ridsfmri)(subjects))

    def _get_examdatesfmri(rids):
        return [_get_dx(rids[i], dx, exams[i], viscode=None, return_code=True)
                for i in range(len(rids))]
    exam_dates = np.array(memory.cache(_get_examdatesfmri)(rids))

    def _get_viscodesfmri(rids):
        return [_get_vcodes(rids[i], str(exam_dates[i]), dx)
                for i in range(len(rids))]
    viscodes = np.array(memory.cache(_get_viscodesfmri)(rids))
    vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]

    return Bunch(func=func_files, dx_group=dx_group, exam_codes=vcodes,
                 exam_dates=exam_dates, exam_codes2=vcodes2,
                 motion=motions,
                 subjects=subjects, images=images)
Example 2: _niigz2nii
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def _niigz2nii(self):
    """
    Convert .nii.gz to .nii (crucial for SPM).
    """
    cache_dir = os.path.join(self.output_dir, 'cache_dir')
    mem = Memory(cache_dir, verbose=100)
    self.func = mem.cache(do_niigz2nii)(self.func,
                                        output_dir=self.output_dir)
    if self.anat is not None:
        self.anat = mem.cache(do_niigz2nii)(self.anat,
                                            output_dir=self.output_dir)
Example 3: load_adni_longitudinal_hippocampus_volume
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def load_adni_longitudinal_hippocampus_volume():
    """ Returns longitudinal hippocampus measures
    """
    BASE_DIR = _get_data_base_dir('ADNI_csv')
    roster = pd.read_csv(os.path.join(BASE_DIR, 'ROSTER.csv'))
    dx = pd.read_csv(os.path.join(BASE_DIR, 'DXSUM_PDXCONV_ADNIALL.csv'))
    fs = pd.read_csv(os.path.join(BASE_DIR, 'UCSFFSX51_05_20_15.csv'))

    # extract hippocampus numerical values
    column_idx = np.arange(131, 147)
    cols = ['ST' + str(c) + 'HS' for c in column_idx]
    hipp = fs[cols].values
    idx_num = np.array([~np.isnan(h).all() for h in hipp])
    hipp = hipp[idx_num, :]

    # extract roster id
    rids = fs['RID'].values[idx_num]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    # get subject id
    def _getptidshippo(rids):
        return [_rid_to_ptid(rid, roster) for rid in rids]
    ptids = memory.cache(_getptidshippo)(rids)

    # extract exam date
    exams = fs['EXAMDATE'].values[idx_num]
    vcodes = fs['VISCODE'].values[idx_num]
    vcodes2 = fs['VISCODE2'].values[idx_num]
    exams = list(map(
        lambda e: date(int(e[:4]), int(e[5:7]), int(e[8:])), exams))
    exams = np.array(exams)

    # extract diagnosis
    def _getdxhippo(rids, exams):
        return np.array(list(map(_get_dx, rids, [dx] * len(rids), exams)))
    dx_ind = memory.cache(_getdxhippo)(rids, exams)
    dx_group = DX_LIST[dx_ind]

    return Bunch(dx_group=np.array(dx_group), subjects=np.array(ptids),
                 hipp=np.array(hipp), exam_dates=np.array(exams),
                 exam_codes=np.array(vcodes), exam_codes2=np.array(vcodes2))
Example 4: extract_group_components
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def extract_group_components(subject_components, variances,
                             ccs_threshold=None, n_group_components=None,
                             cachedir=None):
    # Use asarray to cast to a non memmapped array
    subject_components = np.asarray(subject_components)
    if len(subject_components) == 1:
        # We are in a single subject case
        return subject_components[0, :n_group_components].T, \
            variances[0][:n_group_components]

    # The group components (concatenated subject components)
    group_components = subject_components.T
    group_components = np.reshape(group_components,
                                  (group_components.shape[0], -1))
    # Save memory
    del subject_components

    # Inter-subject CCA
    memory = Memory(cachedir=cachedir, mmap_mode='r')
    svd = memory.cache(linalg.svd)
    cca_maps, ccs, _ = svd(group_components, full_matrices=False)
    # Save memory
    del group_components

    if n_group_components is None:
        n_group_components = np.argmin(ccs > ccs_threshold)
    cca_maps = cca_maps[:, :n_group_components]
    ccs = ccs[:n_group_components]
    return cca_maps, ccs
Example 5: __init__
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def __init__(
    self,
    gmm_ubm,
    feature=None, cache=False
):
    super(SpeakerIdentification, self).__init__()
    self.gmm_ubm = gmm_ubm

    # default features for speaker identification are MFCC
    # 13 coefs + delta coefs + delta delta coefs
    # + delta energy + delta delta energy
    if feature is None:
        from pyannote.feature.yaafe import YaafeMFCC
        feature = YaafeMFCC(
            e=False, De=True, DDe=True,
            coefs=13, D=True, DD=True
        )
    self.feature = feature

    if cache:
        # initialize cache
        from joblib import Memory
        from tempfile import mkdtemp
        memory = Memory(cachedir=mkdtemp(), verbose=0)
        # cache feature extraction method
        self.get_features = memory.cache(self.get_features)
Example 6: load_adni_longitudinal_csf_biomarker
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def load_adni_longitudinal_csf_biomarker():
    """ Returns longitudinal csf measures
    """
    BASE_DIR = _get_data_base_dir('ADNI_csv')
    roster = pd.read_csv(os.path.join(BASE_DIR, 'ROSTER.csv'))
    dx = pd.read_csv(os.path.join(BASE_DIR, 'DXSUM_PDXCONV_ADNIALL.csv'))
    csf_files = ['UPENNBIOMK.csv', 'UPENNBIOMK2.csv', 'UPENNBIOMK3.csv',
                 'UPENNBIOMK4_09_06_12.csv', 'UPENNBIOMK5_10_31_13.csv',
                 'UPENNBIOMK6_07_02_13.csv', 'UPENNBIOMK7.csv',
                 'UPENNBIOMK8.csv']
    cols = ['RID', 'VISCODE', 'ABETA', 'PTAU', 'TAU']
    # 3,4,5,7,8
    csf = pd.DataFrame()
    for csf_file in csf_files[2:]:
        fs = pd.read_csv(os.path.join(BASE_DIR, csf_file))
        csf = csf.append(fs[cols])

    # remove nans from csf values
    biom = csf[cols[2:]].values
    idx = np.array([~np.isnan(v).any() for v in biom])
    biom = biom[idx]
    # get phenotype
    vcodes = csf['VISCODE'].values[idx]
    rids = csf['RID'].values[idx]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _getptidscsf(rids):
        return list(map(lambda x: _rid_to_ptid(x, roster), rids))
    ptids = memory.cache(_getptidscsf)(rids)

    # get diagnosis
    def _getdxcsf(rids, vcodes):
        return list(map(lambda x, y: DX_LIST[_get_dx(x, dx, viscode=y)],
                        rids, vcodes))
    dx_group = memory.cache(_getdxcsf)(rids, vcodes)

    return Bunch(dx_group=np.array(dx_group), subjects=np.array(ptids),
                 csf=np.array(biom), exam_codes=np.array(vcodes),
                 exam_codes2=np.array(vcodes))
Example 7: construct_and_attach_filename_data
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def construct_and_attach_filename_data(self):
    synsets = self.synset_list
    num_per_synset = self.data['num_per_synset']
    seed = self.data['seed']
    folder = self.local_home('PrecomputedDicts')
    mem = Memory(folder)
    compute_filename_dict = mem.cache(self.compute_filename_dict)
    filenames, filenames_dict = compute_filename_dict(synsets, num_per_synset, seed)
    self.filenames_dict = filenames_dict
Example 8: load_adni_longitudinal_mmse_score
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def load_adni_longitudinal_mmse_score():
    """ Returns longitudinal mmse scores
    """
    BASE_DIR = _get_data_base_dir('ADNI_csv')
    roster = pd.read_csv(os.path.join(BASE_DIR, 'ROSTER.csv'))
    dx = pd.read_csv(os.path.join(BASE_DIR, 'DXSUM_PDXCONV_ADNIALL.csv'))
    fs = pd.read_csv(os.path.join(BASE_DIR, 'MMSE.csv'))

    # extract nans free mmse
    mmse = fs['MMSCORE'].values
    idx_num = fs['MMSCORE'].notnull().values
    mmse = mmse[idx_num]

    # extract roster id
    rids = fs['RID'].values[idx_num]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _getptidsmmse(rids):
        return [_rid_to_ptid(rid, roster) for rid in rids]

    # get subject id
    ptids = memory.cache(_getptidsmmse)(rids)

    # extract visit code (don't use EXAMDATE ; null for GO/2)
    vcodes = fs['VISCODE'].values
    vcodes = vcodes[idx_num]
    vcodes2 = fs['VISCODE2'].values
    vcodes2 = vcodes2[idx_num]

    def _getdxmmse(rids, vcodes2):
        return list(map(
            lambda x, y: DX_LIST[_get_dx(x, dx, viscode=y)], rids, vcodes2))

    # get diagnosis
    dx_group = memory.cache(_getdxmmse)(rids, vcodes2)

    return Bunch(dx_group=np.array(dx_group), subjects=np.array(ptids),
                 mmse=mmse, exam_codes=vcodes, exam_codes2=vcodes2)
Example 9: add_caching_to_funcs
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def add_caching_to_funcs(obj, funcNames):
    mem = Memory('../.add_caching_to_funcs', verbose=11)
    if obj is None or funcNames is None:
        return
    if isScalar(funcNames):
        funcNames = [funcNames]
    for name in funcNames:
        func = getattr(obj, name, None)
        if func is not None:
            setattr(obj, name, mem.cache(func))
Example 10: __init__
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def __init__(self):
    self.name = self.__class__.__name__
    try:
        from joblib import Memory
        mem = Memory(cachedir=self.home('cache'), verbose=False)
        self._get_meta = mem.cache(self._get_meta)
    except ImportError:
        pass
Example 11: _run_suject_level1_glm
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def _run_suject_level1_glm(subject_data_dir, subject_output_dir,
                           **kwargs):
    """
    Just another wrapper.
    """
    mem = Memory(os.path.join(subject_output_dir, "cache_dir"))
    return mem.cache(run_suject_level1_glm)(subject_data_dir,
                                            subject_output_dir,
                                            **kwargs)
Example 12: __init__
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def __init__(self, meta=None):
    if meta is not None:
        self._meta = meta
    self.name = self.__class__.__name__
    try:
        from joblib import Memory
        mem = Memory(cachedir=self.home('cache'))
        self._get_meta = mem.cache(self._get_meta)
    except ImportError:
        pass
Example 13: getagreement
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def getagreement(tpl, datadir, task_type='all'):
    """Get agreement values for annotators in the :data:'tpl' list

    Args:
        tpl (list): combination group of annotators
        datadir (str): Cache data directory used by joblib

    Returns:
        namedtuple defined as ``Agree = collections.namedtuple('Agree', ['kappa', 'alpha', 'avg_ao'], verbose=True)``
    """
    mem = Memory(cachedir=datadir)
    readjson = mem.cache(json2taskdata.readjson, mmap_mode='r')
    create_task_data = mem.cache(json2taskdata.create_task_data)
    count_occurrances = mem.cache(json2taskdata.count_occurrances)
    count_labels = mem.cache(json2taskdata.count_labels)

    annotators = set()
    lectask = []
    # -------------------------------------------------------------------------
    # for each annotator in group tpl
    # -------------------------------------------------------------------------
    for stditem in tpl:
        aname = stditem.split('.')[0][3:][-2:]
        annotators.add(aname)
        lecdict = readjson(stditem)
        newlectask = create_task_data(lecdict, task_type=task_type, annotator=aname)
        label_data = json2taskdata.create_labels_list(newlectask)
        abscount = count_occurrances(str(label_data))
        yaml.dump(abscount, open(os.path.join(datadir, 'abscount-' + aname + '.yaml'), 'w'))

        setcount = count_labels(newlectask)
        yaml.dump(setcount, open(os.path.join(datadir, 'setcount-' + aname + '.yaml'), 'w'))

        lectask = lectask + newlectask

    task = AnnotationTask(data=lectask, distance=nltk.metrics.distance.masi_distance_mod)

    return {frozenset(annotators): Agree(task.kappa(), task.alpha(), task.avg_Ao())}
Example 14: __init__
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def __init__(self, use_cache=True, cachedir=None):
    """Inits TpsSolverFactory

    Args:
        use_cache: whether to cache solver matrices in file
        cachedir: cache directory. If not specified, the .cache directory in
            the parent directory of the top-level package is used.
    """
    if use_cache:
        if cachedir is None:
            # .cache directory in parent directory of top-level package
            cachedir = os.path.join(__import__(__name__.split('.')[0]).__path__[0],
                                    os.path.pardir, ".cache")
        memory = Memory(cachedir=cachedir, verbose=0)
        self.get_solver_mats = memory.cache(self.get_solver_mats)
Example 15: main
# Required import: from joblib import Memory [as alias]
# Or: from joblib.Memory import cache [as alias]
def main():
    ## subsdir = r'E:\elan projects\L2\submissions\extracted'
    ## dstdir = os.path.join(subsdir, r'passed')
    ## copypassedfiles(dstdir, subsdir)
    dstdir = r'E:\elan projects\L2\resubmission\full'

    import glob
    jsonflist = glob.glob(dstdir + '\\' + r'*.379.json')

    mem = Memory(cachedir=dstdir)
    json2agreementmatrix_cached = mem.cache(json2agreementmatrix)

    c = json2agreementmatrix_cached(jsonflist, task_type='all')
    print(c)