Python joblib.Memory Class Code Examples

This article collects typical usage examples of the Python joblib.Memory class. If you have been wondering what the Memory class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.


Below are 15 code examples of the Memory class, ordered by popularity by default.
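All of the examples follow the same basic pattern: construct a Memory object pointing at a cache directory, then wrap expensive functions with memory.cache so that repeated calls are served from disk. Here is a minimal sketch of that pattern (the directory name is illustrative; note also that recent joblib releases renamed the cachedir keyword seen in the snippets below to location, though passing the directory positionally works in both):

import numpy as np
from joblib import Memory

# A disk cache rooted at ./joblib_cache (illustrative path).
memory = Memory('./joblib_cache', verbose=0)

def expensive_transform(x):
    # Stand-in for a computation worth caching.
    return np.sqrt(x)

# memory.cache returns a wrapped callable; calls with arguments
# already seen before are loaded from disk instead of recomputed.
cached_transform = memory.cache(expensive_transform)

result = cached_transform(np.arange(10))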

Example 1: extract_group_components

import numpy as np
from scipy import linalg
from joblib import Memory


def extract_group_components(subject_components, variances,
                             ccs_threshold=None, n_group_components=None,
                             cachedir=None):
    # Use asarray to cast to a non memmapped array
    subject_components = np.asarray(subject_components)
    if len(subject_components) == 1:
        # We are in a single subject case
        return subject_components[0, :n_group_components].T, \
                variances[0][:n_group_components]

    # The group components (concatenated subject components)
    group_components = subject_components.T
    group_components = np.reshape(group_components,
                                    (group_components.shape[0], -1))
    # Save memory
    del subject_components

    # Inter-subject CCA
    memory = Memory(cachedir=cachedir, mmap_mode='r')
    svd = memory.cache(linalg.svd)
    cca_maps, ccs, _ = svd(group_components, full_matrices=False)
    # Save memory
    del group_components
    if n_group_components is None:
        n_group_components = np.argmin(ccs > ccs_threshold)
    cca_maps = cca_maps[:, :n_group_components]
    ccs = ccs[:n_group_components]
    return cca_maps, ccs
Developer: GaelVaroquaux, Project: canica, Lines: 28, Source: main.py
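Example 1 passes mmap_mode='r' to Memory, so once the SVD result is on disk, later calls return a read-only memory-mapped array instead of loading everything into RAM. A minimal sketch of that behavior (the directory name is illustrative):

import numpy as np
from joblib import Memory

memory = Memory('./joblib_cache', mmap_mode='r', verbose=0)

@memory.cache
def make_array(n):
    # Stand-in for an expensive array-producing computation.
    return np.random.rand(n, n)

first = make_array(100)   # computed and written to the cache
again = make_array(100)   # served from disk as a numpy.memmap

This is also why the function above starts with np.asarray(subject_components): as its comment notes, it casts a possibly memory-mapped input back to a plain in-memory array.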

Example 2: __init__

    def __init__(
        self,
        gmm_ubm,
        feature=None, cache=False
    ):

        super(SpeakerIdentification, self).__init__()

        self.gmm_ubm = gmm_ubm

        # default features for speaker identification are MFCC
        # 13 coefs + delta coefs  + delta delta coefs
        #          + delta energy + delta delta energy
        if feature is None:
            from pyannote.feature.yaafe import YaafeMFCC
            feature = YaafeMFCC(
                e=False, De=True, DDe=True,
                coefs=13, D=True, DD=True
            )
        self.feature = feature

        if cache:

            # initialize cache
            from joblib import Memory
            from tempfile import mkdtemp
            memory = Memory(cachedir=mkdtemp(), verbose=0)

            # cache feature extraction method
            self.get_features = memory.cache(self.get_features)
Developer: MamadouDoumbia, Project: pyannote, Lines: 30, Source: speech.py
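Examples 2, 6, 8, and 9 all use the same trick: rebinding a method to its disk-cached wrapper on the instance, as Example 2 does with self.get_features. A minimal sketch of that pattern (the class and method here are hypothetical):

from tempfile import mkdtemp
from joblib import Memory

class FeaturePipeline:
    """Hypothetical class illustrating the cached-bound-method pattern."""

    def __init__(self, cache=False):
        if cache:
            memory = Memory(mkdtemp(), verbose=0)
            # Rebind the method on the instance to its disk-cached
            # wrapper; later calls go through the cache.
            self.get_features = memory.cache(self.get_features)

    def get_features(self, n):
        # Stand-in for expensive feature extraction.
        return [i * i for i in range(n)]

Because the rebinding happens per instance, uncached instances keep the plain method, while cached ones transparently reuse results across calls.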

Example 3: load_adni_longitudinal_rs_fmri

import os
from datetime import date

import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.utils import Bunch  # assumed source of Bunch; the original project may import it elsewhere


def load_adni_longitudinal_rs_fmri(dirname='ADNI_longitudinal_rs_fmri',
                                   prefix='wr*.nii'):
    """ Returns paths of ADNI rs-fMRI
    """

    # get file paths and description
    images, subject_paths, description = _get_subjects_and_description(
        base_dir=dirname, prefix='I[0-9]*')
    images = np.array(images)
    # get func files
    func_files = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + prefix, first_img=True),
                     subject_paths))
    func_files = np.array(func_files)

    # get motion files
    # motions = None
    motions = list(map(lambda x: _glob_subject_img(
        x, suffix='func/' + 'rp_*.txt', first_img=True), subject_paths))

    # get phenotype from csv
    dx = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                  'DXSUM_PDXCONV_ADNIALL.csv'))
    roster = pd.read_csv(os.path.join(_get_data_base_dir('ADNI_csv'),
                                      'ROSTER.csv'))
    df = description[description['Image_ID'].isin(images)]
    df = df.sort_values(by='Image_ID')
    dx_group = np.array(df['DX_Group'])
    subjects = np.array(df['Subject_ID'])
    exams = np.array(df['EXAM_DATE'])
    exams = [date(int(e[:4]), int(e[5:7]), int(e[8:])) for e in exams]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    def _get_ridsfmri(subjects):
        return [_ptid_to_rid(s, roster) for s in subjects]
    rids = np.array(memory.cache(_get_ridsfmri)(subjects))

    def _get_examdatesfmri(rids):
        return [_get_dx(rids[i], dx, exams[i], viscode=None, return_code=True)
                for i in range(len(rids))]

    exam_dates = np.array(memory.cache(_get_examdatesfmri)(rids))

    def _get_viscodesfmri(rids):
        return [_get_vcodes(rids[i], str(exam_dates[i]), dx)
                for i in range(len(rids))]
    viscodes = np.array(memory.cache(_get_viscodesfmri)(rids))
    vcodes, vcodes2 = viscodes[:, 0], viscodes[:, 1]

    return Bunch(func=func_files, dx_group=dx_group, exam_codes=vcodes,
                 exam_dates=exam_dates, exam_codes2=vcodes2,
                 motion=motions,
                 subjects=subjects, images=images)
Developer: mrahim, Project: dataset_loader, Lines: 59, Source: dataset.py

Example 4: construct_and_attach_filename_data

    def construct_and_attach_filename_data(self):
        synsets = self.synset_list
        num_per_synset = self.data['num_per_synset']
        seed = self.data['seed']
        folder = self.local_home('PrecomputedDicts')
        mem = Memory(folder)
        compute_filename_dict = mem.cache(self.compute_filename_dict)
        filenames, filenames_dict = compute_filename_dict(synsets, num_per_synset, seed)
        self.filenames_dict = filenames_dict
Developer: simudream, Project: imagenet, Lines: 9, Source: dldatasets.py

Example 5: add_caching_to_funcs

from joblib import Memory


def add_caching_to_funcs(obj, funcNames):
    mem = Memory('../.add_caching_to_funcs', verbose=11)
    if obj is None or funcNames is None:
        return
    if isScalar(funcNames):
        funcNames = [funcNames]
    for name in funcNames:
        func = getattr(obj, name, None)
        if func is not None:
            setattr(obj, name, mem.cache(func))
Developer: dblalock, Project: flock, Lines: 10, Source: learn.py

Example 6: __init__

    def __init__(self):

        self.name = self.__class__.__name__

        try:
            from joblib import Memory
            mem = Memory(cachedir=self.home('cache'), verbose=False)
            self._get_meta = mem.cache(self._get_meta)
        except ImportError:
            pass
Developer: Afey, Project: skdata, Lines: 10, Source: dataset.py

Example 7: _run_suject_level1_glm

    def _run_suject_level1_glm(subject_data_dir, subject_output_dir,
                               **kwargs):
        """
        Just another wrapper.

        """

        mem = Memory(os.path.join(subject_output_dir, "cache_dir"))
        return mem.cache(run_suject_level1_glm)(subject_data_dir,
                                                subject_output_dir,
                                                **kwargs)
Developer: fabianp, Project: pypreprocess, Lines: 11, Source: hcp_preproc_and_analysis.py

Example 8: __init__

    def __init__(self, meta=None):
        if meta is not None:
            self._meta = meta

        self.name = self.__class__.__name__

        try:
            from joblib import Memory
            mem = Memory(cachedir=self.home('cache'))
            self._get_meta = mem.cache(self._get_meta)
        except ImportError:
            pass
Developer: pierreg, Project: skdata, Lines: 12, Source: uiuc_car.py

Example 9: __init__

    def __init__(self, use_cache=True, cachedir=None):
        """Inits TpsSolverFactory

        Args:
            use_cache: whether to cache solver matrices in file
            cachedir: cache directory. If not specified, the .cache directory
                in the parent directory of the top-level package is used.
        """
        if use_cache:
            if cachedir is None:
                # .cache directory in parent directory of top-level package
                cachedir = os.path.join(__import__(__name__.split('.')[0]).__path__[0], os.path.pardir, ".cache")
            memory = Memory(cachedir=cachedir, verbose=0)
            self.get_solver_mats = memory.cache(self.get_solver_mats)
Developer: amoliu, Project: lfd, Lines: 13, Source: solver.py

Example 10: main

from joblib import Memory


def main():
##    subsdir=r'E:\elan projects\L2\submissions\extracted'
##    dstdir=os.path.join(subsdir,r'passed')
##    copypassedfiles(dstdir,subsdir)
    dstdir = r'E:\elan projects\L2\resubmission\full'
    import glob
    jsonflist = glob.glob(dstdir + '\\' + r'*.379.json')

    mem = Memory(cachedir=dstdir)
    json2agreementmatrix_cached = mem.cache(json2agreementmatrix)

    c = json2agreementmatrix_cached(jsonflist, task_type='all')
    print(c)
Developer: aliabbasjp, Project: elanexp, Lines: 13, Source: processsubmissions.py

Example 11: _niigz2nii

    def _niigz2nii(self):
        """
        Convert .nii.gz to .nii (crucial for SPM).

        """

        cache_dir = os.path.join(self.output_dir, 'cache_dir')
        mem = Memory(cache_dir, verbose=100)

        self.func = mem.cache(do_niigz2nii)(self.func,
                                            output_dir=self.output_dir)
        if self.anat is not None:
            self.anat = mem.cache(do_niigz2nii)(self.anat,
                                                output_dir=self.output_dir)
Developer: fabianp, Project: pypreprocess, Lines: 14, Source: subject_data.py

Example 12: load_adni_longitudinal_hippocampus_volume

import os
from datetime import date

import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.utils import Bunch  # assumed source of Bunch; the original project may import it elsewhere


def load_adni_longitudinal_hippocampus_volume():
    """ Returns longitudinal hippocampus measures
    """

    BASE_DIR = _get_data_base_dir('ADNI_csv')

    roster = pd.read_csv(os.path.join(BASE_DIR, 'ROSTER.csv'))
    dx = pd.read_csv(os.path.join(BASE_DIR, 'DXSUM_PDXCONV_ADNIALL.csv'))
    fs = pd.read_csv(os.path.join(BASE_DIR, 'UCSFFSX51_05_20_15.csv'))

    # extract hippocampus numerical values
    column_idx = np.arange(131, 147)
    cols = ['ST' + str(c) + 'HS' for c in column_idx]
    hipp = fs[cols].values
    idx_num = np.array([~np.isnan(h).all() for h in hipp])
    hipp = hipp[idx_num, :]

    # extract roster id
    rids = fs['RID'].values[idx_num]

    # caching dataframe extraction functions
    CACHE_DIR = _get_cache_base_dir()
    cache_dir = os.path.join(CACHE_DIR, 'joblib', 'load_data_cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    memory = Memory(cachedir=cache_dir, verbose=0)

    # get subject id
    def _getptidshippo(rids):
        return [_rid_to_ptid(rid, roster) for rid in rids]
    ptids = memory.cache(_getptidshippo)(rids)

    # extract exam date
    exams = fs['EXAMDATE'].values[idx_num]
    vcodes = fs['VISCODE'].values[idx_num]
    vcodes2 = fs['VISCODE2'].values[idx_num]
    exams = list(map(
        lambda e: date(int(e[:4]), int(e[5:7]), int(e[8:])), exams))
    exams = np.array(exams)

    # extract diagnosis
    def _getdxhippo(rids, exams):
        return np.array(list(map(_get_dx, rids, [dx]*len(rids), exams)))
    dx_ind = memory.cache(_getdxhippo)(rids, exams)
    dx_group = DX_LIST[dx_ind]

    return Bunch(dx_group=np.array(dx_group), subjects=np.array(ptids),
                 hipp=np.array(hipp), exam_dates=np.array(exams),
                 exam_codes=np.array(vcodes), exam_codes2=np.array(vcodes2))
Developer: mrahim, Project: dataset_loader, Lines: 49, Source: dataset.py

Example 13: _load_data

import os


def _load_data(root_dir="/",
               data_set="ds107",
               cache_dir="/volatile/storage/workspace/parietal_retreat/" +
               "covariance_learn/cache/",
               n_jobs=1):
    from joblib import Memory
    mem = Memory(cachedir=cache_dir)
    load_data_ = mem.cache(setup_data_paths.run)

    df = setup_data_paths.get_all_paths(root_dir=root_dir, data_set=data_set)
    # region_signals = joblib.load(os.path.join(root_dir, dump_file))
    region_signals = load_data_(root_dir=root_dir, data_set=data_set,
                                n_jobs=n_jobs,
                                dump_dir=os.path.join(cache_dir, data_set))
    return df, region_signals
Developer: rphlypo, Project: parietalretreat, Lines: 15, Source: classify_covs.py

Example 14: __init__

    def __init__(self, data_same, normalize=True, min_max_scale=False,
            scale_f1=None, scale_f2=None,
            nframes=1, batch_size=1, marginf=0, only_same=False,
            cache_to_disk=False):
        self.print_mean_DTW_costs(data_same)
        self.ratio_same = 0.5  # init
        self.ratio_same = self.compute_ratio_speakers(data_same)
        self._nframes = nframes
        print "nframes:", self._nframes

        (self._x1, self._x2, self._y_word, self._y_spkr,
                self._scale_f1, self._scale_f2) = self.prep_data(data_same,
                        normalize, min_max_scale, scale_f1, scale_f2)

        self._y1 = [numpy.zeros(x.shape[0], dtype='int8') for x in self._x1]
        self._y2 = [numpy.zeros(x.shape[0], dtype='int8') for x in self._x1]
        # self._y1 says if frames in x1 and x2 belong to the same (1) word or not (0)
        # self._y2 says if frames in x1 and x2 were said by the same (1) speaker or not (0)
        for ii, yy in enumerate(self._y_word):
            self._y1[ii][:] = yy
        for ii, yy in enumerate(self._y_spkr):
            self._y2[ii][:] = yy
        self._nwords = batch_size
        self._margin = marginf
        # marginf says if we pad taking a number of frames as margin
        self._x1_mem = []
        self._x2_mem = []
        self._y1_mem = []
        self._y2_mem = []
        self.cache_to_disk = cache_to_disk
        if self.cache_to_disk:
            from joblib import Memory
            self.mem = Memory(cachedir='joblib_cache', verbose=0)
Developer: RolT, Project: abnet, Lines: 33, Source: dataset_iterators.py

Example 15: ica_step

import numpy as np
from joblib import Memory

# fastica is assumed to come from the project's ICA implementation (not shown).


def ica_step(group_maps, group_variance, cachedir=None):
    memory = Memory(cachedir=cachedir, mmap_mode='r')
    # We do a spatial ICA: the arrays are transposed in the following,
    # axis1 = component, and axis2 is voxel number.

    _, ica_maps = memory.cache(fastica)(group_maps.T, whiten=False)

    # Project the ICAs on the group maps to give a 'cross-subject
    # reproducibility' score.
    proj = np.dot(ica_maps, group_maps)
    reproducibility_score = (np.abs(proj)*group_variance).sum(axis=-1)

    order = np.argsort(reproducibility_score)[::-1]

    ica_maps = ica_maps[order, :]

    return ica_maps.T
Developer: GaelVaroquaux, Project: canica, Lines: 17, Source: main.py


Note: The joblib.Memory class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code. Please do not repost without permission.