

Python Parallel.argmin method: code example

This article collects a typical usage example, taken from open-source Python code, of the joblib.Parallel class combined with argmin. Strictly speaking, argmin is not a method of the Parallel object itself: Parallel returns a plain Python list, and argmin is called on the NumPy array built from that list. If you are wondering how this pattern is used in practice, the selected example below may help; see also the other joblib.Parallel usage examples on this site.


One code example of the Parallel/argmin pattern is shown below.
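Before the full example, here is a minimal, self-contained sketch of the pattern, assuming a toy scoring function; the names score_alpha and alphas are illustrative only and do not appear in the original project:

import numpy as np
from joblib import Parallel, delayed

def score_alpha(alpha):
    # Illustrative stand-in for a cross-validation error; lower is better.
    return (alpha - 0.3) ** 2

alphas = np.logspace(-2, 1, 10)
# Evaluate every candidate alpha in parallel; Parallel returns a plain list of scores.
scores = Parallel(n_jobs=2)(delayed(score_alpha)(alpha) for alpha in alphas)
# argmin is called on the NumPy array built from those scores, not on the Parallel object.
best_alpha = alphas[np.array(scores).argmin()]
print(best_alpha)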

Example 1: main

# Required import: from joblib import Parallel
# (argmin is called on the NumPy array of scores collected from Parallel's output)
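# The snippet also relies on the imports below. The standard-library, NumPy and joblib
# imports are certain; the project-specific helpers (load_movielens, train_test_split,
# ShuffleSplit, ExplicitMF, DictMF, Callback, single_fit, load) come from the spira
# project and from plot_explicit_mf_objective.py itself, so their exact module paths
# are not reproduced here.
import os
import json
import datetime
from os.path import expanduser, join

import numpy as np
from joblib import Parallel, delayed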
def main(version='100k', n_jobs=1, random_state=0, cross_val=False):
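    # Reference hyper-parameters per dataset for the dictionary-learning (dl_params)
    # and coordinate-descent (cd_params) solvers.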
    dl_params = {}
    dl_params['100k'] = dict(learning_rate=1, batch_size=10, offset=0, alpha=1)
    dl_params['1m'] = dict(learning_rate=.75, batch_size=60, offset=0,
                           alpha=.8)
    dl_params['10m'] = dict(learning_rate=.75, batch_size=600, offset=0,
                            alpha=3)
    dl_params['netflix'] = dict(learning_rate=.8, batch_size=4000, offset=0,
                                alpha=0.16)
    cd_params = {'100k': dict(alpha=.1), '1m': dict(alpha=.03),
                 '10m': dict(alpha=.04),
                 'netflix': dict(alpha=.1)}

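    # Load the rating matrix: the MovieLens versions are split into train/test CSR
    # matrices here, while the Netflix split is loaded from pre-computed pickles.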
    if version in ['100k', '1m', '10m']:
        X = load_movielens(version)
        X_tr, X_te = train_test_split(X, train_size=0.75,
                                      random_state=random_state)
        X_tr = X_tr.tocsr()
        X_te = X_te.tocsr()
    elif version == 'netflix':
        X_tr = load(expanduser('~/spira_data/nf_prize/X_tr.pkl'))
        X_te = load(expanduser('~/spira_data/nf_prize/X_te.pkl'))

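    # Baseline coordinate-descent solver and two variants of the online
    # dictionary-learning solver (without and with partial projection).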
    cd_mf = ExplicitMF(n_components=60, max_iter=50, alpha=.1, normalize=True,
                       verbose=1, )
    dl_mf = DictMF(n_components=30, n_epochs=20, alpha=1.17, verbose=5,
                   batch_size=10000, normalize=True,
                   fit_intercept=True,
                   random_state=0,
                   learning_rate=.75,
                   impute=False,
                   partial=False,
                   backend='python')
    dl_mf_partial = DictMF(n_components=60, n_epochs=20, alpha=1.17, verbose=5,
                           batch_size=10000, normalize=True,
                           fit_intercept=True,
                           random_state=0,
                           learning_rate=.75,
                           impute=False,
                           partial=True,
                           backend='python')

    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H'
                                                 '-%M-%S')
    if cross_val:
        subdir = 'benches_ncv'
    else:
        subdir = 'benches'
    output_dir = expanduser(join('~/output/recommender/', timestamp, subdir))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

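    # Regularization values explored when cross-validation is enabled.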
    alphas = np.logspace(-2, 1, 10)
    mf_list = [dl_mf_partial]
    dict_id = {cd_mf: 'cd', dl_mf: 'dl', dl_mf_partial: 'dl_partial'}
    names = {'cd': 'Coordinate descent', 'dl': 'Proposed online masked MF',
             'dl_partial': 'Proposed algorithm (with partial projection)'}

    if os.path.exists(join(output_dir, 'results_%s_%s.json' % (version,
                                                               random_state))):
        with open(join(output_dir, 'results_%s_%s.json' % (version,
                                                           random_state)),
                  'r') as f:
            results = json.load(f)
    else:
        results = {}

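    # For each solver, either reuse the reference hyper-parameters above or select
    # the best alpha by cross-validation (Parallel over alphas, then argmin).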
    for mf in mf_list:
        results[dict_id[mf]] = {}
        if not cross_val:
            if isinstance(mf, DictMF):
                mf.set_params(
                    learning_rate=dl_params[version]['learning_rate'],
                    batch_size=dl_params[version]['batch_size'],
                    alpha=dl_params[version]['alpha'])
            else:
                mf.set_params(alpha=cd_params[version]['alpha'])
        else:
            if isinstance(mf, DictMF):
                mf.set_params(
                    learning_rate=dl_params[version]['learning_rate'],
                    batch_size=dl_params[version]['batch_size'])
            if version != 'netflix':
                cv = ShuffleSplit(n_iter=3, train_size=0.66, random_state=0)
                mf_scores = Parallel(n_jobs=n_jobs, verbose=10)(
                    delayed(single_fit)(mf, alpha, X_tr, cv) for alpha in
                    alphas)
            else:
                mf_scores = Parallel(n_jobs=n_jobs, verbose=10)(
                    delayed(single_fit)(mf, alpha, X_tr, X_te,
                                        nested=False) for alpha in alphas)
            mf_scores = np.array(mf_scores).mean(axis=1)
            best_alpha_arg = mf_scores.argmin()
            best_alpha = alphas[best_alpha_arg]
            mf.set_params(alpha=best_alpha)

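        # Fit on the training set with a callback that tracks progress on the
        # train/test matrices, then record the results under the solver's identifier.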
        cb = Callback(X_tr, X_te, refit=False)
        mf.set_params(callback=cb)
        mf.fit(X_tr)
        results[dict_id[mf]] = dict(name=names[dict_id[mf]],
#......... remaining code omitted .........
Developer: arthurmensch, Project: spira, Lines of code: 103, Source file: plot_explicit_mf_objective.py
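The example calls a single_fit helper that is defined elsewhere in plot_explicit_mf_objective.py and is not shown above. As a rough, hypothetical sketch only (the real implementation may differ), the cross-validation branch could look something like the following, assuming the estimators expose scikit-learn-style clone/set_params/fit and a score method that returns a reconstruction error (lower is better):

from sklearn.base import clone

def single_fit(mf, alpha, X, cv):
    # Hypothetical helper: evaluate one regularization value on every CV split.
    scores = []
    for train_idx, test_idx in cv.split(X):      # assumes a scikit-learn style splitter
        est = clone(mf).set_params(alpha=alpha)
        est.fit(X[train_idx])
        scores.append(est.score(X[test_idx]))    # assumes score() returns an error
    return scores

Each call returns the per-split scores for one alpha, which is consistent with the mean(axis=1) followed by argmin in the example above.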


Note: the joblib.Parallel.argmin examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or reuse should follow the corresponding project's license. Please do not republish without permission.