

Python Plot.plot_simple_regret Method Code Examples

This article collects typical usage examples of the Python method plot.Plot.plot_simple_regret. If you are wondering what Plot.plot_simple_regret does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the plot.Plot class that provides this method.


Two code examples of Plot.plot_simple_regret are shown below, sorted by popularity by default.
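For orientation before the full examples, here is a minimal, hypothetical usage sketch of Plot.plot_simple_regret. The constructor arguments and the behavior are inferred from the two examples below, not from the project's own documentation:

from plot import Plot

# Track one algorithm for 1000 pulls over 10 trials, sampling every pull.
plot = Plot(1000, 10, ['UCB'], 1)
# ... plot.add_point(...) is called inside the experiment loop ...
plot.plot_simple_regret('my_bandit')  # renders the simple-regret figure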

Example 1: run_bandit_experiment

# Required import: from plot import Plot [as alias]
# Or: from plot.Plot import plot_simple_regret [as alias]
# Additional imports used by this example (the module providing Regret is assumed):
import time
import numpy as np
from plot import Plot
from bandit_algorithms import IncrementalUniformAlgorithm
from bandit_algorithms import UCBAlgorithm
from bandit_algorithms import EpsilonGreedyAlgorithm
from regret import Regret  # assumed module path; not shown in the original source
def run_bandit_experiment(bandit, num_pulls, num_trials):
    # specify bandit algorithms below
    algorithm1 = IncrementalUniformAlgorithm(bandit)
    algorithm2 = UCBAlgorithm(bandit)
    algorithm3 = EpsilonGreedyAlgorithm(bandit)
    algorithms = [algorithm1, algorithm2, algorithm3]

    # keep track of data for plotting
    plot_sample_rate = 1
    plot = Plot(num_pulls, num_trials,
        [a.get_name() for a in algorithms], plot_sample_rate)

    # experiment loop
    for a in algorithms:
        print('\nRunning algorithm {0}...'.format(a.get_name()))
        plot.reset_trial()

        best_arms = np.zeros(num_trials)
        for t in range(num_trials):
            print('Running trial {0}...'.format(t))
            start = time.time()

            plot.begin_trial()
            optimal_expected_reward = bandit.get_expected_reward_optimal_arm()
            regret = Regret(optimal_expected_reward)
            a.reset(bandit)

            for i in range(num_pulls):
                # pull arm according to algorithm
                pulled_arm, _ = a.pull()

                # update regrets
                best_arm = a.get_best_arm()
                expected_reward_pulled_arm = bandit.get_expected_reward_arm(pulled_arm)
                expected_reward_best_arm = bandit.get_expected_reward_arm(best_arm)
                regret.add(expected_reward_pulled_arm, expected_reward_best_arm)

                # update plot
                if i % plot_sample_rate == 0:
                    plot.add_point(i, regret.get_simple_regret(),
                        regret.get_cumulative_regret(), a.get_name())

            end = time.time()
            print('\telapsed: {0}'.format(end - start))
            print('\tbest arm: {0}'.format(a.get_best_arm()))
            best_arms[t] = a.get_best_arm()

        print "Best arm distribution: "
        print np.histogram(best_arms, bins=range(21))

    # create plot
    plot.plot_simple_regret(bandit.get_name())
    plot.plot_cumulative_regret(bandit.get_name())

    # save
    plot.save('{0}_data'.format(bandit.get_name()))
Author: mqtlam | Project: osu-cs533 | Lines: 58 | Source: main.py
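Example 1 depends on a Regret helper whose source is not shown here. Purely from how it is called above, a plausible sketch is the following; the project's actual class may differ:

class Regret(object):
    """Tracks simple and cumulative regret against a known optimal reward."""

    def __init__(self, optimal_expected_reward):
        self.optimal = optimal_expected_reward
        self.cumulative = 0.0
        self.simple = 0.0

    def add(self, expected_reward_pulled_arm, expected_reward_best_arm):
        # Cumulative regret accumulates the gap of the arm actually pulled;
        # simple regret is the gap of the algorithm's current best-arm guess.
        self.cumulative += self.optimal - expected_reward_pulled_arm
        self.simple = self.optimal - expected_reward_best_arm

    def get_simple_regret(self):
        return self.simple

    def get_cumulative_regret(self):
        return self.cumulative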

Example 2: SBRDBandit

# Required import: from plot import Plot [as alias]
# Or: from plot.Plot import plot_simple_regret [as alias]
# This is a hacky way of re-plotting graphs...

from plot import Plot
from bandit_algorithms import IncrementalUniformAlgorithm
from bandit_algorithms import UCBAlgorithm
from bandit_algorithms import EpsilonGreedyAlgorithm
from bandit import SBRDBandit

# load old plot
arm_params = [(1,1)] # dummy params
b = SBRDBandit(arm_params, 'custom_bandit')

num_pulls = 10001
num_trials = 1000
plot_sample_rate = 1
algorithms = [IncrementalUniformAlgorithm(b), UCBAlgorithm(b), EpsilonGreedyAlgorithm(b)]
plot = Plot(num_pulls, num_trials, [a.get_name() for a in algorithms], plot_sample_rate)

print "loading data..."
plot.load('custom_bandit_data.npz')

# new plot
print "creating plots..."
sample_rate = 1
end_index = 501
plot.plot_cumulative_regret('new_'+b.get_name(), sample_rate, end_index)
plot.plot_simple_regret('new_'+b.get_name(), sample_rate, end_index)
Author: mqtlam | Project: osu-cs533 | Lines: 29 | Source: replot.py
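Example 2 only works because Plot.save in Example 1 persists the collected points to disk; the 'custom_bandit_data.npz' filename suggests a NumPy .npz archive. A minimal sketch of such a save/load pair, with assumed attribute names (the project's actual implementation is not shown):

import numpy as np

def save(self, filename):
    # np.savez appends the '.npz' extension automatically, which matches
    # saving '{bandit}_data' in Example 1 and loading '..._data.npz' here.
    np.savez(filename, points=self.points)

def load(self, filename):
    self.points = np.load(filename)['points']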

