

Python Simulator.run Method Code Examples

This article collects typical usage examples of the simulator.Simulator.run method in Python. If you are wondering what Simulator.run does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples of the containing class, simulator.Simulator.


Fifteen code examples of Simulator.run are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
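Most of the examples below share the same skeleton, which comes from the Udacity smartcab project: build an Environment, attach a learning agent, wrap both in a Simulator, and let Simulator.run drive the trials. Here is a minimal sketch of that shared pattern (the Environment and LearningAgent import paths are assumptions for illustration; the snippets themselves only confirm `from simulator import Simulator`):

from environment import Environment  # assumed module path
from simulator import Simulator
from agent import LearningAgent      # assumed module path

def run():
    e = Environment()                              # create environment (also adds dummy traffic)
    a = e.create_agent(LearningAgent)              # create the agent to train
    e.set_primary_agent(a, enforce_deadline=True)  # track this agent against a deadline
    sim = Simulator(e, update_delay=0.0, display=False)  # headless, fastest settings
    sim.run(n_trials=100)                          # run for a specified number of trials

The examples differ mainly in how they sweep hyperparameters (alpha, gamma, epsilon) around this loop and in how they summarize the agent's results afterwards.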

Example 1: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""
    successnum = dict()
    for i in range(10, 36, 10):
        for j in range(40, 71, 10):
            for k in range(6, 16, 4):
                arguments = (i / 100.0, j / 100.0, k / 100.0)
                successes = []  # success counts over five repeated runs
                for index in range(5):
                    # Set up environment and agent
                    e = Environment()  # create environment (also adds some dummy traffic)
                    a = e.create_agent(LearningAgent, arguments)  # create agent
                    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                    # Now simulate it
                    sim = Simulator(e, update_delay=0.001, display=False)  # create simulator (uses pygame when display=True, if available)
                    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

                    sim.run(n_trials=100)  # run for a specified number of trials
                    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
                    successes.append(e.success)
                successnum[arguments] = successes

    print(successnum)
Developer ID: danache, Project: ML_Udacity, Lines: 27, Source: agent.py

Example 2: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
    num_successes = np.sum(a.successes)
    last_failure = a.find_last_failure()
    total_penalty = a.cumulative_penalties
    avg_time_remaining = np.mean(a.all_times_remaining)

    print "Total number of successes: {}".format(num_successes)
    print "Failure last occurred at trial: {}".format(last_failure)
    print 'Total penalties incurred: {}'.format(total_penalty)
    print "Average time remaining: {}".format(avg_time_remaining)


    for state in a.state_q_dict:
        print state
        for action in a.state_q_dict[state]:
            print "Action: {}, Q: {:2f}".format(action,a.state_q_dict[state][action])

    print a.state_q_dict[('right','red',None,None,None)]
    
    return (num_successes,last_failure,total_penalty,avg_time_remaining)
开发者ID:btaborsky,项目名称:machine-learning,代码行数:36,代码来源:agent.py

Example 3: Launcher

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
class Launcher(object):
  
  def setup_logging(self):
    t = datetime.now()
    self.tstamp = '%d-%d-%d-%d-%d' % (t.year, t.month, t.day, t.hour, t.minute)
    fname = LOG_FILE_PATH + LOG_FILENAME + self.tstamp + '.log'    
    logging.basicConfig(filename=fname, level=logging.INFO, format=FORMAT)
  
  def configure(self, p):
    print('constructing simulator')
    self.sim = Simulator(p['ins'], p['strat'], p['start_date'], p['end_date'], p['open_bal'], self.tstamp)

  def simulate(self):
    print('running simulator')
    start = clock()
    self.sim.run()
    end = clock()
    dur_str = 'seconds = %f' % (end - start)
    print(dur_str)
    logging.info('sim time = ' + dur_str)

  def report(self):
    print('plotting')
    start = clock()
    self.sim.plot()
    end = clock()
    dur_str = 'seconds = %f' % (end - start)
    print(dur_str)
    logging.info('plot time = ' + dur_str)

  def go(self, p):
    self.setup_logging()
    self.configure(p)
    self.simulate()
    self.report()
Developer ID: davidbarkhuizen, Project: simagora, Lines: 37, Source: launcher.py

Example 4: run2

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run2():  # helps to find the sweet spot for alpha, gamma values

    alphas = [0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    gammas = [0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    heatmap = []

    for i, alpha in enumerate(alphas):
        row = []
        for j, gamma in enumerate(gammas):
            e = Environment()
            a = e.create_agent(LearningAgent)
            a.alpha = alpha
            a.gamma = gamma

            e.set_primary_agent(a, enforce_deadline=True)
            sim = Simulator(e, update_delay=0.0, display=False)
            sim.run(n_trials=100)
            print "Successful journeys : {}".format(a.targetReachedCount)
            row.append(a.targetReachedCount / 100.0)
            #qstats.append(a.q_learn_stats())
        heatmap.append(row)

    print(heatmap)
    ax = sns.heatmap(heatmap, xticklabels=gammas, yticklabels=alphas, annot=True)
    ax.set(xlabel="gamma", ylabel="alpha")
    plt.show()
Developer ID: Suyyala, Project: machine-learning, Lines: 28, Source: agent.py

Example 5: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""
    optimize = True
    globalNum = 0
    if optimize:
        for alpha in range(1, 11):
            for n in range(1, 11):
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(LearningAgent)  # create agent
                a.experiment = 'alpha'
                e.set_primary_agent(a, enforce_deadline=True)  # set agent to track
                sim = Simulator(e, update_delay=0)  # reduce update_delay to speed up simulation
                a.glNum = globalNum
                globalNum += 1
                a.alpha = (alpha / 20.0) + 0.2
                a.run += 1
                a.counter = 0
                a.trip = 0
                sim.run(n_trials=100)

        for gamma in range(1, 11):
            for n in range(1, 11):
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(LearningAgent)  # create agent
                a.experiment = 'gamma'
                e.set_primary_agent(a, enforce_deadline=True)  # set agent to track
                sim = Simulator(e, update_delay=0)  # reduce update_delay to speed up simulation
                a.glNum = globalNum
                globalNum += 1
                a.gamma = (gamma / 20.0) + 0.2
                a.run += 1
                a.counter = 0
                a.trip = 0
                sim.run(n_trials=100)
Developer ID: jank3, Project: Machine-Learning-Engineer-Nanodegree, Lines: 36, Source: agent.py

Example 6: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0001, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    # Output a dataframe for visualizing and summarizing the experiment
    df = pd.DataFrame()
    df["Turns"] = a.turncountlist
    df["Rewards"] = a.rewardsumlist
    df["Destination"] = a.reachdestlist
    df["Rewards per Turn"] = df["Rewards"] / df["Turns"]
    df["Trial"] = df.index

    return df
Developer ID: kevinpalm, Project: udacity_smartcab, Lines: 27, Source: agent.py

Example 7: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run(get_result=False, gm=0.2, al=0.5):
    """Run the agent for a finite number of trials."""
    if get_result:
        ## print for GridSearch
        print("Running trial for gamma = %.1f, alpha = %.1f" % (gm, al))

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent, gm = gm, al = al)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    n_trials = 100
    sim.run(n_trials=n_trials)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    print "average silly moves for the last 10 trials: ", np.average(a.silly_fq[-10])
    print "average risky moves for the last 10 trials: ", np.average(a.risk_fq[-10])


    """The Following Code is for GridSearch"""
    if get_result:
        summary = sim.rep.summary()
        rate = sum(summary[-1][-10:])/float(10)
        deadline = sum(summary[-2][-10:])/float(10)
        risk_fq = sum(a.risk_fq[-10:])
        print ("success_rate   for gamma = %.1f, alpha = %.1f is %.2f" %(gm, al, rate))
        print ("final_deadline for gamma = %.1f, alpha = %.1f is %.2f" %(gm, al, deadline))
        print ("risk_frequecy  for gamma = %.1f, alpha = %.1f is %d" %(gm, al, risk_fq))
        print
        return (rate, deadline, risk_fq)
开发者ID:HoijanLai,项目名称:MLND,代码行数:37,代码来源:agent.py

Example 8: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0001, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    ## print Q table
    print('+++++++++++++++++++++++++++++++++++++++++++++++')
    print('final Q table')
    print('+++++++++++++++++++++++++++++++++++++++++++++++')
    for key in a.Q:
        print(key, ["%0.2f" % i for i in a.Q[key]])

    print('====================================================================')
    print('An array of arrays where each subarray shows negative rewards for a trial')
    print('====================================================================')
    # Print negative rewards, splitting the record on the '3' marker
    x = a.reward_holder.split('3')
    y = [i.split(' ') for i in x]
    print(y)  # an array of arrays; the total negative reward per trial could be computed from it
Developer ID: WittyAgnomen, Project: smart_cab, Lines: 33, Source: agent.py

Example 9: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=False)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
    print(round(100*np.mean(a.goal),2))
    print(round(np.mean(a.time),2))
    print(round(100*np.mean(a.right / a.time),2))
    print(round(100*np.mean(a.wrong / a.time),2))
    fig, ax = plt.subplots()
    ax.plot(range(50), a.goal, 'o')
    ax.set_title('Does the agent reach the destination in time?')
    plt.savefig('destination.png')

    fig, ax = plt.subplots()
    ax.plot(range(50), a.right / a.time, 'o', color='g')
    ax.plot(range(50), a.wrong / a.time, 'o', color='r')
    plt.legend(('Right', 'Wrong'))
    ax.set_title('Traffic-rules')
    plt.savefig('traffic_rules.png')
Developer ID: aayala15, Project: machine-learning, Lines: 32, Source: agent.py

Example 10: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run(msg=''):
    """Run the agent for a finite number of trials."""

    # set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: you can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: to speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: to quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    results = a.results
    average_cycles = mean([result[0] for result in results])
    average_reward = mean([result[1] for result in results])
    average_violations = mean([result[2] for result in results])
    # print '=' * 10, msg
    # print 'Average Cycles:', average_cycles
    # print 'Average Reward:', average_reward
    # print 'Average Violations:', average_violations

    return average_cycles, average_reward, average_violations
Developer ID: allanbreyes, Project: smartcab, Lines: 28, Source: agent.py

Example 11: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.00001, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
    
    # Print summary
    allPenalties = a.numberOfPenaltiesList
    allFailures = a.numberOfFailuresList
    numberOfTrials = float(len(allFailures))
    numberOfFailures = float(allFailures[-1])
    numberOfSuccess = numberOfTrials - numberOfFailures
    numberOfSuccessFirstHalf = (numberOfTrials / 2) - float(allFailures[len(allFailures) // 2])
    numberOfSuccessSecondHalf = numberOfSuccess - numberOfSuccessFirstHalf
    print("=================================================================================")
    print("SUMMARY")
    print("=================================================================================")
    print("Total Penalties received = %3.2f" % sum(allPenalties))
    print("\tPenalties received in the first half of trials  = %3.2f" % sum(allPenalties[:len(allPenalties) // 2]))
    print("\tPenalties received in the second half of trials = %3.2f" % sum(allPenalties[len(allPenalties) // 2:]))
    print("Success Rate: %3.2f%%" % (numberOfSuccess / numberOfTrials * 100))
    print("\tSuccess Rate of the first half : %3.2f%%" % (numberOfSuccessFirstHalf / (numberOfTrials / 2) * 100))
    print("\tSuccess Rate of the second half: %3.2f%%" % (numberOfSuccessSecondHalf / (numberOfTrials / 2) * 100))
Developer ID: ZAZAZakari, Project: udacity, Lines: 35, Source: agent.py

Example 12: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""

    record = []
    for q_initial in [0, 2, 10]:
        for alpha in range(1, 6):
            # Set up environment and agent
            e = Environment()  # create environment (also adds some dummy traffic)
            a = e.create_agent(LearningAgent, alpha * 0.2, q_initial)  # create agent
            e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
            # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

            # Now simulate it
            sim = Simulator(e, update_delay=0, display=False)  # create simulator (uses pygame when display=True, if available)
            # NOTE: To speed up simulation, reduce update_delay and/or set display=False

            sim.run(n_trials=100)  # run for a specified number of trials
            # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

            a.reset()
            trip_log = pd.DataFrame(a.trip_log)
            # trip_log['Used'] = trip_log['Deadline'] - trip_log['Remaining']
            trip_log['Efficiency'] = trip_log['Remaining'] / trip_log['Deadline'] * 100
            record.append({
                'Success Rate': trip_log[trip_log.Success == True].shape[0],
                'Alpha': alpha * 0.2,
                'Q Initial': q_initial,
                'Efficiency': trip_log['Efficiency'].mean(),
                'Ave Reward': trip_log['Reward'].mean(),
                'Ave Penalty': trip_log['Penalty'].mean(),
            })

    return pd.DataFrame(record)
Developer ID: ibratface, Project: machine-learning, Lines: 35, Source: agent.py

Example 13: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    """Run the agent for a finite number of trials."""
    # Create the output file
    target_dir = os.path.dirname(os.path.realpath(__file__))
    target_path = os.path.join(target_dir, 'qlearning_tuning_report.txt')
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Loop over the parameter grid
    for epsilon in [0.1, 0.5, 0.9]:
        for alpha in np.arange(0.1, 1, 0.2):
            for gamma in np.arange(0.1, 1, 0.2):
                print(epsilon, alpha, gamma)
                # Set up environment and agent
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(QAgent, epsilon, alpha, gamma)  # create agent
                e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                # Now simulate it
                sim = Simulator(e, update_delay=0.001, display=False)  # create simulator (uses pygame when display=True, if available)
                # NOTE: To speed up simulation, reduce update_delay and/or set display=False
                sim.run(n_trials=100)  # run for a specified number of trials
                # Get the count of successful trials and the average running time
                summary = sim.report()

                # Append the results to the report file
                with open(target_path, 'a') as f:
                    f.write('epsilon {}, alpha {}, gamma {} : success {}, avg_time {}, total_reward {}\n'.format(
                        epsilon, alpha, gamma, summary[0], summary[1], round(a.total_reward, 3)))
Developer ID: ibowen, Project: Machine-Learning-Engineer-Nanodegree, Lines: 34, Source: agent4.py

Example 14: run

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def run():
    f = open('running_report.txt', 'w')

    # setup various parameter combinations
    discount_factors = [0.5]
    starting_learning_rates = [0.5]
    epsilon_greedy_policy = [0.09]

    for d_factor in discount_factors:
        for alpha in starting_learning_rates:
            for greedy_policy in epsilon_greedy_policy:

                """Run the agent for a finite number of trials."""
                # Set up environment and agent
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(LearningAgent, learning_rate=alpha, discount_factor=d_factor, greedy_policy=greedy_policy)  # create agent
                e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                # Now simulate it
                sim = Simulator(e, update_delay=0, display=True)  # create simulator (uses pygame when display=True, if available)

                number_of_trials = 100

                # NOTE: To speed up simulation, reduce update_delay and/or set display=False
                sim.run(n_trials=number_of_trials)  # run for a specified number of trials

                # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
                print("Learning rate:", alpha, file=f)
                print("Discount factor:", d_factor, file=f)
                print("Greedy Policy:", greedy_policy, file=f)
                print("Percentage completed: ", a.completed_trials / 100.0, "\n", file=f)

                f.flush()
    f.close()
Developer ID: thalles753, Project: machine-learning, Lines: 37, Source: agent.py

Example 15: mainQ

# Required module: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import run [as alias]
def mainQ(_learning=True):
    # Set player types and logging if provided in command line
    if len(sys.argv) == 3:
        pair = (sys.argv[1], sys.argv[2])
    else:
        pair = None

    # Prompt players
    # Needs to be adapted to define the parameters
    player_pair = promptPlayers(pair, _learning)
    # Create new game
    game = Game(player_pair)

    ######
    # Create new simulation
    # Flags:
    #   - debug: (True, False)
    sim = Simulator(game)


    ######
    # Run a simulation
    # Flags:
    # - tolerance=0.05: epsilon tolerance at which testing begins
    # - n_test=0: number of tests to be conducted after training

    sim.run(tolerance=0.001, n_test=100)
Developer ID: armandosrz, Project: UdacityNanoMachine, Lines: 29, Source: main.py
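Note that this last example drives a different Simulator variant than Examples 1-14: rather than a fixed trial count, run is controlled by an epsilon tolerance and a post-training test count. Side by side (call shapes inferred from the snippets on this page, not from library documentation):

sim.run(n_trials=100)                  # Examples 1-14: run a fixed number of trials
sim.run(tolerance=0.001, n_test=100)   # Example 15: train until epsilon reaches the tolerance, then run 100 tests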


Note: The simulator.Simulator.run examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.