This article collects typical usage examples of the Python method simulator.Simulator.reset. If you are wondering how exactly Simulator.reset is used, what it is for, or where to find examples of it, the curated code samples here may help. You can also browse further usage examples of the containing class, simulator.Simulator.
The following shows 3 code examples of the Simulator.reset method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: main
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import reset [as alias]
def main():
    # Simulator(number of rabbits, number of foxes, size of world)
    simulator = Simulator(30, 10, 20)
    while True:
        simulator.print()
        simulator.move()
        simulator.reset()
        simulator.eat()
        simulator.reset()
        simulator.getold()
        simulator.reset()
        simulator.starve()
        simulator.reset()
        input("Enter: ")
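The Simulator class itself is not shown on this page. Purely to make the call pattern above concrete, here is a minimal sketch of what such a class could look like; every detail of it (the _acted set, the method bodies) is an assumption for illustration, not the actual simulator.Simulator API.

# Hypothetical sketch only -- not the actual simulator.Simulator implementation.
class Simulator:
    def __init__(self, rabbits, foxes, world_size):
        self.rabbits = rabbits
        self.foxes = foxes
        self.world_size = world_size
        self._acted = set()  # animals that have already acted this phase (assumed)

    def move(self):
        pass  # ... move each animal once, recording it in self._acted ...

    def reset(self):
        # Clear the per-phase bookkeeping so the next phase starts fresh.
        self._acted.clear()

Under that reading, calling reset() after each of the four phases (move, eat, getold, starve) would stop an animal from acting twice within a single phase.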
Example 2: nice_json_dump
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import reset [as alias]
director.direct(simulation, artifact_directory)
total_real_time = time.time() - start_time
elapsed, ticks, queenstats = simulator.get_results(simulation)
# this used to aggregate data from several runs
elapsed_balanced = elapsed / amount_of_ants
data = {
    'world': world_name,
    'world_filepath': file_,
    'queen': queen.get_name(),
    'ants': amount_of_ants,
    'ticks': ticks,
    'cost': elapsed,
    'cost_balanced': elapsed_balanced,
    'total_food': reality.world.get_total_food(),
    'best_finding_cost': queenstats.best_finding_cost,
    'moves_leading_to_food_being_found': queenstats.moves_leading_to_food_being_found,
    'total_real_time': total_real_time,
}
nice_json_dump(reality.world.to_json(), os.path.join(artifact_directory, os.path.basename(file_)))
draw_pheromone_levels(simulation, artifact_directory, reality, force_name='end')
draw_link_costs(simulation, artifact_directory, reality, force_name='end')
nice_json_dump(data, os.path.join(artifact_directory, 'results.json'))
print('world: %s, queen: %s, ants: %s, avg.decisions: %s, avg.time/ant: %s' % (
    file_, queen.get_name(), amount_of_ants, ticks, elapsed_balanced))
simulator.reset()
#pycallgraph.make_dot_graph('profile.png')
#exit()
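Note the single simulator.reset() at the end: together with the comment "this used to aggregate data from several runs", it suggests the same Simulator instance is reused across benchmark runs. Below is a minimal sketch of that reuse pattern, where run_one() and the worlds list are hypothetical stand-ins for the surrounding code this excerpt omits.

# Sketch of the reuse pattern only; run_one() and worlds are hypothetical
# stand-ins for code not shown in the excerpt above.
from simulator import Simulator

def run_one(simulator, world):
    # ... configure the simulator for `world`, run it, collect stats ...
    return {'world': world}

worlds = ['small.json', 'large.json']  # hypothetical world files
simulator = Simulator()                # constructor arguments assumed
results = []
for world in worlds:
    results.append(run_one(simulator, world))
    simulator.reset()  # wipe accumulated state before the next run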
Example 3: Catch
# Required import: from simulator import Simulator [as alias]
# Or: from simulator.Simulator import reset [as alias]
# model.load_weights("model.h5")
# Define environment/game
# env = Catch(grid_size)
args = get_command_line_args()
env = Simulator(args)
# Initialize experience replay object
exp_replay = ExperienceReplay(max_memory=max_memory)
# Train
win_cnt = 0
for e in range(epoch):
    loss = 0.
    env.reset(args)
    # env.reset()
    game_over = False
    # get initial input
    input_t = env.observe()
    while not game_over:
        input_tm1 = input_t
        # get next action
        if np.random.rand() <= epsilon:
            action = np.random.randint(0, num_actions, size=1)
        else:
            q = model.predict(input_tm1)
            action = np.argmax(q[0])
        # apply action, get rewards and new state