This article collects and summarizes typical usage examples of the OpenNero.set_ai method in Python. If you have been wondering exactly how to use OpenNero.set_ai, or what calling it looks like in practice, the curated examples below should help. You can also explore further usage examples from the OpenNero module.
The following presents 5 code examples of the OpenNero.set_ai method, sorted by popularity by default.
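All five examples share one pattern: construct an algorithm object (here always OpenNero.RTNEAT), register it under a string key with OpenNero.set_ai, and let other code retrieve the same instance later with OpenNero.get_ai. A minimal sketch of that pattern, with a made-up key and placeholder constructor arguments (nothing about the key format is fixed by the API):

import sys
import OpenNero

# rewards are described by a FeatureVectorInfo, as in the examples below
rbound = OpenNero.FeatureVectorInfo()
rbound.add_continuous(-sys.float_info.max, sys.float_info.max)

# build the algorithm and register it under a key of our choosing
rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", 2, 1, 50, 1.0, rbound, False)
OpenNero.set_ai("rtneat-demo", rtneat)

# any code that knows the key can now look the instance up again
print OpenNero.get_ai("rtneat-demo")

The examples also bracket changes to the registered AI with OpenNero.disable_ai() and OpenNero.enable_ai(), so agents do not act while the setup is in flux.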
Example 1: load_rtneat
# Required module: import OpenNero [as alias]
# Alternatively: from OpenNero import set_ai [as alias]
def load_rtneat(self, location, pop, team=constants.OBJECT_TYPE_TEAM_0):
    location = os.path.relpath("/") + location
    if os.path.exists(location):
        OpenNero.set_ai("rtneat-%s" % team, OpenNero.RTNEAT(
            str(location), "data/ai/neat-params.dat",
            constants.pop_size,
            OpenNero.get_environment().agent_info.reward))
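Note that this example calls a different OpenNero.RTNEAT constructor than Examples 3-5: the first argument is the path to a saved population, so the networks are loaded from disk instead of being sized from sensor and action counts. Both call shapes, copied from the examples on this page, side by side:

# resume from a saved population file (this example)
OpenNero.RTNEAT(str(location),               # saved population
                "data/ai/neat-params.dat",   # NEAT parameter file
                constants.pop_size,
                OpenNero.get_environment().agent_info.reward)

# start a fresh population (Examples 3-5)
OpenNero.RTNEAT("data/ai/neat-params.dat",   # NEAT parameter file
                constants.N_SENSORS,         # input nodes
                constants.N_ACTIONS,         # output nodes
                constants.pop_size,
                1.0,                         # weight initialization noise
                rbound,                      # reward bounds
                False)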
Example 2: deploy
# Required module: import OpenNero [as alias]
# Alternatively: from OpenNero import set_ai [as alias]
def deploy(self, ai='rtneat', team=constants.OBJECT_TYPE_TEAM_0):
    OpenNero.disable_ai()
    if ai == 'rtneat':
        OpenNero.set_ai('rtneat-%s' % team, None)
    self.environment.remove_all_agents(team)
    for _ in range(constants.pop_size):
        self.spawnAgent(ai=ai, team=team)
    OpenNero.enable_ai()
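Passing None as the second argument to OpenNero.set_ai clears whatever was registered under that key, so deploy always starts from a clean slate before respawning constants.pop_size agents. A hedged usage sketch, assuming the usual NERO module singleton exposes this method:

mod = module.getMod()   # module singleton, as also used in Example 3
mod.deploy(ai='rtneat', team=constants.OBJECT_TYPE_TEAM_0)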
Example 3: __init__
# Required module: import OpenNero [as alias]
# Alternatively: from OpenNero import set_ai [as alias]
def __init__(self):
    """
    Create the environment
    """
    OpenNero.Environment.__init__(self)
    self.curr_id = 0
    self.max_steps = 20
    self.MAX_DIST = math.hypot(constants.XDIM, constants.YDIM)
    self.states = {}
    self.teams = {}
    self.script = 'Hw5/menu.py'
    abound = OpenNero.FeatureVectorInfo() # actions
    sbound = OpenNero.FeatureVectorInfo() # sensors
    rbound = OpenNero.FeatureVectorInfo() # rewards
    # actions
    abound.add_continuous(-1, 1) # forward/backward speed (multiplied by constants.MAX_MOVEMENT_SPEED)
    abound.add_continuous(-constants.MAX_TURN_RADIANS, constants.MAX_TURN_RADIANS) # left/right turn (in radians)
    # sensor dimensions
    for a in range(constants.N_SENSORS):
        sbound.add_continuous(0, 1)
    # rewards
    # the environment returns the raw fitness dimensions as they accrue
    # each step; the ScoreHelper later combines them (e.g. via Z-scores)
    # into the final rtNEAT fitness
    for f in constants.FITNESS_DIMENSIONS:
        # we don't care about the bounds of the individual dimensions
        rbound.add_continuous(-sys.float_info.max, sys.float_info.max) # range for reward
    # initialize the rtNEAT algorithm parameters:
    # the input layer has enough nodes for all the observations plus a bias,
    # the output layer has enough values for all the actions,
    # the population size matches ours, and 1.0 is the weight
    # initialization noise
    rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat",
                             constants.N_SENSORS,
                             constants.N_ACTIONS,
                             constants.pop_size,
                             1.0,
                             rbound, False)
    key = "rtneat-%s" % constants.OBJECT_TYPE_TEAM_0
    OpenNero.set_ai(key, rtneat)
    print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))
    # set the initial lifetime
    lifetime = module.getMod().lt
    rtneat.set_lifetime(lifetime)
    print 'rtNEAT lifetime:', lifetime
    self.agent_info = OpenNero.AgentInitInfo(sbound, abound, rbound)
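The reward comments above carry the key idea: the environment reports each fitness dimension raw, and a ScoreHelper later normalizes the dimensions (e.g. into Z-scores) before combining them into the single fitness value rtNEAT needs. A toy illustration of that normalize-then-combine step, not OpenNero's actual ScoreHelper:

import math

def combine_fitness(samples, weights):
    # samples: one list of raw fitness-dimension values per agent
    # weights: one relative weight per fitness dimension
    n = float(len(samples))
    combined = [0.0] * len(samples)
    for d, w in enumerate(weights):
        column = [s[d] for s in samples]
        mean = sum(column) / n
        std = math.sqrt(sum((x - mean) ** 2 for x in column) / n) or 1.0
        for i, x in enumerate(column):
            combined[i] += w * (x - mean) / std   # weighted Z-score
    return combined

# two fitness dimensions for three agents, equally weighted
print combine_fitness([[1.0, 5.0], [2.0, 7.0], [3.0, 6.0]], [1.0, 1.0])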
Example 4: start_rtneat
# Required module: import OpenNero [as alias]
# Alternatively: from OpenNero import set_ai [as alias]
def start_rtneat(self, pop_size):
    " start the rtneat learning demo "
    OpenNero.disable_ai()
    #self.environment = RoombaEnvironment(constants.XDIM, constants.YDIM, self)
    #set_environment(self.environment)
    #self.reset_sandbox()
    # create the RTNEAT object
    rbound = OpenNero.FeatureVectorInfo()
    rbound.add_continuous(-sys.float_info.max, sys.float_info.max)
    rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", 2, 1, pop_size, 1.0, rbound, False)
    rtneat.set_weight(0, 1)
    OpenNero.set_ai("rtneat", rtneat)
    OpenNero.enable_ai()
    self.distribute_bots(pop_size, "data/shapes/roomba/RoombaRTNEAT.xml")
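Here the reward vector has a single dimension, and rtneat.set_weight(0, 1) gives it full weight in the fitness. With more than one reward dimension, the natural extension is one set_weight call per dimension; a hedged sketch with made-up dimensions and weights:

rbound = OpenNero.FeatureVectorInfo()
rbound.add_continuous(-sys.float_info.max, sys.float_info.max)  # e.g. tiles cleaned
rbound.add_continuous(-sys.float_info.max, sys.float_info.max)  # e.g. collisions (hypothetical)
rtneat = OpenNero.RTNEAT("data/ai/neat-params.dat", 2, 1, pop_size, 1.0, rbound, False)
rtneat.set_weight(0, 1.0)   # full weight on the first dimension
rtneat.set_weight(1, 0.5)   # hypothetical half weight on the second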
Example 5: start_rtneatq
# Required module: import OpenNero [as alias]
# Alternatively: from OpenNero import set_ai [as alias]
def start_rtneatq(self, team=constants.OBJECT_TYPE_TEAM_0):
    # initialize the rtNEAT+Q algorithm parameters:
    # the input layer has enough nodes for all the observations plus a bias,
    # the output layer has enough values for all the wires,
    # the population size matches ours, and 1.0 is the weight
    # initialization noise
    rtneatq = OpenNero.RTNEAT("data/ai/neat-params.dat",
                              constants.N_SENSORS + 1,
                              constants.N_ACTION_CANDIDATES * (constants.N_ACTIONS + 1),
                              constants.pop_size,
                              1.0,
                              rtneat_rewards(),
                              False)
    key = "rtneatq-%s" % team
    OpenNero.set_ai(key, rtneatq)
    print "get_ai(%s): %s" % (key, OpenNero.get_ai(key))
    rtneatq.set_lifetime(self.environment.lifetime)
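The output-layer size constants.N_ACTION_CANDIDATES * (constants.N_ACTIONS + 1) matches the rtNEAT+Q layout this example's comments call "wires": each candidate contributes one value per action plus one extra value, which is naturally read as its Q estimate. A small arithmetic sketch with made-up constants, only to make the sizing concrete:

# hypothetical sizes, for illustration only
N_SENSORS = 15
N_ACTIONS = 2
N_ACTION_CANDIDATES = 5

inputs = N_SENSORS + 1                           # observations plus a bias
outputs = N_ACTION_CANDIDATES * (N_ACTIONS + 1)  # per candidate: actions + 1 extra value

print 'inputs: %d, outputs: %d' % (inputs, outputs)   # -> inputs: 16, outputs: 15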