本文整理汇总了Python中agent.Agent.__init__方法的典型用法代码示例。如果您正苦于以下问题:Python Agent.__init__方法的具体用法?Python Agent.__init__怎么用?Python Agent.__init__使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类agent.Agent
的用法示例。
在下文中一共展示了Agent.__init__方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, aid, booksList):
    """Bookstore agent: keeps its book catalogue and registers the bookstore behaviour."""
    Agent.__init__(self, aid)
    self.booksList = booksList
    # Register the behaviour that answers catalogue queries.
    self.behaviours.append(ComportamentoAgenteLivraria(self))
示例2: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, player_id, own_dice_list):
    """Dice-game agent: tallies its own dice and prepares opponent-dice probabilities."""
    Agent.__init__(self, player_id, own_dice_list)
    # Count of each face value (1-6) among our own dice; zero entries kept.
    self.num_each_fv = {face: 0 for face in range(1, 7)}
    for face in self.own_dice_list:
        self.num_each_fv[face] += 1
    # Probability tables over the dice we cannot see (every opponent's dice).
    self.pg = ProbGenerator((NUM_PLAYERS - 1) * NUM_DICE)
    self.pg.calc()
示例3: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, name, fg, ms, opt):
    """Function-node agent for a factor graph: caches neighbours, domains and message tables."""
    Agent.__init__(self, name, fg, ms, opt)
    # This agent's factor and the variables connected to it.
    self.f = self.fg.functions[self.name]
    self.neighbors = self.f.variables
    # Per-neighbour variable domains, looked up once here.
    self.domains = {var: self.fg.variables[var].domain for var in self.neighbors}
    # Incoming q-messages, one table of zeros per neighbouring variable.
    self.q = {var: dict.fromkeys(self.domains[var], 0) for var in self.neighbors}
    # Termination flags, one per neighbour.
    self.terminated_neighbors = dict.fromkeys(self.neighbors, False)
示例4: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, name, fg, ms, opt):
    """Variable-node agent for a factor graph: caches its variable, domain and message tables."""
    Agent.__init__(self, name, fg, ms, opt)
    # This agent's variable and the factors connected to it.
    self.v = self.fg.variables[self.name]
    self.neighbors = self.v.functions
    self.domain = self.v.domain
    # Accumulated z-values, one zero per domain value.
    self.z = dict.fromkeys(self.domain, 0)
    # Incoming r-messages: one zeroed domain table per neighbouring factor.
    self.r = {func: dict.fromkeys(self.domain, 0) for func in self.neighbors}
    self.z_queue = []
示例5: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, aid):
    """Initiator agent: sends a FIPA-Request 'REQUEST' message to agent_participant_1."""
    Agent.__init__(self, aid)
    request = ACLMessage(ACLMessage.REQUEST)
    request.set_protocol(ACLMessage.FIPA_REQUEST_PROTOCOL)
    request.set_content('REQUEST')
    request.add_receiver('agent_participant_1')
    # Drive the request through the initiator-side protocol behaviour.
    self.addBehaviour(RequestIniciante(self, request))
示例6: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, aid, bookStores):
    """Consumer agent: tracks bookstore proposals and simple message-traffic counters."""
    Agent.__init__(self, aid)
    self.bookStores = bookStores
    # Best offer seen so far and the store that made it.
    self.bestPropose = None
    self.bestBookStore = None
    # Raw proposals and messages collected during the negotiation.
    self.proposes = []
    self.messages = []
    # Traffic counters.
    self.sends = 0
    self.receives = 0
示例7: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, player_id):
    """Dice-game agent without a dice list: probability tables plus bid-quality counters."""
    Agent.__init__(self, player_id)
    # Probability tables over every die in play (we see none of them here).
    self.pg = ProbGenerator(NUM_PLAYERS * NUM_DICE)
    self.pg.calc()
    # Bid-quality bookkeeping.
    self.good_bid_count = 0
    self.num_bids_made = 0
    self.bad_bid_count = 0
示例8: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, aid):
    """Initiator agent: broadcasts a Contract-Net CFP for 100.0 units to two participants."""
    Agent.__init__(self, aid)
    cfp = ACLMessage(ACLMessage.CFP)
    cfp.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
    # Serialise the order payload into the message body.
    cfp.set_content(dumps({'tipo': 'pedido', 'qtd': 100.0}))
    for receiver in ('participant_agent_1', 'participant_agent_2'):
        cfp.add_receiver(receiver)
    self.addBehaviour(InitiatorProtocol(self, cfp))
示例9: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, city, location):
    """Citizen agent: random type, colour derived from type, ground info from the city.

    @param city: City the agent lives in; queried for the location's ground type.
    @param location: (x, y) grid coordinates of the agent.
    """
    Agent.__init__(self, city, location)
    # Random type in [0, __AGENT_MAX_TYPES]; blue for any nonzero type, red for 0.
    self.type = random.randint(0, Citizen.__AGENT_MAX_TYPES)
    self.color = colorBlue if self.type else colorRed
    self.groundType = city.getLocationType(location[0], location[1])
    self.regions = None
    self.path = None
    # Manhattan distance between two (x, y) points.
    # BUG FIX: the original `lambda (x1, y1), (x2, y2): ...` used tuple-parameter
    # unpacking, which was removed in Python 3 (PEP 3113) and is a SyntaxError
    # there; unpack via indexing inside the body instead.
    self.dist = lambda p1, p2: abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])
示例10: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(
    self,
    modelStanding,
    modelAnimationDict,
    turnRate,
    speed,
    agentList,
    name="",
    rangeFinderCount=13,
    collisionMask=BitMask32.allOff(),
    adjacencySensorThreshold=0,
    radarSlices=0,
    radarLength=0.0,
    scale=1.0,
    brain=None,
    massKg=0.1,
    collisionHandler=None,
    collisionTraverser=None,
    waypoints=None,
):
    """NPC agent: base Agent plus sensor configuration, a brain, and waypoint state.

    NOTE(review): `rangeFinderCount` is accepted but not stored here — confirm
    whether the base class or a subclass consumes it.
    """
    # The base class takes massKg/collisionMask before name — keep its order.
    Agent.__init__(
        self,
        modelStanding,
        modelAnimationDict,
        turnRate,
        speed,
        agentList,
        massKg,
        collisionMask,
        name,
        collisionHandler,
        collisionTraverser,
    )
    # Sensor configuration.
    self.collisionMask = collisionMask
    self.adjacencySensorThreshold = adjacencySensorThreshold
    self.radarSlices = radarSlices
    self.radarLength = radarLength
    # AI / navigation state.
    self.brain = brain
    self.npcState = "retriveKey"  # spelling kept as-is; other code may match it
    self.waypoints = waypoints
    self.targetReached = False
    self.currentTarget = None
    self.bestPath = None
    self.pathSmoothening = True
    # World references and key-carrying state.
    self.player = None
    self.key = None
    self.keyInHand = False
    self.hasFallen = False
    # Apply the visual scale once all state is in place.
    self.scale = scale
    self.setScale(self.scale)
示例11: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, aid, bookStores, pedido):
    """Consumer agent: sends a Contract-Net CFP carrying the order to every bookstore."""
    Agent.__init__(self, aid)
    self.bookStores = bookStores
    self.pedido = pedido
    cfp = ACLMessage(ACLMessage.CFP)
    cfp.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
    # Address the call-for-proposals to each known store.
    for store in self.bookStores:
        cfp.add_receiver(store)
    # Serialise the order into the message body.
    cfp.set_content(dumps(self.pedido))
    self.addBehaviour(ComportamentoAgenteConsumidor(self, cfp))
示例12: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, screen):
    """Enemy sprite: loads per-state sprite paths and spawns at a fixed position."""
    pygame.sprite.Sprite.__init__(self)
    # Sprite image for each animation state.
    self.calmfile = "spr/e.png"
    self.waryfile = "spr/ea.png"
    self.firefile = "spr/es.png"
    self.kickfile = "spr/ek.png"
    self.deadfile = "spr/ed.png"
    # Runtime state.
    self.alive = True
    self.shootstg = 0
    self.before = self.calmfile
    self.actchnce = 0
    self.acthigh = 0
    self.actlow = 0
    # NOTE(review): the `screen` parameter is immediately overwritten — the
    # current display surface is used instead. Confirm the parameter is needed.
    screen = pygame.display.get_surface()
    Agent.__init__(self, self.calmfile, self.waryfile, self.firefile, self.kickfile, screen)
    # Fixed spawn position.
    self.rect.x = 720
    self.rect.y = 350
示例13: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, prefix="msg", suffix="", default="messageReceived", silent=False):
    """ Action agent constructor.
    @param prefix: Prefix for all action-specific handler function name.
    @type prefix: C{str}
    @param suffix: Suffix for all action-specific handler function name.
    @type suffix: C{str}
    @param default: Default handler function name.
    @type default: C{str}
    @param silent: Silent flag.
    @type silent: C{bool}
    """
    Agent.__init__(self)
    # "%s" is later substituted with the action name to build a handler name.
    self.pattern = prefix + "%s" + suffix
    if hasattr(self, default):
        self.__defaultHandler = getattr(self, default)
    else:
        # BUG FIX: was misspelled `self.__defautlHandler`, which left
        # `__defaultHandler` unset whenever no method named `default` existed,
        # causing an AttributeError on later fallback dispatch.
        self.__defaultHandler = self.__default_handler
    self.silent = silent
    self.currentMessage = None
示例14: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, policy_function, discount_factor, update_freq=1):
    """Policy-based agent.

    Parameters:
        policy_function (PolicyFunction): Policy function instance implementing the mapping from states to actions.
        discount_factor (float): Reward discount factor; must lie in [0.0, 1.0].
        update_freq (int): Parameters are updated after every update_freq episodes; must be positive.

    Raises:
        ValueError: If discount_factor is outside [0.0, 1.0] or update_freq is not positive.
    """
    Agent.__init__(self, action_space=policy_function.action_space)
    # Validate-then-store, mirroring the order parameters were given in.
    if discount_factor < 0.0 or discount_factor > 1.0:
        raise ValueError("Discount factor should be between 0.0 and 1.0.")
    self.discount_factor = discount_factor
    if update_freq <= 0:
        raise ValueError("Update frequency should be positive.")
    self.update_freq = update_freq
    self.policy_function = policy_function
    # trials drive reward discounting; episodes drive the update cadence.
    self.trials_experienced = 0
    self.episodes_experienced = 0
示例15: __init__
# 需要导入模块: from agent import Agent [as 别名]
# 或者: from agent.Agent import __init__ [as 别名]
def __init__(self, q_function, discount_factor, greed_eps):
    """Q-learning style agent with epsilon-greedy action selection.

    Parameters:
        q_function (QFunction)
        discount_factor (float): Reward discount factor; must lie in [0.0, 1.0].
        greed_eps (ParameterSchedule): Schedule for epsilon of epsilon-greedy action picking strategy (probability
            of picking a random (rather than the greedy) action.

    Raises:
        ValueError: If discount_factor is outside [0.0, 1.0].
    """
    Agent.__init__(self, action_space=q_function.action_space)
    if discount_factor < 0.0 or discount_factor > 1.0:
        raise ValueError("Discount factor should be between 0.0 and 1.0.")
    self.discount_factor = discount_factor
    self.greed_eps = greed_eps
    # Episodes seen so far (consumed e.g. by parameter schedules).
    self.episodes_experienced = 0
    self.q = q_function
    # SARSA-style bookkeeping: remember the previous state/action/reward.
    self.last_state = None
    self.last_action = None
    self.last_reward = 0.0