本文整理汇总了Python中Agent.Agent类的典型用法代码示例。如果您正苦于以下问题:Python Agent类的具体用法?Python Agent怎么用?Python Agent使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Agent类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testAddAndRemoveEdge
def testAddAndRemoveEdge(self):
    """Adding an edge connects a vertex pair; removing it disconnects them."""
    agent = Agent(n=10, p=0, topology='ErdosRenyi')
    # p=0 gives an edgeless graph, so (0, 1) starts disconnected.
    self.assertFalse(agent.graph.are_connected(0, 1))
    agent.addEdge(0, 1)
    self.assertTrue(agent.graph.are_connected(0, 1))
    agent.removeEdge(0, 1)
    self.assertFalse(agent.graph.are_connected(0, 1))
示例2: __init__
def __init__(self, refm, disc_rate, sims, depth, horizon, epsilon=0.05, threads=1, memory=32):
Agent.__init__(self, refm, disc_rate)
if epsilon > 1.0:
epsilon = 1.0
if epsilon < 0.0:
epsilon = 0.0
self.refm = refm
self.sims = int(sims)
self.depth = int(depth)
self.horizon = int(horizon)
self.memory = int(memory)
self.epsilon = epsilon
self.threads = int(threads)
self.obs_cells = refm.getNumObsCells()
self.obs_symbols = refm.getNumObsSyms()
self.obs_bits = int(ceil(log(refm.getNumObs(), 2.0)))
self.reward_bits = int(ceil(log(refm.getNumRewards(), 2.0)))
self.num_actions = refm.getNumActions()
print "obs_bits = ", self.obs_bits
print "reward_bits = ", self.reward_bits
self.agent = None
self.reset()
示例3: __init__
def __init__( self, refm, disc_rate, init_Q, Lambda, alpha, epsilon, gamma=0 ):
Agent.__init__( self, refm, disc_rate )
self.num_states = refm.getNumObs() # assuming that states = observations
self.obs_symbols = refm.getNumObsSyms()
self.obs_cells = refm.getNumObsCells()
self.init_Q = init_Q
self.Lambda = Lambda
self.epsilon = epsilon
self.alpha = alpha
# if the internal discount rate isn't set, use the environment value
if gamma == 0:
self.gamma = disc_rate
else:
self.gamma = gamma
if self.gamma >= 1.0:
print "Error: Q learning can only handle an internal discount rate ", \
"that is below 1.0"
sys.exit()
self.reset()
示例4: World
class World(Widget):
    """Grid world widget: random obstacles, a goal cell, and one agent."""

    def __init__(self, settings):
        super().__init__()
        self.settings = settings
        # Scatter the obstacles over random grid cells (duplicates allowed).
        self.obstacles = []
        for _ in range(self.settings.NUM_OBSTACLES):
            self.obstacles.append(self._random_cell())
        # Re-roll the goal until it lands on a non-obstacle cell.
        self.goal = self._random_cell()
        while self.goal in self.obstacles:
            self.goal = self._random_cell()
        # Create the agent that will search toward the goal.
        self.agent = Agent(self.settings, self.canvas, self.goal,
                           self.obstacles, self.settings.HEURISTIC)

    def _random_cell(self):
        # Pixel position (top-left corner) of a uniformly random grid cell.
        return (randint(0, self.settings.GRID_WIDTH - 1) * self.settings.CELL_SIZE,
                randint(0, self.settings.GRID_HEIGHT - 1) * self.settings.CELL_SIZE)

    def draw(self):
        """Render the obstacles and the goal onto the widget canvas."""
        with self.canvas:
            cell_size = (self.settings.CELL_SIZE, self.settings.CELL_SIZE)
            Color(*self.settings.OBSTACLE_COLOR)
            for obstacle in self.obstacles:
                Rectangle(pos=obstacle, size=cell_size)
            Color(*self.settings.GOAL_COLOR)
            Rectangle(pos=self.goal, size=cell_size)

    def update(self, dt):
        """Advance the agent by one simulation step of duration *dt*."""
        with self.canvas:
            self.agent.update(dt)
示例5: __init__
def __init__(self, errGrowth, unnormalizeDirtRate, unnormalizeDirtSize, accuracy, N):
    """Planner agent that maintains NxN estimates of the world state.

    Parameters: variance growth rate, raw (unnormalized) dirt fall
    rate/size, sensor accuracy, and the grid dimension N.
    """
    Agent.__init__(self, Router.PLANNER)
    self.setNumber(N)
    self.vacuumRange = 3
    self.setAccuracy(accuracy)
    # Working estimates of the world, all NxN float grids.
    self.worldview = zeros((N, N), dtype=float64)
    self.wetview = zeros((N, N), dtype=float64)
    self.viewPrecision = zeros((N, N), dtype=float64)
    self.dirtLevels = []
    # Raw dirt parameters, normalized below.
    self.unnormalizeDirtRate = unnormalizeDirtRate
    self.unnormalizeDirtSize = unnormalizeDirtSize
    self.errGrowth = errGrowth
    self.normalizeDirtRate()
    self.vacuumlocation = []
    # Precompute the pairwise distance table used by the planner.
    self.defineDistanceArray()
    self.wDist = 0  # default
示例6: __init__
def __init__(self, gamma, filename):
    """Learner for the starting-country decision, persisted via pickle."""
    Agent.__init__(self)
    # Pickle file holding this learner's saved state.
    self._fileName = filename + "startingCountry.pickle"
    self.gamma = gamma
    self.load()
    # No state/action observed yet in the current episode.
    self.lastState = self.lastAction = None
    self.stateActionList = []
示例7: __init__
def __init__(self, accuracy=0.0):
    """Sensor array agent; only the fractional part of *accuracy* is kept."""
    Agent.__init__(self, Router.SENSORARRAY)
    # Discard the integer part so the stored accuracy stays in range.
    self.accuracy = accuracy - float(int(accuracy))
    self.N = 5
    # NxN measurement grids.
    self.array = zeros((self.N, self.N), dtype=float64)  # dirt levels
    self.Wet = zeros((self.N, self.N), dtype=float64)    # wetness levels
示例8: __init__
def __init__(self, gamma, filename):
    """Learner for the fortify decision, persisted via pickle."""
    Agent.__init__(self)
    # Pickle file holding this learner's saved state.
    self._fileName = filename + "fortify.pickle"
    self.load()
    self.gamma = gamma
    # No state/action observed yet; score starts at zero.
    self.lastState = self.lastAction = None
    self.lastScore = 0
示例9: __init__
def __init__(self, name=None):
    """Builder agent; the agent name defaults to "builder"."""
    Agent.__init__(self, "builder" if name is None else name, "build")
    ProjectInspector.__init__(self)
示例10: __init__
def __init__( self, refm, disc_rate ):
Agent.__init__( self, refm, disc_rate )
if self.num_actions > 10:
print "Error: Manual agent only handles 10 or less actions!"
sys.exit()
self.mode = MANUAL
self.last_val = 0
示例11: __init__
def __init__(self, gamma, filename):
    """Learner for the troop-placement decision, persisted via pickle."""
    Agent.__init__(self)
    # Pickle file holding this learner's saved state.
    self._fileName = filename + "placeTroops.pickle"
    self.load()
    self.gamma = gamma
    # No state/action observed yet; score starts at zero.
    self.lastState = self.lastAction = None
    self.lastScore = 0
    self.stateActionList = []
示例12: testAgent
class testAgent(unittest.TestCase):
    """Unit tests for the Agent graph wrapper."""

    def setUp(self):
        self.agent = Agent()

    def testPluck(self):
        self.agent.pluck()

    def testGroupSize(self):
        agent = Agent(n=10)
        self.assertEqual(agent.groupSize(), 10)

    def testRandomVertexPair(self):
        # Both endpoints must be valid vertex indices (< group size).
        (i, j) = self.agent.getRandomVertexPair()
        self.assertGreater(self.agent.groupSize(), i)
        self.assertGreater(self.agent.groupSize(), j)

    def testAddAndRemoveEdge(self):
        agent = Agent(n=10, p=0, topology='ErdosRenyi')
        self.assertFalse(agent.graph.are_connected(0, 1))
        agent.addEdge(0, 1)
        self.assertTrue(agent.graph.are_connected(0, 1))
        agent.removeEdge(0, 1)
        self.assertFalse(agent.graph.are_connected(0, 1))

    def testPluckEdge(self):
        agent = Agent(n=10, p=0)
        agent.pluckEdge(1, 2)

    def testPluckTillConnectedEmpty(self):
        agent = Agent(n=10, topology='Empty')
        agent.pluckTillConnected()
        self.assertTrue(agent.isConnected())

    def testPluckTillConnectedStar(self):
        agent = Agent(n=20, topology='Star')
        agent.pluckTillConnected()
        # BUG FIX: previously asserted the bound method `agent.isConnected`
        # itself (always truthy) instead of calling it, so the test could
        # never fail. Now actually invokes the check.
        self.assertTrue(agent.isConnected())

    def testAveragePathLengthFull(self):
        # Every vertex pair in a complete graph is at distance 1.
        agent = Agent(n=10, topology='Full')
        self.assertEqual(1.0, agent.averagePathLength())

    def testAveragePathLengthEmpty(self):
        # No paths exist in an edgeless graph.
        agent = Agent(n=10, topology='Empty')
        self.assertEqual(agent.averagePathLength(), Inf)

    def testAveragePathLengthStar(self):
        # A star on n vertices has mean path length 2*(n-1)/n.
        for j in xrange(5, 20):
            n = float(j)
            agent = Agent(n=j, topology='Star')
            self.assertEqual(agent.averagePathLength(), (n - 1) * 2.0 / n)

    def testEdgeOccupation(self):
        # m edges placed out of n*(n-1)/2 possible.
        n = 10
        for m in xrange(0, 10, 1):
            agent = Agent(n=n, m=m, topology='ErdosRenyi')
            p = float(m) / float(n * (n - 1) / 2)
            self.assertEqual(agent.edgeOccupation(), p)
示例13: __init__
def __init__(self, refm, disc_rate, epsilon):
    """Epsilon-parameterized agent over the reference machine's
    observation space."""
    Agent.__init__(self, refm, disc_rate)
    # Cache the observation-space dimensions from the reference machine.
    self.obs_symbols, self.obs_cells = (refm.getNumObsSyms(),
                                        refm.getNumObsCells())
    self.epsilon = epsilon
    self.reset()
示例14: __init__
def __init__(self, r=1.0, s=1.0, v=1.0, cloudsize=1.0):
    """World agent: owns the grid, the vacuums and the cost tally."""
    Agent.__init__(self, Router.WORLD)
    self.time = 0
    self.N = 5                # size of the grid
    self.expenditure = 0.0    # cumulative funds expended since last reset
    self.numberVacuums = 0    # no vacuums assigned yet
    self.vacuumArray = []     # object handles of managed vacuums
    # NOTE(review): helper name has an upstream typo ("intialize");
    # it must be kept to match its definition elsewhere.
    self.intializeVariables(r, s, v, cloudsize)
    # No sensor or planner attached until wired up externally.
    self.setSensor(None)
    self.setPlanner(None)
示例15: __init__
def __init__(self, name=None, project=None, mode=None):
    """Janitor agent; name defaults to "janitor", mode to 'clean'."""
    self.mode = 'clean' if mode is None else mode
    self.project = project
    Agent.__init__(self, "janitor" if name is None else name, "janitor")
    ProjectInspector.__init__(self)