當前位置: 首頁>>代碼示例>>Python>>正文


Python episodic.EpisodicTask類代碼示例

本文整理匯總了Python中pybrain.rl.environments.episodic.EpisodicTask的典型用法代碼示例。如果您正苦於以下問題:Python EpisodicTask類的具體用法?Python EpisodicTask怎麼用?Python EpisodicTask使用的例子?那麼,這裏精選的類代碼示例或許可以為您提供幫助。


在下文中一共展示了EpisodicTask類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: reset

 def reset(self):
     """Start a new episode; when the opponent holds BLACK it opens the game."""
     self.switched = False
     EpisodicTask.reset(self)
     if self.opponent.color != EuphoriaGame.BLACK:
         return
     # Opponent moves first: point it at the fresh environment and play.
     self.opponent.game = self.env
     opening = (EuphoriaGame.BLACK, self.opponent.getAction())
     EpisodicTask.performAction(self, opening)
開發者ID:myeaton1,項目名稱:euphoriaAI,代碼行數:7,代碼來源:euphoriatask.py

示例2: __init__

 def __init__(self, env, episodeLength):
     """Fixed-length episodic task on the given environment.

     :param env: the underlying environment
     :param episodeLength: number of steps per episode
     """
     EpisodicTask.__init__(self, env)
     self.episodeLength = episodeLength
     # Per-episode bookkeeping.
     self.counter = 0
     self.history = []
     self.total = []
開發者ID:schmalerc,項目名稱:pybrain,代碼行數:8,代碼來源:ipdexample.py

示例3: __init__

 def __init__(self, environment):
     """Initialize the task and zero the time/reward bookkeeping."""
     EpisodicTask.__init__(self, environment)
     # Both timestamps start at zero; reward accumulates from there.
     self.prev_time = self.current_time = 0
     self.reward = 0
開發者ID:jaegs,項目名稱:AI_Practicum,代碼行數:8,代碼來源:task.py

示例4: performAction

    def performAction(self, action):
        """Apply a new asset allocation to the underlying environment.

        Args:
            action (np.array): new allocation
        """
        # Remember the requested allocation so the reward step can use it,
        # then forward the action to the base task.
        self.newAllocation = action
        EpisodicTask.performAction(self, action)
開發者ID:pnecchi,項目名稱:Thesis,代碼行數:11,代碼來源:assetallocationtask.py

示例5: __init__

 def __init__(self, size, opponent = None, **args):
     """Set up a Pente task on a size x size board.

     :param size: board edge length
     :param opponent: a player instance, a player class (instantiated with
         the environment), or None for a random Gomoku player
     :param args: forwarded to setArgs (e.g. opponentStart)
     """
     EpisodicTask.__init__(self, PenteGame((size, size)))
     self.setArgs(**args)
     # PEP 8: compare to None with identity, not equality (== can be
     # hijacked by a custom __eq__ on the opponent object).
     if opponent is None:
         opponent = RandomGomokuPlayer(self.env)
     elif isclass(opponent):
         # assume the agent can be initialized without arguments then.
         opponent = opponent(self.env)
     if not self.opponentStart:
         opponent.color = PenteGame.WHITE
     self.opponent = opponent
     self.minmoves = 9
     self.maxmoves = self.env.size[0] * self.env.size[1]
     self.reset()
開發者ID:DanSGraham,項目名稱:code,代碼行數:14,代碼來源:pentetask.py

示例6: __init__

 def __init__(self, size, opponent = None, **args):
     """Set up a capture-game task on a size x size board.

     :param size: board edge length
     :param opponent: a player instance, a player class (instantiated with
         the environment), or None for a random capture player
     :param args: forwarded to setArgs (e.g. opponentStart)
     """
     EpisodicTask.__init__(self, CaptureGame(size))
     self.setArgs(**args)
     # PEP 8: compare to None with identity, not equality (== can be
     # hijacked by a custom __eq__ on the opponent object).
     if opponent is None:
         opponent = RandomCapturePlayer(self.env)
     elif isclass(opponent):
         # assume the agent can be initialized without arguments then.
         opponent = opponent(self.env)
     else:
         opponent.game = self.env
     if not self.opponentStart:
         opponent.color = CaptureGame.WHITE
     self.opponent = opponent
     self.maxmoves = self.env.size * self.env.size
     self.minmoves = 3
     self.reset()
開發者ID:DanSGraham,項目名稱:code,代碼行數:16,代碼來源:capturetask.py

示例7: performAction

 def performAction(self, action):
     """Play the agent's move; if the game continues, the opponent replies.

     The agent's color depends on opponentStart: WHITE when the opponent
     opened the game, BLACK otherwise.
     """
     if self.opponentStart:
         agent_color, foe_color = EuphoriaGame.WHITE, EuphoriaGame.BLACK
     else:
         agent_color, foe_color = EuphoriaGame.BLACK, EuphoriaGame.WHITE
     EpisodicTask.performAction(self, (agent_color, action))
     if not self.isFinished():
         self.opponent.game = self.env
         EpisodicTask.performAction(self, (foe_color, self.opponent.getAction()))
開發者ID:myeaton1,項目名稱:euphoriaAI,代碼行數:12,代碼來源:euphoriatask.py

示例8: reset

    def reset(self):
        """Reset the episode: pick a start time, pick a random non-destination
        start node, reset the grid environment, then the base task.

        NOTE(review): current_time is unconditionally overwritten with
        const.MID_DAY below, so the USE_PERIODS branch has no lasting
        effect — looks like leftover experimentation; confirm intent.
        """
        self.current_time = self.prev_time = 0.0
        if const.USE_PERIODS:
            self.current_time = self.prev_time = random.uniform(0,const.PERIODS)
        # Overrides both assignments above — see NOTE in the docstring.
        self.current_time = const.MID_DAY
        #print "ST", self.current_time
        self.start_time = self.current_time
        self.counter = 0
        # Choose a random node that is not the destination; the seed value
        # equals the destination so the loop body always runs at least once.
        node = grid.node_number(const.DESTINATION)
        while(node == grid.node_number(const.DESTINATION)):       
            node = random.randint(0, const.NODES - 1)
        # Experiment flag: force every episode to start from node 0.
        if const.SAME_START:
            node = 0
        
#        while(node == grid.node_number(const.DESTINATION)):
#            node = random.randint(0, const.NODES - 1)
        self.start_node = node 
        self.env.reset_grid(self.current_time, node)
        EpisodicTask.reset(self)
開發者ID:jaegs,項目名稱:AI_Practicum,代碼行數:21,代碼來源:task.py

示例9: getObservation

    def getObservation(self):
        """ An augmented observation of the underlying environment state that
            also includes the current portfolio weights, right before
            realloacation.

        Returns:
            state (np.array): the augmented state (size (P+1) * (I+1))
        """
        # Past asset returns come from the base task; append the current
        # allocation so the agent sees its own portfolio weights.
        past_returns = EpisodicTask.getObservation(self)
        return np.concatenate((past_returns, self.currentAllocation))
開發者ID:pnecchi,項目名稱:Thesis,代碼行數:13,代碼來源:assetallocationtask.py

示例10: __init__

    def __init__(self, environment, deltaP, deltaF, deltaS, discount,
                 backtest=False):
        """ Standard constructor for the asset allocation task.

        Args:
            environment (Environment): market environment object
            deltaP (double): proportional transaction costs rate
            deltaF (double): fixed transaction cost rate
            deltaS (double): short selling borrowing cost rate
            discount (double): discount factor
            backtest (bool): flag for training mode or test mode
        """
        EpisodicTask.__init__(self, environment)

        # Transaction cost rates: proportional, fixed, short-selling.
        self.deltaP = deltaP
        self.deltaF = deltaF
        self.deltaS = deltaS

        # Reward discount factor.
        self.discount = discount

        # True when running in backtesting (test) mode.
        self.backtest = backtest

        # Backtesting report: one column per asset plus the portfolio
        # log-return of each step.
        report_columns = list(self.env.data.columns) + ['ptfLogReturn']
        self.report = pd.DataFrame(columns=report_columns)

        # Set the starting portfolio allocation.
        self.initializeAllocation()
開發者ID:pnecchi,項目名稱:Thesis,代碼行數:37,代碼來源:assetallocationtask.py

示例11: f

 def f(self, x):
     """ If a module is given, wrap it into a ModuleDecidingAgent before evaluating it.
     Also, if applicable, average the result over multiple games. """
     if isinstance(x, Module):
         agent = ModuleDecidingPlayer(x, self.env, greedySelection=True)
     elif isinstance(x, EuphoriaRandomPlayer):
         agent = x
     else:
         raise NotImplementedError('Missing implementation for '+x.__class__.__name__+' evaluation')
     agent.game = self.env
     self.opponent.game = self.env
     total = 0
     for _ in range(self.averageOverGames):
         # Always take the color opposite to the opponent's.
         agent.color = -self.opponent.color
         total += EpisodicTask.f(self, agent)
     return total / float(self.averageOverGames)
開發者ID:myeaton1,項目名稱:euphoriaAI,代碼行數:16,代碼來源:euphoriatask.py

示例12: performAction

 def performAction(self, action):
     """Execute the agent's move; if the episode goes on, the opponent replies."""
     EpisodicTask.performAction(self, action)
     if self.isFinished():
         return
     EpisodicTask.performAction(self, self.opponent.getAction())
開發者ID:DanSGraham,項目名稱:code,代碼行數:4,代碼來源:capturetask.py

示例13: reset

 def reset(self):
     """Start a new episode; when the opponent holds BLACK it opens the game."""
     self.switched = False
     EpisodicTask.reset(self)
     if self.opponent.color != CaptureGame.BLACK:
         return
     # Opponent moves first.
     EpisodicTask.performAction(self, self.opponent.getAction())
開發者ID:DanSGraham,項目名稱:code,代碼行數:6,代碼來源:capturetask.py

示例14: reset

 def reset(self):
     """Reset the base task first, then the underlying environment."""
     EpisodicTask.reset(self)
     self.env.reset()
開發者ID:yycho0108,項目名稱:Tetris_AI_R,代碼行數:4,代碼來源:main.py

示例15: reset

 def reset(self):
     """Reset task and environment, and clear the episode-ended flag."""
     EpisodicTask.reset(self)
     self.env.reset()
     self._ended = False
開發者ID:sarobe,項目名稱:VGDLEntityCreator,代碼行數:4,代碼來源:interfaces.py


注:本文中的pybrain.rl.environments.episodic.EpisodicTask類示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。