

Python Logger.log_tabular Method Code Examples

This article collects typical usage examples of the Python method pybullet_utils.logger.Logger.log_tabular. If you have been wondering what Logger.log_tabular does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, pybullet_utils.logger.Logger.
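As a quick orientation before the full examples, here is a minimal sketch of the typical call cycle: set one or more key/value pairs for the current row with log_tabular, optionally echo the row to the console with print_tabular, and write it out with dump_tabular. It only uses the Logger methods that appear in the examples below; the column names ("Iteration", "Reward") and the output path are illustrative assumptions, not part of the library.

from pybullet_utils.logger import Logger

# Minimal sketch of the tabular logging cycle (column names and path are illustrative).
logger = Logger()
logger.configure_output_file("train_log.txt")    # rows are written to this file

for it in range(3):
    logger.log_tabular("Iteration", it)          # set a value for the current row
    logger.log_tabular("Reward", 0.1 * it)       # any number of columns per row
    logger.print_tabular()                       # echo the current row to the console
    logger.dump_tabular()                        # write the current row to the output file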


Two code examples of the Logger.log_tabular method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: Logger

# Required import: from pybullet_utils.logger import Logger [as alias]
# Or: from pybullet_utils.logger.Logger import log_tabular [as alias]
from pybullet_utils.logger import Logger

logger = Logger()
logger.configure_output_file("e:/mylog.txt")  # tabular rows are written to this file

for i in range(10):
    logger.log_tabular("Iteration", i)  # set the "Iteration" column of the current row

Logger.print2("hello world")  # static helper for plain console output

logger.print_tabular()  # print the current row to the console
logger.dump_tabular()   # write the current row to the configured output file
Author: jiapei100, Project: bullet3, Lines of code: 11, Source file: testlog.py
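Note that print_tabular and dump_tabular are separate calls: as the examples suggest, print_tabular only echoes the currently logged row to the console, while dump_tabular is what actually writes it to the file set by configure_output_file. A row is therefore typically logged, printed, and then dumped once per iteration, as in Example 2 below.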

Example 2: RLAgent

# Required import: from pybullet_utils.logger import Logger [as alias]
# Or: from pybullet_utils.logger.Logger import log_tabular [as alias]

# ......... part of the code omitted here .........
            self.train_return = path.calc_return()

            if self._need_normalizer_update:
                self._record_normalizers(path)

        return path_id

    def _record_normalizers(self, path):
        states = np.array(path.states)
        self.s_norm.record(states)

        if self.has_goal():
            goals = np.array(path.goals)
            self.g_norm.record(goals)

        return

    def _update_normalizers(self):
        self.s_norm.update()

        if self.has_goal():
            self.g_norm.update()
        return

    def _train(self):
        samples = self.replay_buffer.total_count
        self._total_sample_count = int(MPIUtil.reduce_sum(samples))
        end_training = False
        
        if (self.replay_buffer_initialized):  
            if (self._valid_train_step()):
                prev_iter = self.iter
                iters = self._get_iters_per_update()
                avg_train_return = MPIUtil.reduce_avg(self.train_return)
            
                for i in range(iters):
                    curr_iter = self.iter
                    wall_time = time.time() - self.start_time
                    wall_time /= 60 * 60 # store time in hours

                    has_goal = self.has_goal()
                    s_mean = np.mean(self.s_norm.mean)
                    s_std = np.mean(self.s_norm.std)
                    g_mean = np.mean(self.g_norm.mean) if has_goal else 0
                    g_std = np.mean(self.g_norm.std) if has_goal else 0

                    self.logger.log_tabular("Iteration", self.iter)
                    self.logger.log_tabular("Wall_Time", wall_time)
                    self.logger.log_tabular("Samples", self._total_sample_count)
                    self.logger.log_tabular("Train_Return", avg_train_return)
                    self.logger.log_tabular("Test_Return", self.avg_test_return)
                    self.logger.log_tabular("State_Mean", s_mean)
                    self.logger.log_tabular("State_Std", s_std)
                    self.logger.log_tabular("Goal_Mean", g_mean)
                    self.logger.log_tabular("Goal_Std", g_std)
                    self._log_exp_params()

                    self._update_iter(self.iter + 1)
                    self._train_step()

                    Logger.print2("Agent " + str(self.id))
                    self.logger.print_tabular()
                    Logger.print2("") 

                    if (self._enable_output() and curr_iter % self.int_output_iters == 0):
                        self.logger.dump_tabular()

                if (prev_iter // self.int_output_iters != self.iter // self.int_output_iters):
                    end_training = self.enable_testing()

        else:

            Logger.print2("Agent " + str(self.id))
            Logger.print2("Samples: " + str(self._total_sample_count))
            Logger.print2("") 

            if (self._total_sample_count >= self.init_samples):
                self.replay_buffer_initialized = True
                end_training = self.enable_testing()
        
        if self._need_normalizer_update:
            self._update_normalizers()
            self._need_normalizer_update = self.normalizer_samples > self._total_sample_count

        if end_training:
            self._init_mode_train_end()
 
        return

    def _get_iters_per_update(self):
        return MPIUtil.get_num_procs() * self.iters_per_update

    def _valid_train_step(self):
        return True 

    def _log_exp_params(self):
        self.logger.log_tabular("Exp_Rate", self.exp_params_curr.rate)
        self.logger.log_tabular("Exp_Noise", self.exp_params_curr.noise)
        self.logger.log_tabular("Exp_Temp", self.exp_params_curr.temp)
        return
Author: jiapei100, Project: bullet3, Lines of code: 104, Source file: rl_agent.py
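The RLAgent excerpt above logs a full row of metrics on every training iteration, prints it with print_tabular, and only calls dump_tabular every int_output_iters iterations. The sketch below condenses that periodic-dump pattern into a self-contained snippet; the metric names, the output interval, and train_step() are hypothetical placeholders, and only the Logger methods already shown in these examples are assumed.

import time
from pybullet_utils.logger import Logger

# Condensed sketch of the periodic-dump pattern from the RLAgent example.
# Metric names, the output interval, and train_step() are illustrative placeholders.
logger = Logger()
logger.configure_output_file("agent_log.txt")
output_iters = 5                                 # hypothetical dump interval
start_time = time.time()

def train_step(it):
    return 1.0 / (it + 1)                        # stand-in for a real training update

for it in range(20):
    loss = train_step(it)
    logger.log_tabular("Iteration", it)
    logger.log_tabular("Wall_Time", (time.time() - start_time) / 3600.0)  # hours
    logger.log_tabular("Loss", loss)
    logger.print_tabular()                       # echo the row every iteration
    if it % output_iters == 0:
        logger.dump_tabular()                    # write to file only periodically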


Note: The pybullet_utils.logger.Logger.log_tabular examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.