

Python Plot.update_plot Method Code Examples

This article collects typical usage examples of the Python method plot.Plot.update_plot. If you are wondering what Plot.update_plot does in practice, how to call it, or what real-world uses look like, the selected code examples below may help. You can also look further into other usage examples for the containing class, plot.Plot.


Two code examples of the Plot.update_plot method are shown below, sorted by popularity by default.
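Before the full examples, here is a minimal sketch of the usage pattern both snippets share: create a Plot, append new values as the computation advances, and call update_plot to refresh the figure. The Plot class in the examples is project-specific, so the stand-in below (its constructor, its append method, and the matplotlib-based body) is an assumption written only to illustrate the call pattern; it is not the plot.Plot from either project.

# Minimal stand-in illustrating the append/update_plot pattern; NOT the
# project-specific plot.Plot used in the examples below.
import matplotlib.pyplot as plt

class Plot(object):
    def __init__(self, *series_names):
        # one growing list of values per named series
        self.series = dict((name, []) for name in series_names)
        self.fig, self.ax = plt.subplots()

    def append(self, name, value):
        self.series[name].append(value)

    def update_plot(self):
        # redraw every series from scratch on each call
        self.ax.clear()
        for name, values in self.series.items():
            self.ax.plot(values, label=name)
        self.ax.legend()
        self.fig.canvas.draw_idle()

if __name__ == '__main__':
    plot = Plot('Validation', 'Test')
    for step in range(10):
        plot.append('Validation', 1.0 / (step + 1))  # dummy metrics
        plot.append('Test', 0.8 / (step + 1))
        plot.update_plot()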

Example 1: TransientSolver

# Required import: from plot import Plot [as alias]
# Or: from plot.Plot import update_plot [as alias]

#......... part of the code has been omitted here .........
    self.fe = Enthalpy(firn, config)
    self.fv = Velocity(firn, config)
    self.fd = FullDensity(firn, config)
    if config['age']['on']:
      self.fa = Age(firn, config)

    if config['plot']['on']:
      #plt.ion()
      self.plot = Plot(firn, config)
      #plt.show()

  def solve(self):
    """
    """
    s    = '::: solving TransientSolver :::'
    text = colored(s, 'blue')
    print text
    
    firn   = self.firn
    config = self.config

    fe     = self.fe
    fv     = self.fv
    fd     = self.fd
    if config['age']['on']:
      fa     = self.fa
    
    t0      = config['t_start']
    tm      = config['t_mid']
    tf      = config['t_end']
    dt      = config['time_step']
    dt_list = config['dt_list']
    if dt_list != None:
      numt1   = (tm-t0)/dt_list[0] + 1       # number of time steps
      numt2   = (tf-tm)/dt_list[1] + 1       # number of time steps
      times1  = linspace(t0,tm,numt1)   # array of times to evaluate in seconds
      times2  = linspace(tm,tf,numt2)   # array of times to evaluate in seconds
      dt1     = dt_list[0] * ones(len(times1))
      dt2     = dt_list[1] * ones(len(times2))
      times   = hstack((times1,times2))
      dts     = hstack((dt1, dt2))
    
    else: 
      numt   = (tf-t0)/dt + 1         # number of time steps
      times  = linspace(t0,tf,numt)   # array of times to evaluate in seconds
      dts    = dt * ones(len(times))
      firn.t = t0
   
    self.times = times
    self.dts   = dts

    for t,dt in zip(times[1:], dts[1:]):
      
      # update timestep :
      firn.dt = dt
      firn.dt_v.assign(dt)

      # update boundary conditions :
      firn.update_Hbc()
      firn.update_rhoBc()
      firn.update_wBc()
      #firn.update_omegaBc()
    
      # newton's iterative method :
      fe.solve()
      fd.solve()
      fv.solve()
      if config['age']['on']:
        fa.solve()
      
      # update firn object :
      firn.update_vars(t)
      firn.update_height_history()
      if config['free_surface']['on']:
        if dt_list != None:
          if t > tm+dt:
            firn.update_height()
        else:
          firn.update_height()
      
      # update model parameters :
      if t != times[-1]:
         firn.H_1.assign(firn.H)
         firn.U_1.assign(firn.U)
         firn.omega_1.assign(firn.omega)
         firn.w_1.assign(firn.w)
         firn.a_1.assign(firn.a)
         firn.m_1.assign(firn.m)
    
      # update the plotting parameters :
      if config['plot']['on']:
        self.plot.update_plot()
        #plt.draw()
        
      s = '>>> Time: %i yr <<<'
      text = colored(s, 'red', attrs=['bold'])
      print text % (t / firn.spy)
    
    if config['plot']['on']:
      pass
Developer: pf4d, Project: um-fdm, Lines of code: 104, Source file: solvers.py
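A side note on the time-stepping setup in example 1: when dt_list is given, the run is split at t_mid into a fine-stepped and a coarse-stepped segment, and the time and step-size arrays are simply concatenated. The standalone sketch below reproduces only that array construction; the numbers are made up, and the int casts are added so it also runs under Python 3.

# Made-up numbers illustrating the two-segment time array built in example 1.
import numpy as np

t0, tm, tf = 0.0, 10.0, 30.0      # start, mid, end times
dt_list    = [1.0, 5.0]           # fine steps before tm, coarse steps after

numt1 = int((tm - t0) / dt_list[0]) + 1   # 11 evaluation times in [t0, tm]
numt2 = int((tf - tm) / dt_list[1]) + 1   # 5 evaluation times in [tm, tf]
times = np.hstack((np.linspace(t0, tm, numt1), np.linspace(tm, tf, numt2)))
dts   = np.hstack((dt_list[0] * np.ones(numt1), dt_list[1] * np.ones(numt2)))

print(times)   # 0..10 in steps of 1, then 10..30 in steps of 5 (tm appears twice)
print(dts)     # eleven 1.0 values followed by five 5.0 values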

Example 2: train

# Required import: from plot import Plot [as alias]
# Or: from plot.Plot import update_plot [as alias]
    def train(self, patience, patience_increase, n_epochs, improvement_threshold):
        logging.info('Training the model...')
        plot = Plot('Validation', 'Test')
        # go through this many minibatches before checking the network on the
        # validation set; in this case we check every epoch
        validation_frequency = min(self.n_train_batches, patience / 2)

        best_params = None
        best_validation_loss = numpy.inf
        test_score = 0.
        start_time = datetime.datetime.now()

        done_looping = False
        epoch = 0
        try:
            while (epoch < n_epochs) and (not done_looping):
                epoch = epoch + 1
                for minibatch_index in xrange(self.n_train_batches):

                    minibatch_avg_cost = self.train_model(minibatch_index)
                    # iteration number
                    iter = (epoch - 1) * self.n_train_batches + minibatch_index

                    if (iter + 1) % validation_frequency == 0:
                        # compute zero-one loss on validation set
                        validation_losses = [self.validate_model(i)
                                             for i in xrange(self.n_valid_batches)]
                        this_validation_loss = numpy.mean(validation_losses)

                        logging.info(
                            'epoch %i, minibatch %i/%i, validation error %f %%' %
                            (
                                epoch,
                                minibatch_index + 1,
                                self.n_train_batches,
                                this_validation_loss * 100.
                            )
                        )

                        plot.append('Validation', this_validation_loss)
                        plot.update_plot()

                        # if we got the best validation score until now
                        if this_validation_loss < best_validation_loss:
                            # improve patience if loss improvement is good enough
                            if this_validation_loss < best_validation_loss *  \
                               improvement_threshold:
                                patience = max(patience, iter * patience_increase)

                            best_validation_loss = this_validation_loss

                            # test it on the test set
                            test_losses = [self.test_model(i)
                                           for i in xrange(self.n_test_batches)]
                            test_score = numpy.mean(test_losses)

                            logging.info(
                                '     epoch %i, minibatch %i/%i test error of best model %f %%' %
                                (
                                    epoch,
                                    minibatch_index + 1,
                                    self.n_train_batches,
                                    test_score * 100.
                                )
                            )

                            plot.append('Test', test_score)
                            plot.update_plot()

                            best_params = Parameters(
                                self.classifier.params,
                                type(self.classifier).__name__,
                                best_validation_loss,
                                test_score
                            )
                            best_params.save()
                        else:
                            plot.append('Test', numpy.NaN)
                            plot.update_plot()

                        plot.save_plot()

                    if patience <= iter:
                        done_looping = True
                        break

        finally:
            end_time = datetime.datetime.now()
            logging.info(
                'Optimization complete with best validation score of %f %%, with test performance %f %%' %
                (best_validation_loss * 100., test_score * 100.))
            logging.info(
                'The code run for %d epochs (%s), with %f epochs/sec' %
                (epoch, (end_time - start_time), 1. * epoch / (end_time - start_time).total_seconds()))
Developer: crmne, Project: Genretron-Theano, Lines of code: 96, Source file: classifier.py
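A side note on the early-stopping scheme in example 2: patience counts minibatch iterations, and an improvement that beats the previous best validation loss by more than improvement_threshold pushes the stopping deadline out to iter * patience_increase. The self-contained sketch below reproduces only that bookkeeping, with made-up loss values and parameters.

# Made-up loss values illustrating the patience bookkeeping from example 2.
patience = 4                 # minimum number of iterations to run
patience_increase = 3        # deadline multiplier on a clear improvement
improvement_threshold = 0.995

best = float('inf')
for it, loss in enumerate([0.90, 0.70, 0.50, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57]):
    if loss < best:
        if loss < best * improvement_threshold:
            patience = max(patience, it * patience_increase)  # extend the deadline
        best = loss
    if patience <= it:
        break                # ran out of patience

# Prints "6 6 0.5": the improvement at it=2 extended patience from 4 to 6,
# so the loop stops at it=6 instead of running through all ten losses.
print(it, patience, best)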


Note: The plot.Plot.update_plot examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.