

Python Calculator.normal Method Code Example

This article collects typical usage examples of the Python method calculator.Calculator.normal. If you are wondering how Calculator.normal works, how to call it, or what a real usage looks like, the selected code example below should help. You can also explore further usage examples of the enclosing class, calculator.Calculator.


The following shows one code example of the Calculator.normal method.
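Judging from the example below, the typical call pattern for Calculator.normal looks roughly like this (the model, data_x, data_y and x_batch names are placeholders for illustration only; the snippet relies on the project's own calculator module, so it is a sketch rather than a standalone program):

from calculator import Calculator

calc = Calculator()
mean, var = calc.normal_parameters(model, data_x, data_y)  # fit the first-layer mean/variance
z = calc.normal(model, x_batch)                            # hidden features, shape: batch * (H+1)
f = calc.nonlinear(model, z.dot(model.w))                  # activation, e.g. softmax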

Example 1: Model

# Required import: from calculator import Calculator [as alias]
# Or: from calculator.Calculator import normal [as alias]
import numpy as np

from calculator import Calculator
# Note: HIDDEN_TYPE, ACTIVATION_TYPE, HIDDEN_NUM, WEIGHT_INIT and Logger are
# defined or imported elsewhere in the original model.py (not shown in this snippet).
class Model(object):
    def __init__(self, I, K):
        # num(X), dim(X), dim(Z), dim(Y)
        self.I = I
        self.H = None
        self.K = K

        self.best_w = None

        self.w = None
        self.mean = None
        self.var = None

        self.layer = 1
        self.hidden_type = HIDDEN_TYPE["DIRECT"]
        self.act_type = ACTIVATION_TYPE["SOFTMAX"]
        self.indices = None

        # Optimizer
        self.optimizer = None
        self.logger = None

        self.calc = Calculator()

    def set_hidden(self, opt=HIDDEN_TYPE["DIRECT"]):
        self.hidden_type = opt
        self.H = HIDDEN_NUM[opt]
        self.w = np.zeros((self.H,self.K))

    def set_activation(self, opt=ACTIVATION_TYPE["SOFTMAX"]):
        self.act_type = opt

    def set_optimizer(self, optimizer):
        self.optimizer = optimizer
        self.layer = optimizer.layer
        self.set_logger(optimizer.loss, optimizer.lr, optimizer.batch_size)

    def set_logger(self, loss, lr, batch_size):
        self.logger = Logger(loss, lr, batch_size)

    def learn(self, data_x, data_y, time_limit):
        N = data_x.shape[0]
        self.logger.start_learn()
        # init 1st layer : Mean, Variance
        self.mean, self.var = self.calc.normal_parameters(self, data_x, data_y)

        # init 2nd layer : Weights (H+1)*K
        self.w = self.optimizer.init_weights(self, WEIGHT_INIT["UNIFORM"])
        min_error = float("inf")

        # Integer number of mini-batches per epoch (4/5 of the 10,000 samples are used for training)
        batch_amount = int(10000 * 4 / 5.) // self.optimizer.batch_size
        t_errs = np.zeros(batch_amount)
        t_accs = np.zeros(batch_amount)
        v_errs = np.zeros(batch_amount)
        v_accs = np.zeros(batch_amount)

        for epoch in range(self.optimizer.max_epoch):
            idx_t, idx_v = self.cross_validation(epoch, N)

            for b_cnt in range(batch_amount):
                batch = self.get_batch(b_cnt, idx_t)

                x = data_x[batch]
                y = data_y[batch]
                vx = data_x[idx_v]
                vy = data_y[idx_v]

                # Forward Propagation
                z = self.calc.normal(self, x)    # batch * (H+1)
                a = z.dot(self.w)                # batch * K
                f = self.calc.nonlinear(self, a)

                vf = self.predict(vx, self.w)
                t_errs[b_cnt], t_accs[b_cnt], v_errs[b_cnt], v_accs[b_cnt] = \
                    self.evaluate(f, y, vf, vy)

                # Backward Propagation > grads = [dW, dMean, dVariance]
                grads = self.optimizer.get_gradients(self, x, z, f, y)

                self.w += grads[0]
                self.mean += grads[1]
                self.var += grads[2]

            if v_errs.mean() < min_error:
                min_error = v_errs.mean()
                self.best_w = self.w.copy()  # copy: self.w keeps being updated in place below
            # Logging
            self.optimizer.update(epoch, batch_amount)
            self.logger.update(t_errs.mean(), t_accs.mean(), v_errs.mean(), v_accs.mean())
            if self.logger.is_finish(time_limit):
                break

        # self.logger.save_log(self, vf, vy)

        best_e = np.array(self.logger.v_acc).argmax()
        return self.logger.t_err[best_e], self.logger.t_acc[best_e], self.logger.v_err[best_e], self.logger.v_acc[best_e]

    def predict(self, x, w=None):
        if w is None:
            w = self.best_w
#......... the rest of the code is omitted here .........
Author: whyjay, Project: SNU, Lines of code: 103, Source file: model.py
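For readers who want something runnable without the SNU project code, the following self-contained NumPy sketch reproduces the same forward-pass pattern used in Model.learn above. The Gaussian-log-density hidden layer and the softmax are assumptions about what Calculator.normal and Calculator.nonlinear might compute; the project's actual implementations are not shown on this page.

import numpy as np

def normal_features(x, mean, var):
    # Per-unit Gaussian log-densities plus a bias column -> shape: batch * (H+1).
    # x: (batch, I), mean/var: (H, I)
    diff = x[:, None, :] - mean[None, :, :]                          # (batch, H, I)
    log_p = -0.5 * np.sum(diff ** 2 / var + np.log(2 * np.pi * var), axis=2)
    bias = np.ones((x.shape[0], 1))
    return np.hstack([log_p, bias])                                  # (batch, H+1)

def softmax(a):
    a = a - a.max(axis=1, keepdims=True)                             # numerical stability
    e = np.exp(a)
    return e / e.sum(axis=1, keepdims=True)

# Toy shapes: batch of 4 samples, I=3 features, H=2 hidden units, K=2 classes
rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))
mean = rng.normal(size=(2, 3))
var = np.ones((2, 3))
w = rng.normal(size=(3, 2))          # (H+1) * K weights

z = normal_features(x, mean, var)    # batch * (H+1)
a = z.dot(w)                         # batch * K
f = softmax(a)                       # class probabilities
print(f.shape, f.sum(axis=1))        # (4, 2), each row sums to 1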

