Python tensor.square Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.square. If you are wondering what square does, how to call it, or what real uses look like, the curated examples below should help.


The following presents 15 code examples of the square function, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python examples.
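
Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of what T.square itself computes: the elementwise square of a tensor, equivalent to x ** 2.

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
f = theano.function([x], T.square(x))  # elementwise square
print(f(np.asarray([1.0, -2.0, 3.0], dtype=theano.config.floatX)))  # -> [1. 4. 9.]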

Example 1: log_likelihood

    def log_likelihood(self):
        Users = self.L[:, :-1]
        Items = self.R[:, :-1]
        UserBiases = self.L[:, -1].reshape((-1, 1))
        ItemBiases = self.R[:, -1].reshape((-1, 1))

        A = T.dot(Users, Items.T)
        A = T.inc_subtensor(A[:, :], UserBiases)
        A = T.inc_subtensor(A[:, :], ItemBiases.T)
        B = A * self.counts
        loglik = T.sum(B)

        A = T.exp(A)
        A += 1
        A = T.log(A)

        A = (self.counts + 1) * A
        loglik -= T.sum(A)

        # L2 regularization
        loglik -= 0.5 * self.reg_param * T.sum(T.square(self.L[:, :-1]))
        loglik -= 0.5 * self.reg_param * T.sum(T.square(self.R[:, :-1]))

        # Return the negated log-likelihood, since we minimize this as a cost
        return -loglik
Developer: Nehoroshiy, Project: logmat_riemannian, Lines: 25, Source: logmat.py
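
In math form this is the logistic matrix-factorization log-likelihood. Writing $x_{ij} = u_i^\top v_j + \beta_i + \gamma_j$ for the biased inner product and $c_{ij}$ for the observed counts,

$$\log \mathcal{L} = \sum_{i,j} \Big[ c_{ij}\, x_{ij} - (1 + c_{ij}) \log\big(1 + e^{x_{ij}}\big) \Big] - \frac{\lambda}{2}\Big(\lVert U \rVert_F^2 + \lVert V \rVert_F^2\Big),$$

which is exactly what the code builds before returning the negation for minimization.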

Example 2: kl_div_p_q

    def kl_div_p_q(self, p_mean, p_std, q_mean, q_std):
        """KL divergence D_{KL}[p(x)||q(x)] for a fully factorized Gaussian"""
        numerator = T.square(p_mean - q_mean) + \
            T.square(p_std) - T.square(q_std)
        denominator = 2 * T.square(q_std) + 1e-8
        return T.sum(
            numerator / denominator + T.log(q_std) - T.log(p_std))
Developer: jpdoyle, Project: vime, Lines: 7, Source: bnn.py
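
For reference, the closed form this method implements for diagonal Gaussians $p = \mathcal{N}(\mu_p, \sigma_p^2)$ and $q = \mathcal{N}(\mu_q, \sigma_q^2)$ is

$$D_{KL}[p \,\|\, q] = \sum_i \left( \frac{(\mu_{p,i} - \mu_{q,i})^2 + \sigma_{p,i}^2}{2 \sigma_{q,i}^2} + \ln \sigma_{q,i} - \ln \sigma_{p,i} - \frac{1}{2} \right).$$

The code folds the constant $-\tfrac{1}{2}$ into the numerator as $-\sigma_q^2$, since $-\sigma_q^2 / (2\sigma_q^2) = -\tfrac{1}{2}$; the 1e-8 in the denominator only guards against division by zero.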

Example 3: __init__

    def __init__(self, input, centerbias = None, alpha=1.0):
        self.input = input
        if centerbias is None:
            centerbias = np.ones(12)
        self.alpha = theano.shared(value = np.array(alpha).astype(theano.config.floatX), name='alpha')
        self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
        self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

        height = T.cast(input.shape[0], theano.config.floatX)
        width = T.cast(input.shape[1], theano.config.floatX)
        x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
        y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

        x_coords = x_coords.dimshuffle('x', 0)
        y_coords = y_coords.dimshuffle(0, 'x')

        dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
        self.max_dist = T.sqrt(1 + self.alpha)
        self.dists = dists/self.max_dist

        self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

        apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
        self.output = ifelse(apply_centerbias, self.input+self.factors, self.input)
        self.params = [self.centerbias_ys, self.alpha]
Developer: matthias-k, Project: pysaliency, Lines: 25, Source: theano_utils.py
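
The distance computed here is the anisotropic radial coordinate

$$d(x, y) = \frac{\sqrt{x^2 + \alpha\, y^2}}{\sqrt{1 + \alpha}},$$

with $x$ and $y$ the image coordinates rescaled to roughly $[-1, 1]$. Dividing by the corner distance $\sqrt{1 + \alpha}$ keeps self.dists in $[0, 1]$ for the nonlinearity lookup, and the 0.0001 offset on y_coords keeps the gradient of the square root finite at the center.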

Example 4: custom_loss

def custom_loss(y_true,y_pred):
    '''
    Args:
      y_true: Ground Truth output
      y_pred: Predicted output
      The forms of these two vectors are:
      ######################################
      ## x,y,h,w,p1,p2,...,p20,objectness ##
      ######################################
    Returns:
      The loss caused by y_pred
    '''
    y1 = y_pred
    y2 = y_true
    loss = 0.0

    scale_vector = []
    scale_vector.extend([2]*4)
    scale_vector.extend([1]*20)
    scale_vector = np.reshape(np.asarray(scale_vector),(1,len(scale_vector)))

    for i in range(49):
        y1_piece = y1[:,i*25:i*25+24]
        y2_piece = y2[:,i*25:i*25+24]

        y1_piece = y1_piece * scale_vector
        y2_piece = y2_piece * scale_vector

        loss_piece = T.sum(T.square(y1_piece - y2_piece),axis=1)
        loss = loss + loss_piece * y2[:,i*25+24]
        loss = loss + T.square(y2[:,i*25+24] - y1[:,i*25+24])

    loss = T.sum(loss)
    return loss
Developer: RomanKoshelev, Project: Darknet.keras, Lines: 34, Source: GeneratePascalResult.py
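
A hypothetical way to compile and exercise this loss directly in Theano (in Keras it would normally be passed to model.compile instead); the (batch, 49*25) layout comes from the docstring, and the variable names here are illustrative only:

import numpy as np
import theano
import theano.tensor as T

yt = T.matrix('y_true')  # shape (batch, 49 * 25): 49 cells x [x,y,h,w,p1..p20,objectness]
yp = T.matrix('y_pred')
loss_fn = theano.function([yt, yp], custom_loss(yt, yp))

batch = np.random.rand(2, 49 * 25).astype(theano.config.floatX)
print(loss_fn(batch, batch))  # identical inputs -> loss of 0.0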

Example 5: custom_loss

def custom_loss(y_true, y_pred):
    epsilon = 0.001
    first_log = T.log(T.clip(y_pred, epsilon, np.inf) + 1.)
    second_log = T.log(T.clip(y_true, epsilon, np.inf) + 1.)
    first_sum = T.log(T.sum(T.clip(y_pred, epsilon, np.inf)) + 1)
    second_sum = T.log(T.sum(T.clip(y_true, epsilon, np.inf)) + 1)
    return T.mean(T.square(first_log - second_log), axis=-1) + CMC_PENALTY * T.square(first_sum - second_sum)
Developer: Eiii, Project: DeepJace, Lines: 7, Source: nnet_additive.py
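
In formula form this is a mean squared logarithmic error plus a penalty on the mismatch of log-totals:

$$L = \operatorname{mean}\big[(\log(\hat{y} + 1) - \log(y + 1))^2\big] + \lambda\,\big(\log(\textstyle\sum \hat{y} + 1) - \log(\textstyle\sum y + 1)\big)^2,$$

where both $\hat{y}$ and $y$ are clipped below at epsilon = 0.001 and $\lambda$ is the externally defined CMC_PENALTY constant.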

Example 6: __init__

    def __init__(self, incoming, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.),
                 W=lasagne.init.Normal(0.05), train_g=False, init_stdv=1., nonlinearity=relu, **kwargs):
        super(WeightNormLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = nonlinearity
        self.init_stdv = init_stdv
        k = self.input_shape[1]
        if b is not None:
            self.b = self.add_param(b, (k,), name="b", regularizable=False)
        if g is not None:
            self.g = self.add_param(g, (k,), name="g", regularizable=False, trainable=train_g)
        if len(self.input_shape)==4:
            self.axes_to_sum = (0,2,3)
            self.dimshuffle_args = ['x',0,'x','x']
        else:
            self.axes_to_sum = 0
            self.dimshuffle_args = ['x',0]

        # scale weights in layer below
        incoming.W_param = incoming.W
        #incoming.W_param.set_value(W.sample(incoming.W_param.get_value().shape))
        if incoming.W_param.ndim==4:
            if isinstance(incoming, Deconv2DLayer):
                W_axes_to_sum = (0,2,3)
                W_dimshuffle_args = ['x',0,'x','x']
            else:
                W_axes_to_sum = (1,2,3)
                W_dimshuffle_args = [0,'x','x','x']
        else:
            W_axes_to_sum = 0
            W_dimshuffle_args = ['x',0]
        if g is not None:
            incoming.W = incoming.W_param * (self.g/T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)
        else:
            incoming.W = incoming.W_param / T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum,keepdims=True))
Developer: 255BITS, Project: improved-gan, Lines: 34, Source: nn.py
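
This layer implements weight normalization (Salimans & Kingma, 2016): each output unit's weight vector $v$ is reparametrized as

$$w = \frac{g}{\lVert v \rVert_2}\, v,$$

so the direction and scale of $w$ are learned separately. The 1e-6 under the square root avoids division by a zero norm, and when g is None only the pure normalization $w = v / \lVert v \rVert_2$ is applied.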

Example 7: _build_conditional

    def _build_conditional(self, Xnew, pred_noise, diag, X, Xu, y, sigma, cov_total, mean_total):
        sigma2 = tt.square(sigma)
        Kuu = cov_total(Xu)
        Kuf = cov_total(Xu, X)
        Luu = cholesky(stabilize(Kuu))
        A = solve_lower(Luu, Kuf)
        Qffd = tt.sum(A * A, 0)
        if self.approx == "FITC":
            Kffd = cov_total(X, diag=True)
            Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
        else:  # VFE or DTC
            Lamd = tt.ones_like(Qffd) * sigma2
        A_l = A / Lamd
        L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
        r = y - mean_total(X)
        r_l = r / Lamd
        c = solve_lower(L_B, tt.dot(A, r_l))
        Kus = self.cov_func(Xu, Xnew)
        As = solve_lower(Luu, Kus)
        mu = self.mean_func(Xnew) + tt.dot(tt.transpose(As), solve_upper(tt.transpose(L_B), c))
        C = solve_lower(L_B, As)
        if diag:
            Kss = self.cov_func(Xnew, diag=True)
            var = Kss - tt.sum(tt.square(As), 0) + tt.sum(tt.square(C), 0)
            if pred_noise:
                var += sigma2
            return mu, var
        else:
            cov = (self.cov_func(Xnew) - tt.dot(tt.transpose(As), As) +
                   tt.dot(tt.transpose(C), C))
            if pred_noise:
                cov += sigma2 * tt.identity_like(cov)
            return mu, stabilize(cov)
Developer: bballamudi, Project: pymc3, Lines: 33, Source: gp.py
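
The branch on self.approx selects the standard sparse-GP noise models. With $Q_{ff} = K_{fu} K_{uu}^{-1} K_{uf}$ (whose diagonal is Qffd, computed via the Cholesky solve),

$$\Lambda_{\mathrm{FITC}} = \max\big(\operatorname{diag}(K_{ff}) - \operatorname{diag}(Q_{ff}),\, 0\big) + \sigma^2, \qquad \Lambda_{\mathrm{VFE/DTC}} = \sigma^2 \mathbf{1},$$

after which both approximations share the same low-rank predictive algebra through L_B.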

Example 8: __init__

    def __init__(self, xdim, args, dec_nonlin=None):
        self.xdim = xdim
        self.hdim = args.hdim
        self.zdim = args.zdim
        self.lmbda = args.lmbda  # weight decay coefficient * 2
        self.x = T.matrix('x', dtype=floatX)
        self.eps = T.matrix('eps', dtype=floatX)
        self.train_i = T.scalar('train_i', dtype=floatX)
        self.dec = args.decM
        self.COV = args.COV

        self.enc_mlp = GaussianMLP(self.x, self.xdim, self.hdim, self.zdim, nlayers=args.nlayers, eps=self.eps, COV=self.COV)
        if self.dec == 'bernoulli':
            # log p(x | z) defined as -CE(x, y) = dec_mlp.cost(y)
            self.dec_mlp = BernoulliMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        elif self.dec == 'gaussian':
            self.dec_mlp = GaussianMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x, activation=dec_nonlin, COV=self.COV)
        else:
            raise RuntimeError('unrecognized decoder %s' % self.dec)
        #encoder part + decoder part
        if self.COV == False:
            self.enc_cost = -T.sum(kld_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var))
        else:
            self.enc_cost = -T.sum(kldu_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var, self.enc_mlp.u))
        self.cost = (self.enc_cost + self.dec_mlp.cost) / args.batsize
        self.params = self.enc_mlp.params + self.dec_mlp.params
        ##[T.grad(self.cost, p) + self.lmbda * p for p in self.params]
        self.gparams = [T.grad(self.cost, p) for p in self.params]
        self.gaccums = [shared(value=np.zeros(p.get_value().shape, dtype=floatX)) for p in self.params]
        self.lr = args.lr * (1-args.lmbda)**self.train_i

        # update params, update sum(grad_params) for adagrade
        self.updates = [
                (param, param - self.lr*gparam/T.sqrt(gaccum+T.square(gparam)+ADAG_EPS))
                for param, gparam, gaccum in zip(self.params, self.gparams, self.gaccums) ]
        self.updates += [ (gaccum, gaccum + T.square(gparam))
                    for gaccum, gparam in zip(self.gaccums, self.gparams)  ]

        self.train = function(
            inputs=[self.x, self.eps, self.train_i],
            outputs=self.cost,
            updates=self.updates
        )
        self.test = function(
            inputs=[self.x, self.eps],
            outputs=self.cost,
            updates=None
        )
        # can be used for semi-supervised learning for example
        self.encode = function(
            inputs=[self.x, self.eps],
            outputs=self.enc_mlp.out
        )
        # use this to sample
        self.decode = function(
            inputs=[self.enc_mlp.out],  ##z with shape (1,2)
            outputs=self.dec_mlp.out
        )  # .out = .mu + .sigma * eps
Developer: sshidy, Project: SBP-DLGM, Lines: 58, Source: vae.py
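
The update rule coded above is Adagrad, with the current gradient folded into the accumulator under the square root:

$$\theta \leftarrow \theta - \frac{\eta\, g}{\sqrt{G + g^2 + \varepsilon}}, \qquad G \leftarrow G + g^2,$$

where $G$ is the running sum of squared gradients (self.gaccums) and the learning rate $\eta$ decays as args.lr * (1 - args.lmbda) ** train_i.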

Example 9: _do_calc

    def _do_calc(self, v0, v1):
        if self._func is None:
            xv0 = T.matrix('v0')
            xv1 = T.matrix('v1')
            norm0 = T.sqrt(T.square(xv0).sum(axis=1, keepdims=True))
            norm1 = T.sqrt(T.square(xv1).sum(axis=0, keepdims=True))
            dist = 1 - T.dot(xv0 / norm0, xv1 / norm1)
            self._func = theano.function([xv0, xv1], dist)
        return self._func(v0, v1)
Developer: jia-kai, Project: bachelor-thesis, Lines: 9, Source: get_match.py
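
A hypothetical call, to make the asymmetric layout explicit: norm0 reduces axis=1 and norm1 reduces axis=0, so the rows of v0 and the columns of v1 are the vectors being compared (obj stands in for whatever object defines _do_calc):

import numpy as np
v0 = np.random.randn(4, 16).astype('float32')   # 4 row vectors
v1 = np.random.randn(16, 5).astype('float32')   # 5 column vectors
dist = obj._do_calc(v0, v1)  # dist[i, j] = 1 - cos(v0[i, :], v1[:, j]); shape (4, 5)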

Example 10: in_transit

    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / R
            arg = tt.square(1 + k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
            t_start = -hdur
            t_end = hdur
            flag = z

        else:
            M_contact = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            flag = M_contact[2]

            t_start = (M_contact[0] - self.M0) / self.n
            t_start = tt.mod(t_start + hp, self.period) - hp
            t_end = (M_contact[1] - self.M0) / self.n
            t_end = tt.mod(t_end + hp, self.period) - hp

            t_start = tt.switch(tt.gt(t_start, 0.0),
                                t_start - self.period, t_start)
            t_end = tt.switch(tt.lt(t_end, 0.0),
                              t_end + self.period, t_end)

        if texp is not None:
            t_start -= 0.5*texp
            t_end += 0.5*texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
        result = ifelse(tt.all(tt.eq(flag, 0)),
                        tt.arange(t.size)[mask],
                        tt.arange(t.size))

        return result
Developer: dfm, Project: exoplanet, Lines: 57, Source: keplerian.py
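
In the circular branch, hdur is half of the total transit duration from Equation 14 of Winn (2010):

$$T_{14} = \frac{P}{\pi} \arcsin\!\left[ \frac{R_\star}{a} \frac{\sqrt{(1 + k)^2 - b^2}}{\sin i} \right], \qquad k = r / R_\star,$$

so the transit window is $[-T_{14}/2,\ T_{14}/2]$ in time since mid-transit.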

Example 11: mmd_full

def mmd_full(x_t, y_t, alpha=0.5):
    """ Implementation of the full kernel MMD statistic (gaussian kernel)"""
    N = x_t.shape[1]
    M = y_t.shape[1]

    term1 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, N) - T.tile(x_t, N))))
    term2 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, M) - T.tile(y_t, N))))
    term3 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(y_t, M) - T.tile(y_t, M))))
    return term1 - 2 * term2 + term3
Developer: JonnyTran, Project: ML-algorithms, Lines: 9, Source: mmd.py
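
This is the biased (V-statistic) estimate of the squared MMD with Gaussian kernel $k(a, b) = \exp(-(a - b)^2 / (2\alpha))$, for one-dimensional samples laid out along axis 1:

$$\widehat{\mathrm{MMD}}^2 = \operatorname{mean}[k(x, x')] - 2\,\operatorname{mean}[k(x, y)] + \operatorname{mean}[k(y, y')];$$

the repeat/tile pairing enumerates every sample pair for each of the three terms.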

Example 12: square_dist

    def square_dist(self, X, Xs):
        X2 = tt.sum(tt.square(X), 1)
        if Xs is None:
            sqd = (-2.0 * tt.dot(X, tt.transpose(X))
                   + (tt.reshape(X2, (-1, 1)) + tt.reshape(X2, (1, -1))))
        else:
            Xs2 = tt.sum(tt.square(Xs), 1)
            # rows are indexed by X, columns by Xs
            sqd = (-2.0 * tt.dot(X, tt.transpose(Xs))
                   + (tt.reshape(X2, (-1, 1)) + tt.reshape(Xs2, (1, -1))))
        return tt.clip(sqd, 0.0, np.inf)
Developer: springcoil, Project: pymc3, Lines: 10, Source: cov.py
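
A single matrix product suffices because the code expands the squared Euclidean distance:

$$\lVert x_i - x_j' \rVert^2 = \lVert x_i \rVert^2 - 2\, x_i^\top x_j' + \lVert x_j' \rVert^2;$$

the final clip removes the small negative values that floating-point cancellation can produce on the diagonal.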

Example 13: calc

    def calc(self, y, output):
        if y.ndim == 1:
            loss = T.square(y - output)
        else:
            axis = tuple(range(y.ndim))[1:]
            loss = T.sum(T.square(y - output), axis=axis)
        if self.mode:
            loss = T.mean(loss)
        else:
            loss = T.sum(loss)
        return self.weight * loss
Developer: neonnnnn, Project: ml, Lines: 11, Source: objectives.py

Example 14: __init__

    def __init__(self, xdim, args, dec='bernoulli'):
        self.xdim = xdim
        self.hdim = args.hdim
        self.zdim = args.zdim
        self.lmbda = args.lmbda  # weight decay coefficient * 2
        self.x = T.matrix('x', dtype=floatX)
        self.eps = T.matrix('eps', dtype=floatX)

        # XXX make this more general
        self.enc_mlp = GaussianMLP(self.x, self.xdim, self.hdim, self.zdim, nlayers=args.nlayers, eps=self.eps)
        if dec == 'bernoulli':
            # log p(x | z) defined as -CE(x, y) = dec_mlp.cost(y)
            self.dec_mlp = BernoulliMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        elif dec == 'gaussian':
            self.dec_mlp = GaussianMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        else:
            raise RuntimeError('unrecognized decoder %s' % dec)

        self.cost = (-T.sum(kld_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var)) + self.dec_mlp.cost) / args.batch_size
        self.params = self.enc_mlp.params + self.dec_mlp.params
        print(self.params)
        self.gparams = [T.grad(self.cost, p) + self.lmbda * p for p in self.params]
        self.gaccums = [theano.shared(value=np.zeros(p.get_value().shape, dtype=floatX)) for p in self.params]

        # XXX using adagrad update as described in paper, could try other optimizers
        self.updates = [
                (param, param - args.lr * gparam / T.sqrt(gaccum + T.square(gparam) + ADAGRAD_EPS))
                for param, gparam, gaccum in zip(self.params, self.gparams, self.gaccums)
        ]
        self.updates += [
            (gaccum, gaccum + T.square(gparam))
            for gaccum, gparam in zip(self.gaccums, self.gparams)
        ]

        self.train = theano.function(
            inputs=[self.x, self.eps],
            outputs=self.cost,
            updates=self.updates
        )
        self.test = theano.function(
            inputs=[self.x, self.eps],
            outputs=self.cost,
            updates=None
        )
        # can be used for semi-supervised learning for example
        self.encode = theano.function(
            inputs=[self.x, self.eps],
            outputs=self.enc_mlp.out
        )
        # use this to sample
        self.decode = theano.function(
            inputs=[self.enc_mlp.out],
            outputs=self.dec_mlp.out
        )
Developer: anirudh9119, Project: vae, Lines: 54, Source: vae.py
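
A hypothetical driver loop, assuming the VAE class above, an args namespace with batch_size/zdim/lr, and a data matrix X of shape (n, xdim); fresh reparametrization noise eps is drawn per minibatch:

# Illustrative only; VAE, args, X and floatX are assumptions from the surrounding code.
model = VAE(X.shape[1], args)
for epoch in range(10):
    for i in range(0, X.shape[0], args.batch_size):
        xb = X[i:i + args.batch_size]
        eps = np.random.randn(xb.shape[0], args.zdim).astype(floatX)
        cost = model.train(xb, eps)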

Example 15: dynamics_costs_obs

    def dynamics_costs_obs(self,x,u):
        fmatrix = TT.matrix(dtype=floatX).type
        uvector = TT.vector(dtype='int8').type
        ctrl_lo, ctrl_hi = self.ctrl_bounds()


        @theano.as_op(itypes=[fmatrix,fmatrix,uvector],otypes=[fmatrix,fmatrix,fmatrix,fmatrix,fmatrix])
        def stepmulti2op(x_nd,u_ne,done_n):
            x_nd = x_nd.copy()
            u_ne = np.clip(u_ne, ctrl_lo, ctrl_hi)
            move_to_origin = self.md["move_to_origin"]
            offset_n2 = x_nd[:,move_to_origin].copy()
            x_nd[:,move_to_origin] -= offset_n2
            x_nd,f,dcom,dist,kin = self.world.StepMulti2(x_nd.astype("float64"),u_ne.astype("float64"),done_n)
            for _ in xrange(self.frame_skip-1):
                x_nd,f1,dcom1,dist,kin = self.world.StepMulti2(x_nd.astype("float64"),u_ne.astype("float64"),done_n)
                dcom += dcom1
                f += f1
            f /= self.frame_skip

            dist = np.clip(dist, 0, .1) # XXX clip level ad hoc 
            # Consider using nan_to_num here
            x_nd[:,move_to_origin] += offset_n2
            return (x_nd.astype(floatX),f.astype(floatX),dcom.astype(floatX),dist.astype(floatX),kin.astype(floatX))

        done = self.trial_done(x)
        notdone = 1 - done

        y,f,dcom,dist,kin = stepmulti2op(x,u,done)


        if self.vel_cost_type == "linear":
            cost_vel = (-self.vel_cost_coeff/self.world_info["timestep"]) * dcom[:,0]
        elif self.vel_cost_type == "quadratic":
            cost_vel = TT.square(dcom[:,0]/self.world_info["timestep"] - self.vel_cost_target) #pylint: disable=E1111
        else:
            raise ValueError
        cost_ctrl = .5*self.ctrl_cost_coeff*TT.square(u).sum(axis=1)
        cost_impact = .5*self.impact_cost_coeff * TT.square(f).sum(axis=1)
        if self.clip_impact_cost:
            cost_impact = TT.minimum(cost_impact, self.clip_impact_cost) #pylint: disable=E1111


        jntpos_mask = self.world_info["jnt_islimited"]
        if self.jntpos_root_only:
            jntpos_mask &= (self.world_info["jnt_body_id"] == 1)
        jntpos_inds = np.flatnonzero(jntpos_mask)
        jntpos_dofs = np.array([dofidx for (dofidx,jntidx) in enumerate(self.world_info["dof_jnt_id"]) if jntidx in jntpos_inds])        
        cost_jntpos = (.5*self.jntpos_cost_coeff) * (TT.abs_ if self.jntpos_use_l1 else TT.square)(y[:,jntpos_dofs]).sum(axis=1)

        cost_done = (done-1)*self.done_cost_coeff
        feats = [y[:,1:],f,dist]
        if self.use_kinematic_features: feats.append(kin)
        obs = TT.concatenate(feats,axis=1)
        return [TT.switch(done[:,None], x, y),  [notdone*cost_vel, notdone*cost_ctrl, notdone*cost_impact, notdone*cost_jntpos, cost_done] , obs ]
Developer: SFPD, Project: rlreloaded, Lines: 54, Source: mjc.py
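
The returned cost list decomposes per step as

$$c_{\mathrm{ctrl}} = \tfrac{1}{2} c_u \lVert u \rVert^2, \qquad c_{\mathrm{impact}} = \tfrac{1}{2} c_f \lVert f \rVert^2 \ (\text{optionally clipped}), \qquad c_{\mathrm{vel}} \in \Big\{ -k\, \frac{\Delta x_{\mathrm{com}}}{\Delta t},\ \Big(\frac{\Delta x_{\mathrm{com}}}{\Delta t} - v^\ast\Big)^2 \Big\},$$

plus an L1 or squared penalty on limited joint positions and cost_done = (done - 1) * done_cost_coeff, which acts as a negative (survival bonus) cost while the trial is still running; all terms except cost_done are zeroed once the trial ends.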


Note: The theano.tensor.square examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects and remain the copyright of their original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.