This article collects typical usage examples of the tensorflow.digamma method in Python. If you are wondering how to use tensorflow.digamma, or what it can be used for, the curated code examples below may help. You can also explore other usages of the tensorflow module.
Six code examples of the tensorflow.digamma method are shown below, sorted by popularity by default.
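As a quick orientation before the examples: tf.digamma computes the digamma function ψ(x) = d/dx log Γ(x) elementwise. A minimal sketch (TF 2.x eager mode; in TF 1.x the same op is exposed as tf.digamma, later also as tf.math.digamma):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 5.0])
# digamma(1) = -0.5772... (negative Euler-Mascheroni constant)
print(tf.math.digamma(x))  # approximately [-0.5772, 0.4228, 1.5061]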
Example 1: fitGamma
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def fitGamma(cls, tau):
    # Moment-based initialization of the Gamma shape parameter alpha.
    alpha = 0.5/(tf.log(tf.reduce_mean(tau))
                 + 1e-6  # added due to numerical instability
                 - tf.reduce_mean(tf.log(tau)))
    # Refine the shape estimate with fixed-point Newton iterations.
    for i in range(20):
        alpha = (1. / (1./alpha
                       + (tf.reduce_mean(tf.log(tau))
                          - tf.log(tf.reduce_mean(tau))
                          + tf.log(alpha)
                          - tf.digamma(alpha))
                       / (alpha**2*(1./alpha
                                    - tf.polygamma(tf.ones_like(alpha),
                                                   alpha)))))
    # The rate parameter follows from the shape via the sample mean.
    beta = alpha/tf.reduce_mean(tau)
    return (alpha, beta)
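The loop is a generalized-Newton update for the Gamma shape, 1/α ← 1/α + (E[log τ] − log E[τ] + log α − ψ(α)) / (α²(1/α − ψ′(α))), with ψ the digamma and ψ′ the trigamma (tf.polygamma of order 1). A hypothetical usage sketch (TF 1.x graph mode; GammaEstimator and the synthetic data are assumptions, not from the source):

import tensorflow as tf

tau = tf.random_gamma([10000], alpha=3.0, beta=2.0)  # synthetic Gamma draws
alpha_hat, beta_hat = GammaEstimator.fitGamma(tau)   # hypothetical host class
with tf.Session() as sess:
    a, b = sess.run([alpha_hat, beta_hat])           # expect roughly (3.0, 2.0)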
Example 2: compute_kumar2beta_kld
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def compute_kumar2beta_kld(a, b, alpha, beta):
    # KL divergence from a Kumaraswamy(a, b) to a Beta(alpha, beta) distribution.
    # Note: the original used the pre-1.0 names tf.mul/tf.div and Python 2's
    # xrange; they are replaced with tf.multiply/tf.divide and range here.
    # precompute some terms
    ab = tf.multiply(a, b)
    a_inv = tf.pow(a, -1)
    b_inv = tf.pow(b, -1)
    # compute a Taylor expansion for the E[log(1 - v)] term
    kl = tf.multiply(tf.pow(1 + ab, -1), beta_fn(a_inv, b))
    for idx in range(10):
        kl += tf.multiply(tf.pow(idx + 2 + ab, -1),
                          beta_fn(tf.multiply(idx + 2., a_inv), b))
    kl = tf.multiply(tf.multiply(beta - 1, b), kl)
    # -0.57721 is the negative Euler-Mascheroni constant, i.e. digamma(1)
    kl += tf.multiply(tf.divide(a - alpha, a), -0.57721 - tf.digamma(b) - b_inv)
    # add normalization constants
    kl += tf.log(ab) + tf.log(beta_fn(alpha, beta))
    # final term
    kl += tf.divide(-(b - 1), b)
    return kl
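beta_fn is not defined in this snippet; a common stand-in (an assumption, not from the source) evaluates the Beta function via log-gamma for numerical stability:

def beta_fn(a, b):
    # Beta(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b), computed in log space
    return tf.exp(tf.lgamma(a) + tf.lgamma(b) - tf.lgamma(a + b))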
Example 3: nuStep
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def nuStep(cls, nu, n, delta, p=1.):
    # Newton-style refinement of the Student-t degrees of freedom nu, given
    # squared Mahalanobis distances delta of n observations in p dimensions.
    three = tf.constant(3., dtype=nu.dtype)
    for i in range(2):
        w = (nu+p)/(nu+delta)
        # fp and fpp act as the gradient and curvature terms of the update
        fp = (-tf.digamma(nu/2) + tf.log(nu/2)
              + 1./n*tf.reduce_sum(tf.log((nu+p)/(nu+delta)) - w,
                                   axis=0)
              + 1
              + tf.digamma((p+nu)/2) - tf.log((p+nu)/2))
        fpp = (tf.polygamma(three, nu/2)/2. + 1./nu
               + tf.polygamma(three, (p+nu)/2)/2. - 1./(nu+p)
               + 1./n*tf.reduce_sum((delta-p)/(nu+delta)**2*(w-1),
                                    axis=0))
        nu = nu + fp/fpp
    return (nu)
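A hypothetical call for a univariate fit (p = 1), where delta holds the squared standardized residuals; the class name TModel and the zero-mean/unit-scale data are illustrations, not from the source:

x = tf.random_normal([500])          # synthetic data, assumed standardized
delta = tf.square(x)                 # squared Mahalanobis distances for p = 1
nu = TModel.nuStep(tf.constant(5.), n=500., delta=delta)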
Example 4: test_Digamma
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def test_Digamma(self):
    t = tf.digamma(self.random(4, 3))
    self.check(t)
Example 5: digamma
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def digamma(self, a):
    return tf.digamma(a)
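For reference, the equivalent wrapper under the TF 2.x namespace, where the op moved to tf.math (a minimal sketch, not part of the original backend):

import tensorflow as tf

def digamma(a):
    return tf.math.digamma(a)  # TF 2.x home of the former tf.digamma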
Example 6: loss
# Required import: import tensorflow (the example uses the alias tf)
import tensorflow as tf
def loss(self, C, X_g, X_, alpha, beta, z, E, Zik, Tk):
    # Poisson log-likelihood terms (1e-10 guards against log(0))
    const_term = C * tf.log(1e-10 + X_) - X_
    const_term = tf.reduce_sum(const_term, 1)
    loss1 = C * tf.log(1e-10 + X_g) - X_g
    loss1 = tf.reduce_sum(loss1, 1)
    loss2 = self.log_q(z, alpha + self.B, beta)
    loss2 = const_term * tf.reduce_sum(loss2, 1)
    # loss3 = -log_r(E, alpha, beta)
    loss3 = -self.log_r(E, alpha + self.B, beta)
    loss3 = const_term * tf.reduce_sum(loss3, 1)
    # The sum of the KL terms of all generator weights (up to constant terms)
    kl_w = 0.0
    if not self.w_determinist:
        for l in range(0, self.L + 1):
            kl_w += tf.reduce_sum(
                -0.5 * tf.reduce_sum(tf.square(self.generator_params[l]), 1)
            )
    # KL divergence term of the Gamma variational posterior
    kl_term = (
        (alpha - self.aa - Zik) * tf.digamma(alpha)
        - tf.lgamma(alpha)
        + (self.aa + Zik) * tf.log(beta)
        + alpha * (Tk + self.bb - beta) / beta
    )
    kl_term = -tf.reduce_sum(kl_term, 1)
    return (
        -tf.reduce_mean(loss1 + loss2 + loss3 + kl_term)
        + kl_w / self.aux_data.shape[0]
    )
# fitting PCRL to observed data
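The digamma in kl_term above arises the usual way in Gamma KL divergences: for z ~ Gamma(alpha, beta) in the shape/rate parameterization, E[log z] = digamma(alpha) - log(beta). A minimal illustration of that identity (the parameterization is an assumption about this model, not stated in the snippet):

import tensorflow as tf

alpha, beta = tf.constant(3.0), tf.constant(2.0)
e_log_z = tf.digamma(alpha) - tf.log(beta)  # expected log of a Gamma(3, 2) variable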