

Python tf_util.argmax Method Code Examples

This article collects typical usage examples of the Python method baselines.common.tf_util.argmax. If you are wondering what tf_util.argmax does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples from baselines.common.tf_util, the module in which the method is defined.


The following presents 9 code examples of the tf_util.argmax method, sorted by popularity by default.
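
Before the examples, here is a minimal, self-contained usage sketch. It assumes, as in older baselines releases, that tf_util.argmax is a thin wrapper that forwards to tf.argmax(x, axis=axis); verify against the version you have installed before relying on the exact signature.

# Minimal sketch, assuming tf_util.argmax forwards to tf.argmax(x, axis=axis).
import tensorflow as tf
from baselines.common import tf_util as U

logits = tf.constant([[1.0, 3.0, 2.0],
                      [0.5, 0.1, 0.9]])
greedy = U.argmax(logits, axis=-1)  # index of the largest logit in each row

with tf.Session() as sess:
    print(sess.run(greedy))  # expected: [1 2]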

Example 1: sample_dtype

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=-1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Developer: Hwhitetooth, Project: lirpg, Lines of code: 27, Source: distributions.py

Example 2: sample_dtype

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def sample_dtype(self):
        return tf.int32

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1) 
Developer: AdamStelmaszczyk, Project: learning2run, Lines of code: 27, Source: distributions.py

Example 3: mode

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def mode(self):
        return tf.argmax(self.logits, axis=-1) 
Developer: Hwhitetooth, Project: lirpg, Lines of code: 4, Source: distributions.py

Example 4: sample

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def sample(self):
        u = tf.random_uniform(tf.shape(self.logits))
        return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Developer: Hwhitetooth, Project: lirpg, Lines of code: 5, Source: distributions.py

Example 5: sample

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def sample(self):
        u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)
        return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines of code: 5, Source: distributions.py
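
Examples 4 and 5 implement the Gumbel-max trick: if u is drawn uniformly from (0, 1), then argmax(logits - log(-log(u))) is a sample from the categorical distribution with probabilities softmax(logits). The sketch below (plain NumPy, independent of baselines; the logit values are illustrative only) checks this empirically.

# Standalone NumPy sketch of the Gumbel-max trick used in sample() above.
import numpy as np

logits = np.array([2.0, 0.5, 1.0])
probs = np.exp(logits) / np.exp(logits).sum()    # softmax probabilities

rng = np.random.default_rng(0)
u = rng.uniform(size=(100000, logits.size))      # u ~ Uniform(0, 1)
samples = np.argmax(logits - np.log(-np.log(u)), axis=-1)

empirical = np.bincount(samples, minlength=logits.size) / samples.size
print(probs)      # [0.6285 0.1402 0.2313] (rounded)
print(empirical)  # should be close to probs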

Example 6: pdfromlatent

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        pdparam = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        return self.pdfromflat(pdparam), pdparam

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=-1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Developer: quantumiracle, Project: Reinforcement_Learning_for_Traffic_Light_Control, Lines of code: 28, Source: distributions.py
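
Examples 6 and 9 show pdfromlatent, which projects the policy network's latent features to one logit per discrete action through a fully connected layer named 'pi' and wraps the result in the categorical distribution object. The sketch below shows how it is typically wired into a policy head; CategoricalPdType, the action count, and the latent size are assumptions about the surrounding baselines code (newer revisions only), not something the snippets above guarantee.

# Hedged sketch of how pdfromlatent plugs into a policy head (TF1-style graph).
# CategoricalPdType, the action count, and the latent size are illustrative assumptions.
import tensorflow as tf
from baselines.common.distributions import CategoricalPdType

latent = tf.placeholder(tf.float32, [None, 64])   # features from the policy network
pdtype = CategoricalPdType(4)                     # 4 discrete actions
pd, logits = pdtype.pdfromlatent(latent, init_scale=0.01)

action = pd.sample()      # stochastic action via the Gumbel-max trick above
greedy = pd.mode()        # deterministic action: argmax of the logits
neglogp = pd.neglogp(action)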

Example 7: mode

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def mode(self):
        return U.argmax(self.logits, axis=1) 
Developer: AdamStelmaszczyk, Project: learning2run, Lines of code: 4, Source: distributions.py

Example 8: sample

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def sample(self):
        u = tf.random_uniform(tf.shape(self.logits))
        return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=1) 
Developer: AdamStelmaszczyk, Project: learning2run, Lines of code: 5, Source: distributions.py

Example 9: pdfromlatent

# Required import: from baselines.common import tf_util [as an alias]
# Or: from baselines.common.tf_util import argmax [as an alias]
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
        pdparam = _matching_fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
        return self.pdfromflat(pdparam), pdparam

# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
#     def __init__(self, logits):
#         self.logits = logits
#         self.ps = tf.nn.softmax(logits)
#     @classmethod
#     def fromflat(cls, flat):
#         return cls(flat)
#     def flatparam(self):
#         return self.logits
#     def mode(self):
#         return U.argmax(self.logits, axis=-1)
#     def logp(self, x):
#         return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
#     def kl(self, other):
#         return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
#                 - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def entropy(self):
#         return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
#     def sample(self):
#         u = tf.random_uniform(tf.shape(self.logits))
#         return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1) 
Developer: hiwonjoon, Project: ICML2019-TREX, Lines of code: 28, Source: distributions.py


Note: The baselines.common.tf_util.argmax examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright; distribution and use should follow each project's license. Please do not republish without permission.