This article collects typical usage examples of the keras.backend.log method in Python. If you have been wondering how backend.log is used in practice, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing module, keras.backend.
The following shows 15 code examples of backend.log, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
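Before diving into the examples, a quick note on the method itself: K.log computes the elementwise natural logarithm of a tensor, and in loss functions it is almost always combined with a small epsilon or a clip so that log(0) is never evaluated. A minimal, self-contained sketch (the input values are made up for illustration):

import numpy as np
from keras import backend as K

# elementwise natural log of a small constant tensor
probs = K.constant(np.array([0.1, 0.5, 0.9]))
log_probs = K.log(probs + K.epsilon())  # epsilon guards against log(0)
print(K.eval(log_probs))  # approximately [-2.30, -0.69, -0.11]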
Example 1: actor_optimizer
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def actor_optimizer(self):
    action = K.placeholder(shape=[None, self.action_size])
    advantages = K.placeholder(shape=[None, ])

    policy = self.actor.output

    # policy cross-entropy loss
    action_prob = K.sum(action * policy, axis=1)
    cross_entropy = K.log(action_prob + 1e-10) * advantages
    cross_entropy = -K.sum(cross_entropy)

    # entropy term to keep the policy exploring
    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
    entropy = K.sum(entropy)

    # add the two terms to form the final loss
    loss = cross_entropy + 0.01 * entropy

    optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
    train = K.function([self.actor.input, action, advantages],
                       [loss], updates=updates)
    return train

# function that updates the value network
Example 2: optimizer
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def optimizer(self):
    action = K.placeholder(shape=[None, 5])
    discounted_rewards = K.placeholder(shape=[None, ])

    # compute the cross-entropy loss
    action_prob = K.sum(action * self.model.output, axis=1)
    cross_entropy = K.log(action_prob) * discounted_rewards
    loss = -K.sum(cross_entropy)

    # build the training function that updates the policy network
    optimizer = Adam(lr=self.learning_rate)
    updates = optimizer.get_updates(self.model.trainable_weights, [],
                                    loss)
    train = K.function([self.model.input, action, discounted_rewards], [],
                       updates=updates)
    return train

# select an action with the policy network
Example 3: build_model
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def build_model(self):
    input = Input(shape=self.state_size)
    conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
    conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
    conv = Flatten()(conv)
    fc = Dense(256, activation='relu')(conv)

    policy = Dense(self.action_size, activation='softmax')(fc)
    value = Dense(1, activation='linear')(fc)

    actor = Model(inputs=input, outputs=policy)
    critic = Model(inputs=input, outputs=value)

    actor._make_predict_function()
    critic._make_predict_function()

    actor.summary()
    critic.summary()

    return actor, critic

# make loss function for Policy Gradient
# [log(action probability) * advantages] will be input for the back prop
# we add entropy of action probability to loss
Example 4: actor_optimizer
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def actor_optimizer(self):
    action = K.placeholder(shape=[None, self.action_size])
    advantages = K.placeholder(shape=[None, ])

    policy = self.actor.output

    good_prob = K.sum(action * policy, axis=1)
    eligibility = K.log(good_prob + 1e-10) * advantages
    actor_loss = -K.sum(eligibility)

    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
    entropy = K.sum(entropy)

    loss = actor_loss + 0.01 * entropy

    optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
    train = K.function([self.actor.input, action, advantages], [loss], updates=updates)
    return train

# make loss function for Value approximation
Example 5: optimizer
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def optimizer(self):
    action = K.placeholder(shape=[None, 5])
    discounted_rewards = K.placeholder(shape=[None, ])

    # Calculate cross entropy error function
    action_prob = K.sum(action * self.model.output, axis=1)
    cross_entropy = K.log(action_prob) * discounted_rewards
    loss = -K.sum(cross_entropy)

    # create training function
    optimizer = Adam(lr=self.learning_rate)
    updates = optimizer.get_updates(self.model.trainable_weights, [],
                                    loss)
    train = K.function([self.model.input, action, discounted_rewards], [],
                       updates=updates)
    return train

# get action from policy network
Example 6: build_model
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def build_model(self):
    state = Input(batch_shape=(None, self.state_size))
    shared = Dense(self.hidden1, input_dim=self.state_size, activation='relu',
                   kernel_initializer='glorot_uniform')(state)

    actor_hidden = Dense(self.hidden2, activation='relu',
                         kernel_initializer='glorot_uniform')(shared)
    action_prob = Dense(self.action_size, activation='softmax',
                        kernel_initializer='glorot_uniform')(actor_hidden)

    value_hidden = Dense(self.hidden2, activation='relu',
                         kernel_initializer='he_uniform')(shared)
    state_value = Dense(1, activation='linear',
                        kernel_initializer='he_uniform')(value_hidden)

    actor = Model(inputs=state, outputs=action_prob)
    critic = Model(inputs=state, outputs=state_value)

    actor._make_predict_function()
    critic._make_predict_function()

    actor.summary()
    critic.summary()

    return actor, critic

# make loss function for Policy Gradient
# [log(action probability) * advantages] will be input for the back prop
# we add entropy of action probability to loss
Example 7: actor_optimizer
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def actor_optimizer(self):
    action = K.placeholder(shape=(None, self.action_size))
    advantages = K.placeholder(shape=(None, ))

    policy = self.actor.output

    good_prob = K.sum(action * policy, axis=1)
    eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
    loss = -K.sum(eligibility)

    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

    actor_loss = loss + 0.01 * entropy

    optimizer = Adam(lr=self.actor_lr)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
    train = K.function([self.actor.input, action, advantages], [], updates=updates)
    return train

# make loss function for Value approximation
Example 8: __init__
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def __init__(self,
             kind,
             reduce_loss=True,
             clip_prob=1e-6,
             regularize=False,
             location=None,
             growth=None):

    self.kind = kind
    self.reduce_loss = reduce_loss
    self.clip_prob = clip_prob

    if regularize == True or location is not None or growth is not None:
        raise DeprecationWarning('Directly penalizing beta has been found '
                                 'to be unnecessary when using bounded activation '
                                 'and clipping of log-likelihood. '
                                 'Use this method instead.')
Example 9: loss_function
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def loss_function(self, y_true, y_pred):
    y, u, a, b = _keras_split(y_true, y_pred)
    if self.kind == 'discrete':
        loglikelihoods = loglik_discrete(y, u, a, b)
    elif self.kind == 'continuous':
        loglikelihoods = loglik_continuous(y, u, a, b)

    if self.clip_prob is not None:
        loglikelihoods = K.clip(loglikelihoods,
                                log(self.clip_prob), log(1 - self.clip_prob))
    if self.reduce_loss:
        loss = -1.0 * K.mean(loglikelihoods, axis=-1)
    else:
        loss = -loglikelihoods

    return loss

# For backwards-compatibility
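Examples 8 and 9 are the constructor and loss method of a Weibull time-to-event loss class; the class name itself is not shown in this excerpt. Assuming the class were called, say, Loss, wiring it into a Keras model would look roughly like the sketch below (the class name, model, and optimizer are placeholders, not taken from the source project):

# hypothetical usage of the loss class from Examples 8-9
loss_fn = Loss(kind='discrete', reduce_loss=True, clip_prob=1e-6).loss_function
model.compile(optimizer='rmsprop', loss=loss_fn)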
Example 10: sort4minibatches
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def sort4minibatches(xvals, evals, tvals, batchsize):
    ntot = len(xvals)
    indices = np.arange(ntot)
    np.random.shuffle(indices)
    start_idx = 0
    esall = []
    for end_idx in list(range(batchsize, batchsize * (ntot // batchsize) + 1, batchsize)) + [ntot]:
        excerpt = indices[start_idx:end_idx]
        sort_idx = np.argsort(tvals[excerpt])[::-1]
        es = excerpt[sort_idx]
        esall += list(es)
        start_idx = end_idx
    return (xvals[esall], evals[esall], tvals[esall], esall)

# Define the Cox PH partial likelihood loss.
# Arguments: E (censoring status) and risk (the risk, i.e. log hazard ratio, predicted by the network) for a batch of input subjects.
# As defined, this function requires that all subjects in the input batch be sorted in descending order of
# survival/censoring time (i.e. arguments E and risk must already be in this order).
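The snippet above only prepares minibatches (each batch sorted by descending survival/censoring time); the loss the comment refers to is not part of this excerpt. A minimal sketch of a negative Cox partial log-likelihood written with the Keras backend could look like the following; the function name and exact formulation are illustrative, not taken from the source project:

# Hedged sketch of a Cox PH partial likelihood loss, assuming each batch is
# already sorted by descending survival/censoring time (see comment above).
# E is the censoring indicator (1 = event observed), risk is the predicted
# log hazard ratio; names and formulation are assumptions for illustration.
def negative_log_partial_likelihood(E, risk):
    hazard_ratio = K.exp(risk)
    # cumulative sum over the sorted batch gives the risk set for each subject
    log_risk = K.log(K.cumsum(hazard_ratio))
    uncensored_likelihood = risk - log_risk
    # only subjects with an observed event contribute to the partial likelihood
    censored_likelihood = uncensored_likelihood * E
    num_events = K.sum(E)
    return -K.sum(censored_likelihood) / num_events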
Example 11: crossentropy_reed_wrap
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
def crossentropy_reed_wrap(_beta):
    def crossentropy_reed_core(y_true, y_pred):
        """
        This loss function is proposed in:
        Reed et al. "Training Deep Neural Networks on Noisy Labels with Bootstrapping", 2014
        :param y_true:
        :param y_pred:
        :return:
        """
        # hyper param
        print(_beta)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model:
        #     bootstrapped target tensor; use predicted class proba directly to
        #     generate regression targets
        y_true_update = _beta * y_true + (1 - _beta) * y_pred

        # (2) compute loss as always
        _loss = -K.sum(y_true_update * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_core
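Because crossentropy_reed_wrap returns a closure, it is passed to Keras by calling the wrapper with the chosen beta. A small hedged example; the beta value, model, and optimizer are placeholders for illustration:

# hypothetical usage; beta=0.8 and the model/optimizer choice are assumptions
model.compile(optimizer='adam',
              loss=crossentropy_reed_wrap(0.8),
              metrics=['accuracy'])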
Example 12: softmax_loss
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
# Note: this snippet also uses TensorFlow directly (import tensorflow as tf)
def softmax_loss(y_true, y_pred):
    """Compute cross entropy loss aka softmax loss.

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted class probabilities (softmax output),
            tensor of shape (?, num_boxes, num_classes).

    # Returns
        softmax_loss: Softmax loss, tensor of shape (?, num_boxes).
    """
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred), axis=-1)
    return softmax_loss
Example 13: focal_loss
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
# Note: this snippet also uses TensorFlow directly (import tensorflow as tf)
def focal_loss(y_true, y_pred, gamma=2, alpha=0.25):
    """Compute focal loss.

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted class probabilities,
            tensor of shape (?, num_boxes, num_classes).

    # Returns
        focal_loss: Focal loss, tensor of shape (?, num_boxes).

    # References
        https://arxiv.org/abs/1708.02002
    """
    # y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
    focal_loss = -tf.reduce_sum(alpha * K.pow(1. - pt, gamma) * K.log(pt), axis=-1)
    return focal_loss
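Keras invokes a loss with only (y_true, y_pred), so overriding gamma or alpha means wrapping the function first. A small hedged example; the parameter values and the model are placeholders, not part of the source project:

# hypothetical usage; the gamma/alpha values and the model are assumptions
model.compile(optimizer='adam',
              loss=lambda y_true, y_pred: focal_loss(y_true, y_pred, gamma=2, alpha=0.25))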
Example 14: kl_divergence
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
# Note: shape_r_out and shape_c_out are constants defined elsewhere in the source project
def kl_divergence(y_true, y_pred):
    max_y_pred = K.repeat_elements(
        K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
                                        shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    y_pred /= max_y_pred

    sum_y_true = K.repeat_elements(
        K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)),
                                        shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    sum_y_pred = K.repeat_elements(
        K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)),
                                        shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    y_true /= (sum_y_true + K.epsilon())
    y_pred /= (sum_y_pred + K.epsilon())

    return 10 * K.sum(K.sum(y_true * K.log((y_true / (y_pred + K.epsilon())) + K.epsilon()), axis=-1), axis=-1)

# Correlation Coefficient Loss
Example 15: sparse_categorical_crossentropy
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import log [as alias]
# Note: this snippet also uses TensorFlow directly (import tensorflow as tf); the name `log`
# below refers to a project-local helper with a tfprint method, not keras.backend.log
def sparse_categorical_crossentropy(gt_ids, pred_one_hot_post_softmax):
    """
    K.sparse_categorical_crossentropy sometimes returns NaN,
    possibly because a division by zero occurs.
    https://qiita.com/4Ui_iUrz1/items/35a8089ab0ebc98061c1
    As a workaround, this function clips with a small epsilon so that
    log(0) is never evaluated.
    """
    gt_ids = log.tfprint(gt_ids, "cross:gt_ids:")
    pred_one_hot_post_softmax = log.tfprint(pred_one_hot_post_softmax,
                                            "cross:pred_one_hot_post_softmax:")

    gt_one_hot = K.one_hot(gt_ids, K.shape(pred_one_hot_post_softmax)[-1])
    gt_one_hot = log.tfprint(gt_one_hot, "cross:gt_one_hot:")

    epsilon = K.epsilon()  # 1e-07
    loss = -K.sum(
        gt_one_hot * K.log(
            tf.clip_by_value(pred_one_hot_post_softmax, epsilon, 1 - epsilon)),
        axis=-1)
    loss = log.tfprint(loss, "cross:loss:")
    return loss