This article collects typical usage examples of the Python method chainer.functions.softmax. If you are wondering what functions.softmax does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, chainer.functions.
The following presents 15 code examples of functions.softmax, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: mellowmax
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def mellowmax(values, omega=1., axis=1):
    """Mellowmax function.

    This is a kind of softmax function that, unlike the Boltzmann softmax,
    is a non-expansion.

    See: http://arxiv.org/abs/1612.05628

    Args:
        values (Variable or ndarray): Input values. Mellowmax is taken
            along the second axis by default.
        omega (float): Parameter of mellowmax.
        axis (int): Axis along which mellowmax is taken.

    Returns:
        outputs (Variable)
    """
    n = values.shape[axis]
    return (F.logsumexp(omega * values, axis=axis) - np.log(n)) / omega
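A quick usage sketch (the input values below are hypothetical): as omega grows, mellowmax approaches the hard max; as it shrinks toward zero, it approaches the mean.

import numpy as np
import chainer.functions as F

values = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
soft = mellowmax(values, omega=1.0, axis=1)    # ~2.31, between mean (2) and max (3)
hard = mellowmax(values, omega=100.0, axis=1)  # ~2.99, close to max(values)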
Example 2: __call__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __call__(self, x):
    h = x
    for l in self.conv_layers:
        h = self.activation(l(h))

    # Advantage stream
    batch_size = x.shape[0]
    h = self.activation(self.main_stream(h))
    h_a, h_v = F.split_axis(h, 2, axis=-1)
    ya = F.reshape(self.a_stream(h_a),
                   (batch_size, self.n_actions, self.n_atoms))
    mean = F.sum(ya, axis=1, keepdims=True) / self.n_actions
    ya, mean = F.broadcast(ya, mean)
    ya -= mean

    # State-value stream
    ys = F.reshape(self.v_stream(h_v), (batch_size, 1, self.n_atoms))
    ya, ys = F.broadcast(ya, ys)
    q = F.softmax(ya + ys, axis=2)
    return action_value.DistributionalDiscreteActionValue(q, self.z_values)
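The key step is the softmax over axis=2, which turns each action's atom logits into a probability distribution over return atoms. A standalone sanity check with made-up shapes:

import numpy as np
import chainer.functions as F

logits = np.random.randn(4, 6, 51).astype(np.float32)  # (batch, actions, atoms)
q = F.softmax(logits, axis=2)
assert np.allclose(q.array.sum(axis=2), 1.0, atol=1e-5)  # one distribution per action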
Example 3: forward_one_step
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def forward_one_step(self, x_data, y_data, state, train=True, dropout_ratio=0.5):
    # Chainer v1-style API: `volatile` and the dropout `train` flag were
    # removed in later Chainer versions.
    x = Variable(x_data, volatile=not train)
    t = Variable(y_data, volatile=not train)
    h0 = self.embed(x)
    h1_in = self.l1_x(F.dropout(h0, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
    c1, h1 = F.lstm(state['c1'], h1_in)
    h2_in = self.l2_x(F.dropout(h1, ratio=dropout_ratio, train=train)) + self.l2_h(state['h2'])
    c2, h2 = F.lstm(state['c2'], h2_in)
    y = self.l3(F.dropout(h2, ratio=dropout_ratio, train=train))
    state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
    if train:
        return state, F.softmax_cross_entropy(y, t)
    else:
        return state, F.softmax(y)
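If this method were ported to Chainer v2 or later, the `volatile` and `train` arguments would disappear in favor of configuration scopes. A rough sketch of the inference call under that assumption (`model`, `x_data`, `y_data`, and `state` are assumed to exist):

import chainer

with chainer.using_config('train', False), chainer.no_backprop_mode():
    state, probs = model.forward_one_step(x_data, y_data, state)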
Example 4: __call__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 2, 1)
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    h = F.relu(self.fc4(h))
    h = self.fc5(h)
    h = F.reshape(h, (x.data.shape[0], 3, 16, 16))
    h = self.channelwise_inhibited(h)
    if self.train:
        self.loss = F.softmax_cross_entropy(h, t, normalize=False)
        return self.loss
    else:
        self.pred = F.softmax(h)
        return self.pred
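With no axis argument, `F.softmax` normalizes over axis=1, so `self.pred` holds a per-pixel distribution over the 3 class channels. A minimal check with random data:

import numpy as np
import chainer.functions as F

h = np.random.randn(2, 3, 16, 16).astype(np.float32)
p = F.softmax(h)  # default axis=1: per-pixel distribution over 3 classes
assert np.allclose(p.array.sum(axis=1), 1.0, atol=1e-5)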
Example 5: __call__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 2, 1)
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    # Same as Example 4, except dropout is applied after fc4.
    h = F.dropout(F.relu(self.fc4(h)), train=self.train)
    h = self.fc5(h)
    h = F.reshape(h, (x.data.shape[0], 3, 16, 16))
    h = self.channelwise_inhibited(h)
    if self.train:
        self.loss = F.softmax_cross_entropy(h, t, normalize=False)
        return self.loss
    else:
        self.pred = F.softmax(h)
        return self.pred
Example 6: forward
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def forward(self, batch):
    label_onehot_batch = [self._onehot_encode(pair[1]) for pair in batch]

    input_img, ground_truth = self.converter(batch, self.device)
    ground_truth_onehot = self.converter(label_onehot_batch, self.device)
    input_img = Variable(input_img, volatile=not self.gen.train)
    ground_truth = Variable(ground_truth, volatile=not self.gen.train)
    ground_truth_onehot = Variable(ground_truth_onehot, volatile=not self.gen.train)

    x_real = self._make_dis_input(input_img, ground_truth_onehot)
    y_real = self.dis(x_real)

    pred_label_map = self.gen(input_img)
    x_fake = self._make_dis_input(input_img, F.softmax(pred_label_map))
    y_fake = self.dis(x_fake)

    self.y_fake = y_fake
    self.y_real = y_real
    self.pred_label_map = pred_label_map
    self.ground_truth = ground_truth
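The generator's logits are passed through `F.softmax` so the discriminator sees label maps in [0, 1], matching the one-hot ground truth. The `_onehot_encode` helper is not shown above; a hypothetical sketch of what it might do:

import numpy as np

def onehot_encode(label_map, n_class):
    # (H, W) integer labels -> (n_class, H, W) float32 one-hot planes
    return (np.arange(n_class)[:, None, None] == label_map[None]).astype(np.float32)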
Example 7: generate
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def generate(net, image_model, image_path):
    feature = image_model.feature(image_path)
    net.initialize(feature)
    candidates = [(net, [bos], 0)]
    for i in range(max_length):
        next_candidates = []
        for prev_net, tokens, likelihood in candidates:
            if tokens[-1] == eos:
                next_candidates.append((None, tokens, likelihood))
                continue
            net = prev_net.copy()
            x = xp.asarray([tokens[-1]]).astype(np.int32)
            y = F.softmax(net(x))
            token_likelihood = np.log(cuda.to_cpu(y.data[0]))
            order = token_likelihood.argsort()[-beam_width:][::-1]
            next_candidates.extend([(net, tokens + [token], likelihood + token_likelihood[token])
                                    for token in order])
        candidates = sorted(next_candidates, key=lambda x: -x[2])[:beam_width]
        if all([candidate[1][-1] == eos for candidate in candidates]):
            break
    return [candidate[1] for candidate in candidates]
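Taking `np.log` of the softmax output works, but `F.log_softmax` computes the same quantity in one numerically more stable step. A standalone check of the equivalence (hypothetical vocabulary size):

import numpy as np
import chainer.functions as F

logits = np.random.randn(1, 1000).astype(np.float32)
a = np.log(F.softmax(logits).array[0])
b = F.log_softmax(logits).array[0]
assert np.allclose(a, b, atol=1e-5)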
Example 8: __call__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __call__(self, x):
    y = self.branches(x)

    u = F.sum(y, axis=1)
    s = F.average_pooling_2d(u, ksize=u.shape[2:])
    z = self.fc1(s)
    w = self.fc2(z)

    batch = w.shape[0]
    w = F.reshape(w, shape=(batch, self.num_branches, self.out_channels))
    w = self.softmax(w)
    w = F.expand_dims(F.expand_dims(w, axis=3), axis=4)

    y = y * w
    y = F.sum(y, axis=1)
    return y
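Assuming `self.softmax` normalizes over axis=1 (the branch axis), the attention weights for each output channel sum to one across branches. A standalone check with made-up sizes:

import numpy as np
import chainer.functions as F

w = np.random.randn(2, 3, 8).astype(np.float32)  # (batch, branches, channels)
attn = F.softmax(w, axis=1)
assert np.allclose(attn.array.sum(axis=1), 1.0, atol=1e-5)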
Example 9: __call__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __call__(self, doc_ids, update_only_docs=False):
    """Given an array of document integer indices, return a vector
    for each document. The vector is composed of topic weights projected
    onto topic vectors.

    Args:
        doc_ids : chainer.Variable
            One-dimensional batch vector of document IDs.

    Returns:
        doc_vector : chainer.Variable
            2-D batch of embeddings, one vector per document.
    """
    # (batchsize, ) --> (batchsize, multinomial)
    proportions = self.proportions(doc_ids, softmax=True)
    # (batchsize, n_factors) * (n_factors, n_dim) --> (batchsize, n_dim)
    factors = F.dropout(self.factors(), ratio=self.dropout_ratio)
    if update_only_docs:
        factors.unchain_backward()
    w_sum = F.matmul(proportions, factors)
    return w_sum
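The final `F.matmul` is a plain matrix product: topic proportions of shape (batchsize, n_factors) times topic vectors of shape (n_factors, n_dim) give one n_dim embedding per document. In NumPy terms, with hypothetical sizes:

import numpy as np

proportions = np.random.dirichlet([1.0] * 5, size=4).astype(np.float32)  # (4, 5), rows sum to 1
factors = np.random.randn(5, 32).astype(np.float32)                      # (5, 32)
doc_vectors = proportions @ factors                                       # (4, 32)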
Example 10: proportions
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def proportions(self, doc_ids, softmax=False):
    """Given an array of document indices, return a vector of
    unnormalized topic weights for each document.

    Returns:
        doc_weights : chainer.Variable
            2-D array of topic weights, one row per document.
    """
    w = self.weights(doc_ids)
    if softmax:
        size = w.data.shape
        # Bernoulli mask: randomly drops topics before renormalizing.
        mask = self.xp.random.random_integers(0, 1, size=size)
        y = (F.softmax(w * self.temperature) *
             Variable(mask.astype('float32')))
        norm, y = F.broadcast(F.expand_dims(F.sum(y, axis=1), 1), y)
        return y / (norm + 1e-7)
    else:
        return w
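The mask-then-renormalize step can be reproduced standalone (the temperature factor is omitted here for brevity). Note that `random_integers` is deprecated in modern NumPy, where `randint(0, 2, size)` draws the same Bernoulli mask:

import numpy as np
import chainer.functions as F

w = np.random.randn(4, 10).astype(np.float32)
mask = np.random.randint(0, 2, size=w.shape).astype(np.float32)
y = F.softmax(w).array * mask
y = y / (y.sum(axis=1, keepdims=True) + 1e-7)  # surviving topics re-sum to ~1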
Example 11: attend
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def attend(self, query, key, value, mask, minfs=None):
    """
    Input shapes:
        q=(b, units, dec_l), k=(b, units, enc_l),
        v=(b, units, dec_l, enc_l), m=(b, dec_l, enc_l)
    """
    # Calculate attention scores, masking zero-padded areas
    pre_a = F.batch_matmul(query, key, transa=True)  # (b, dec_l, enc_l)
    minfs = self.xp.full(pre_a.shape, -np.inf, pre_a.dtype) \
        if minfs is None else minfs
    pre_a = F.where(mask, pre_a, minfs)
    a = F.softmax(pre_a, axis=2)
    # If all values along axis=2 are -inf, softmax yields NaN; re-mask to zero.
    a = F.where(self.xp.isnan(a.data),
                self.xp.zeros(a.shape, dtype=a.dtype), a)
    reshaped_a = a[:, None]  # (b, 1, dec_l, enc_l)

    # Calculate the weighted sum
    pre_c = F.broadcast_to(reshaped_a, value.shape) * value
    c = F.sum(pre_c, axis=3, keepdims=True)  # (b, units, dec_l, 1)
    return c
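The NaN re-masking matters whenever a query row is entirely padded: softmax over a row of -inf produces NaN, not zeros. A standalone illustration with hypothetical shapes:

import numpy as np
import chainer.functions as F

scores = np.random.randn(1, 2, 4).astype(np.float32)
mask = np.array([[[True, True, False, False],
                  [False, False, False, False]]])     # second row fully padded
neg_inf = np.full(scores.shape, -np.inf, dtype=scores.dtype)
a = F.softmax(F.where(mask, scores, neg_inf), axis=2)
assert np.isnan(a.array[0, 1]).all()                  # hence the re-mask above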
Example 12: decode_predictions
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def decode_predictions(self, predictions):
    # concatenate all individual predictions and slice for each time step
    predictions = F.concat([F.expand_dims(p, axis=0) for p in predictions], axis=0)

    words = []
    with cuda.get_device_from_array(predictions.data):
        for prediction in F.separate(predictions, axis=0):
            prediction = F.squeeze(prediction, axis=0)
            prediction = F.softmax(prediction, axis=1)
            prediction = self.xp.argmax(prediction.data, axis=1)
            word = self.loss_metrics.strip_prediction(prediction[self.xp.newaxis, ...])[0]
            if len(word) == 1 and word[0] == 0:
                return ''
            word = "".join(map(self.loss_metrics.label_to_char, word))
            word = word.replace(chr(self.loss_metrics.char_map[str(self.loss_metrics.blank_symbol)]), '')
            words.append(word)

    text = " ".join(words)
    return text
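Since softmax is monotonic, the argmax over probabilities equals the argmax over raw logits; the `F.softmax` call here only matters if the probabilities themselves are inspected. A quick check:

import numpy as np
import chainer.functions as F

logits = np.random.randn(7, 40).astype(np.float32)  # (timesteps, vocab)
assert (F.softmax(logits, axis=1).array.argmax(axis=1) == logits.argmax(axis=1)).all()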
Example 13: get_gaussian_params
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def get_gaussian_params(self, x):
    h = F.tanh(self.l1(x))
    h = self.l2(h)

    pi = h[:, :self.gaussian_mixtures]
    mu_var_dim = self.gaussian_mixtures * self.input_dim
    mu = h[:, self.gaussian_mixtures:self.gaussian_mixtures + mu_var_dim]
    log_var = h[:, self.gaussian_mixtures + mu_var_dim:]

    n_batch = x.shape[0]

    # mixing coefficients
    pi = F.reshape(pi, (n_batch, self.gaussian_mixtures))
    pi = F.softmax(pi, axis=1)

    # means
    mu = F.reshape(mu, (n_batch, self.gaussian_mixtures, self.input_dim))

    # log variances
    log_var = F.reshape(
        log_var, (n_batch, self.gaussian_mixtures, self.input_dim))

    return pi, mu, log_var
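The slicing implies that `self.l2` must output `gaussian_mixtures * (1 + 2 * input_dim)` units: one block for the mixing coefficients, plus `gaussian_mixtures * input_dim` each for the means and log variances. With hypothetical sizes:

gaussian_mixtures, input_dim = 5, 2
out_units = gaussian_mixtures * (1 + 2 * input_dim)  # 25 units from self.l2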
Example 14: __init__
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def __init__(self, ndim_obs, n_actions, n_atoms, v_min, v_max,
             n_hidden_channels, n_hidden_layers,
             nonlinearity=F.relu, last_wscale=1.0):
    assert n_atoms >= 2
    assert v_min < v_max
    z_values = np.linspace(v_min, v_max, num=n_atoms, dtype=np.float32)
    model = chainerrl.links.Sequence(
        MLP(in_size=ndim_obs, out_size=n_actions * n_atoms,
            hidden_sizes=[n_hidden_channels] * n_hidden_layers,
            nonlinearity=nonlinearity,
            last_wscale=last_wscale),
        lambda x: F.reshape(x, (-1, n_actions, n_atoms)),
        lambda x: F.softmax(x, axis=2),
    )
    super().__init__(model=model, z_values=z_values)
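The `z_values` grid is the fixed support of the return distribution. With the standard C51 settings (a hypothetical choice here), the atoms come out evenly spaced:

import numpy as np

z_values = np.linspace(-10.0, 10.0, num=51, dtype=np.float32)
assert z_values.shape == (51,)
assert np.isclose(z_values[1] - z_values[0], 0.4)  # (v_max - v_min) / (n_atoms - 1)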
Example 15: select_action
# Required import: from chainer import functions [as F]
# Or: from chainer.functions import softmax [as an alias]
def select_action(self, t, greedy_action_func, action_value=None):
    assert action_value is not None
    assert isinstance(action_value,
                      chainerrl.action_value.DiscreteActionValue)
    n_actions = action_value.q_values.shape[1]
    with chainer.no_backprop_mode():
        probs = chainer.cuda.to_cpu(
            F.softmax(action_value.q_values / self.T).array).ravel()
    return np.random.choice(np.arange(n_actions), p=probs)
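Dividing the Q-values by the temperature `T` before the softmax controls exploration: low temperatures approach greedy selection, high temperatures approach uniform sampling. A standalone sketch with made-up Q-values:

import numpy as np
import chainer.functions as F

q = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
for T in (0.1, 1.0, 10.0):
    probs = F.softmax(q / T).array.ravel()
    print(T, probs)  # T=0.1 ~ near-greedy, T=10 ~ nearly uniform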