This article collects typical usage examples of the scipy.special.softmax method in Python. If you are unsure what special.softmax does or how to use it, the curated code examples below may help. You can also explore further usage examples from the module that provides the method, scipy.special.
The following presents 15 code examples of special.softmax, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: to_prob
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def to_prob(probabilities: np.ndarray):
    """
    Apply softmax to `probabilities` if it is not already a distribution.

    Args:
        probabilities (array): [batch_size, num_classes, ...]

    Returns:
        Array with the same shape as `probabilities`.
    """
    not_bounded = np.min(probabilities) < 0 or np.max(probabilities) > 1.0
    multiclass = probabilities.shape[1] > 1
    sum_to_one = np.allclose(probabilities.sum(1), 1)
    if not_bounded or (multiclass and not sum_to_one):
        if multiclass:
            # Multi-class scores: normalize across the class axis.
            probabilities = softmax(probabilities, 1)
        else:
            # Single-logit (binary) case: apply the sigmoid instead.
            probabilities = expit(probabilities)
    return probabilities
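A minimal usage sketch, assuming the to_prob function above is in scope (the random inputs are illustrative, not part of the original repository): raw logits are converted into a valid distribution, while an array that is already a distribution passes through unchanged.

import numpy as np
from scipy.special import softmax

logits = np.random.randn(4, 3)               # raw scores, not a distribution
probs = to_prob(logits)                      # softmax is applied
assert np.allclose(probs.sum(1), 1.0)        # rows now sum to one
assert np.allclose(to_prob(probs), probs)    # already a distribution: unchanged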
Example 2: match
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def match(self, contexts):
    assert len(contexts) == len(self.context_noise)
    n = len(self._taxonomy)
    context_logprobs = np.zeros(n)
    axes_context_logprobs = _to_typed_list([
        np.zeros(len(self._taxonomy_tasks)),
        np.zeros(len(self._taxonomy_datasets)),
        np.zeros(len(self._taxonomy_metrics)),
    ])
    for context, noise, ms_noise, ts_noise in zip(
            contexts, self.context_noise, self.metrics_noise, self.task_noise):
        self.compute_context_logprobs(
            context, noise, ms_noise, ts_noise, context_logprobs, axes_context_logprobs
        )
    keys = self.taxonomy.taxonomy
    logprobs = context_logprobs
    # keys, logprobs = zip(*context_logprobs.items())
    probs = softmax(np.array(logprobs))
    axes_probs = [softmax(np.array(a)) for a in axes_context_logprobs]
    return (
        zip(keys, probs),
        zip(self._taxonomy_tasks, axes_probs[0]),
        zip(self._taxonomy_datasets, axes_probs[1]),
        zip(self._taxonomy_metrics, axes_probs[2]),
    )
Example 3: predict
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def predict(self, probas):
    resulting_preds = np.zeros((np.max([len(x) for x in probas]), self.n_class))
    for i, c in enumerate(self.mapped_classes):
        for indx, r in enumerate(c):
            resulting_preds[:, i] += probas[indx][:, r]
    # Average the accumulated probabilities over R.
    resulting_preds = resulting_preds / self.R
    # return resulting_preds / resulting_preds.sum(axis=1).reshape(-1, 1)
    return softmax(resulting_preds, axis=1)
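A quick sketch contrasting the two normalizations this method considers (the toy averaged scores are illustrative): the commented-out line renormalizes by the row sum, whereas softmax yields a strictly positive distribution even when a row's scores are zero or negative.

import numpy as np
from scipy.special import softmax

avg = np.array([[0.7, 0.3], [0.0, -0.2]])    # averaged per-class scores
print(softmax(avg, axis=1))                  # always a valid distribution
# Row-sum renormalization would break on the second row (sum <= 0):
# avg / avg.sum(axis=1).reshape(-1, 1)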
Example 4: test_to_prob
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_to_prob(an_array, a_binary_array):
    # Raw scores must be transformed.
    out = to_prob(an_array)
    assert not np.allclose(out, an_array)

    out = to_prob(a_binary_array)
    assert not np.allclose(out, a_binary_array)

    # Arrays that are already distributions must pass through unchanged.
    a_array_scaled = softmax(an_array, 1)
    a_binary_array_scaled = expit(a_binary_array)

    out = to_prob(a_array_scaled)
    assert np.allclose(out, a_array_scaled)

    out = to_prob(a_binary_array_scaled)
    assert np.allclose(out, a_binary_array_scaled)
Example 5: existence_accuracy
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def existence_accuracy(target, output, use_nodes=True, use_edges=True):
    if not use_nodes and not use_edges:
        raise ValueError("Nodes or edges (or both) must be used")
    tdds = utils_np.graphs_tuple_to_data_dicts(target)
    odds = utils_np.graphs_tuple_to_data_dicts(output)
    cs = []
    ss = []
    for td, od in zip(tdds, odds):
        nodes_to_predict = td["nodes"][:, 0] == 0
        xn = np.argmax(td["nodes"][:, 1:], axis=-1)
        xn = xn[nodes_to_predict]
        yn = np.argmax(softmax(od["nodes"][:, 1:], axis=1), axis=-1)
        yn = yn[nodes_to_predict]

        edges_to_predict = td["edges"][:, 0] == 0
        xe = np.argmax(td["edges"][:, 1:], axis=-1)
        xe = xe[edges_to_predict]
        ye = np.argmax(softmax(od["edges"][:, 1:], axis=1), axis=-1)
        ye = ye[edges_to_predict]

        c = []
        if use_nodes:
            c.append(xn == yn)
        if use_edges:
            c.append(xe == ye)
        c = np.concatenate(c, axis=0)
        s = np.all(c)
        cs.append(c)
        ss.append(s)
    correct = np.mean(np.concatenate(cs, axis=0))
    solved = np.mean(np.stack(ss))
    return correct, solved
Example 6: get_xhat_y_hat
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def get_xhat_y_hat(prototypes, w, x):
    # Soft assignment of each sample to the prototypes, based on distance.
    M = softmax(-cdist(x, prototypes), axis=1)
    x_hat = np.matmul(M, prototypes)
    y_hat = np.clip(
        np.matmul(M, w.reshape((-1, 1))),
        np.finfo(float).eps,
        1.0 - np.finfo(float).eps,
    )
    return M, x_hat, y_hat
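A self-contained sketch of calling this helper, assuming get_xhat_y_hat above is in scope (the shapes and random inputs are illustrative assumptions; cdist comes from scipy.spatial.distance): each row of M is a softmax over negative distances, so nearby prototypes receive most of the weight and each row sums to one.

import numpy as np
from scipy.spatial.distance import cdist
from scipy.special import softmax

prototypes = np.random.randn(5, 2)        # 5 prototypes in 2-D
w = np.random.rand(5)                     # one weight per prototype
x = np.random.randn(10, 2)                # 10 samples
M, x_hat, y_hat = get_xhat_y_hat(prototypes, w, x)
assert np.allclose(M.sum(axis=1), 1.0)    # soft assignments are distributions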
Example 7: inference
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def inference(args, model, tokenizer, prefix=""):
    inf_task = args.task_name
    inf_dataset = load_example(args, inf_task, tokenizer)
    inf_sampler = SequentialSampler(inf_dataset)
    inf_dataloader = DataLoader(inf_dataset, sampler=inf_sampler, batch_size=1)

    # Inference!
    logger.info("***** Running inference {} *****".format(prefix))
    preds = None
    out_label_ids = None
    for batch in tqdm(inf_dataloader, desc="Inferencing"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            inf_loss, logits = outputs[:2]
            pred_arr = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
        logger.info("pred_arr: %s", pred_arr)
        # Convert logits to class probabilities.
        pred_prob = np.squeeze(softmax(pred_arr, axis=1))
        logger.info("[0]: %s, [1]: %s", pred_prob[0], pred_prob[1])
        if args.output_mode == "classification":
            pred = np.argmax(pred_arr, axis=1)
        elif args.output_mode == "regression":
            pred = np.squeeze(pred_arr)
        if pred == 0:
            logger.info("Text is negative with confidence: %d ", pred_prob[0] * 100)
        else:
            logger.info("Text is positive with confidence: %d ", pred_prob[1] * 100)
Example 8: _generate_y
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def _generate_y(self, x, cluster_mean):
    model_info = np.random.normal(loc=cluster_mean, scale=0.1, size=cluster_mean.shape)
    w = np.matmul(self.Q, model_info)
    num_samples = x.shape[0]
    # Noisy logits -> class probabilities -> hard labels.
    prob = softmax(
        np.matmul(x, w) + np.random.normal(loc=0.0, scale=0.1, size=(num_samples, self.num_classes)),
        axis=1,
    )
    y = np.argmax(prob, axis=1)
    return y, w, model_info
Example 9: metric_multi_accuracy
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def metric_multi_accuracy(logits, labels, options_num):
    # Probability of the positive class for each option, grouped by question.
    logits = np.reshape(softmax(logits, -1)[:, 1], (len(logits) // options_num, options_num))
    labels = np.argmax(np.reshape(labels, (len(labels) // options_num, options_num)), -1)
    return metric_accuracy(logits, labels)
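A small worked sketch of the grouping logic above (the toy logits are illustrative): two questions with two options each, where every row is a (negative, positive) score pair for one option.

import numpy as np
from scipy.special import softmax

options_num = 2
logits = np.array([[0.1, 2.0], [0.3, 0.2],    # question 1: option 0 wins
                   [1.0, 0.1], [0.2, 3.0]])   # question 2: option 1 wins
pos = softmax(logits, -1)[:, 1]               # P(positive) per option
grouped = np.reshape(pos, (len(logits) // options_num, options_num))
print(np.argmax(grouped, -1))                 # predicted option per question -> [0 1]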
Example 10: test_binary_listnet_simple
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_binary_listnet_simple():
    y_pred = [0.5, 0.2]
    y_true = [1.0, 0.0]

    result = binary_listNet_wrap(y_pred, y_true, eps=0.0)
    expected = -np.sum(y_true * np.log(softmax(y_pred)))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example 11: test_binary_listnet_stable_for_very_small_prediction
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_binary_listnet_stable_for_very_small_prediction():
    y_pred = [0.5, -1e30]
    y_true = [1.0, 0.0]

    result = binary_listNet_wrap(y_pred, y_true)
    expected = -np.sum(y_true * np.log(softmax(y_pred) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected, abs=1e-9)
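A brief sketch of why the epsilon matters in this test (illustrative, calling scipy directly; the concrete value 1e-10 stands in for DEFAULT_EPS, whose actual value is defined elsewhere in the library): softmax of an extremely negative logit underflows to exactly zero, so np.log alone would produce -inf.

import numpy as np
from scipy.special import softmax

probs = softmax([0.5, -1e30])
print(probs)                    # [1. 0.] -- the second entry underflows to 0
print(np.log(probs + 1e-10))    # finite, thanks to the epsilon term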
Example 12: test_binary_listnet_ignores_padded_value
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_binary_listnet_ignores_padded_value():
    y_pred = [0.5, 0.2, 0.5]
    y_true = [1.0, 0.0, PADDED_Y_VALUE]

    result = binary_listNet_wrap(y_pred, y_true)
    # The padded slot is excluded from the expected loss.
    expected = -np.sum(y_true[:2] * np.log(softmax(y_pred[:2]) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example 13: test_listnet_simple
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_listnet_simple():
    y_pred = [0.5, 0.2]
    y_true = [1.0, 0.0]

    result = listNet_wrap(y_pred, y_true, eps=0.0)
    # Plain ListNet applies softmax to the targets as well.
    expected = -np.sum(softmax(y_true) * np.log(softmax(y_pred)))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example 14: test_listnet_stable_for_very_small_prediction
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def test_listnet_stable_for_very_small_prediction():
    y_pred = [0.5, -1e30]
    y_true = [1.0, 0.0]

    result = listNet_wrap(y_pred, y_true)
    expected = -np.sum(softmax(y_true) * np.log(softmax(y_pred) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example 15: _sample_characteristic
# Required import: from scipy import special [as alias]
# Or: from scipy.special import softmax [as alias]
def _sample_characteristic(states_df, options, level_dict, use_keys):
    """Sample a characteristic of individuals.

    The function is used to sample the values of one state space characteristic, say
    experience. The keys of ``level_dict`` are the possible starting values of
    experience. The values of the dictionary are :class:`pandas.Series` whose index
    contains covariate names and whose values are the parameter values.

    ``states_df`` is used to generate all possible covariates with the existing
    information.

    For each level, the dot product of parameters and covariates determines the value
    ``z``. The softmax function converts the level-specific ``z``-values to
    probabilities, which are then used to sample the characteristic.

    Parameters
    ----------
    states_df : pandas.DataFrame
        Contains the state of each individual.
    options : dict
        Options of the model.
    level_dict : dict
        A dictionary where the keys are the values distributed according to the
        probability mass function. The values are a :class:`pandas.Series` with
        covariate names as the index and parameter values.
    use_keys : bool
        Flag indicating whether the keys of the level dict are used as variable
        values, or whether numeric codes are used instead. For example, assign
        numbers to choices.

    Returns
    -------
    characteristic : numpy.ndarray
        Array with shape (n_individuals,) containing sampled values.

    """
    # Generate covariates.
    all_data = compute_covariates(
        states_df, options["covariates_all"], check_nans=True, raise_errors=False
    )

    # Calculate the dot product of covariates and parameters for each level.
    z = ()
    for level in level_dict:
        labels = level_dict[level].index
        x_beta = np.dot(
            all_data[labels].to_numpy(dtype=COVARIATES_DOT_PRODUCT_DTYPE),
            level_dict[level],
        )
        z += (x_beta,)

    # Calculate probabilities with the softmax function.
    probabilities = softmax(np.column_stack(z), axis=1)

    np.random.seed(next(options["simulation_seed_iteration"]))

    choices = level_dict if use_keys else len(level_dict)
    characteristic = _random_choice(choices, probabilities)

    return characteristic
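The core softmax step above is easy to verify in isolation (the toy scores below are illustrative): stacking one x_beta column per level and applying a row-wise softmax yields one probability distribution per individual.

import numpy as np
from scipy.special import softmax

# One score column per level, three individuals.
z = (np.array([0.2, 1.5, -0.3]), np.array([1.0, 0.5, 0.1]))
probabilities = softmax(np.column_stack(z), axis=1)
assert np.allclose(probabilities.sum(axis=1), 1.0)    # one distribution per row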