This article collects typical usage examples of the Python method scipy.special.logit. If you are wondering how exactly to use special.logit, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples from the scipy.special module, where the method is defined.
The following shows 15 code examples of special.logit, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
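Before diving in, a one-line refresher: logit(p) = log(p / (1 - p)) maps a probability in (0, 1) onto the whole real line, and scipy.special.expit is its inverse. A minimal round-trip sketch:

import numpy as np
from scipy.special import logit, expit

p = np.array([0.1, 0.5, 0.9])
x = logit(p)                      # [-2.197  0.     2.197]
print(np.allclose(expit(x), p))   # True: expit inverts logit
print(logit(0.0), logit(1.0))     # -inf inf (the endpoints of the domain)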
Example 1: get_policy_fn

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def get_policy_fn(request, ffn_model):
  """Returns a policy class based on the InferenceRequest proto."""
  if request.movement_policy_name:
    movement_policy_class = globals().get(request.movement_policy_name, None)
    if movement_policy_class is None:
      movement_policy_class = import_symbol(request.movement_policy_name)
  else:  # Default / fallback.
    movement_policy_class = FaceMaxMovementPolicy

  if request.movement_policy_args:
    kwargs = json.loads(request.movement_policy_args)
  else:
    kwargs = {}
  if 'deltas' not in kwargs:
    kwargs['deltas'] = ffn_model.deltas[::-1]
  if 'score_threshold' not in kwargs:
    # The proto stores a probability; movement policies compare against
    # seed values in logit space, so convert the threshold accordingly.
    kwargs['score_threshold'] = logit(request.inference_options.move_threshold)

  return lambda canvas: movement_policy_class(canvas, **kwargs)
Example 2: fixed_offsets

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def fixed_offsets(model, seed, fov_shifts=None):
  """Generates offsets based on a fixed list."""
  for off in itertools.chain([(0, 0, 0)], fov_shifts):
    if model.dim == 3:
      # `off` is (x, y, z); the seed array is indexed (batch, z, y, x, channel).
      is_valid_move = seed[:,
                           seed.shape[1] // 2 + off[2],
                           seed.shape[2] // 2 + off[1],
                           seed.shape[3] // 2 + off[0],
                           0] >= logit(FLAGS.threshold)
    else:
      is_valid_move = seed[:,
                           seed.shape[1] // 2 + off[1],
                           seed.shape[2] // 2 + off[0],
                           0] >= logit(FLAGS.threshold)

    if not is_valid_move:
      continue

    yield off
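In the FFN code above, the seed array holds values in logit space, so comparing a raw seed value against logit(FLAGS.threshold) is equivalent to comparing the probability expit(seed) against the threshold itself, without transforming the whole array. A small check of the equivalence (the values are illustrative):

import numpy as np
from scipy.special import logit, expit

threshold = 0.9
seed_vals = np.array([-1.0, 2.0, 2.5])   # hypothetical seed values in logit space
print(seed_vals >= logit(threshold))     # [False False  True]
print(expit(seed_vals) >= threshold)     # identical result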
Example 3: fit_treatment_model

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def fit_treatment_model(df, term_counts):
    indices = df.post_index.values
    tc = term_counts[indices, :]
    tc = tc.toarray()
    # Map the propensity scores to logit space before using them as a feature.
    f_z = logit(df.treatment_probability.values)
    print(f_z.shape, tc.shape)
    features = np.column_stack((f_z, tc))
    labels = df.treatment.values
    true_model = LogisticRegression(solver='liblinear')
    true_model.fit(features, labels)
    coeffs = np.array(true_model.coef_).flatten()[1:]
    print(coeffs.mean(), coeffs.std())

    # Permutation baseline: shuffle the term counts and refit.
    np.random.shuffle(tc)
    features = np.column_stack((f_z, tc))
    permuted = LogisticRegression(solver='liblinear')
    permuted.fit(features, labels)
    permuted_coeffs = np.array(permuted.coef_).flatten()[1:]
    print(permuted_coeffs.mean(), permuted_coeffs.std())
    # $E_{Z|W=1}[\log P(T=1 \mid W=1, Z) / P(T=1 \mid Z)]$
Example 4: _perturbed_model

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def _perturbed_model(q_t0, q_t1, g, t, q, eps):
    # Helper function for psi_tmle.
    h1 = t / q - ((1 - t) * g) / (q * (1 - g))
    full_q = (1.0 - t) * q_t0 + t * q_t1
    perturbed_q = full_q - eps * h1

    def q1(t_cf, epsilon):
        h_cf = t_cf * (1.0 / g) - (1.0 - t_cf) / (1.0 - g)
        full_q = (1.0 - t_cf) * q_t0 + t_cf * q_t1  # predictions from unperturbed model
        return full_q - epsilon * h_cf

    psi_init = np.mean(t * (q1(np.ones_like(t), eps) - q1(np.zeros_like(t), eps))) / q
    h2 = (q_t1 - q_t0 - psi_init) / q
    # Perturb the propensity score in logit space so the result stays in (0, 1).
    perturbed_g = expit(logit(g) - eps * h2)

    return perturbed_q, perturbed_g
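The fluctuation of g above goes through logit space because an additive update g - eps * h2 can leave (0, 1), while expit(logit(g) - eps * h2) cannot. A self-contained check with made-up values:

import numpy as np
from scipy.special import logit, expit

g = np.array([0.05, 0.5, 0.95])    # hypothetical propensity scores
h2 = np.array([3.0, -1.0, 3.0])    # hypothetical clever-covariate values
eps = 0.5

print(g - eps * h2)                # [-1.45  1.   -0.55] -- not valid probabilities
print(expit(logit(g) - eps * h2))  # stays strictly inside (0, 1)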
Example 5: update_dag_logits

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def update_dag_logits(self, gradient_dicts, weight_decay, max_grad=0.1):
    """
    Updates the probabilities of each path being selected using the given gradients.
    """
    # Note: the loop variable shadows scipy's `logit` inside this expression.
    dag_probs = tuple(expit(logit) for logit in self.dags_logits)
    current_average_dag_probs = tuple(np.mean(prob) for prob in dag_probs)

    for i, key in enumerate(self.all_connections):
        for grad_dict, current_average_dag_prob, dag_logits in zip(
                gradient_dicts, current_average_dag_probs, self.dags_logits):
            if key in grad_dict:
                grad = grad_dict[key] - weight_decay * (
                    current_average_dag_prob - self.target_ave_prob)  # *expit(dag_logits[i])
                # Chain rule: gradient w.r.t. the logit, clipped for stability.
                deriv = sigmoid_derivitive(dag_logits[i])
                logit_grad = grad * deriv
                dag_logits[i] += np.clip(logit_grad, -max_grad, max_grad)
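The helper sigmoid_derivitive is not defined in this snippet. Judging by the name (spelling kept to match the call site), it presumably applies the standard identity sigma'(x) = sigma(x) * (1 - sigma(x)); a sketch under that assumption:

from scipy.special import expit

def sigmoid_derivitive(x):
    # Derivative of the sigmoid evaluated at logit x: s * (1 - s).
    s = expit(x)
    return s * (1.0 - s)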
Example 6: __init__

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def __init__(self, warp="linear", values=None, range_=None):
    """Build Real space class.

    Parameters
    ----------
    warp : {'linear', 'log', 'logit', 'bilog'}
        Which warping type to apply to the space. The warping is applied in
        the original space. That is, in a space with ``warp='log'`` and
        ``range_=(2.0, 10.0)``, the value 2.0 warps to ``log(2)``, not
        ``-inf`` as in some other frameworks.
    values : None or list(float)
        Possible values for space to take. Values must be of `float` type.
    range_ : None or :class:`numpy:numpy.ndarray` of shape (2,)
        Array with (lower, upper) pair with limits of space. Note that one
        must specify `values` or `range_`, but not both. `range_` must be
        composed of `float`.
    """
    assert warp is not None, "warp/space not specified for real"
    Space.__init__(self, np.float_, identity, warp, values, range_)
Example 7: _mh_sample

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def _mh_sample(d_score, init_picked=0, start=1, random=np.random):
    '''Same as `mh_sample` but more obviously correct.
    '''
    assert np.ndim(d_score) == 1 and len(d_score) > 0
    assert 0 <= np.min(d_score) and np.max(d_score) <= 1
    assert init_picked < start

    d_last = np.float_(d_score[init_picked])
    picked_round = init_picked
    for ii, d_new in enumerate(d_score[start:], start):
        d_new = np.float_(d_new)
        # Note: we might want to move to log or logit scale for disc probs if
        # this starts to create numerics issues.
        alpha = accept_prob_MH_disc(d_last, d_new)
        assert 0 <= alpha and alpha <= 1
        if random.rand() <= alpha:
            d_last = d_new
            picked_round = ii
    return picked_round
Example 8: update

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def update(self, prob_map, position):
  """Updates the state after an FFN inference call.

  Args:
    prob_map: object probability map returned by the FFN (in logit space)
    position: position of the center of the FoV where inference was performed
        (z, y, x)
  """
  raise NotImplementedError()
Example 9: __init__

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def __init__(self, eval_shape):
  self.eval_labels = tf.placeholder(
      tf.float32, [1] + eval_shape + [1], name='eval_labels')
  self.eval_preds = tf.placeholder(
      tf.float32, [1] + eval_shape + [1], name='eval_preds')
  self.eval_loss = tf.reduce_mean(
      tf.nn.sigmoid_cross_entropy_with_logits(
          logits=self.eval_preds, labels=self.eval_labels))
  self.reset()
  # Voxels with a seed value above logit(0.9) count as part of the object.
  self.eval_threshold = logit(0.9)
  self.sess = None
  self._eval_shape = eval_shape
Example 10: max_pred_offsets

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def max_pred_offsets(model, seed):
  """Generates offsets with the policy used for inference."""
  # Always start at the center.
  queue = deque([(0, 0, 0)])
  done = set()

  train_image_radius = train_image_size(model) // 2
  input_image_radius = np.array(model.input_image_size) // 2

  while queue:
    offset = queue.popleft()

    # Drop any offsets that would take us beyond the image fragment we
    # loaded for training.
    if np.any(np.abs(np.array(offset)) + input_image_radius >
              train_image_radius):
      continue

    # Ignore locations that were visited previously.
    quantized_offset = (
        offset[0] // max(model.deltas[0], 1),
        offset[1] // max(model.deltas[1], 1),
        offset[2] // max(model.deltas[2], 1))

    if quantized_offset in done:
      continue

    done.add(quantized_offset)

    yield offset

    # Look for new offsets within the updated seed.
    curr_seed = mask.crop_and_pad(seed, offset, model.pred_mask_size[::-1])
    todos = sorted(
        movement.get_scored_move_offsets(
            model.deltas[::-1],
            curr_seed[0, ..., 0],
            threshold=logit(FLAGS.threshold)), reverse=True)
    queue.extend((x[2] + offset[0],
                  x[1] + offset[1],
                  x[0] + offset[2]) for _, x in todos)
Example 11: _ppf

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def _ppf(self, q):
    # The quantile function of the standard logistic distribution is the logit.
    return sc.logit(q)
Example 12: _isf

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def _isf(self, q):
    # By symmetry, the inverse survival function is the negated logit.
    return -sc.logit(q)
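Both one-liners come from the logistic distribution in scipy.stats: the CDF of the standard logistic is expit, so its quantile function (_ppf) is logit, and by symmetry the inverse survival function is -logit. A quick consistency check:

import numpy as np
from scipy.special import logit
from scipy.stats import logistic

q = np.array([0.1, 0.25, 0.5, 0.9])
print(np.allclose(logistic.ppf(q), logit(q)))   # True
print(np.allclose(logistic.isf(q), -logit(q)))  # True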
Example 13: check_logit_out

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def check_logit_out(self, dtype, expected):
    a = np.linspace(0, 1, 10)
    a = np.array(a, dtype=dtype)
    # logit(0) = -inf and logit(1) = inf, so silence the divide warning.
    olderr = np.seterr(divide='ignore')
    try:
        actual = logit(a)
    finally:
        np.seterr(**olderr)

    if np.__version__ >= '1.6':
        assert_almost_equal(actual, expected)
    else:
        assert_almost_equal(actual[1:-1], expected[1:-1])

    assert_equal(actual.dtype, np.dtype(dtype))
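The dtype assertion at the end reflects that logit is a ufunc with dedicated float32 and float64 loops, so the output dtype matches the input:

import numpy as np
from scipy.special import logit

a = np.array([0.25, 0.5], dtype=np.float32)
print(logit(a).dtype)                      # float32
print(logit(a.astype(np.float64)).dtype)   # float64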
Example 14: test_nan

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def test_nan(self):
    # logit is only defined on [0, 1]; values outside that interval return nan.
    expected = np.array([np.nan] * 4)
    olderr = np.seterr(invalid='ignore')
    try:
        actual = logit(np.array([-3., -2., 2., 3.]))
    finally:
        np.seterr(**olderr)

    assert_equal(expected, actual)
Example 15: main

# Required import: from scipy import special [as alias]
# Or: from scipy.special import logit [as alias]
def main():
    predict_df = get_prediction_file()
    term_counts = load_terms(dataset)
    print(predict_df.shape, term_counts.shape)
    if dataset == 'reddit':
        imbalanced_terms = filter_imbalanced_terms(predict_df, term_counts)
        term_counts = term_counts[:, imbalanced_terms]
        print(term_counts.shape)

    n_bootstraps = 10
    n_w = term_counts.shape[1]
    adjusted = np.zeros((n_bootstraps, n_w))
    permuted = np.zeros((n_bootstraps, n_w))
    unadjusted = np.zeros((n_bootstraps, n_w))
    for i in range(n_bootstraps):
        sample = assign_split(predict_df, num_splits=2)
        sample = sample[sample.split == 0]
        indices = sample.post_index.values
        labels = sample.treatment.values
        words = term_counts[indices, :]
        # Use the logit of the treatment probability as the propensity feature.
        propensity_score = logit(sample.treatment_probability.values)
        all_features = np.column_stack((propensity_score, words))
        unadjusted[i, :] = fit_treatment(words, labels, coeff_offset=0)
        adjusted[i, :] = fit_treatment(all_features, labels)
        # Permutation baseline: shuffle the word counts and refit.
        np.random.shuffle(words)
        permuted_features = np.column_stack((propensity_score, words))
        permuted[i, :] = fit_treatment(permuted_features, labels)

    plot_density(unadjusted, adjusted, permuted)