This article collects typical usage examples of Python's `numpy.log1p` method. If you are wondering what exactly `numpy.log1p` does, how to call it, or how it is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the `numpy` module this method belongs to.

The sections below show 15 code examples of `numpy.log1p`, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
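Before the examples, a quick reminder of why `log1p` exists at all: `numpy.log1p(x)` computes `log(1 + x)` accurately even when `x` is tiny, whereas forming `1 + x` first throws away precision. A minimal illustration:

import numpy as np

x = 1e-12
print(np.log(1 + x))   # ~1.000089e-12 -- the 1 + x step already lost precision
print(np.log1p(x))     # ~1.0e-12 -- accurate to machine precision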
Example 1: generate_data
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def generate_data():
    generated = False

    def _generate_data():
        if not generated:
            assert N_CAT_FEATURE > 1
            assert N_NUM_FEATURE > 3

            np.random.seed(RANDOM_SEED)
            X_num = np.random.normal(size=(N_OBS, N_NUM_FEATURE))
            X_cat = np.random.randint(0, N_CATEGORY, size=(N_OBS, N_CAT_FEATURE))
            df = pd.DataFrame(
                np.hstack((X_num, X_cat)),
                columns=['num_{}'.format(x) for x in range(N_NUM_FEATURE)]
                        + ['cat_{}'.format(x) for x in range(N_CAT_FEATURE)]
            )
            df[TARGET_COL] = (1 + X_num[:, 0] * X_num[:, 1]
                              - np.log1p(np.exp(X_num[:, 1] + X_num[:, 2]))
                              + 10 * (X_cat[:, 0] == 0).astype(int)
                              + np.random.normal(scale=.01, size=N_OBS))
            return df

    yield _generate_data
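Example 1 depends on module-level constants from its test suite. Below is a minimal self-contained sketch of the same idea; the constant values and the `target` column name are placeholder assumptions, not the project's actual configuration. The `np.log1p(np.exp(...))` term is simply a softplus contribution to the synthetic target.

import numpy as np
import pandas as pd

# Placeholder constants -- the original project defines its own values.
N_OBS, N_NUM_FEATURE, N_CAT_FEATURE = 1000, 5, 3
N_CATEGORY, RANDOM_SEED, TARGET_COL = 4, 42, 'target'

np.random.seed(RANDOM_SEED)
X_num = np.random.normal(size=(N_OBS, N_NUM_FEATURE))
X_cat = np.random.randint(0, N_CATEGORY, size=(N_OBS, N_CAT_FEATURE))

df = pd.DataFrame(
    np.hstack((X_num, X_cat)),
    columns=['num_{}'.format(x) for x in range(N_NUM_FEATURE)]
            + ['cat_{}'.format(x) for x in range(N_CAT_FEATURE)]
)
# log1p(exp(...)) is the softplus term of the synthetic target.
df[TARGET_COL] = (1 + X_num[:, 0] * X_num[:, 1]
                  - np.log1p(np.exp(X_num[:, 1] + X_num[:, 2]))
                  + 10 * (X_cat[:, 0] == 0).astype(int)
                  + np.random.normal(scale=.01, size=N_OBS))
print(df.head())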
Example 2: load_names
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def load_names(data_names, norm=True, log1p=False, verbose=True):
    # Load datasets.
    datasets = []
    genes_list = []
    n_cells = 0
    for name in data_names:
        X_i, genes_i = load_data(name)
        if norm:
            X_i = normalize(X_i, axis=1)
        if log1p:
            X_i = np.log1p(X_i)
        X_i = csr_matrix(X_i)

        datasets.append(X_i)
        genes_list.append(genes_i)
        n_cells += X_i.shape[0]
        if verbose:
            print('Loaded {} with {} genes and {} cells'.
                  format(name, X_i.shape[1], X_i.shape[0]))
    if verbose:
        print('Found {} cells among all datasets'
              .format(n_cells))

    return datasets, genes_list, n_cells
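A minimal sketch of the same normalize-then-log1p preprocessing on a toy count matrix, assuming `normalize` is scikit-learn's row normalization and `csr_matrix` comes from SciPy (which is what the snippet above appears to use):

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize

counts = np.array([[0., 3., 7.],
                   [2., 0., 5.]])
X = normalize(counts, axis=1)   # L2-normalize each cell (row)
X = np.log1p(X)                 # log(1 + x); zeros stay zero
X = csr_matrix(X)
print(X.toarray())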
Example 3: log_loss_value
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def log_loss_value(Z, weights, total_weights, rho):
    """
    computes the value and slope of the logistic loss in a numerically stable way
    supports sample non-negative weights for each example in the training data
    see http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z               numpy.array containing training data with shape = (n_rows, n_cols)
    rho             numpy.array of coefficients with shape = (n_cols,)
    total_weights   numpy.sum(total_weights) (only included to reduce computation)
    weights         numpy.array of sample weights with shape (n_rows,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log(1 + exp(-Z*rho)))
    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.dot(weights) / total_weights

    return loss_value
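The split on `pos_idx` is what keeps this numerically stable: for a large negative score, `np.exp(-score)` overflows, while the rewritten form `-score + log1p(exp(score))` stays finite. A quick standalone check of the two formulations (not part of the original module):

import numpy as np

def stable_log_loss_terms(scores):
    out = np.empty_like(scores, dtype=float)
    pos = scores > 0
    out[pos] = np.log1p(np.exp(-scores[pos]))
    out[~pos] = -scores[~pos] + np.log1p(np.exp(scores[~pos]))
    return out

scores = np.array([-1000.0, -1.0, 0.5, 1000.0])
with np.errstate(over='ignore'):
    naive = np.log1p(np.exp(-scores))    # exp(1000) overflows to inf
print(naive)                             # [inf 1.3133 0.4741 0.]
print(stable_log_loss_terms(scores))     # [1000. 1.3133 0.4741 0.]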
Example 4: log_loss_value
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def log_loss_value(Z, rho):
    """
    computes the value and slope of the logistic loss in a numerically stable way
    see also: http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z           numpy.array containing training data with shape = (n_rows, n_cols)
    rho         numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log(1 + exp(-Z*rho)))
    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.mean()

    return loss_value
Example 5: test_branch_cuts
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def test_branch_cuts(self):
    # check branch cuts and continuity on them
    _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
    _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
    _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
    _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
    _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)

    _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
    _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
    _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
    _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
    _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
    _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)

    # check against bogus branch cuts: assert continuity between quadrants
    _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
    _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
    _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
    _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
    _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
    _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
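The probe point `-1.5` for `np.log1p` reflects that its branch cut lies on the real axis to the left of `-1` (where `1 + z` becomes negative), whereas the other log functions are probed at `-0.5` because their branch point sits at `0`. A small illustration of the discontinuity across the `log1p` cut (a hypothetical check, not taken from the test suite):

import numpy as np

eps = 1e-12
above = np.log1p(complex(-1.5, +eps))   # just above the cut
below = np.log1p(complex(-1.5, -eps))   # just below the cut
print(above)   # approx -0.693 + pi*1j
print(below)   # approx -0.693 - pi*1j -- the imaginary part flips sign across the cut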
Example 6: test_branch_cuts_complex64
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def test_branch_cuts_complex64(self):
    # check branch cuts and continuity on them
    _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
    _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
    _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
    _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
    _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)

    _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
    _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
    _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
    _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
    _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
    _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)

    # check against bogus branch cuts: assert continuity between quadrants
    _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
    _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
    _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
    _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
    _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
    _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
Example 7: test_branch_cuts
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def test_branch_cuts(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
    yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True

    yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True
    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
    yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
    yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
    yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
Example 8: test_branch_cuts_complex64
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def test_branch_cuts_complex64(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64

    yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
    yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
Example 9: c_code
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def c_code(self, node, name, inp, out, sub):
    x, = inp
    z, = out
    # These constants were obtained by looking at the output of
    # python commands like:
    #  for i in xrange(750):
    #      print i, repr(numpy.log1p(numpy.exp(theano._asarray([i,-i], dtype=dt))))
    # the boundary checks prevent us from generating inf
    # float16 limits: -17.0, 6.0
    # We use the float32 limits for float16 for now as the
    # computation will happen in float32 anyway.
    if (node.inputs[0].type == scalar.float32 or
            node.inputs[0].type == scalar.float16):
        return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
    elif node.inputs[0].type == scalar.float64:
        return """%(z)s = %(x)s < -745.0 ? 0.0 : %(x)s > 16.0 ? %(x)s : log1p(exp(%(x)s));""" % locals()
    else:
        raise NotImplementedError('only floatingpoint is implemented')
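The generated C is Theano's stable softplus: return 0 when `exp(x)` would underflow, return `x` when `log1p(exp(x))` is indistinguishable from `x`, and use `log1p(exp(x))` in between. A NumPy sketch of the same piecewise rule with the float64 thresholds quoted above (an illustration, not Theano's own Python path):

import numpy as np

def softplus64(x):
    x = np.asarray(x, dtype=np.float64)
    safe = np.clip(x, -745.0, 16.0)               # keep exp() inside the float64 range
    return np.where(x > 16.0, x,                  # large x: softplus(x) ~= x
                    np.where(x < -745.0, 0.0,     # exp(x) underflows, so the result is 0
                             np.log1p(np.exp(safe))))

print(softplus64(np.array([-800.0, -5.0, 0.0, 5.0, 50.0])))
# roughly [0.0, 0.0067, 0.6931, 5.0067, 50.0]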
Example 10: tf_log
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def tf_log(dtm, log_fn=np.log1p):
    """
    Transform raw count document-term-matrix `dtm` to log-normalized term frequency matrix ``log_fn(dtm)``.

    :param dtm: (sparse) document-term-matrix of size NxM (N docs, M is vocab size) with raw term counts
    :param log_fn: log function to use; default is NumPy's :func:`numpy.log1p`, which calculates ``log(1 + x)``
    :return: (sparse) log-normalized term frequency matrix of size NxM
    """
    if dtm.ndim != 2:
        raise ValueError('`dtm` must be a 2D array/matrix')

    if log_fn is np.log1p:
        if issparse(dtm):
            return dtm.log1p()
        else:
            return log_fn(dtm)
    else:
        if issparse(dtm):
            dtm = dtm.toarray()

        return log_fn(dtm)
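A hypothetical call to the function above on a small sparse matrix; `issparse` is assumed to come from `scipy.sparse`, as in tmtoolkit's module:

import numpy as np
from scipy.sparse import csr_matrix, issparse

dtm = csr_matrix(np.array([[0., 2., 5.],
                           [3., 0., 1.]]))
tf = tf_log(dtm)         # sparse input with the default log_fn takes the .log1p() fast path
print(tf.toarray())      # entries become log(1 + count); zeros stay zero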
Example 11: idf
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def idf(dtm, smooth_log=1, smooth_df=1):
    """
    Calculate inverse document frequency (idf) vector from raw count document-term-matrix `dtm` with formula
    ``log(smooth_log + N / (smooth_df + df))``, where ``N`` is the number of documents, ``df`` is the document
    frequency (see function :func:`~tmtoolkit.bow.bow_stats.doc_frequencies`), and `smooth_log` and `smooth_df` are
    smoothing constants. With default arguments, the formula is thus ``log(1 + N/(1+df))``.

    Note that this may introduce NaN values due to division by zero when a document is of length 0.

    :param dtm: (sparse) document-term-matrix of size NxM (N docs, M is vocab size) with raw term counts
    :param smooth_log: smoothing constant inside log()
    :param smooth_df: smoothing constant to add to document frequency
    :return: NumPy array of size M (vocab size) with inverse document frequency for each term in the vocab
    """
    if dtm.ndim != 2 or 0 in dtm.shape:
        raise ValueError('`dtm` must be a non-empty 2D array/matrix')

    n_docs = dtm.shape[0]
    df = doc_frequencies(dtm)
    x = n_docs / (smooth_df + df)

    if smooth_log == 1:      # log1p is faster than the equivalent log(1 + x)
        return np.log1p(x)
    else:
        return np.log(smooth_log + x)
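A worked example of the default formula without the `doc_frequencies` helper (document frequency here is simply the number of documents in which a term has a non-zero entry):

import numpy as np

dtm = np.array([[1, 0, 3],
                [2, 0, 0]])
n_docs = dtm.shape[0]
df = (dtm > 0).sum(axis=0)              # document frequencies: [2, 0, 1]
print(np.log1p(n_docs / (1 + df)))      # log(1 + N/(1+df)) -> approx [0.51, 1.10, 0.69]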
Example 12: idf_probabilistic
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def idf_probabilistic(dtm, smooth=1):
    """
    Calculate probabilistic inverse document frequency (idf) vector from raw count document-term-matrix `dtm` with
    formula ``log(smooth + (N - df) / df)``, where ``N`` is the number of documents and ``df`` is the document
    frequency (see function :func:`~tmtoolkit.bow.bow_stats.doc_frequencies`).

    :param dtm: (sparse) document-term-matrix of size NxM (N docs, M is vocab size) with raw term counts
    :param smooth: smoothing constant (setting this to 0 can lead to -inf results)
    :return: NumPy array of size M (vocab size) with probabilistic inverse document frequency for each term in the vocab
    """
    if dtm.ndim != 2 or 0 in dtm.shape:
        raise ValueError('`dtm` must be a non-empty 2D array/matrix')

    n_docs = dtm.shape[0]
    df = doc_frequencies(dtm)
    x = (n_docs - df) / df

    if smooth == 1:      # log1p is faster than the equivalent log(1 + x)
        return np.log1p(x)
    else:
        return np.log(smooth + x)
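A side note on the default path: with `smooth=1` the formula simplifies algebraically, since `log(1 + (N - df)/df) = log((df + N - df)/df) = log(N/df)`, i.e. the standard idf without denominator smoothing, just computed through the faster `log1p` call.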
Example 13: bm25_weight
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def bm25_weight(X, K1=100, B=0.8):
    """ Weighs each row of a sparse matrix X by BM25 weighting """
    # calculate idf per term (user)
    X = coo_matrix(X)
    N = float(X.shape[0])
    idf = log(N) - log1p(bincount(X.col))

    # calculate length_norm per document (artist)
    row_sums = numpy.ravel(X.sum(axis=1))
    average_length = row_sums.mean()
    length_norm = (1.0 - B) + B * row_sums / average_length

    # weight matrix rows by bm25
    X.data = X.data * (K1 + 1.0) / (K1 * length_norm[X.row] + X.data) * idf[X.col]
    return X
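A usage sketch for the function above; the surrounding module appears to rely on `from numpy import bincount, log, log1p`, `import numpy`, and `from scipy.sparse import coo_matrix`, so those assumed imports are repeated here:

import numpy
from numpy import bincount, log, log1p
from scipy.sparse import coo_matrix

# toy interaction counts: 4 documents/artists (rows) x 3 terms/users (columns)
X = coo_matrix(numpy.array([[1.0, 0.0, 2.0],
                            [4.0, 2.0, 0.0],
                            [0.0, 3.0, 0.0],
                            [2.0, 0.0, 0.0]]))
weighted = bm25_weight(X, K1=100, B=0.8)
print(weighted.toarray())   # rarer columns (higher idf) receive larger weights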
Example 14: mu_law_encode
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def mu_law_encode(audio):
    '''Quantizes waveform amplitudes.
    Mostly adapted from
    https://github.com/ibab/tensorflow-wavenet/blob/master/wavenet/ops.py#L64-L75

    Args:
      audio: Raw wave signal. float32.
    '''
    mu = float(hp.Q - 1)

    # Perform mu-law companding transformation (ITU-T, 1988).
    # Minimum operation is here to deal with rare large amplitudes caused
    # by resampling.
    magnitude = np.log1p(mu * np.abs(audio)) / np.log1p(mu)
    signal = np.sign(audio) * magnitude

    # Quantize signal to the specified number of levels.
    return ((signal + 1) / 2 * mu + 0.5).astype(np.int32)
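`hp.Q` (the number of quantization levels) is not defined in the snippet; a round-trip sketch with an assumed 256 levels is shown below. The decoder is the standard mu-law inverse (`expm1` undoing `log1p`), not necessarily the exact one used in the original project.

import numpy as np

Q = 256                                     # assumed number of levels (hp.Q)
mu = float(Q - 1)

audio = np.array([-0.5, -0.01, 0.0, 0.01, 0.5], dtype=np.float32)

# companding + quantization, same formula as above
magnitude = np.log1p(mu * np.abs(audio)) / np.log1p(mu)
codes = ((np.sign(audio) * magnitude + 1) / 2 * mu + 0.5).astype(np.int32)
print(codes)                                # roughly [ 16  98 128 157 239]

# standard inverse: map codes back to [-1, 1], then expand with expm1
signal = 2 * (codes / mu) - 1
decoded = np.sign(signal) * np.expm1(np.abs(signal) * np.log1p(mu)) / mu
print(decoded)                              # close to the original amplitudes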
Example 15: test_branch_cuts
# Required module: import numpy [as alias]
# Or: from numpy import log1p [as alias]
def test_branch_cuts(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
    yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
    yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1

    yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
    yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
    yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
    yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
    yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
    yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
    yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
    yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
    yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
    yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
    yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1