This article collects typical usage examples of Python's numpy.nan_to_num method. If you are wondering what numpy.nan_to_num does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples from the numpy module itself.

The following presents 15 code examples of numpy.nan_to_num, sorted by popularity by default.
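Before the project-level examples, here is a minimal standalone sketch of what np.nan_to_num does with its default settings (NaN becomes 0.0, infinities become the largest/smallest finite floats); the explicit nan/posinf/neginf keywords shown in the second call exist in NumPy 1.17 and later:

import numpy as np

a = np.array([1.0, np.nan, np.inf, -np.inf])
print(np.nan_to_num(a))
# NaN -> 0.0, +inf/-inf -> largest/smallest representable finite float

# NumPy >= 1.17 also accepts explicit replacement values:
print(np.nan_to_num(a, nan=0.0, posinf=1e6, neginf=-1e6))
# [ 1.e+00  0.e+00  1.e+06 -1.e+06]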
Example 1: observe
# Required import: import numpy as np
# Or: from numpy import nan_to_num
# Also requires: import pandas as pd
def observe(self) -> np.ndarray:
    """Returns the rows to be observed by the agent."""
    rows = self.rows.copy()

    if len(rows) < self.window_size:
        size = self.window_size - len(rows)
        padding = np.zeros((size, rows.shape[1]))
        padding = pd.DataFrame(padding, columns=self.rows.columns)
        rows = pd.concat([padding, rows], ignore_index=True, sort=False)

    if isinstance(rows, pd.DataFrame):
        rows = rows.fillna(0, axis=1)
        rows = rows.values

    rows = np.nan_to_num(rows)

    return rows
Example 2: tfIdf
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def tfIdf(dtm):
    nDoc = dtm.shape[0]
    nTerm = dtm.shape[1]
    dtmNorm = dtm / dtm.sum(axis=1, keepdims=True)  # normalize tf to unit weight: tf / per-document word count
    dtmNorm = np.nan_to_num(dtmNorm)  # documents with zero word count divide to NaN; reset them to 0
    tfIdfMat = np.zeros((nDoc, nTerm))

    for j in range(nTerm):
        tfVect = dtmNorm[:, j]
        nExist = np.sum(tfVect > 0.0)  # number of documents that contain term j
        if nExist > 0:
            idf = np.log(nDoc / nExist) / np.log(2)  # log2()
        else:
            idf = 0.0
        tfIdfMat[:, j] = tfVect * idf

    return tfIdfMat
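A minimal usage sketch for the tfIdf helper above, run on a tiny hypothetical document-term matrix (the counts are made up for illustration):

import numpy as np

# 3 documents x 4 terms; entries are raw term counts
dtm = np.array([
    [2.0, 0.0, 1.0, 0.0],
    [0.0, 3.0, 0.0, 1.0],
    [1.0, 1.0, 0.0, 0.0],
])
weights = tfIdf(dtm)   # tfIdf as defined above
print(weights.shape)   # (3, 4): one tf-idf weight per document/term pair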
Example 3: pagerank
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def pagerank(nDim, adjMat, d, K):
    '''
    Args:
        d: damping factor
        K: number of iterations
    '''
    P = np.ones((nDim, 1)) * (1 / nDim)
    # row-normalize the adjacency matrix; nan_to_num zeroes out rows with no outgoing links
    B = adjMat / adjMat.sum(axis=1, keepdims=True)
    B = np.nan_to_num(B)
    U = np.ones((nDim, nDim)) * (1 / nDim)
    M = d * B + (1 - d) * U
    for i in range(K):
        P = np.dot(M.T, P)
    return P
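A minimal usage sketch for the pagerank function above, on a hypothetical 3-node link graph. The damping factor 0.85 and 50 iterations are conventional choices for illustration, not values taken from the original source:

import numpy as np

adj = np.array([
    [0.0, 1.0, 1.0],   # node 0 links to nodes 1 and 2
    [1.0, 0.0, 0.0],   # node 1 links to node 0
    [0.0, 1.0, 0.0],   # node 2 links to node 1
])
scores = pagerank(nDim=3, adjMat=adj, d=0.85, K=50)
print(scores.ravel())  # one PageRank score per node, summing to roughly 1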
Example 4: lf_overlaps
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def lf_overlaps(L, normalize_by_coverage=False):
    """Return the **fraction of items each LF labels that are also labeled by at
    least one other LF.**

    Note that the maximum possible overlap fraction for an LF is the LF's
    coverage, unless `normalize_by_coverage=True`, in which case it is 1.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
            jth LF to the ith candidate
        normalize_by_coverage: Normalize by coverage of the LF, so that it
            returns the percent of LF labels that have overlaps.
    """
    overlaps = (L != 0).T @ _overlapped_data_points(L) / L.shape[0]
    if normalize_by_coverage:
        overlaps /= lf_coverages(L)
    return np.nan_to_num(overlaps)
Example 5: lf_conflicts
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def lf_conflicts(L, normalize_by_overlaps=False):
    """Return the **fraction of items each LF labels that are also given a
    different (non-abstain) label by at least one other LF.**

    Note that the maximum possible conflict fraction for an LF is the LF's
    overlaps fraction, unless `normalize_by_overlaps=True`, in which case it
    is 1.

    Args:
        L: an n x m scipy.sparse matrix where L_{i,j} is the label given by the
            jth LF to the ith candidate
        normalize_by_overlaps: Normalize by overlaps of the LF, so that it
            returns the percent of LF overlaps that have conflicts.
    """
    conflicts = (L != 0).T @ _conflicted_data_points(L) / L.shape[0]
    if normalize_by_overlaps:
        conflicts /= lf_overlaps(L)
    return np.nan_to_num(conflicts)
Example 6: __init__
# Required import: import numpy as np
# Or: from numpy import nan_to_num
# Also requires a project-level `config` module providing AGENTS and FEATURE_DIM
def __init__(self, data, hpc_p, costs):
    self.data_x = data.iloc[:, 0:-1].astype('float32').values
    self.data_n = np.isnan(self.data_x)
    self.data_x = np.nan_to_num(self.data_x)
    self.data_y = data.iloc[:, -1].astype('int32').values
    self.data_len = len(data)

    self.hpc_p = hpc_p.values
    self.costs = costs.values

    self.mask = np.zeros((config.AGENTS, config.FEATURE_DIM), dtype=np.float32)
    self.x = np.zeros((config.AGENTS, config.FEATURE_DIM), dtype=np.float32)
    self.y = np.zeros(config.AGENTS, dtype=np.int64)
    self.p = np.zeros(config.AGENTS, dtype=np.int32)
    self.n = np.zeros((config.AGENTS, config.FEATURE_DIM), dtype=bool)  # np.bool is deprecated; use the builtin bool
Example 7: nan_dot
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def nan_dot(A, B):
    """
    Returns np.dot(A, B) with the convention that
    nan * 0 = 0 and nan * x = nan if x != 0.

    Parameters
    ----------
    A, B : np.ndarrays
    """
    # Find out which entries should be nan due to nan * nonzero
    should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
    should_be_nan_2 = np.dot((A != 0), np.isnan(B))
    should_be_nan = should_be_nan_1 + should_be_nan_2

    # Multiply after setting all nan to 0
    # This is what happens if there were no nan * nonzero conflicts
    C = np.dot(np.nan_to_num(A), np.nan_to_num(B))

    C[should_be_nan] = np.nan

    return C
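A minimal usage sketch for nan_dot as defined above, contrasting it with a plain dot product where NaN has simply been zeroed out:

import numpy as np

A = np.array([[1.0, np.nan],
              [0.0, 2.0]])
B = np.array([[1.0, 0.0],
              [1.0, 0.0]])
print(np.dot(np.nan_to_num(A), B))  # [[1. 0.], [2. 0.]] -- the NaN is silently treated as 0
print(nan_dot(A, B))                # [[nan 0.], [2. 0.]] -- NaN propagates where it meets a nonzero entry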
Example 8: _average_precision
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def _average_precision(self, rec, prec):
    """
    Calculate average precision, overriding the default with the
    special 11-point metric.

    Params:
    ----------
    rec : numpy.array
        cumulated recall
    prec : numpy.array
        cumulated precision

    Returns:
    ----------
    ap as float
    """
    if rec is None or prec is None:
        return np.nan

    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0
        else:
            p = np.max(np.nan_to_num(prec)[rec >= t])
        ap += p / 11.
    return ap
Example 9: Transform
# Required import: import numpy as np
# Or: from numpy import nan_to_num
# Also requires: from copy import deepcopy
def Transform(self, data_container, store_folder='', store_key=''):
    if data_container.IsEmpty():
        return data_container

    new_data_container = deepcopy(data_container)
    array = new_data_container.GetArray()
    array -= self._interception
    array /= self._slop
    array = np.nan_to_num(array)

    new_data_container.SetArray(array)
    new_data_container.UpdateFrameByData()

    if store_folder:
        assert len(store_key) > 0
        self.SaveNormalDataContainer(data_container, store_folder, store_key)

    return new_data_container
Example 10: macro_accuracy
# Required import: import numpy as np
# Or: from numpy import nan_to_num
# Also requires a module `sm` providing confusion_matrix (typically sklearn.metrics imported as sm)
def macro_accuracy(P, Y, n_classes, bg_class=None, return_all=False, **kwargs):
    def macro_(P, Y, n_classes=None, bg_class=None, return_all=False):
        conf_matrix = sm.confusion_matrix(Y, P, labels=np.arange(n_classes))
        conf_matrix = conf_matrix / (conf_matrix.sum(0)[:, None] + 1e-5)
        conf_matrix = np.nan_to_num(conf_matrix)
        diag = conf_matrix.diagonal() * 100.

        # Remove background score
        if bg_class is not None:
            diag = np.array([diag[i] for i in range(n_classes) if i != bg_class])

        macro = diag.mean()
        if return_all:
            return macro, diag
        else:
            return macro

    if type(P) == list:
        out = [macro_(P[i], Y[i], n_classes=n_classes, bg_class=bg_class, return_all=return_all)
               for i in range(len(P))]
        if return_all:
            return (np.mean([o[0] for o in out]), np.mean([o[1] for o in out], 0))
        else:
            return np.mean(out)
    else:
        return macro_(P, Y, n_classes=n_classes, bg_class=bg_class, return_all=return_all)
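A minimal usage sketch for macro_accuracy above on hypothetical prediction and label arrays, assuming (as noted in the import comment) that `sm` is sklearn.metrics:

import numpy as np
import sklearn.metrics as sm

Y = np.array([0, 0, 1, 1, 2, 2])   # ground-truth labels
P = np.array([0, 1, 1, 1, 2, 0])   # predicted labels
print(macro_accuracy(P, Y, n_classes=3))               # macro score in percent
print(macro_accuracy(P, Y, n_classes=3, bg_class=0))   # ignore class 0 when averaging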
Example 11: rotate_around_axis
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def rotate_around_axis(coords, Q, origin='empty'):
    '''Uses a standard quaternion to rotate a vector. Q requires
    a 4-dimensional vector. coords is the 3d location of the point.
    coords can also be an N x 3 array of vectors. Happens to work
    with Q as a tuple or a np array of shape 4.'''
    if origin == 'empty':
        vcV = np.cross(Q[1:], coords)
        RV = np.nan_to_num(coords + vcV * (2 * Q[0]) + np.cross(Q[1:], vcV) * 2)
    else:
        coords -= origin
        vcV = np.cross(Q[1:], coords)
        RV = (np.nan_to_num(coords + vcV * (2 * Q[0]) + np.cross(Q[1:], vcV) * 2)) + origin
        coords += origin  # undo the in-place offset
    return RV
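A minimal usage sketch for rotate_around_axis above, rotating a point 90 degrees about the z-axis. The quaternion is built as (cos(theta/2), x*sin(theta/2), y*sin(theta/2), z*sin(theta/2)), i.e. scalar-first, which is the layout the function expects:

import numpy as np

theta = np.pi / 2                                                 # 90 degrees
Q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])    # rotation about the z-axis
point = np.array([1.0, 0.0, 0.0])
print(rotate_around_axis(point, Q))   # approximately [0., 1., 0.]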
Example 12: barycentric_generate
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def barycentric_generate(hits, tris):
    '''Create scalars to be used by points and triangles'''
    # where the hit lands on the two tri vecs
    tv = tris[:, 1] - tris[:, 0]
    hv = hits - tris[:, 0]
    d1a = np.einsum('ij, ij->i', hv, tv)
    d1b = np.einsum('ij, ij->i', tv, tv)
    scalar1 = np.nan_to_num(d1a / d1b)

    t2v = tris[:, 2] - tris[:, 0]
    d2a = np.einsum('ij, ij->i', hv, t2v)
    d2b = np.einsum('ij, ij->i', t2v, t2v)
    scalar2 = np.nan_to_num(d2a / d2b)

    # closest point on edge segment between the two points created above
    cp1 = tv * np.expand_dims(scalar1, axis=1)
    cp2 = t2v * np.expand_dims(scalar2, axis=1)
    cpvec = cp2 - cp1
    cp1_at = tris[:, 0] + cp1
    hcp = hits - cp1_at  # this is cp3 above. Not sure what it's for yet
    dhcp = np.einsum('ij, ij->i', hcp, cpvec)
    d3b = np.einsum('ij, ij->i', cpvec, cpvec)
    hcp_scalar = np.nan_to_num(dhcp / d3b)
    hcp_vec = cpvec * np.expand_dims(hcp_scalar, axis=1)

    # base of tri on edge between first two points
    d3 = np.einsum('ij, ij->i', -cp1, cpvec)
    scalar3 = np.nan_to_num(d3 / d3b)
    base_cp_vec = cpvec * np.expand_dims(scalar3, axis=1)
    base_on_span = cp1_at + base_cp_vec

    # Where the point occurs on the edge between the base of the triangle
    # and the cpoe of the base of the triangle on the cpvec
    base_vec = base_on_span - tris[:, 0]
    dba = np.einsum('ij, ij->i', hv, base_vec)
    dbb = np.einsum('ij, ij->i', base_vec, base_vec)
    scalar_final = np.nan_to_num(dba / dbb)
    p_on_bv = base_vec * np.expand_dims(scalar_final, axis=1)
    perp = (p_on_bv) - (cp1 + base_cp_vec)

    return scalar1, scalar2, hcp_scalar, scalar3, scalar_final
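A minimal usage sketch for barycentric_generate above, with a single hypothetical triangle and one hit point lying inside it (shapes are (1, 3, 3) for the triangle batch and (1, 3) for the hits):

import numpy as np

tris = np.array([[[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]]])     # one triangle
hits = np.array([[0.25, 0.25, 0.0]])     # one hit point on that triangle
s1, s2, hcp, s3, sf = barycentric_generate(hits, tris)
print(s1, s2)   # [0.25] [0.25]: projections of the hit onto the two edge vectors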
Example 13: project_points
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def project_points(points, tri_coords):
    '''Using this to get the points off the surface.
    Takes the average length of two vecs off triangles
    and applies it to the length of the normals.
    This way the normal scales with the mesh and with
    changes to the individual triangle vectors.'''
    t0 = tri_coords[:, 0]
    t1 = tri_coords[:, 1]
    t2 = tri_coords[:, 2]
    tv1 = t1 - t0
    tv2 = t2 - t0
    cross = np.cross(tv1, tv2)

    # get the average length of the two vectors and apply it to the cross product
    sq = np.sqrt(np.einsum('ij,ij->i', cross, cross))
    x1 = np.einsum('ij,ij->i', tv1, tv1)
    x2 = np.einsum('ij,ij->i', tv2, tv2)
    av_root = np.sqrt((x1 + x2) / 2)
    cr_root = (cross / np.expand_dims(sq, axis=1)) * np.expand_dims(av_root, axis=1)

    v1 = points - t0
    v1_dots = np.einsum('ij,ij->i', cr_root, v1)
    n_dots = np.einsum('ij,ij->i', cr_root, cr_root)
    scale = np.nan_to_num(v1_dots / n_dots)
    offset = cr_root * np.expand_dims(scale, axis=1)
    drop = points - offset  # the drop is used by the barycentric generator as points in the triangles
    return drop, scale
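A minimal usage sketch for project_points above, dropping a single point onto the plane of a hypothetical triangle lying in z = 0:

import numpy as np

tri_coords = np.array([[[0.0, 0.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0]]])   # one triangle in the z = 0 plane
points = np.array([[0.2, 0.3, 0.5]])         # a point above that plane
drop, scale = project_points(points, tri_coords)
print(drop)   # approximately [[0.2 0.3 0. ]]: the point projected onto the triangle's plane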
Example 14: decayCoefObjectiveFn
# Required import: import numpy as np
# Or: from numpy import nan_to_num
# Also requires: import warnings
def decayCoefObjectiveFn(x, Y, EX2):
    """
    Computes the objective function for terms involving lambda in the M-step.
    Checked.

    Input:
        x: value of lambda
        Y: the matrix of observed values
        EX2: the matrix of values of EX2 estimated in the E-step.

    Returns:
        obj: value of objective function
        grad: gradient
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        y_squared = Y ** 2
        Y_is_zero = np.abs(Y) < 1e-6
        exp_Y_squared = np.exp(-x * y_squared)
        log_exp_Y = np.nan_to_num(np.log(1 - exp_Y_squared))
        exp_ratio = np.nan_to_num(exp_Y_squared / (1 - exp_Y_squared))
        obj = sum(sum(Y_is_zero * (-EX2 * x) + (1 - Y_is_zero) * log_exp_Y))
        grad = sum(sum(Y_is_zero * (-EX2) + (1 - Y_is_zero) * y_squared * exp_ratio))
        if (type(obj) is not np.float64) or (type(grad) is not np.float64):
            raise Exception("Unexpected behavior in optimizing decay coefficient lambda. Please contact emmap1@cs.stanford.edu.")
        if type(obj) is np.float64:
            obj = -np.array([obj])
        if type(grad) is np.float64:
            grad = -np.array([grad])
    return obj, grad
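A minimal usage sketch for decayCoefObjectiveFn above, evaluating the objective and gradient at a hypothetical lambda for a tiny observation matrix (all values are made up for illustration):

import warnings
import numpy as np

Y = np.array([[0.0, 1.2], [0.7, 0.0]])     # observed values; zeros mark dropouts
EX2 = np.array([[1.0, 1.5], [0.8, 1.1]])   # E[X^2] estimates from the E-step
obj, grad = decayCoefObjectiveFn(0.5, Y, EX2)
print(obj, grad)   # each returned as a negated length-1 array, ready for a minimizer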
Example 15: forward
# Required import: import numpy as np
# Or: from numpy import nan_to_num
def forward(y_hat, y):
    y_hat = _cutoff(y_hat)
    y = _cutoff(y)
    return -np.mean(np.sum(np.nan_to_num(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)), axis=1))
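A minimal usage sketch for the binary cross-entropy forward above. The helper _cutoff is not shown in this snippet; here it is assumed to simply clamp values away from exactly 0 and 1 so the logarithms stay finite:

import numpy as np

def _cutoff(a, eps=1e-12):
    # assumed behavior: clamp into (0, 1) to avoid log(0)
    return np.clip(a, eps, 1.0 - eps)

y = np.array([[1.0, 0.0], [0.0, 1.0]])        # targets
y_hat = np.array([[0.9, 0.1], [0.2, 0.7]])    # predicted probabilities
print(forward(y_hat, y))   # mean per-sample cross-entropy, approximately 0.40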