

Python numpy.nan Code Examples

This article collects typical usage examples of numpy.nan in Python (strictly speaking, numpy.nan is a floating-point constant rather than a method). If you are unsure what numpy.nan is, how to use it, or what it is typically used for, the curated code examples below may help. You can also explore further usage examples from the numpy package.


The following presents 15 code examples of numpy.nan, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
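Before the examples, here is a minimal standalone sketch (my own illustration, not taken from any of the projects below) of the behaviour that makes np.nan worth special handling: it is a float constant that never compares equal to anything, including itself, it propagates through ordinary arithmetic, and it is detected with np.isnan or skipped with the nan-aware reductions.

import numpy as np

a = np.array([1.0, np.nan, 3.0])

print(np.nan == np.nan)   # False: NaN never compares equal, even to itself
print(np.isnan(a))        # [False  True False]: the reliable way to find NaNs
print(a.sum())            # nan: NaN propagates through ordinary reductions
print(np.nansum(a))       # 4.0: nan-aware reductions skip missing values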

Example 1: trix

# Required import: import numpy as np
# Alternatively: from numpy import nan
def trix(df, n):
    """Calculate TRIX for given data.
    
    :param df: pandas.DataFrame with a 'Close' column and a 0-based integer index
    :param n: period (span) used for each of the three exponential moving averages
    :return: pandas.DataFrame with an added 'Trix_n' column
    """
    EX1 = df['Close'].ewm(span=n, min_periods=n).mean()
    EX2 = EX1.ewm(span=n, min_periods=n).mean()
    EX3 = EX2.ewm(span=n, min_periods=n).mean()
    i = 0
    ROC_l = [np.nan]
    # iterate positionally; assumes the DataFrame has a 0-based integer RangeIndex
    while i + 1 <= df.index[-1]:
        ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
        ROC_l.append(ROC)
        i = i + 1
    Trix = pd.Series(ROC_l, name='Trix_' + str(n))
    df = df.join(Trix)
    return df 
Author: Crypto-toolbox | Project: pandas-technical-indicators | Lines: 21 | Source: technical_indicators.py
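A minimal usage sketch for the function above, on synthetic data. It assumes only what the code already requires: a 'Close' column and the default 0-based RangeIndex that the positional while-loop relies on.

import numpy as np
import pandas as pd

# synthetic close prices with the default 0-based RangeIndex
df = pd.DataFrame({'Close': np.linspace(100, 110, 50)})
df = trix(df, n=5)
print(df['Trix_5'].head(10))  # NaN until the triple EMA has warmed up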

Example 2: query

# Required import: import numpy as np
# Alternatively: from numpy import nan
def query(self, coords, order=1):
        """
        Returns the map value at the specified location(s) on the sky.

        Args:
            coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
            order (Optional[int]): Interpolation order to use. Defaults to `1`,
                for linear interpolation.

        Returns:
            A float array containing the map value at every input coordinate.
            The shape of the output will be the same as the shape of the
            coordinates stored by `coords`.
        """
        out = np.full(len(coords.l.deg), np.nan, dtype='f4')

        for pole in self.poles:
            m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)

            if np.any(m):
                data, w = self._data[pole]
                x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
                out[m] = map_coordinates(data, [y, x], order=order, mode='nearest')

        return out 
Author: gregreen | Project: dustmaps | Lines: 27 | Source: sfd.py
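The numpy.nan idiom in this example is the pre-fill-then-mask pattern: allocate the output with np.full(..., np.nan) and overwrite only the positions selected by each boolean mask, so any coordinate not covered by a map stays NaN. A standalone sketch of that pattern, with made-up values standing in for the dustmaps/astropy machinery:

import numpy as np

b_deg = np.array([45.0, -30.0, 10.0, -80.0])   # made-up galactic latitudes
pole_value = {'ngp': 1.5, 'sgp': 2.5}          # stand-ins for the per-pole map data

out = np.full(len(b_deg), np.nan, dtype='f4')  # "no data yet" everywhere
for pole in ('ngp', 'sgp'):
    m = (b_deg >= 0) if pole == 'ngp' else (b_deg < 0)
    if np.any(m):
        out[m] = pole_value[pole]              # overwrite only the masked positions

print(out)  # [1.5 2.5 1.5 2.5]; positions matched by neither mask would stay NaN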

Example 3: test_select_confounds_error

# Required import: import numpy as np
# Alternatively: from numpy import nan
def test_select_confounds_error(confounds_file, tmp_path):
    import pandas as pd
    import numpy as np

    confounds_df = pd.read_csv(str(confounds_file), sep='\t', na_values='n/a')

    confounds_df.loc[0, 'white_matter'] = np.nan  # inject a NaN to trigger the error

    conf_file = tmp_path / "confounds.tsv"

    confounds_df.to_csv(str(conf_file), index=False, sep='\t', na_rep='n/a')

    with pytest.raises(ValueError) as val_err:
        _select_confounds(str(conf_file), ['white_matter', 'csf'])

    assert "The selected confounds contain nans" in str(val_err.value) 
Author: HBClab | Project: NiBetaSeries | Lines: 18 | Source: test_nistats.py
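For context, a hypothetical sketch of the kind of check the test expects _select_confounds to perform. This is not NiBetaSeries' actual implementation, only an illustration of raising when selected confound columns contain NaNs; the function name below is a placeholder.

import pandas as pd

def check_confounds_for_nans(conf_file, selected_columns):
    """Hypothetical sketch, not NiBetaSeries' actual _select_confounds:
    raise the error the test above expects when selected columns contain NaNs."""
    confounds_df = pd.read_csv(conf_file, sep='\t', na_values='n/a')
    selected = confounds_df[selected_columns]
    if selected.isna().values.any():
        bad = selected.columns[selected.isna().any()].tolist()
        raise ValueError("The selected confounds contain nans: {}".format(bad))
    return selected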

Example 4: wnba_parse_foul

# Required import: import numpy as np
# Alternatively: from numpy import nan
def wnba_parse_foul(row):
    """
    function to determine what type of foul is being commited by the player

    Input:
    row - row of nba play by play

    Output:
    foul_type - the foul type of the fould commited by the player
    """

    try:
        if row["etype"] == 6:
            try:
                return foul_dict[row["mtype"]]
            except KeyError:
                return np.nan
        return np.nan
    except KeyError:
        return np.nan 
Author: mcbarlowe | Project: nba_scraper | Lines: 22 | Source: stat_calc_functions.py

Example 5: parse_foul

# Required import: import numpy as np
# Alternatively: from numpy import nan
def parse_foul(row):
    """
    function to determine what type of foul is being commited by the player

    Input:
    row - row of nba play by play

    Output:
    foul_type - the foul type of the fould commited by the player
    """

    try:
        if row["eventmsgtype"] == 6:
            try:
                return foul_dict[row["eventmsgactiontype"]]
            except KeyError:
                return np.nan
        return np.nan
    except KeyError:
        return np.nan 
Author: mcbarlowe | Project: nba_scraper | Lines: 22 | Source: stat_calc_functions.py
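A usage sketch for parse_foul on a tiny play-by-play frame. The real foul_dict lives in stat_calc_functions.py; the mapping below is a placeholder so the example is self-contained.

import numpy as np
import pandas as pd

foul_dict = {1: "personal", 2: "shooting"}   # placeholder; real mapping is in stat_calc_functions.py

pbp = pd.DataFrame({
    "eventmsgtype": [6, 6, 2],
    "eventmsgactiontype": [1, 99, 5],
})

# non-foul rows and unmapped action types both come back as np.nan
pbp["foul_type"] = pbp.apply(parse_foul, axis=1)
print(pbp["foul_type"].tolist())  # ['personal', nan, nan]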

Example 6: wnba_shot_types

# Required import: import numpy as np
# Alternatively: from numpy import nan
def wnba_shot_types(row):
    """
    function to parse what type of shot is being taken

    Inputs:
    row - pandas row of play by play dataframe

    Outputs:
    shot_type - the shot type, one of: hook, jump, layup, dunk, tip
    """
    try:
        if row["etype"] in [1, 2, 3]:
            return SHOT_DICT[row["etype"]][row["mtype"]]
        else:
            return np.nan
    except KeyError:
        return np.nan 
Author: mcbarlowe | Project: nba_scraper | Lines: 19 | Source: stat_calc_functions.py

Example 7: parse_shot_types

# Required import: import numpy as np
# Alternatively: from numpy import nan
def parse_shot_types(row):
    """
    function to parse what type of shot is being taken

    Inputs:
    row - pandas row of play by play dataframe

    Outputs:
    shot_type - the shot type, one of: hook, jump, layup, dunk, tip
    """
    try:
        if row["eventmsgtype"] in [1, 2, 3]:
            return SHOT_DICT[row["eventmsgtype"]][row["eventmsgactiontype"]]
        else:
            return np.nan
    except KeyError:
        return np.nan 
Author: mcbarlowe | Project: nba_scraper | Lines: 19 | Source: stat_calc_functions.py

Example 8: apply_cmap

# Required import: import numpy as np
# Alternatively: from numpy import nan
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
      they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u) 
Author: noahbenson | Project: neuropythy | Lines: 25 | Source: core.py
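The numpy.nan-specific trick above is u[np.isnan(u)] = -np.inf: after normalization, missing values are pushed to negative infinity so the colormap sends them to its lowest ('under') entry rather than the 'bad' color. A standalone sketch without the pimms/zdivide helpers; it reuses matplotlib.cm.get_cmap as the function does (newer matplotlib releases may expose colormaps via matplotlib.colormaps instead).

import numpy as np
import matplotlib.cm

zs = np.array([0.0, 2.5, 5.0, np.nan])   # data with a missing value
vmin, vmax = np.nanmin(zs), np.nanmax(zs)

u = (zs - vmin) / (vmax - vmin)          # normalize to [0, 1]; NaN stays NaN
u[np.isnan(u)] = -np.inf                 # NaNs fall to the colormap's lowest color

colors = matplotlib.cm.get_cmap('viridis')(u)
print(colors.shape)                      # (4, 4): one RGBA row per input value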

Example 9: compute_cor_loc

# Required import: import numpy as np
# Alternatively: from numpy import nan
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
  """Compute CorLoc according to the definition in the following paper.

  https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

  Returns nans if there are no ground truth images for a class.

  Args:
    num_gt_imgs_per_class: 1D array, representing number of images containing
        at least one object instance of a particular class
    num_images_correctly_detected_per_class: 1D array, representing the number
        of images in which at least one object instance of a particular class
        was correctly detected

  Returns:
    corloc_per_class: A float numpy array represents the corloc score of each
      class
  """
  return np.where(
      num_gt_imgs_per_class == 0,
      np.nan,
      num_images_correctly_detected_per_class / num_gt_imgs_per_class) 
Author: ringringyi | Project: DOTA_models | Lines: 25 | Source: metrics.py
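A small usage sketch for compute_cor_loc's np.where pattern. Note that np.where evaluates both branches, so the 0/0 division still executes; wrapping it in np.errstate suppresses the resulting RuntimeWarning.

import numpy as np

num_gt = np.array([10, 0, 4], dtype=float)
num_correct = np.array([7, 0, 2], dtype=float)

# np.where evaluates both branches, so silence the 0/0 warning
# before substituting np.nan for classes with no ground truth
with np.errstate(divide='ignore', invalid='ignore'):
    corloc = np.where(num_gt == 0, np.nan, num_correct / num_gt)

print(corloc)  # [0.7 nan 0.5]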

Example 10: __init__

# Required import: import numpy as np
# Alternatively: from numpy import nan
def __init__(self,
               num_groundtruth_classes,
               matching_iou_threshold=0.5,
               nms_iou_threshold=1.0,
               nms_max_output_boxes=10000):
    self.per_image_eval = per_image_evaluation.PerImageEvaluation(
        num_groundtruth_classes, matching_iou_threshold, nms_iou_threshold,
        nms_max_output_boxes)
    self.num_class = num_groundtruth_classes

    self.groundtruth_boxes = {}
    self.groundtruth_class_labels = {}
    self.groundtruth_is_difficult_list = {}
    self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
    self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)

    self.detection_keys = set()
    self.scores_per_class = [[] for _ in range(self.num_class)]
    self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
    self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
    self.average_precision_per_class = np.empty(self.num_class, dtype=float)
    self.average_precision_per_class.fill(np.nan)
    self.precisions_per_class = []
    self.recalls_per_class = []
    self.corloc_per_class = np.ones(self.num_class, dtype=float) 
Author: ringringyi | Project: DOTA_models | Lines: 27 | Source: object_detection_evaluation.py

Example 11: testReturnsCorrectNanLoss

# Required import: import numpy as np
# Alternatively: from numpy import nan
def testReturnsCorrectNanLoss(self):
    batch_size = 3
    num_anchors = 10
    code_size = 4
    prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
    target_tensor = tf.concat([
        # integer division keeps the shape arguments integral under Python 3
        tf.zeros([batch_size, num_anchors, code_size // 2]),
        tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan
    ], axis=2)
    weights = tf.ones([batch_size, num_anchors])
    loss_op = losses.WeightedL2LocalizationLoss()
    loss = loss_op(prediction_tensor, target_tensor, weights=weights,
                   ignore_nan_targets=True)

    expected_loss = (3 * 5 * 4) / 2.0
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, expected_loss) 
Author: ringringyi | Project: DOTA_models | Lines: 20 | Source: losses_test.py
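The expected value of 30.0 follows because half of each 4-dimensional code is NaN and, with ignore_nan_targets=True, contributes nothing to the loss. A plain numpy recreation of that arithmetic; this is a sketch of the idea, not the TensorFlow implementation.

import numpy as np

batch_size, num_anchors, code_size = 3, 10, 4
pred = np.ones((batch_size, num_anchors, code_size))
target = np.concatenate([
    np.zeros((batch_size, num_anchors, code_size // 2)),
    np.full((batch_size, num_anchors, code_size // 2), np.nan),
], axis=2)

# "ignore nan targets": replace NaN targets with the prediction itself,
# so those entries contribute zero residual to the loss
target = np.where(np.isnan(target), pred, target)

loss = 0.5 * np.sum((pred - target) ** 2)
print(loss)  # 30.0, matching expected_loss = (3 * 5 * 4) / 2.0 in the test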

Example 12: plot_results

# Required import: import numpy as np
# Alternatively: from numpy import nan
def plot_results(start=0, stop=0):  # from utils.utils import *; plot_results()
    # Plot training results files 'results*.txt'
    fig, ax = plt.subplots(2, 5, figsize=(14, 7))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP', 'F1']
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        for i in range(10):
            y = results[i, x]
            if i in [0, 1, 2, 5, 6, 7]:
                y[y == 0] = np.nan  # don't show zero loss values
            ax[i].plot(x, y, marker='.', label=f.replace('.txt', ''))
            ax[i].set_title(s[i])
            if i in [5, 6, 7]:  # share train and val loss y axes
                ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200) 
Author: zbyuan | Project: pruning_yolov3 | Lines: 24 | Source: utils.py
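The line y[y == 0] = np.nan relies on matplotlib skipping NaN points, which leaves visible gaps in the curve instead of misleading zero values. A standalone sketch of that effect:

import numpy as np
import matplotlib.pyplot as plt

y = np.array([0.9, 0.7, 0.0, 0.5, 0.0, 0.3])  # zeros stand in for unlogged values
y[y == 0] = np.nan                             # matplotlib skips NaN points, leaving gaps

plt.plot(range(len(y)), y, marker='.')
plt.savefig('nan_gaps.png', dpi=200)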

Example 13: plot_results_overlay

# Required import: import numpy as np
# Alternatively: from numpy import nan
def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training results files 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP', 'val', 'val', 'val', 'Recall', 'F1']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
                ax[i].plot(x, y, marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.tight_layout()
        fig.savefig(f.replace('.txt', '.png'), dpi=200) 
Author: zbyuan | Project: pruning_yolov3 | Lines: 23 | Source: utils.py

Example 14: test_linear_sum_assignment_input_validation

# Required import: import numpy as np
# Alternatively: from numpy import nan
def test_linear_sum_assignment_input_validation():
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])

    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C), linear_sum_assignment(np.asarray(C)))
    # assert_array_equal(linear_sum_assignment(C),
    #                    linear_sum_assignment(matrix(C)))

    I = np.identity(3)
    assert_array_equal(linear_sum_assignment(I.astype(bool)), linear_sum_assignment(I))
    assert_raises(ValueError, linear_sum_assignment, I.astype(str))

    I[0][0] = np.nan
    assert_raises(ValueError, linear_sum_assignment, I)

    I = np.identity(3)
    I[1][1] = np.inf
    assert_raises(ValueError, linear_sum_assignment, I) 
Author: MolSSI | Project: QCElemental | Lines: 20 | Source: test_scipy_hungarian.py
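QCElemental vendors a copy of SciPy's Hungarian algorithm, and the test above checks that a NaN cost triggers ValueError. Below is a small guard sketch that performs the same NaN check explicitly before calling scipy.optimize.linear_sum_assignment; safe_assignment is a hypothetical helper, not part of either library.

import numpy as np
from scipy.optimize import linear_sum_assignment

def safe_assignment(cost):
    cost = np.asarray(cost, dtype=float)
    # NaN entries make the assignment problem ill-defined; reject them up front,
    # mirroring the ValueError the test above expects
    if np.isnan(cost).any():
        raise ValueError("cost matrix contains NaN entries")
    return linear_sum_assignment(cost)

rows, cols = safe_assignment([[4.0, 1.0], [2.0, 0.0]])
print(rows, cols)  # [0 1] [1 0]: the minimum-cost pairing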

Example 15: OutlierDetection

# Required import: import numpy as np
# Alternatively: from numpy import nan
def OutlierDetection(CMat, s):
    n = np.amax(s)
    _, N = CMat.shape
    OutlierIndx = list()
    FailCnt = 0
    Fail = False

    # any column of CMat that contains a NaN marks that sample as an outlier
    for i in range(0, N):
        c = CMat[:, i]
        if np.sum(np.isnan(c)) >= 1:
            OutlierIndx.append(i)
            FailCnt += 1
    sc = s.astype(float)
    sc[OutlierIndx] = np.nan
    CMatC = CMat.astype(float)
    CMatC[OutlierIndx, :] = np.nan
    CMatC[:, OutlierIndx] = np.nan

    if FailCnt > (N - n):
        CMatC = np.nan
        sc = np.nan
        Fail = True
    return CMatC, sc, OutlierIndx, Fail 
Author: abhinav4192 | Project: sparse-subspace-clustering-python | Lines: 26 | Source: OutlierDetection.py
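A usage sketch on a toy coefficient matrix: column 2 contains NaNs, so sample 2 is flagged as an outlier and its entries are replaced with np.nan. CMat, s, and the sizes below are made up for illustration.

import numpy as np

CMat = np.ones((4, 5))          # toy 4x5 coefficient matrix
CMat[:, 2] = np.nan             # NaNs in column 2 mark sample 2 as an outlier
s = np.array([1, 1, 2, 2, 2])   # made-up cluster labels for the 5 samples

CMatC, sc, OutlierIndx, Fail = OutlierDetection(CMat, s)
print(OutlierIndx)  # [2]
print(sc)           # [ 1.  1. nan  2.  2.]: the outlier's label is replaced by NaN
print(Fail)         # False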


Note: The numpy.nan examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.