

Python numpy.ones_like Method Code Examples

This article collects typical usage examples of the numpy.ones_like method in Python. If you are asking yourself how exactly numpy.ones_like works, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from numpy, the package this method belongs to.


Below are 15 code examples of the numpy.ones_like method, sorted by popularity by default.
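
Before diving into the project examples, here is a minimal sketch of what numpy.ones_like does: it returns a new array of ones with the same shape and dtype as its argument (the dtype can be overridden). This snippet is purely illustrative and is not taken from any of the projects below.

import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)
ones = np.ones_like(x)               # shape (2, 3), dtype float64, filled with 1.0
mask = np.ones_like(x, dtype=bool)   # same shape, but boolean dtype
print(ones)
print(mask)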

Example 1: train_lr_rfeinman

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 27, Source: util.py

Example 2: compute_roc_rfeinman

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py
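
As a quick sanity check, compute_roc_rfeinman can be fed two synthetic score arrays. This sketch assumes numpy plus the sklearn roc_curve/auc imports that util.py relies on are already in scope; the score values are made up for illustration.

import numpy as np

rng = np.random.RandomState(0)
probs_neg = rng.uniform(0.0, 0.6, size=200)   # hypothetical scores for negatives
probs_pos = rng.uniform(0.4, 1.0, size=200)   # hypothetical scores for positives

fpr, tpr, auc_score = compute_roc_rfeinman(probs_neg, probs_pos, plot=False)
print('AUC: %.4f' % auc_score)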

Example 3: _transform_col

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def _transform_col(self, x, i):
        """Encode one numerical feature column to quantiles.

        Args:
            x (pandas.Series): numerical feature column to encode
            i (int): column index of the numerical feature

        Returns:
            Encoded feature (pandas.Series).
        """
        # Map values to the empirical CDF between .1% and 99.9%;
        # missing values (NaN) stay at -1.
        rv = np.ones_like(x) * -1

        filt = ~np.isnan(x)
        rv[filt] = np.floor((self.ecdfs[i](x[filt]) * 0.998 + .001) *
                            self.n_label)

        return rv 
Author: jeongyoonlee, Project: Kaggler, Lines: 20, Source: numerical.py
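
To see what the ECDF-based mapping in _transform_col produces, the following standalone sketch reimplements the idea with a hand-rolled empirical CDF in plain numpy; the Kaggler encoder builds its ecdfs and n_label attributes during fitting, so the names and data here are purely illustrative assumptions.

import numpy as np

train_col = np.array([3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0])
sorted_train = np.sort(train_col)

def ecdf(values):
    # Fraction of training values less than or equal to each input value.
    return np.searchsorted(sorted_train, values, side='right') / len(sorted_train)

n_label = 10
x = np.array([2.5, np.nan, 7.0])
rv = np.ones_like(x) * -1                      # -1 marks missing values
filt = ~np.isnan(x)
rv[filt] = np.floor((ecdf(x[filt]) * 0.998 + .001) * n_label)
print(rv)                                      # [ 3. -1.  8.]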

Example 4: test_symmetry

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def test_symmetry(self):
        shape = (5, 5)
        X = np.arange(shape[0] * shape[1], dtype=float).reshape(*shape)

        # symmetry
        step = 0
        X_ = X.copy()
        constraint = scarlet.SymmetryConstraint()
        X_ = constraint(X_, step)
        new_X = np.ones_like(X) * 12
        assert_almost_equal(X_, new_X)

        # symmetry at half strength
        X_ = X.copy()
        constraint = scarlet.SymmetryConstraint(strength=0.5)
        X_ = constraint(X_, step)
        new_X = [
            [6.0, 6.5, 7.0, 7.5, 8.0],
            [8.5, 9.0, 9.5, 10.0, 10.5],
            [11.0, 11.5, 12.0, 12.5, 13.0],
            [13.5, 14.0, 14.5, 15.0, 15.5],
            [16.0, 16.5, 17.0, 17.5, 18.0],
        ]
        assert_almost_equal(X_, new_X) 
Author: pmelchior, Project: scarlet, Lines: 26, Source: test_constraint.py

Example 5: mask_frozen_ip

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def mask_frozen_ip(eom, vector, kshift, const=LARGE_DENOM):
    '''Replaces all frozen orbital indices of `vector` with the value `const`.'''
    r1, r2 = eom.vector_to_amplitudes(vector, kshift=kshift)
    nkpts = eom.nkpts
    nocc, nmo = eom.nocc, eom.nmo
    kconserv = eom.kconserv

    # Get location of padded elements in occupied and virtual space
    nonzero_opadding, nonzero_vpadding = eom.nonzero_opadding, eom.nonzero_vpadding

    new_r1 = const * np.ones_like(r1)
    new_r2 = const * np.ones_like(r2)

    new_r1[nonzero_opadding[kshift]] = r1[nonzero_opadding[kshift]]
    for ki in range(nkpts):
        for kj in range(nkpts):
            kb = kconserv[ki, kshift, kj]
            idx = np.ix_([ki], [kj], nonzero_opadding[ki], nonzero_opadding[kj], nonzero_vpadding[kb])
            new_r2[idx] = r2[idx]

    return eom.amplitudes_to_vector(new_r1, new_r2, kshift, kconserv) 
Author: pyscf, Project: pyscf, Lines: 23, Source: eom_kccsd_ghf.py

Example 6: mask_frozen_ea

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def mask_frozen_ea(eom, vector, kshift, const=LARGE_DENOM):
    '''Replaces all frozen orbital indices of `vector` with the value `const`.'''
    r1, r2 = eom.vector_to_amplitudes(vector, kshift=kshift)
    kconserv = eom.kconserv
    nkpts = eom.nkpts
    nocc, nmo = eom.nocc, eom.nmo

    # Get location of padded elements in occupied and virtual space
    nonzero_opadding, nonzero_vpadding = eom.nonzero_opadding, eom.nonzero_vpadding

    new_r1 = const * np.ones_like(r1)
    new_r2 = const * np.ones_like(r2)

    new_r1[nonzero_vpadding[kshift]] = r1[nonzero_vpadding[kshift]]
    for kj in range(nkpts):
        for ka in range(nkpts):
            kb = kconserv[kshift, ka, kj]
            idx = np.ix_([kj], [ka], nonzero_opadding[kj], nonzero_vpadding[ka], nonzero_vpadding[kb])
            new_r2[idx] = r2[idx]

    return eom.amplitudes_to_vector(new_r1, new_r2, kshift, kconserv) 
Author: pyscf, Project: pyscf, Lines: 23, Source: eom_kccsd_ghf.py

Example 7: compute_gradient

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def compute_gradient(self, grad=None):
        ''' Compute and return the gradient for matrix multiplication.

        :param grad: The gradient of other operation wrt the matmul output.
        :type grad: number or an ndarray; defaults to an all-ones array matching
            the matmul output when not provided.
        '''
        # Get input values.
        x, y = [node.output_value for node in self.input_nodes]

        # Default gradient wrt the matmul output.
        if grad is None:
            grad = np.ones_like(self.output_value)

        # Gradients wrt inputs.
        dfdx = np.dot(grad, np.transpose(y))
        dfdy = np.dot(np.transpose(x), grad)

        return [dfdx, dfdy] 
Author: PytLab, Project: simpleflow, Lines: 20, Source: operations.py
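
The gradient formulas in compute_gradient (dfdx = grad @ y.T and dfdy = x.T @ grad) can be sanity-checked with plain numpy, without the simpleflow Node machinery; the snippet below is only an illustrative shape check under that assumption.

import numpy as np

x = np.random.randn(3, 4)
y = np.random.randn(4, 2)
out = np.dot(x, y)

grad = np.ones_like(out)               # upstream gradient defaults to all ones
dfdx = np.dot(grad, np.transpose(y))   # shape (3, 4), matches x
dfdy = np.dot(np.transpose(x), grad)   # shape (4, 2), matches y

assert dfdx.shape == x.shape and dfdy.shape == y.shape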

Example 8: separatePano

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def separatePano(panoImg, fov, x, y, imgSize=320):
    '''cut a panorama image into several separate views'''
    assert x.shape == y.shape
    if not isinstance(fov, np.ndarray):
        fov = fov * np.ones_like(x)

    sepScene = [
        {
            'img': imgLookAt(panoImg.copy(), xi, yi, imgSize, fovi),
            'vx': xi,
            'vy': yi,
            'fov': fovi,
            'sz': imgSize,
        }
        for xi, yi, fovi in zip(x, y, fov)
    ]

    return sepScene 
Author: sunset1995, Project: HorizonNet, Lines: 20, Source: pano_lsd_align.py

Example 9: test_2_targets_field_component

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def test_2_targets_field_component(self, optimization_variables_avg):
        l, Q, A = optimization_variables_avg
        l2 = l[::-1]
        l = np.vstack([l, l2])
        m = 2e-3
        m1 = 4e-3
        x = optimization_methods.optimize_field_component(l, max_el_current=m,
                                                          max_total_current=m1)

        l_avg = np.average(l, axis=0)
        x_sp = optimize_comp(l_avg, np.ones_like(l2), max_el_current=m, max_total_current=m1)

        assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
        assert np.all(np.abs(x) <= m + 1e-6)
        assert np.isclose(l_avg.dot(x), l_avg.dot(x_sp),
                          rtol=1e-4, atol=1e-4)
        assert np.isclose(np.sum(x), 0) 
Author: simnibs, Project: simnibs, Lines: 19, Source: test_optimization_methods.py

Example 10: linkage_calculation

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def linkage_calculation(self, dist, labels, penalty): 
        cluster_num = len(self.label_to_images.keys())
        start_index = np.zeros(cluster_num, dtype=int)   # np.int is removed in recent numpy
        end_index = np.zeros(cluster_num, dtype=int)
        counts = 0
        for i, key in enumerate(sorted(self.label_to_images.keys())):
            start_index[i] = counts
            end_index[i] = counts + len(self.label_to_images[key])
            counts = end_index[i]
        dist = dist.numpy()
        linkages = np.zeros([cluster_num, cluster_num])
        for i in range(cluster_num):
            for j in range(i, cluster_num):
                linkage = dist[start_index[i]:end_index[i], start_index[j]:end_index[j]]
                linkages[i, j] = np.average(linkage)

        linkages = linkages.T + linkages - linkages * np.eye(cluster_num)
        intra = linkages.diagonal()
        penalized_linkages = linkages + penalty * ((intra * np.ones_like(linkages)).T + intra).T
        return linkages, penalized_linkages 
Author: gddingcs, Project: Dispersion-based-Clustering, Lines: 26, Source: bottom_up.py

Example 11: tforward

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def tforward(self, disp0, im, std=None):
    self.pattern = self.pattern.to(disp0.device)
    self.uv0 = self.uv0.to(disp0.device)

    uv0 = self.uv0.expand(disp0.shape[0], *self.uv0.shape[1:])
    uv1 = torch.empty_like(uv0)
    uv1[...,0] = uv0[...,0] - disp0.contiguous().view(disp0.shape[0],-1)
    uv1[...,1] = uv0[...,1]

    uv1[..., 0] = 2 * (uv1[..., 0] / (self.im_width-1) - 0.5)
    uv1[..., 1] = 2 * (uv1[..., 1] / (self.im_height-1) - 0.5)
    uv1 = uv1.view(-1, self.im_height, self.im_width, 2).clone()
    pattern = self.pattern.expand(disp0.shape[0], *self.pattern.shape[1:])
    pattern_proj = torch.nn.functional.grid_sample(pattern, uv1, padding_mode='border')
    mask = torch.ones_like(im)
    if std is not None:
      mask = mask*std

    diff = torchext.photometric_loss(pattern_proj.contiguous(), im.contiguous(), 9, self.loss_type, self.loss_eps)
    val = (mask*diff).sum() / mask.sum()
    return val, pattern_proj 
Author: autonomousvision, Project: connecting_the_dots, Lines: 23, Source: networks.py

Example 12: masking_data

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def masking_data(request):
    # Two years, 8 day repeat
    x = np.arange(735851, 735851 + 365 * 2, 8)

    # Simulate some timeseries in green & swir1
    def seasonality(x, amp):
        return np.cos(2 * np.pi / 365.25 * x) * amp
    green = np.ones_like(x) * 1000 + seasonality(x, 750)
    swir1 = np.ones_like(x) * 1250 + seasonality(x, 500)
    Y = np.vstack((green, swir1))

    # Add in some noise
    idx_green_noise = 15
    idx_swir1_noise = 30

    Y[0, idx_green_noise] = 8000
    Y[1, idx_swir1_noise] = 10

    return x, Y, np.array([idx_green_noise, idx_swir1_noise]) 
Author: ceholden, Project: yatsm, Lines: 21, Source: test_masking.py

Example 13: im_detect

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def im_detect(rois, scores, bbox_deltas, im_info,
              bbox_stds, nms_thresh, conf_thresh):
    """rois (nroi, 4), scores (nrois, nclasses), bbox_deltas (nrois, 4 * nclasses), im_info (3)"""
    rois = rois.asnumpy()
    scores = scores.asnumpy()
    bbox_deltas = bbox_deltas.asnumpy()

    im_info = im_info.asnumpy()
    height, width, scale = im_info

    # post processing
    pred_boxes = bbox_pred(rois, bbox_deltas, bbox_stds)
    pred_boxes = clip_boxes(pred_boxes, (height, width))

    # we used scaled image & roi to train, so it is necessary to transform them back
    pred_boxes = pred_boxes / scale

    # convert to per class detection results
    det = []
    for j in range(1, scores.shape[-1]):
        indexes = np.where(scores[:, j] > conf_thresh)[0]
        cls_scores = scores[indexes, j, np.newaxis]
        cls_boxes = pred_boxes[indexes, j * 4:(j + 1) * 4]
        cls_dets = np.hstack((cls_boxes, cls_scores))
        keep = nms(cls_dets, thresh=nms_thresh)

        cls_id = np.ones_like(cls_scores) * j
        det.append(np.hstack((cls_id, cls_scores, cls_boxes))[keep, :])

    # assemble all classes
    det = np.concatenate(det, axis=0)
    return det 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 34, Source: bbox.py

Example 14: _debug_save_hardness

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def _debug_save_hardness(self, seed):
    out_path = os.path.join(self.logdir, '{:s}_{:d}_hardness.png'.format(self.building_name, seed))
    batch_size = 4000
    rng = np.random.RandomState(0)
    start_node_ids, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists = \
      rng_next_goal_rejection_sampling(
          None, batch_size, self.task.gtG, rng, self.task_params.max_dist,
          self.task_params.min_dist, self.task_params.max_dist,
          self.task.sampling_distribution, self.task.target_distribution,
          self.task.nodes, self.task_params.n_ori, self.task_params.step_size,
          self.task.distribution_bins, self.task.rejection_sampling_M)
    bins = self.task.distribution_bins 
    n_bins = self.task.n_bins
    with plt.style.context('ggplot'):
      fig, axes = utils.subplot(plt, (1,2), (10,10))
      ax = axes[0]
      _ = ax.hist(hardnesss, bins=bins, weights=np.ones_like(hardnesss)/len(hardnesss))
      ax.plot(bins[:-1]+0.5/n_bins, self.task.target_distribution, 'g')
      ax.plot(bins[:-1]+0.5/n_bins, self.task.sampling_distribution, 'b')
      ax.grid('on')
      
      ax = axes[1]
      _ = ax.hist(gt_dists, bins=np.arange(self.task_params.max_dist+1))
      ax.grid('on')
      ax.set_title('Mean: {:0.2f}, Median: {:0.2f}'.format(np.mean(gt_dists),
                                                           np.median(gt_dists)))
      with fu.fopen(out_path, 'w') as f:
        fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) 
Author: ringringyi, Project: DOTA_models, Lines: 30, Source: nav_env.py

Example 15: draw_mask_on_image_array

# Required import: import numpy [as alias]
# Or: from numpy import ones_like [as alias]
def draw_mask_on_image_array(image, mask, color='red', alpha=0.7):
  """Draws mask on an image.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a float numpy array of shape (img_height, img_width) with
      values between 0 and 1
    color: color to draw the keypoints with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.7)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.float32:
    raise ValueError('`mask` not of type np.float32')
  if np.any(np.logical_or(mask > 1.0, mask < 0.0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  rgb = ImageColor.getrgb(color)
  pil_image = Image.fromarray(image)

  solid_color = np.expand_dims(
      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
  pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
  pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
  pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
  np.copyto(image, np.array(pil_image.convert('RGB'))) 
Author: ringringyi, Project: DOTA_models, Lines: 30, Source: visualization_utils.py


Note: The numpy.ones_like method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.