

Python numpy.int32 Method Code Examples

This article collects and summarizes typical usage examples of the numpy.int32 method in Python. If you are wondering what numpy.int32 does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also explore further usage examples from the numpy package that this method belongs to.


A total of 15 code examples of the numpy.int32 method are shown below, sorted by popularity by default.
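Before the project excerpts below, here is a minimal self-contained sketch of typical numpy.int32 usage (scalar construction, array dtype, and casting). It is an illustrative snippet written for this article, not taken from any of the referenced projects.

import numpy as np

n = np.int32(7)                             # a 32-bit integer scalar
arr = np.array([1, 2, 3], dtype=np.int32)   # an array with int32 dtype
floats = np.array([0.9, 1.5, 2.1])
as_int = floats.astype(np.int32)            # truncates toward zero: [0, 1, 2]
print(n.dtype, arr.dtype, as_int)           # int32 int32 [0 1 2]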

Example 1: in_top_k

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 20, Source: theano_backend.py
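For readers without a Theano environment, the following NumPy-only sketch reproduces the same top-k membership check; the name numpy_in_top_k is invented here for illustration and is not part of the original backend.

import numpy as np

def numpy_in_top_k(predictions, targets, k):
    # indices of the k largest scores per row (order within the top k does not matter)
    top_k = np.argsort(predictions, axis=1)[:, -k:]
    # 1 if the target index appears among the row's top-k indices, else 0
    return np.any(top_k == targets[:, np.newaxis], axis=1).astype(np.int32)

preds = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]], dtype=np.float32)
tgts = np.array([1, 2], dtype=np.int32)
print(numpy_in_top_k(preds, tgts, k=2))  # [1 0]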

Example 2: draw_heatmap

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
Author: aleju, Project: cat-bbs, Lines: 25, Source: common.py
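A possible way to call draw_heatmap, assuming numpy and matplotlib.pyplot (as plt) are imported as the function expects, and using a heatmap that already matches the image size so the imgaug resize branch is skipped; the image and heatmap values are synthetic.

import numpy as np
import matplotlib.pyplot as plt  # draw_heatmap above expects plt in scope

img = np.zeros((64, 64, 3), dtype=np.uint8)        # dummy black image
yy, xx = np.mgrid[0:64, 0:64]
heatmap = np.exp(-((xx - 32) ** 2 + (yy - 32) ** 2) / 200.0).astype(np.float32)

overlay = draw_heatmap(img, heatmap, alpha=0.5)
print(overlay.shape, overlay.dtype)                # (64, 64, 3) uint8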

Example 3: load_keypoints

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def load_keypoints(image_filepath, image_height, image_width):
    """Load facial keypoints of one image."""
    fp_keypoints = "%s.cat" % (image_filepath,)
    if not os.path.isfile(fp_keypoints):
        raise Exception("Could not find keypoint coordinates for image '%s'." \
                        % (image_filepath,))
    else:
        with open(fp_keypoints, "r") as f:
            coords_raw = f.readlines()[0].strip().split(" ")
        coords_raw = [abs(int(coord)) for coord in coords_raw]
        keypoints = []
        #keypoints_arr = np.zeros((9*2,), dtype=np.int32)
        for i in range(1, len(coords_raw), 2): # first element is the number of coords
            x = np.clip(coords_raw[i], 0, image_width-1)
            y = np.clip(coords_raw[i+1], 0, image_height-1)
            keypoints.append((x, y))

        return keypoints 
Author: aleju, Project: cat-bbs, Lines: 19, Source: create_dataset.py
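One way to exercise load_keypoints is with a synthetic annotation file; the .cat format assumed here (a count followed by x y pairs) is inferred from the parsing loop above, and the file name is made up.

# assumes the imports used by load_keypoints (os, numpy as np) are present
image_filepath = "cat_0001.jpg"
with open(image_filepath + ".cat", "w") as f:
    f.write("3 10 20 30 40 500 60")   # three points; the last x is out of range

keypoints = load_keypoints(image_filepath, image_height=100, image_width=200)
print(keypoints)   # three (x, y) pairs, with the out-of-range x clipped to image_width-1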

Example 4: ctc_path_probs

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 23, Source: theano_backend.py

Example 5: create_roidb_from_box_list

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def create_roidb_from_box_list(self, box_list, gt_roidb):
    assert len(box_list) == self.num_images, \
            'Number of boxes must match number of ground-truth images'
    roidb = []

    if gt_roidb is not None:
        for i in range(self.num_images):
            boxes = box_list[i]

            real_label = gt_roidb[i]['labels']

            roidb.append({'boxes' : boxes,
                          'labels' : np.array([real_label], dtype=np.int32),
                          'flipped' : False})
    else:
        for i in range(self.num_images):
            boxes = box_list[i]

            roidb.append({'boxes' : boxes,
                          'labels' : np.zeros((1, 0), dtype=np.int32),
                          'flipped' : False})

    return roidb 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 25, Source: imdb.py

Example 6: generate_anchors_pre

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
  """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
  """
  anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
  A = anchors.shape[0]
  shift_x = np.arange(0, width) * feat_stride
  shift_y = np.arange(0, height) * feat_stride
  shift_x, shift_y = np.meshgrid(shift_x, shift_y)
  shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
  K = shifts.shape[0]
  # width changes faster, so here it is H, W, C
  anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
  anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
  length = np.int32(anchors.shape[0])

  return anchors, length 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 19, Source: snippets.py
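The broadcasting step above (K positional shifts combined with A base anchors) can be shown in isolation; the two toy base anchors below are made up and stand in for the output of generate_anchors.

import numpy as np

base_anchors = np.array([[-8, -8, 8, 8],
                         [-16, -16, 16, 16]], dtype=np.float32)   # A = 2 toy anchors

height, width, feat_stride = 2, 3, 16
shift_x, shift_y = np.meshgrid(np.arange(width) * feat_stride,
                               np.arange(height) * feat_stride)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()  # K = 6 shifts

A, K = base_anchors.shape[0], shifts.shape[0]
anchors = (base_anchors.reshape(1, A, 4) +
           shifts.reshape(1, K, 4).transpose(1, 0, 2)).reshape(K * A, 4)
length = np.int32(anchors.shape[0])
print(anchors.shape, length)   # (12, 4) 12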

Example 7: assemble_batch

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def assemble_batch(story_fns, num_answer_words, format_spec):
    stories = []
    for sfn in story_fns:
        with gzip.open(sfn,'rb') as f:
            cvtd_story, _, _, _ = pickle.load(f)
        stories.append(cvtd_story)
    sents, graphs, queries, answers = zip(*stories)
    cvtd_sents = np.array(sents, np.int32)
    cvtd_queries = np.array(queries, np.int32)
    max_ans_len = max(len(a) for a in answers)
    cvtd_answers = np.stack([convert_answer(answer, num_answer_words, format_spec, max_ans_len) for answer in answers])
    num_new_nodes, new_node_strengths, new_node_ids, next_edges = zip(*graphs)
    num_new_nodes = np.stack(num_new_nodes)
    new_node_strengths = np.stack(new_node_strengths)
    new_node_ids = np.stack(new_node_ids)
    next_edges = np.stack(next_edges)
    return cvtd_sents, cvtd_queries, cvtd_answers, num_new_nodes, new_node_strengths, new_node_ids, next_edges 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 19, Source: ggtnn_train.py

Example 8: create_cifar100

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines: 23, Source: dataset_tool.py
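The one-hot construction used above is a common NumPy pattern; a minimal standalone version with made-up labels:

import numpy as np

labels = np.array([0, 2, 1, 2], dtype=np.int32)
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
print(onehot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]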

Example 9: __init__

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
        self.resolution         = resolution
        self.resolution_log2    = int(np.log2(resolution))
        self.shape              = [num_channels, resolution, resolution]
        self.dtype              = dtype
        self.dynamic_range      = dynamic_range
        self.label_size         = label_size
        self.label_dtype        = label_dtype
        self._tf_minibatch_var  = None
        self._tf_lod_var        = None
        self._tf_minibatch_np   = None
        self._tf_labels_np      = None

        assert self.resolution == 2 ** self.resolution_log2
        with tf.name_scope('Dataset'):
            self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
            self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var') 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines: 19, Source: dataset.py

Example 10: _get_area_ratio

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
        """Compute area ratio of the gt mask inside the proposal and the gt
        mask of the corresponding instance."""
        num_pos = pos_proposals.size(0)
        if num_pos > 0:
            area_ratios = []
            proposals_np = pos_proposals.cpu().numpy()
            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
            # compute mask areas of gt instances (batch processing for speedup)
            gt_instance_mask_area = gt_masks.areas
            for i in range(num_pos):
                gt_mask = gt_masks[pos_assigned_gt_inds[i]]

                # crop the gt mask inside the proposal
                bbox = proposals_np[i, :].astype(np.int32)
                gt_mask_in_proposal = gt_mask.crop(bbox)

                ratio = gt_mask_in_proposal.areas[0] / (
                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
                area_ratios.append(ratio)
            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
                pos_proposals.device)
        else:
            area_ratios = pos_proposals.new_zeros((0, ))
        return area_ratios 
Author: open-mmlab, Project: mmdetection, Lines: 27, Source: maskiou_head.py
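The crop-and-ratio idea can be illustrated without the mmdetection mask classes; the NumPy-only sketch below uses a plain binary mask and an integer bounding box, and only approximates what the mask structure's crop() call above does.

import numpy as np

gt_mask = np.zeros((20, 20), dtype=np.uint8)
gt_mask[5:15, 5:15] = 1                             # a 10x10 square instance

bbox = np.array([8, 8, 18, 18], dtype=np.int32)     # proposal as (x1, y1, x2, y2)
x1, y1, x2, y2 = bbox
mask_in_proposal = gt_mask[y1:y2, x1:x2]

area_ratio = mask_in_proposal.sum() / (gt_mask.sum() + 1e-7)
print(area_ratio)   # ~0.49: roughly half of the instance lies inside the proposal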

Example 11: _load_dataset_clipping

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def _load_dataset_clipping(self, dataset_dir, epsilon):
    """Helper method which loads dataset and determines clipping range.

    Args:
      dataset_dir: location of the dataset.
      epsilon: maximum allowed size of adversarial perturbation.
    """
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
      if not fname.endswith('.png'):
        continue
      image_id = fname[:-4]
      image = np.array(
          Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
      image = image.astype('int32')
      self._dataset_image_count += 1
      self.dataset_max_clip[image_id] = np.clip(image + epsilon,
                                                0,
                                                255).astype('uint8')
      self.dataset_min_clip[image_id] = np.clip(image - epsilon,
                                                0,
                                                255).astype('uint8') 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 26, Source: run_attacks_and_defenses.py
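The astype('int32') cast above matters: adding epsilon directly to a uint8 array would wrap around instead of saturating. A small illustration with made-up pixel values:

import numpy as np

image = np.array([250, 128, 3], dtype=np.uint8)
epsilon = 16

wrapped = image + np.uint8(epsilon)                            # 250 + 16 wraps around to 10
safe = np.clip(image.astype('int32') + epsilon, 0, 255).astype('uint8')
print(wrapped)   # [ 10 144  19]
print(safe)      # [255 144  19]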

Example 12: load_images

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)

    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in xrange(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')

        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 24, Source: test_imagenet_attacks.py

Example 13: read_from_tfrecord

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
        'label': tf.FixedLenFeature([],tf.int64),
        'shape': tf.FixedLenFeature([],tf.string),
        'image': tf.FixedLenFeature([],tf.string),
    }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)

    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image 
Author: wdxtub, Project: deep-learning-note, Lines: 19, Source: 18_basic_tfrecord.py

Example 14: batch_gen

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def batch_gen(download_url, expected_byte, vocab_size, batch_size,
              skip_window, visual_fld):
    local_dest = 'data/w2v/text8.zip'
    utils.download_one_file(download_url, local_dest, expected_byte)
    words = read_data(local_dest)
    dictionary, _ = build_vocab(words, vocab_size, visual_fld)
    index_words = convert_words_to_index(words, dictionary)
    del words  # to save memory
    single_gen = generate_sample(index_words, skip_window)

    while True:
        center_batch = np.zeros(batch_size, dtype=np.int32)
        target_batch = np.zeros([batch_size, 1])
        for index in range(batch_size):
            center_batch[index], target_batch[index] = next(single_gen)
        yield center_batch, target_batch 
Author: wdxtub, Project: deep-learning-note, Lines: 18, Source: w2v_utils.py
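batch_gen depends on helpers (read_data, build_vocab, generate_sample) that are not shown here, but the int32 batch-filling pattern itself can be sketched with a dummy pair source; dummy_pairs and toy_batch_gen below are invented purely for illustration.

import numpy as np
from itertools import cycle

# stand-in for generate_sample(): an endless stream of (center, target) index pairs
dummy_pairs = cycle([(0, 1), (1, 0), (1, 2), (2, 1)])

def toy_batch_gen(pair_gen, batch_size):
    while True:
        center_batch = np.zeros(batch_size, dtype=np.int32)
        target_batch = np.zeros([batch_size, 1])
        for index in range(batch_size):
            center_batch[index], target_batch[index] = next(pair_gen)
        yield center_batch, target_batch

centers, targets = next(toy_batch_gen(dummy_pairs, batch_size=4))
print(centers.dtype, centers)   # int32 [0 1 1 2]
print(targets.ravel())          # [1. 0. 2. 1.]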

Example 15: bonds_CH

# Required import: import numpy [as alias]
# Or: from numpy import int32 [as alias]
def bonds_CH(ENGINE, rang=10, recur=10, refine=False, explore=True):
    groups = []
    for idx in range(0,ENGINE.pdb.numberOfAtoms, 13):
        groups.append( np.array([idx+1 ,idx+2 ], dtype=np.int32) ) # C1-H11
        groups.append( np.array([idx+1 ,idx+3 ], dtype=np.int32) ) # C1-H12
        groups.append( np.array([idx+4 ,idx+5 ], dtype=np.int32) ) # C2-H21
        groups.append( np.array([idx+4 ,idx+6 ], dtype=np.int32) ) # C2-H22
        groups.append( np.array([idx+7 ,idx+8 ], dtype=np.int32) ) # C3-H31
        groups.append( np.array([idx+7 ,idx+9 ], dtype=np.int32) ) # C3-H32
        groups.append( np.array([idx+10,idx+11], dtype=np.int32) ) # C4-H41
        groups.append( np.array([idx+10,idx+12], dtype=np.int32) ) # C4-H42
    ENGINE.set_groups(groups)
    [g.set_move_generator(DistanceAgitationGenerator(amplitude=0.2,agitate=(True,True))) for g in ENGINE.groups]
    # set selector
    if refine or explore:
        gs = RecursiveGroupSelector(RandomSelector(ENGINE), recur=recur, refine=refine, explore=explore)
        ENGINE.set_group_selector(gs)
    # number of steps
    nsteps = recur*len(ENGINE.groups)
    for stepIdx in range(rang):
        LOGGER.info("Running 'bonds_CH' mode step %i"%(stepIdx))
        ENGINE.run(numberOfSteps=nsteps, saveFrequency=nsteps)

# ############ RUN H-C-H ANGLES ############ # 
Author: bachiraoun, Project: fullrmc, Lines: 26, Source: run.py


Note: The numpy.int32 examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; please consult each project's license before distributing or using the code, and do not reproduce this article without permission.