

Python numpy.array_split Method Code Examples

This article collects typical usage examples of the numpy.array_split method in Python. If you are wondering how to call numpy.array_split, how it is used in practice, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from numpy, the module this method belongs to.


Below are 15 code examples of numpy.array_split, sorted by popularity by default.
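
Before the examples, a minimal sketch of the method itself: unlike numpy.split, numpy.array_split accepts a section count that does not evenly divide the axis length, making the leading chunks one element longer.

import numpy as np

a = np.arange(8)
# np.split(a, 3) would raise ValueError because 8 is not divisible by 3;
# np.array_split returns chunks of sizes 3, 3, 2 instead.
print(np.array_split(a, 3))   # [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]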

Example 1: scale

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def scale(boxlist, y_scale, x_scale):
  """Scale box coordinates in x and y dimensions.

  Args:
    boxlist: BoxList holding N boxes
    y_scale: float
    x_scale: float

  Returns:
    boxlist: BoxList holding N boxes
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  y_min = y_scale * y_min
  y_max = y_scale * y_max
  x_min = x_scale * x_min
  x_max = x_scale * x_max
  scaled_boxlist = np_box_list.BoxList(np.hstack([y_min, x_min, y_max, x_max]))

  fields = boxlist.get_extra_fields()
  for field in fields:
    extra_field_data = boxlist.get_field(field)
    scaled_boxlist.add_field(field, extra_field_data)

  return scaled_boxlist 
Developer ID: ringringyi, Project: DOTA_models, Lines: 26, Source: np_box_list_ops.py
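
The splitting idiom in this example is worth isolating: array_split along axis=1 turns an [N, 4] corner array into four [N, 1] columns that broadcast cleanly against scalars and recombine with np.hstack. A standalone sketch (the box coordinates are made up):

import numpy as np

boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                  [0.2, 0.3, 0.8, 0.9]])
y_min, x_min, y_max, x_max = np.array_split(boxes, 4, axis=1)
print(y_min.shape)   # (2, 1): each column keeps its second axis for the later hstack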

Example 2: Train

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def Train(self, C, A, Y, SF):
        '''
        Train the classifier using the sample matrix A and target matrix Y
        '''
        C.fit(A, Y)
        YH = np.zeros(Y.shape, dtype=object)    #np.object was removed in NumPy 1.24; the builtin object works everywhere
        for i in np.array_split(np.arange(A.shape[0]), 32):   #Split verification into 32 chunks to avoid running out of memory
            YH[i] = C.predict(A[i])
        s1 = SF(Y, YH)
        print('All:{:8.6f}'.format(s1))
        '''
        ss = ShuffleSplit(random_state = 1151)  #Use fixed state so training can be repeated later
        trn, tst = next(ss.split(A, Y))         #Make train/test split
        mi = [8] * 1                            #Maximum number of iterations at each iter
        YH = np.zeros((A.shape[0]), dtype = np.object)
        for mic in mi:                                      #Chunk size to split dataset for CV results
            #C.SetMaxIter(mic)                               #Set the maximum number of iterations to run
            #C.fit(A[trn], Y[trn])                           #Perform training iterations
        ''' 
Developer ID: nicholastoddsmith, Project: poeai, Lines: 21, Source: TargetingSystem.py
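
The chunked-prediction idiom above is independent of the classifier; a minimal standalone sketch with scikit-learn as a stand-in (the data here is synthetic):

import numpy as np
from sklearn.linear_model import LogisticRegression

A = np.random.rand(1000, 8)
Y = (A.sum(axis=1) > 4.0).astype(int)
C = LogisticRegression().fit(A, Y)

YH = np.zeros(Y.shape, dtype=object)
for i in np.array_split(np.arange(A.shape[0]), 32):   # 32 chunks, not chunks of 32 rows
    YH[i] = C.predict(A[i])
print((YH == Y).mean())   # training accuracy, predicted chunk by chunk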

Example 3: group_years

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def group_years(years, interval=3):
    """ Return integers representing sequential groupings of years

    Note: years specified must be sorted

    Args:
        years (np.ndarray): the year corresponding to each EVI value
        interval (int, optional): number of years to group together
            (default: 3)

    Returns:
        np.ndarray: integers representing sequential year groupings

    """
    n_groups = math.ceil((years.max() - years.min()) / interval)
    if n_groups <= 1:
        return np.zeros_like(years, dtype=np.uint16)
    splits = np.array_split(np.arange(years.min(), years.max() + 1), n_groups)

    groups = np.zeros_like(years, dtype=np.uint16)
    for i, s in enumerate(splits):
        groups[np.in1d(years, s)] = i

    return groups 
Developer ID: ceholden, Project: yatsm, Lines: 26, Source: longtermmean.py
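
A quick check of group_years on a made-up year vector (assuming the module's math and numpy imports):

years = np.array([2000, 2000, 2001, 2002, 2003, 2004, 2005])
print(group_years(years, interval=3))   # [0 0 0 0 1 1 1]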

Example 4: compute_gradient

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def compute_gradient(model, loss_fn, device, dataset, idx):
    n = idx.size
    grad_idx = np.array_split(np.arange(n), test_batch_size)
    u = [torch.zeros(*param.shape, requires_grad=False).to(device) for param in model.parameters()]
    model.eval()
    for i in grad_idx:
        X = []
        y = []
        for ii in i:
            d = dataset[idx[ii]]
            X.append(d[0])
            y.append(d[1])
        X = torch.stack(X).to(device)
        y = torch.from_numpy(np.array(y)).to(device)
        z = model(X)
        loss = loss_fn(z, y, reduction='sum')
        model.zero_grad()
        loss.backward()
        for j, param in enumerate(model.parameters()):
            u[j] += param.grad.data / n
    return u 
Developer ID: sato9hara, Project: sgd-influence, Lines: 23, Source: infl.py
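
Note that here (and in the two eval_model variants below) the second argument to np.array_split is test_batch_size, which array_split treats as the number of sections, not the size of each one, so each chunk holds roughly n / test_batch_size elements:

import numpy as np

n, test_batch_size = 1000, 64
chunks = np.array_split(np.arange(n), test_batch_size)
print(len(chunks), chunks[0].size)   # 64 chunks of ~16 elements, not 64-element batches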

Example 5: eval_model

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def eval_model(model, loss_fn, device, dataset, idx):
    model.eval()
    n = idx.size
    with torch.no_grad():
        loss = 0
        eval_idx = np.array_split(np.arange(n), test_batch_size)
        for i in eval_idx:
            x = []
            for ii in i:
                d = dataset[idx[ii]]
                x.append(d[0])
            x = torch.stack(x).to(device)
            y = model(x)
            loss += loss_fn(y, x).item() * i.size
        loss /= n
    return loss 
Developer ID: sato9hara, Project: sgd-influence, Lines: 18, Source: outlier.py

Example 6: eval_model

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def eval_model(model, loss_fn, device, dataset, idx):
    model.eval()
    n = idx.size
    with torch.no_grad():
        loss = 0
        acc = 0
        eval_idx = np.array_split(np.arange(n), test_batch_size)
        for i in eval_idx:
            X = []
            y = []
            for ii in i:
                d = dataset[idx[ii]]
                X.append(d[0])
                y.append(d[1])
            X = torch.stack(X).to(device)
            y = torch.from_numpy(np.array(y)).to(device)
            z = model(X)
            loss += loss_fn(z, y, reduction='sum').item()
            pred = z.argmax(dim=1, keepdim=True)
            acc += pred.eq(y.view_as(pred)).sum().item()
        loss /= n
        acc /= n
    return loss, acc 
Developer ID: sato9hara, Project: sgd-influence, Lines: 25, Source: train.py

Example 7: cross_validate

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def cross_validate(model_class, params, samples, labels, kfold = 3, pool = None):
    n = len(samples)
    folds = np.array_split(np.arange(n), kfold)
    def f(i):
        model = model_class(**params)
        test_idx = folds[i]
        train_idx = list(folds)
        train_idx.pop(i)
        train_idx = np.hstack(train_idx)
        train_samples, train_labels = samples[train_idx], labels[train_idx]
        test_samples, test_labels = samples[test_idx], labels[test_idx]
        model.train(train_samples, train_labels)
        resp = model.predict(test_samples)
        score = (resp != test_labels).mean()
        print(".", end='')
        return score
    if pool is None:
        scores = list(map(f, range(kfold)))
    else:
        scores = pool.map(f, range(kfold))
    return np.mean(scores) 
Developer ID: makelove, Project: OpenCV-Python-Tutorial, Lines: 23, Source: digits_adjust.py
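
A hypothetical invocation with a stand-in model class (DummyModel is made up for illustration; it just predicts the majority label):

import numpy as np

class DummyModel:
    def __init__(self, **params):
        pass
    def train(self, samples, labels):
        self.majority = np.bincount(labels).argmax()   # remember the most common label
    def predict(self, samples):
        return np.full(len(samples), self.majority)

samples = np.random.rand(90, 5)
labels = np.random.randint(0, 2, 90)
print('mean error: %.3f' % cross_validate(DummyModel, {}, samples, labels, kfold=3))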

Example 8: split_long_lines

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def split_long_lines(line, chunks = 2, max_symbols_per_line = False):
	if max_symbols_per_line:
		chunks = 0
		while 1:
			chunks += 1
			new_lines = []
			for i in range(chunks):
				new_line = ' '.join(numpy.array_split(line.split(' '), chunks)[i])
				new_lines.append(new_line)

			if len(max(new_lines, key = len)) <= max_symbols_per_line:
				return '\n'.join(new_lines)
	else:
		new_lines = []
		for i in range(chunks):
			new_line = ' '.join(numpy.array_split(line.split(' '), chunks)[i])
			new_lines.append(new_line)

		return '\n'.join(new_lines) 
Developer ID: oltodosel, Project: interSubs, Lines: 21, Source: interSubs.py
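
For example, a five-word line split into two chunks inherits array_split's uneven sizing (three words, then two):

print(split_long_lines('the quick brown fox jumps', chunks = 2))
# the quick brown
# fox jumps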

Example 9: iterbatches

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
    """
    Iterates over arrays in batches; provide exactly one of num_batches or batch_size (the other must be None).

    :param arrays: (tuple) a tuple of arrays
    :param num_batches: (int) the number of batches; must be None if batch_size is defined
    :param batch_size: (int) the size of each batch; must be None if num_batches is defined
    :param shuffle: (bool) enable auto shuffle
    :param include_final_partial_batch: (bool) add the last batch if not the same size as the batch_size
    :return: (tuples) a tuple of a batch of the arrays
    """
    assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
    arrays = tuple(map(np.asarray, arrays))
    n_samples = arrays[0].shape[0]
    assert all(a.shape[0] == n_samples for a in arrays[1:])
    inds = np.arange(n_samples)
    if shuffle:
        np.random.shuffle(inds)
    sections = np.arange(0, n_samples, batch_size)[1:] if num_batches is None else num_batches
    for batch_inds in np.array_split(inds, sections):
        if include_final_partial_batch or len(batch_inds) == batch_size:
            yield tuple(a[batch_inds] for a in arrays) 
Developer ID: Stable-Baselines-Team, Project: stable-baselines, Lines: 24, Source: dataset.py
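
The sections value, np.arange(0, n_samples, batch_size)[1:], gives array_split explicit cut points at every batch_size-th index, so the final batch absorbs any remainder. A usage sketch:

import numpy as np

X = np.arange(10).reshape(10, 1)
y = np.arange(10)
for bx, by in iterbatches((X, y), batch_size=4, shuffle=False):
    print(by)   # [0 1 2 3], then [4 5 6 7], then the partial batch [8 9]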

Example 10: _train_once

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def _train_once(self, runner, epoch):
        """Obtain samplers and train for one epoch.

        Args:
            runner (LocalRunner): LocalRunner which may be used to obtain
                samples.
            epoch (int): The current epoch.

        Returns:
            List[float]: Losses.

        """
        batch = self._obtain_samples(runner, epoch)
        indices = np.random.permutation(len(batch.actions))
        minibatches = np.array_split(indices, self._minibatches_per_epoch)
        losses = []
        for minibatch in minibatches:
            observations = np_to_torch(batch.observations[minibatch])
            actions = np_to_torch(batch.actions[minibatch])
            self._optimizer.zero_grad()
            loss = self._compute_loss(observations, actions)
            loss.backward()
            losses.append(loss.item())
            self._optimizer.step()
        return losses 
Developer ID: rlworkgroup, Project: garage, Lines: 27, Source: bc.py

Example 11: get_attentions

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def get_attentions(self):
        if self.option.query_is_language:
            num_batch = int(np.ceil(1.0*len(self.data.query_for_rules)/self.option.batch_size))
            query_batches = np.array_split(self.data.query_for_rules, num_batch)
        else:   
            #print(self.data.query_for_rules)
            if not self.option.type_check:
                num_batch = int(np.ceil(1.*len(self.data.query_for_rules)/self.option.batch_size))
                query_batches = np.array_split(self.data.query_for_rules, num_batch)       
            else:
                query_batches = [[i] for i in self.data.query_for_rules]

        all_attention_operators = {}
        all_attention_memories = {}

        for queries in query_batches:
            attention_operators, attention_memories \
            = self.learner.get_attentions_given_queries(self.sess, queries)
            
            # Tuple-ize in order to be used as dict keys
            if self.option.query_is_language:
                queries = [tuple(q) for q in queries]

            for i in range(len(queries)):
                all_attention_operators[queries[i]] \
                                        = [[attn[i] 
                                        for attn in attn_step] 
                                        for attn_step in attention_operators]
                all_attention_memories[queries[i]] = \
                                        [attn_step[i, :] 
                                        for attn_step in attention_memories]
        pickle.dump([all_attention_operators, all_attention_memories], 
                    open(os.path.join(self.option.this_expsdir, "attentions.pckl"), "wb"))  # pickle requires binary mode
               
        msg = self.msg_with_time("Attentions collected.")
        print(msg)
        self.log_file.write(msg + "\n")

        all_queries = reduce(lambda x, y: list(x) + list(y), query_batches, [])  # needs 'from functools import reduce' on Python 3
        return all_attention_operators, all_attention_memories, all_queries 
Developer ID: fanyangxyz, Project: Neural-LP, Lines: 42, Source: experiment.py
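
The ceil division above is the standard recipe for capping chunk size with array_split: splitting into ceil(n / batch_size) sections guarantees every batch holds at most batch_size queries.

import numpy as np

queries = np.arange(10)
batch_size = 4
num_batch = int(np.ceil(1.0 * len(queries) / batch_size))   # 3 sections
for chunk in np.array_split(queries, num_batch):
    print(chunk.size)   # 4, 3, 3: never more than batch_size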

Example 12: clip_to_window

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def clip_to_window(boxlist, window):
  """Clip bounding boxes to a window.

  This op clips input bounding boxes (represented by bounding box
  corners) to a window, optionally filtering out boxes that do not
  overlap at all with the window.

  Args:
    boxlist: BoxList holding M_in boxes
    window: a numpy array of shape [4] representing the
            [y_min, x_min, y_max, x_max] window to which the op
            should clip boxes.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
  y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
  x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
  x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
  clipped = np_box_list.BoxList(
      np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
  clipped = _copy_extra_fields(clipped, boxlist)
  areas = area(clipped)
  nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)),
                                    [-1]).astype(np.int32)
  return gather(clipped, nonzero_area_indices) 
Developer ID: ringringyi, Project: DOTA_models, Lines: 34, Source: np_box_list_ops.py

Example 13: prune_outside_window

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def prune_outside_window(boxlist, window):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also ClipToWindow which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
            of the window.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """

  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                     np.less(x_min, win_x_min),
                                     np.greater(y_max, win_y_max),
                                     np.greater(x_max, win_x_max)])
  valid_indices = np.reshape(
      np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
  return gather(boxlist, valid_indices), valid_indices 
Developer ID: ringringyi, Project: DOTA_models, Lines: 33, Source: np_box_list_ops.py

Example 14: iterbatches

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
    assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
    arrays = tuple(map(np.asarray, arrays))
    n = arrays[0].shape[0]
    assert all(a.shape[0] == n for a in arrays[1:])
    inds = np.arange(n)
    if shuffle: np.random.shuffle(inds)
    sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
    for batch_inds in np.array_split(inds, sections):
        if include_final_partial_batch or len(batch_inds) == batch_size:
            yield tuple(a[batch_inds] for a in arrays) 
Developer ID: Hwhitetooth, Project: lirpg, Lines: 13, Source: dataset.py

Example 15: featurize

# Required module: import numpy [as alias]
# Or: from numpy import array_split [as alias]
def featurize(self, data, batch_size=32):
        """Encodes the data into an embedding
        Data: ndarray with shape (-1, width, height, 1)
        """
        splitted_data = np.array_split(data, max(data.shape[0] // batch_size, 1))
        feature_vectors = []
        for batch in splitted_data:
            normalized_batch = batch / 255
            feature_vectors.append(self.sess.run(self.graph['feature_vector'], {
                self.graph['is_training']: False, self.graph['state']: normalized_batch
            }))
        feature_vectors = np.concatenate(feature_vectors)
        return feature_vectors 
Developer ID: MaxSobolMark, Project: HardRLWithYoutube, Lines: 15, Source: BaseFeaturizer.py
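
Unlike the ceil recipe in Example 11, the floor division max(data.shape[0] // batch_size, 1) yields batches of at least batch_size frames (the largest can approach twice that), which keeps every batch full:

import numpy as np

n, batch_size = 100, 32
sections = max(n // batch_size, 1)   # 3 sections
print([c.size for c in np.array_split(np.arange(n), sections)])   # [34, 33, 33]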


Note: The numpy.array_split examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.