This article collects typical usage examples of the numpy.split method in Python. If you are wondering what numpy.split does, how to call it, or what its use looks like in practice, the curated code examples below may help. You can also explore further usage examples of numpy, the module this method belongs to.
The following section presents 15 code examples of the numpy.split method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
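Before the examples, here is a minimal stand-alone sketch of what numpy.split accepts; the array contents and split points below are invented purely for illustration.
import numpy as np

arr = np.arange(12)

# Split into 3 equal parts; the length must be divisible by the number of parts,
# otherwise np.split raises ValueError (np.array_split relaxes this requirement).
equal_parts = np.split(arr, 3)        # three arrays of length 4

# Pass a list of indices instead to split at explicit positions.
by_sections = np.split(arr, [2, 7])   # lengths 2, 5 and 5

# Split a 2-D array column-wise with axis=1.
mat = arr.reshape(3, 4)
cols = np.split(mat, 4, axis=1)       # four arrays of shape (3, 1)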
Example 1: KLDivergenceLoss
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def KLDivergenceLoss():
    '''KL divergence loss.
    '''
    data = mx.sym.Variable('data')
    mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
    mu2 = mx.sym.zeros_like(mu1)
    lv2 = mx.sym.zeros_like(lv1)
    v1 = mx.sym.exp(lv1)
    v2 = mx.sym.exp(lv2)
    mu_diff_sq = mx.sym.square(mu1 - mu2)
    dimwise_kld = .5 * (
        (lv2 - lv1) + mx.symbol.broadcast_div(v1, v2) + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
    KL = mx.symbol.sum(dimwise_kld, axis=1)
    KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL), name='KLloss')
    return KLloss
Example 2: intersection
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def intersection(boxes1, boxes2):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
        a numpy array with shape [N*M] representing pairwise intersection area
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
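The pattern above, splitting an [N, 4] box array into its four coordinate columns, is a very common use of np.split; a minimal stand-alone sketch (with made-up box values) looks like this:
import numpy as np

boxes = np.array([[0., 0., 2., 2.],
                  [1., 1., 3., 4.]])   # [N, 4] as (y_min, x_min, y_max, x_max)

# Each piece keeps shape [N, 1], which is what allows the broadcasting
# against the transposed second box set in intersection().
y_min, x_min, y_max, x_max = np.split(boxes, 4, axis=1)
print(y_min.shape)   # (2, 1)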
Example 3: test_fragsep_error
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def test_fragsep_error():
    with pytest.raises(qcelemental.ValidationError) as e:
        qcelemental.molparse.from_arrays(
            domain="qmvz",
            speclabel=True,
            elbl=["ar1", "42AR2"],
            fragment_multiplicities=[3, 3],
            fragment_separators=np.array(["1"]),
            geom_unsettled=[[], ["1", "bond"]],
            hint_types=[],
            units="Bohr",
            variables=[("bond", "3")],
        )

    assert "unable to perform trial np.split on geometry" in str(e.value)
Example 4: _split_into_xyxy
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def _split_into_xyxy(self):
    if self.mode == "xyxy":
        # xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
        xmin, ymin, xmax, ymax = np.split(self.bbox, 4, axis=1)
        return xmin, ymin, xmax, ymax
    elif self.mode == "xywh":
        TO_REMOVE = 1
        xmin, ymin, w, h = np.split(self.bbox, 4, axis=1)
        return (
            xmin,
            ymin,
            # xmin + (w - TO_REMOVE).clamp(min=0),
            # ymin + (h - TO_REMOVE).clamp(min=0),
            xmin + np.clip(w - TO_REMOVE, 0, None),
            ymin + np.clip(h - TO_REMOVE, 0, None),
        )
    else:
        raise RuntimeError("Should not be here")

    # def resize(self, size, *args, **kwargs):
Example 5: vector_to_amplitudes
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
             nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
             nkpts**3*noccb**2*nvirb**2)
    sections = np.cumsum(sizes[:-1])
    t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)
    t1a = t1a.reshape(nkpts,nocca,nvira)
    t1b = t1b.reshape(nkpts,noccb,nvirb)
    t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
    t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
    t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
    return (t1a,t1b), (t2aa,t2ab,t2bb)
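The idiom used in this example, turning a tuple of block sizes into split points with np.cumsum and passing them to np.split, is worth isolating; the block sizes below are arbitrary illustration values, not the amplitude dimensions above.
import numpy as np

sizes = (3, 5, 4)                      # lengths of the consecutive blocks
sections = np.cumsum(sizes[:-1])       # -> array([3, 8]); split positions, not lengths
vec = np.arange(sum(sizes))

a, b, c = np.split(vec, sections)      # pieces of length 3, 5 and 4
assert (len(a), len(b), len(c)) == sizes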
Example 6: vector_to_amplitudes
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def vector_to_amplitudes(vector, nmo, nocc):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb
    nocc = nocca + noccb
    nvir = nvira + nvirb
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    if vector.size == size:
        #return ccsd.vector_to_amplitudes_s4(vector, nmo, nocc)
        raise RuntimeError('Input vector is a GCCSD vector')
    else:
        sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
        sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
        sections = np.cumsum([sizea, sizeb])
        veca, vecb, t2ab = np.split(vector, sections)
        t1a, t2aa = ccsd.vector_to_amplitudes_s4(veca, nmoa, nocca)
        t1b, t2bb = ccsd.vector_to_amplitudes_s4(vecb, nmob, noccb)
        t2ab = t2ab.copy().reshape(nocca,noccb,nvira,nvirb)
        return (t1a,t1b), (t2aa,t2ab,t2bb)
Example 7: np_sample
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def np_sample(img, coords):
    # a numpy implementation of ImageSample layer
    coords = np.maximum(coords, 0)
    coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))
    lcoor = np.floor(coords).astype('int32')
    ucoor = lcoor + 1
    ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
    diff = coords - lcoor
    neg_diff = 1.0 - diff

    lcoory, lcoorx = np.split(lcoor, 2, axis=2)
    ucoory, ucoorx = np.split(ucoor, 2, axis=2)
    diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
    diffy, diffx = np.split(diff, 2, axis=2)
    ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)

    ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
        img[ucoory, ucoorx, :] * diffx * diffy + \
        img[lcoory, ucoorx, :] * ndiffy * diffx + \
        img[ucoory, lcoorx, :] * diffy * ndiffx
    return ret[:, :, 0, :]
Example 8: dmc_propagate_parallel
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def dmc_propagate_parallel(wf, configs, weights, client, npartitions, *args, **kwargs):
    config = configs.split(npartitions)
    weight = np.split(weights, npartitions)
    runs = [client.submit(dmc_propagate, wf, conf, wt, *args, **kwargs)
            for conf, wt in zip(config, weight)]
    allresults = list(zip(*[r.result() for r in runs]))
    configs.join(allresults[1])
    weights = np.concatenate(allresults[2])
    confweight = np.array([len(c.configs) for c in config], dtype=float)
    confweight_avg = confweight / (np.mean(confweight) * npartitions)
    weight = np.array([w['weight'] for w in allresults[0]])
    weight_avg = weight / np.mean(weight)
    block_avg = {}
    for k in allresults[0][0].keys():
        block_avg[k] = np.sum([res[k] * ww * cw for res, cw, ww in zip(allresults[0], confweight_avg, weight_avg)], axis=0)
    block_avg['weight'] = np.mean(weight)
    return block_avg, configs, weights
Example 9: mini_batch
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def mini_batch(self, data_file):
    token_seqs = []
    with codecs.open(data_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip('\n')
            parse_line = list(map(int, line.split()))
            if pm.REAL_WORLD_DATA:
                if len(parse_line) == pm.WGAN_SEQ_LENGTH:
                    token_seqs.append(parse_line)
            else:
                if len(parse_line) == pm.SEQ_LENGTH:
                    token_seqs.append(parse_line)

    self.num_batch = int(len(token_seqs) / self.batch_size)
    token_seqs = token_seqs[:self.num_batch * self.batch_size]
    self.token_sentences = np.array(token_seqs)
    self.sequence_batch = np.split(self.token_sentences, self.num_batch, 0)
    self.reset_pointer()
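A self-contained sketch of the batching step above, splitting a token matrix into equal-sized mini-batches along axis 0; the shapes and batch size are invented for illustration.
import numpy as np

batch_size = 4
token_sentences = np.arange(24 * 10).reshape(24, 10)     # 24 sequences of length 10

num_batch = len(token_sentences) // batch_size           # 6 full batches
token_sentences = token_sentences[:num_batch * batch_size]

# np.split along axis 0 requires the length to divide evenly, hence the truncation above.
sequence_batch = np.split(token_sentences, num_batch, 0)
print(len(sequence_batch), sequence_batch[0].shape)      # 6 (4, 10)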
Example 10: build_vocabulary
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def build_vocabulary(self, path, datafile, vocab_size, char=True):
    files = codecs.open(datafile, 'r', encoding='utf-8').read()
    if char:
        words = []
        files = files.split('\n')
        for word in files:
            word = tuple(word)
            words.append(word)
    else:
        words = files.split()
    wordcount = Counter(c for line in words for c in line if c != ' ')
    with codecs.open(path, 'w', encoding='utf-8') as f:
        f.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<SOS>", "<EOS>", "<SPA>"))
        for word, count in wordcount.most_common(len(wordcount) - 5):
            f.write("{}\t{}\n".format(word, count))
    self.vocab_size = len(wordcount) - 5
Example 11: backpropagate
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def backpropagate(self, delta) -> np.ndarray:
    shape = delta.shape
    delta = rtm(delta)

    h, t, c = np.split(self.gates, 3, axis=1)

    dh = self.activation.backward(h) * t * delta
    dt = sigmoid.backward(t) * h * delta
    dc = sigmoid.backward(c) * self.inputs * delta
    dx = c * delta

    dgates = np.concatenate((dh, dt, dc), axis=1)

    self.nabla_w = self.inputs.T.dot(dgates)
    self.nabla_b = dgates.sum(axis=0)

    return (dgates.dot(self.weights.T) + dx).reshape(shape)
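The gate handling above assumes the forward pass stored the h, t and c pre-activations side by side in one matrix; a minimal sketch of that pack/unpack round trip (array shapes invented) is:
import numpy as np

batch, units = 8, 16
gates = np.random.randn(batch, 3 * units)    # h, t, c stored side by side

# axis=1 splits the feature dimension into three equal (batch, units) blocks.
h, t, c = np.split(gates, 3, axis=1)
assert h.shape == t.shape == c.shape == (batch, units)

# Concatenating the per-gate gradients restores the packed layout used by the weights.
dgates = np.concatenate((h, t, c), axis=1)
assert dgates.shape == gates.shape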
Example 12: clip_boxes_graph
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def clip_boxes_graph(boxes, window):
    """
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    # Split
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clip
    y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
    x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
    y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
    x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
    clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
Example 13: test_append_read_large_ndarray
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def test_append_read_large_ndarray(library, fw_pointers_cfg):
    with FwPointersCtx(fw_pointers_cfg):
        dtype = np.dtype([('abc', 'int64')])
        ndarr = np.arange(50 * 1024 * 1024 / dtype.itemsize).view(dtype=dtype)
        assert len(ndarr.tostring()) > 16 * 1024 * 1024
        library.write('MYARR1', ndarr)
        # Exactly enough appends to trigger 2 re-compacts, so the result should be identical
        # to writing the whole array at once
        ndarr2 = np.arange(240).view(dtype=dtype)
        for n in np.split(ndarr2, 120):
            library.append('MYARR1', n)
        saved_arr = library.read('MYARR1').data
        assert np.all(np.concatenate([ndarr, ndarr2]) == saved_arr)
        library.write('MYARR2', np.concatenate([ndarr, ndarr2]))
        version1 = library._read_metadata('MYARR1')
        version2 = library._read_metadata('MYARR2')
        assert version1['append_count'] == version2['append_count']
        assert version1['append_size'] == version2['append_size']
        assert version1['segment_count'] == version2['segment_count']
        assert version1['up_to'] == version2['up_to']
Example 14: forward
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def forward(self, x, sequence_mask):
    x = self.c_attn(x)
    query, key, value = x.split(self.split_size, dim=2)
    query = self.split_heads(query)
    key = self.split_heads(key, k=True)
    value = self.split_heads(value)
    a = self._attn(query, key, value, sequence_mask)
    a = self.merge_heads(a)
    a = self.c_proj(a)
    a = self.resid_dropout(a)
    return a
Example 15: read_data
# Required module: import numpy [as alias]
# Or: from numpy import split [as alias]
def read_data(file_path):
    with open(file_path, "r") as fin:
        # Read the entire document into one long string
        id_string = ' '.join([line.strip() for line in fin.readlines()])
    id_list = [int(w) for w in id_string.split()]
    return id_list