This article collects typical usage examples of the Python function numpy.concatenate. If you have been wondering what numpy.concatenate does, how to call it, or what real code that uses it looks like, the selected examples below should help.
Fifteen code examples of the concatenate function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
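Before the collected examples, here is a minimal standalone sketch of the most common call patterns (the arrays are invented for illustration): numpy.concatenate joins a sequence of arrays along an existing axis, which defaults to axis 0, and the arrays must agree on every other axis.

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

rows = np.concatenate((a, b))            # join along axis 0 (default); shape (3, 2)
cols = np.concatenate((a, b.T), axis=1)  # join along axis 1; shape (2, 3)
flat = np.concatenate((np.arange(3), np.arange(3)))  # 1-D arrays are appended: [0 1 2 0 1 2]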
Example 1: parse_eph
def parse_eph(filenm):
    global period, time
    suffix = filenm.split(".")[-1]
    if suffix == "bestprof":
        x = bestprof.bestprof(filenm)
        fs = pu.p_to_f(x.p0_bary, x.p1_bary, x.p2_bary)
        epoch = x.epochi_bary + x.epochf_bary
        T = x.T
    elif suffix == "par":
        x = parfile.psr_par(filenm)
        # Try to see how many freq derivs we have
        fs = [x.F0]
        for ii in range(1, 20):  # hopefully 20 is an upper limit!
            attrib = "F%d" % ii
            if hasattr(x, attrib):
                fs.append(getattr(x, attrib))
            else:
                break
        epoch = x.PEPOCH
        T = (x.FINISH - x.START) * 86400.0
    else:
        print("I don't recognize the file type for", filenm)
        sys.exit()
    newts = epoch + num.arange(int(T / 10.0 + 0.5), dtype=float) / 8640.0
    time = num.concatenate((time, newts))
    newps = 1.0 / pu.calc_freq(newts, epoch, *fs)
    period = num.concatenate((period, newps))
    print("%13.7f (%0.1f sec): " % (epoch, T), fs)
Example 2: Haffine_from_points
def Haffine_from_points(fp, tp):
    '''Compute the homography H of an affine transformation such that tp is
    fp transformed by that affine map.'''
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')

    # Normalize (condition) the points
    # --source points (fp)--
    m = numpy.mean(fp[:2], axis=1)
    maxstd = numpy.max(numpy.std(fp[:2], axis=1)) + 1e-9
    C1 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C1[0, 2] = -m[0] / maxstd
    C1[1, 2] = -m[1] / maxstd
    fp_cond = numpy.dot(C1, fp)

    # --target points (tp)--
    m = numpy.mean(tp[:2], axis=1)
    maxstd = numpy.max(numpy.std(tp[:2], axis=1)) + 1e-9
    C2 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C2[0, 2] = -m[0] / maxstd
    C2[1, 2] = -m[1] / maxstd
    tp_cond = numpy.dot(C2, tp)

    # The conditioned points have zero mean, so the translation part is zero
    A = numpy.concatenate((fp_cond[:2], tp_cond[:2]), axis=0)
    U, S, V = numpy.linalg.svd(A.T)

    # Create matrices B and C
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]
    tmp2 = numpy.concatenate((numpy.dot(C, numpy.linalg.pinv(B)), numpy.zeros((2, 1))), axis=1)
    H = numpy.vstack((tmp2, [0, 0, 1]))
    H = numpy.dot(numpy.linalg.inv(C2), numpy.dot(H, C1))  # decondition
    return H / H[2, 2]  # normalize and return
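For context, a hypothetical call to Haffine_from_points with four invented point correspondences in homogeneous coordinates (3 x N arrays); tp is built from fp by a scaling plus a translation, so the recovered H should map fp onto tp.

import numpy

fp = numpy.array([[0., 1., 1., 0.],    # x coordinates
                  [0., 0., 1., 1.],    # y coordinates
                  [1., 1., 1., 1.]])   # homogeneous row
tp = numpy.vstack((2.0 * fp[:2] + numpy.array([[2.0], [3.0]]),  # scale by 2, shift by (2, 3)
                   numpy.ones((1, 4))))

H = Haffine_from_points(fp, tp)
print(numpy.dot(H, fp))   # expected to be close to tp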
Example 3: test_testUfuncs1
def test_testUfuncs1(self):
    "Test various functions such as sin, cos."
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    self.assertTrue(eq(numpy.cos(x), cos(xm)))
    self.assertTrue(eq(numpy.cosh(x), cosh(xm)))
    self.assertTrue(eq(numpy.sin(x), sin(xm)))
    self.assertTrue(eq(numpy.sinh(x), sinh(xm)))
    self.assertTrue(eq(numpy.tan(x), tan(xm)))
    self.assertTrue(eq(numpy.tanh(x), tanh(xm)))
    olderr = numpy.seterr(divide='ignore', invalid='ignore')
    try:
        self.assertTrue(eq(numpy.sqrt(abs(x)), sqrt(xm)))
        self.assertTrue(eq(numpy.log(abs(x)), log(xm)))
        self.assertTrue(eq(numpy.log10(abs(x)), log10(xm)))
    finally:
        numpy.seterr(**olderr)
    self.assertTrue(eq(numpy.exp(x), exp(xm)))
    self.assertTrue(eq(numpy.arcsin(z), arcsin(zm)))
    self.assertTrue(eq(numpy.arccos(z), arccos(zm)))
    self.assertTrue(eq(numpy.arctan(z), arctan(zm)))
    self.assertTrue(eq(numpy.arctan2(x, y), arctan2(xm, ym)))
    self.assertTrue(eq(numpy.absolute(x), absolute(xm)))
    self.assertTrue(eq(numpy.equal(x, y), equal(xm, ym)))
    self.assertTrue(eq(numpy.not_equal(x, y), not_equal(xm, ym)))
    self.assertTrue(eq(numpy.less(x, y), less(xm, ym)))
    self.assertTrue(eq(numpy.greater(x, y), greater(xm, ym)))
    self.assertTrue(eq(numpy.less_equal(x, y), less_equal(xm, ym)))
    self.assertTrue(eq(numpy.greater_equal(x, y), greater_equal(xm, ym)))
    self.assertTrue(eq(numpy.conjugate(x), conjugate(xm)))
    self.assertTrue(eq(numpy.concatenate((x, y)), concatenate((xm, ym))))
    self.assertTrue(eq(numpy.concatenate((x, y)), concatenate((x, y))))
    self.assertTrue(eq(numpy.concatenate((x, y)), concatenate((xm, y))))
    self.assertTrue(eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x))))
Example 4: _basic_insertion
def _basic_insertion(self, celltype):
    # generate a list of which cell each point in self._data belongs in
    cell_indices = self._get_indices_for_points(self._data)

    # We now look for ranges of points belonging to the same cell.
    # 1. shift lengthwise and difference; runs of cells with the same
    #    (i,j) indices will be zero, and nonzero value for i or j will
    #    indicate a transition to a new cell.  (Just like find_runs().)
    differences = cell_indices[1:] - cell_indices[:-1]

    # Since nonzero() only works for 1D arrays, we merge the X and Y columns
    # together to detect any point where either X or Y are nonzero.  We have
    # to add 1 because we shifted cell_indices before differencing (above).
    diff_indices = nonzero(differences[:, 0] + differences[:, 1])[0] + 1

    start_indices = concatenate([[0], diff_indices])
    end_indices = concatenate([diff_indices, [len(self._data)]])

    for start, end in zip(start_indices, end_indices):
        gridx, gridy = cell_indices[start]  # can use 'end' here just as well
        if celltype == RangedCell:
            self._cellgrid[gridx, gridy].add_ranges([(start, end)])
        else:
            self._cellgrid[gridx, gridy].add_indices(range(start, end))
    return
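The run-detection trick in Example 4 (difference the indices, find the nonzero positions, then pad with concatenate to get run boundaries) also works on its own; a minimal sketch with a made-up 1-D array of cell labels:

import numpy as np

cells = np.array([0, 0, 0, 1, 1, 2, 2, 2, 2])
diff_indices = np.nonzero(cells[1:] - cells[:-1])[0] + 1   # [3 5]
starts = np.concatenate([[0], diff_indices])               # [0 3 5]
ends = np.concatenate([diff_indices, [len(cells)]])        # [3 5 9]
for s, e in zip(starts, ends):
    print(cells[s], (s, e))   # label of each run and its [start, end) range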
Example 5: get_diffs_bw
def get_diffs_bw(detect_buffer):
    temp_buff = detect_buffer
    temp_buff_size = temp_buff.shape[0]
    diff_buff = None
    count = 0
    # Store contents of detect_buffer into a temp array.
    # absdiff every third capture and throw it into a new temp array.
    # Repeat until the temp array has a size of 2, then return the bitwise and.
    while True:
        if temp_buff_size == 2:
            cummulativeFrames = cv2.bitwise_and(temp_buff[0], temp_buff[1])
            break
        else:
            count = 0
            while count < (temp_buff_size // 3):
                diff1 = np.array([cv2.absdiff(temp_buff[(count*3)+2], temp_buff[(count*3)+1])])
                diff2 = np.array([cv2.absdiff(temp_buff[(count*3)+1], temp_buff[(count*3)+0])])
                if diff_buff is None:
                    diff_buff = np.concatenate((diff1, diff2), axis=0)
                else:
                    diff_buff = np.concatenate((diff_buff, diff1, diff2), axis=0)
                count += 1
            temp_buff = diff_buff
            diff_buff = None
            temp_buff_size = temp_buff.shape[0]
    return cummulativeFrames
Example 6: orthonormal_VanillaLSTMBuilder
def orthonormal_VanillaLSTMBuilder(lstm_layers, input_dims, lstm_hiddens, dropout_x=0., dropout_h=0., debug=False):
    """Build a standard LSTM cell, with variational dropout,
    with weights initialized to be orthonormal (https://arxiv.org/abs/1312.6120)

    Parameters
    ----------
    lstm_layers : int
        Currently only support one layer
    input_dims : int
        word vector dimensions
    lstm_hiddens : int
        hidden size
    dropout_x : float
        dropout on inputs, not used in this implementation, see `biLSTM` below
    dropout_h : float
        dropout on hidden states
    debug : bool
        set to True to skip orthonormal initialization

    Returns
    -------
    lstm_cell : VariationalDropoutCell
        A LSTM cell
    """
    assert lstm_layers == 1, 'only accept one layer lstm'
    W = orthonormal_initializer(lstm_hiddens, lstm_hiddens + input_dims, debug)
    W_h, W_x = W[:, :lstm_hiddens], W[:, lstm_hiddens:]
    b = nd.zeros((4 * lstm_hiddens,))
    b[lstm_hiddens:2 * lstm_hiddens] = -1.0
    lstm_cell = rnn.LSTMCell(input_size=input_dims, hidden_size=lstm_hiddens,
                             i2h_weight_initializer=mx.init.Constant(np.concatenate([W_x] * 4, 0)),
                             h2h_weight_initializer=mx.init.Constant(np.concatenate([W_h] * 4, 0)),
                             h2h_bias_initializer=mx.init.Constant(b))
    wrapper = VariationalDropoutCell(lstm_cell, drop_states=dropout_h)
    return wrapper
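For context, a hypothetical call to the builder above; the sizes are invented, and it assumes MXNet together with the gluonnlp-style VariationalDropoutCell and orthonormal_initializer referenced in the code are importable.

cell = orthonormal_VanillaLSTMBuilder(lstm_layers=1, input_dims=100,
                                      lstm_hiddens=200, dropout_h=0.25)
# np.concatenate([W_x] * 4, 0) stacks the same orthonormal block once per LSTM gate,
# so the resulting i2h weight has shape (800, 100) and the h2h weight (800, 200).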
Example 7: get_points
def get_points(self, peak_shape=triangle, num_discrete=10):
    """
    Returns two lists of coordinates x y representing the whole spectrum,
    both the continuous and discrete components.
    The mesh is chosen by extending x to include details of the discrete peaks.

    Args:
        peak_shape: The window function used to calculate the peaks.
            See :obj:`triangle` for an example.
        num_discrete: Number of points that are added to mesh in each peak.

    Returns:
        (tuple): tuple containing:

            x2 (List[float]): The list of x coordinates (energy) in the whole spectrum.
            y2 (List[float]): The list of y coordinates (density) in the whole spectrum.
    """
    if peak_shape is None or self.discrete == []:
        return self.x[:], self.y[:]
    # A mesh for each discrete component:
    discrete_mesh = np.concatenate(list(map(lambda x: np.linspace(
        x[0] - x[2], x[0] + x[2], num=num_discrete, endpoint=True), self.discrete)))
    x2 = sorted(np.concatenate((discrete_mesh, self.x)))
    f = self.get_continuous_function()
    peak = np.vectorize(peak_shape)

    def g(x):
        t = 0
        for l in self.discrete:
            t += peak(x, loc=l[0], size=l[2]) * l[1]
        return t

    y2 = [f(x) + g(x) for x in x2]
    return x2, y2
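The meshing idea in Example 7 (refine the grid near each discrete peak, then merge it with the coarse grid) can be sketched on its own with invented numbers:

import numpy as np

coarse = np.linspace(0.0, 10.0, 11)
peaks = [(3.0, 0.2), (7.5, 0.1)]   # (center, half-width) of each peak, made up
fine = np.concatenate([np.linspace(c - w, c + w, num=5) for c, w in peaks])
mesh = np.array(sorted(np.concatenate((fine, coarse))))   # merged, ordered mesh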
Example 8: test_clipping
def test_clipping():
    exterior = mpath.Path.unit_rectangle().deepcopy()
    exterior.vertices *= 4
    exterior.vertices -= 2
    interior = mpath.Path.unit_circle().deepcopy()
    interior.vertices = interior.vertices[::-1]
    clip_path = mpath.Path(vertices=np.concatenate([exterior.vertices,
                                                    interior.vertices]),
                           codes=np.concatenate([exterior.codes,
                                                 interior.codes]))

    star = mpath.Path.unit_regular_star(6).deepcopy()
    star.vertices *= 2.6

    ax1 = plt.subplot(121)
    col = mcollections.PathCollection([star], lw=5, edgecolor='blue',
                                      facecolor='red', alpha=0.7, hatch='*')
    col.set_clip_path(clip_path, ax1.transData)
    ax1.add_collection(col)

    ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
    patch = mpatches.PathPatch(star, lw=5, edgecolor='blue', facecolor='red',
                               alpha=0.7, hatch='*')
    patch.set_clip_path(clip_path, ax2.transData)
    ax2.add_patch(patch)

    ax1.set_xlim([-3, 3])
    ax1.set_ylim([-3, 3])
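The same vertices/codes concatenation builds any compound path. A minimal sketch, assuming matplotlib is available (the sizes are invented): an outer square with a reversed inner square cut out of it.

import numpy as np
import matplotlib.path as mpath

outer = mpath.Path.unit_rectangle()
inner = mpath.Path(outer.vertices[::-1] * 0.5 + 0.25, outer.codes)  # reversed winding
ring = mpath.Path(np.concatenate([outer.vertices, inner.vertices]),
                  np.concatenate([outer.codes, inner.codes]))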
Example 9: load
def load(root_path, debug=True):
    '''
    load cifar-10 dataset
    '''
    xs = []
    ys = []
    for b in range(1, 6):
        file = os.path.join(root_path, 'data_batch_%d' % (b, ))
        X, y = load_batch(file)
        xs.append(X)
        ys.append(y)
    X = np.concatenate(xs)
    y = np.concatenate(ys)

    file = os.path.join(root_path, 'test_batch')
    X_test, y_test = load_batch(file)

    if debug:
        # As a sanity check, we print out the size of the training and test data.
        print('Cifar-10 dataset has been loaded')
        print('X shape', X.shape)
        print('y shape', y.shape)
        print('X_test shape', X_test.shape)
        print('y_test shape', y_test.shape)
    return X, y, X_test, y_test
Example 10: run_epoch
def run_epoch(self, split, train=False, batch_size=128, return_pred=False):
    total = total_loss = 0
    func = self.model.train_on_batch if train else self.model.test_on_batch
    ids, preds, targs = [], [], []
    prog = Progbar(split.num_examples)
    for idx, X, Y, types in split.batches(batch_size):
        X.update({k: np.concatenate([v, types], axis=1) for k, v in Y.items()})
        batch_end = time()
        loss = func(X)
        prob = self.model.predict(X, verbose=0)['p_relation']
        prob *= self.typechecker.get_valid_cpu(types[:, 0], types[:, 1])
        pred = prob.argmax(axis=1)
        targ = Y['p_relation'].argmax(axis=1)
        ids.append(idx)
        targs.append(targ)
        preds.append(pred)
        total_loss += loss
        total += 1
        prog.add(idx.size, values=[('loss', loss), ('acc', np.mean(pred == targ))])
    preds = np.concatenate(preds).astype('int32')
    targs = np.concatenate(targs).astype('int32')
    ids = np.concatenate(ids).astype('int32')
    ret = {
        'f1': f1_score(targs, preds, average='micro', labels=self.labels),
        'precision': precision_score(targs, preds, average='micro', labels=self.labels),
        'recall': recall_score(targs, preds, average='micro', labels=self.labels),
        'accuracy': accuracy_score(targs, preds),
        'loss': total_loss / float(total),
    }
    if return_pred:
        ret.update({'ids': ids.tolist(), 'preds': preds.tolist(), 'targs': targs.tolist()})
    return ret
Example 11: transfer_f
def transfer_f(dw, aas, aai, eps, deltaw, f):
    """
    Args:
        dw: size of the grid spacing
        aas: relative slowness of the signal mode
        aai: relative slowness of the idler mode
        eps: strength of the nonlinearity
        deltaw: specifies the size of the frequency grid going from
            -deltaw to deltaw for each frequency
        f: shape of the pump function
    """
    ddws = np.arange(-deltaw - dw/2, deltaw + dw/2, dw)
    deltaks = aas * ddws
    ddwi = np.arange(-deltaw - dw/2, deltaw + dw/2, dw)
    deltaki = aai * ddwi
    ds = np.diag(deltaks)
    di = np.diag(deltaki)

    def ff(x, y):
        return f(x + y)

    v = eps * dw * ff(ddwi[:, None], ddws[None, :])
    G = 1j * np.concatenate((np.concatenate((ds, v), axis=1),
                             np.concatenate((-v, -di), axis=1)), axis=0)
    z = 1
    dsi = np.concatenate((deltaks, -deltaki), axis=0)
    U0 = linalg.expm(-1j * np.diag(dsi) * z / 2)
    GG = np.dot(np.dot(U0, linalg.expm(G)), U0)
    n = len(ddws)
    return (GG[0:n, 0:n], GG[n:2*n, 0:n], GG[0:n, n:2*n], GG[n:2*n, n:2*n])
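The nested concatenate call in Example 11 assembles a 2x2 block matrix; with small invented blocks, np.block produces the same layout more readably.

import numpy as np

ds = np.diag([1.0, 2.0])
di = np.diag([3.0, 4.0])
v = np.full((2, 2), 0.1)
G1 = np.concatenate((np.concatenate((ds, v), axis=1),
                     np.concatenate((-v, -di), axis=1)), axis=0)
G2 = np.block([[ds, v], [-v, -di]])
assert np.array_equal(G1, G2)   # identical 4x4 block matrices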
Example 12: phi_glob
def phi_glob(i, elements, nodes, resolution_per_element=41):
    """
    Compute (x, y) coordinates of the curve y = phi_i(x),
    where i is a global node number (used for plotting, e.g.).
    Method: Run through each element and compute the pieces
    of phi_i(x) on this element in the reference coordinate
    system. Adding up the patches yields the complete phi_i(x).
    """
    x_patches = []
    phi_patches = []
    for e in range(len(elements)):
        Omega_e = (nodes[elements[e][0]], nodes[elements[e][-1]])
        local_nodes = elements[e]
        d = len(local_nodes) - 1
        X = np.linspace(-1, 1, resolution_per_element)
        if i in local_nodes:
            r = local_nodes.index(i)
            phi = phi_r(r, X, d)
            phi_patches.append(phi)
            x = affine_mapping(X, Omega_e)
            x_patches.append(x)
        else:
            # i is not a node in the element, phi_i(x) = 0
            x_patches.append(Omega_e)
            phi_patches.append([0, 0])
    x = np.concatenate(x_patches)
    phi = np.concatenate(phi_patches)
    return x, phi
Example 13: paradata
def paradata(wdir='.', type='phi', time=0):
    "Read array with rank = 0 and read Np and Nq from header file"
    name = fname(wdir, type, time)
    name0 = name[0] + '.' + "%.2d" % 0
    pget(wdir, type, time)
    fhandler = open(name0)
    header = fhandler.readline().split()
    Np = int(header[5])
    Nq = int(header[6])
    size = Np*Nq
    for rank in range(size+1):
        if (rank < size):
            name_rank = name[0] + '.' + "%.2d" % rank
            print(name_rank)
            data = fread(name_rank)
        if (rank % Nq == 0):
            if (rank == Nq):
                datarr = datacol
                datacol = data
            elif (rank > 0):
                datarr = np.concatenate((datarr, datacol), axis=0)
                datacol = data
            else:
                datacol = data
        else:
            datacol = np.concatenate((datacol, data), axis=1)
    return datarr
Example 14: paradata_init
def paradata_init(ldir='.', type='phi', dim=2):
    name = fname_init(ldir, type)
    name0 = name + '.' + "%.2d" % 0
    fhandler = open(name0)
    header = fhandler.readline().split()
    Np = int(header[0]) // int(header[2])
    Nq = int(header[1]) // int(header[3])
    size = Np*Nq
    for rank in range(size+1):
        if (rank < size):
            name_rank = name + '.' + "%.2d" % rank
            data = fread(name_rank)
        if (rank % Nq == 0):
            if (rank == Nq):
                datarr = datacol
                datacol = data
            elif (rank > 0):
                datarr = np.concatenate((datarr, datacol), axis=0)
                datacol = data
            else:
                datacol = data
        else:
            datacol = np.concatenate((datacol, data), axis=1)
    if (dim == 2):
        datarr = datarr.reshape(int(header[0]), int(header[1]))
    return datarr
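The assembly pattern in Examples 13 and 14 (join the tiles of each group of Nq ranks along axis=1, then stack the groups along axis=0) can be sketched with a made-up 2x2 grid of tiles:

import numpy as np

# 2x2 process grid; each rank owns a 2x2 tile whose values encode its position.
tiles = [[np.full((2, 2), 10 * p + q) for q in range(2)] for p in range(2)]
cols = [np.concatenate(group, axis=1) for group in tiles]   # join each group of tiles
grid = np.concatenate(cols, axis=0)                         # stack the groups
print(grid.shape)   # (4, 4)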
Example 15: display
def display(self, xaxis, alpha, new=True):
    """
    E.display(xaxis, alpha = .8)

    :Arguments: xaxis, alpha
        Plots the CI region on the current figure, with respect to
        xaxis, at opacity alpha.

    :Note: The fill color of the envelope will be self.mass
        on the grayscale.
    """
    if new:
        figure()
    if self.ndim == 1:
        if self.mass > 0.:
            x = concatenate((xaxis, xaxis[::-1]))
            y = concatenate((self.lo, self.hi[::-1]))
            fill(x, y, facecolor='%f' % self.mass, alpha=alpha,
                 label='centered CI ' + str(self.mass))
        else:
            pyplot(xaxis, self.value, 'k-', alpha=alpha, label='median')
    else:
        if self.mass > 0.:
            subplot(1, 2, 1)
            contourf(xaxis[0], xaxis[1], self.lo, cmap=cm.bone)
            colorbar()
            subplot(1, 2, 2)
            contourf(xaxis[0], xaxis[1], self.hi, cmap=cm.bone)
            colorbar()
        else:
            contourf(xaxis[0], xaxis[1], self.value, cmap=cm.bone)
            colorbar()
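The 1-D branch of Example 15 uses a standard trick for filling the area between two curves: walk forward along the lower curve and back along the upper one. A minimal standalone sketch with invented data, assuming matplotlib:

import numpy as np
import matplotlib.pyplot as plt

xaxis = np.linspace(0.0, 1.0, 50)
lo, hi = np.sin(xaxis) - 0.2, np.sin(xaxis) + 0.2
x = np.concatenate((xaxis, xaxis[::-1]))   # forward along x, then back again
y = np.concatenate((lo, hi[::-1]))         # lower curve, then reversed upper curve
plt.fill(x, y, facecolor='0.8', alpha=0.8, label='CI')
plt.show()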