This article collects typical usage examples of the Python method numpy.exp. If you are asking yourself how exactly numpy.exp works, how to call it, or what real-world uses look like, the curated code samples below should help. You can also browse further usage examples from the numpy module that the method belongs to.
The following presents 15 code examples of numpy.exp, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
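Before turning to the repository examples, a minimal sketch of numpy.exp itself may be useful: it computes the element-wise exponential e**x and accepts scalars, real arrays, and complex arrays. The snippet below is illustrative and not taken from any of the projects that follow.

import numpy as np

x = np.array([0.0, 1.0, -2.0])
print(np.exp(x))           # [1.         2.71828183 0.13533528]
print(np.exp(1j * np.pi))  # complex input: approximately -1+0j (Euler's identity)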
Example 1: mtx_freq2visi
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def mtx_freq2visi(M, p_mic_x, p_mic_y):
"""
build the matrix that maps the Fourier series to the visibility
:param M: the Fourier series expansion is limited from -M to M
:param p_mic_x: a vector that contains microphones' x coordinates
:param p_mic_y: a vector that contains microphones' y coordinates
:return:
"""
num_mic = p_mic_x.size
ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
count_G = 0
for q in range(num_mic):
p_x_outer = p_mic_x[q]
p_y_outer = p_mic_y[q]
for qp in range(num_mic):
if not q == qp:
p_x_qqp = p_x_outer - p_mic_x[qp]
p_y_qqp = p_y_outer - p_mic_y[qp]
norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
np.exp(1j * ms * phi_qqp)
count_G += 1
return G
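A minimal usage sketch for mtx_freq2visi, assuming the definition above together with numpy as np and scipy available as sp (the function also relies on sp.special.jv); the microphone layout here is random and purely illustrative.

import numpy as np
import scipy as sp
import scipy.special  # ensures sp.special.jv is available

p_mic_x = np.random.randn(4)   # 4 microphones at random planar positions
p_mic_y = np.random.randn(4)
G = mtx_freq2visi(M=3, p_mic_x=p_mic_x, p_mic_y=p_mic_y)
print(G.shape)                 # (12, 7): num_mic*(num_mic-1) rows, 2*M+1 columns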
Example 2: compute_final_scores
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def compute_final_scores(self, average_loss, nums):
average_loss["total_macro"] /= nums["total_macro"]
average_loss["total_micro"] /= nums["total_micro"]
if nums["negative_micro"]:
average_loss["negative_macro"] /= nums["negative_macro"]
average_loss["negative_micro"] /= nums["negative_micro"]
else:
average_loss["negative_macro"] = 0
average_loss["negative_micro"] = 0
average_loss["macro_diff"] = (average_loss["negative_macro"] -
average_loss["total_macro"])
average_loss["micro_diff"] = (average_loss["negative_micro"] -
average_loss["total_micro"])
average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])
return average_loss
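The two np.exp calls above turn average cross-entropy losses into perplexities (perplexity = exp(average negative log-likelihood)). A standalone sketch of that final step with made-up per-token losses:

import numpy as np

token_losses = np.array([2.1, 1.7, 2.4, 1.9])  # illustrative per-token negative log-likelihoods
avg_loss = token_losses.mean()                 # 2.025
ppl = np.exp(avg_loss)                         # about 7.58
print(avg_loss, ppl)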
Example 3: gen_visibility
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def gen_visibility(alphak, phi_k, pos_mic_x, pos_mic_y):
"""
generate visibility from the Dirac parameter and microphone array layout
:param alphak: Diracs' amplitudes
:param phi_k: azimuths
:param pos_mic_x: a vector that contains microphones' x coordinates
:param pos_mic_y: a vector that contains microphones' y coordinates
:return:
"""
xk, yk = polar2cart(1, phi_k)
num_mic = pos_mic_x.size
visi = np.zeros((num_mic, num_mic), dtype=complex)
for q in range(num_mic):
p_x_outer = pos_mic_x[q]
p_y_outer = pos_mic_y[q]
for qp in range(num_mic):
p_x_qqp = p_x_outer - pos_mic_x[qp] # a scalar
p_y_qqp = p_y_outer - pos_mic_y[qp] # a scalar
visi[qp, q] = np.dot(np.exp(-1j * (xk * p_x_qqp + yk * p_y_qqp)), alphak)
return visi
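A usage sketch for gen_visibility, assuming the definition above plus a conventional polar2cart helper (x = r*cos(phi), y = r*sin(phi); the project's real helper lives elsewhere and may differ). Amplitudes and microphone positions are illustrative.

import numpy as np

def polar2cart(rho, phi):
    # assumed helper: standard polar-to-Cartesian conversion
    return rho * np.cos(phi), rho * np.sin(phi)

alphak = np.array([1.0, 0.5])      # two Diracs with real amplitudes
phi_k = np.array([0.3, 1.2])       # their azimuths in radians
pos_mic_x = np.random.randn(3)
pos_mic_y = np.random.randn(3)
visi = gen_visibility(alphak, phi_k, pos_mic_x, pos_mic_y)
print(visi.shape)                  # (3, 3) complex visibility matrix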
Example 4: compute_mode
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def compute_mode(self):
"""
Pre-compute mode vectors from candidate locations (in spherical
coordinates).
"""
if self.num_loc is None:
raise ValueError('Lookup table appears to be empty. '
                 'Run build_lookup().')
self.mode_vec = np.zeros((self.max_bin,self.M,self.num_loc),
dtype='complex64')
if (self.nfft % 2 == 1):
raise ValueError('Signal length must be even.')
f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
* 1j * 2 * np.pi
for i in range(self.num_loc):
p_s = self.loc[:, i]
for m in range(self.M):
p_m = self.L[:, m]
if (self.mode == 'near'):
dist = np.linalg.norm(p_m - p_s, axis=1)
if (self.mode == 'far'):
dist = np.dot(p_s, p_m)
# tau = np.round(self.fs*dist/self.c) # discrete - jagged
tau = self.fs * dist / self.c # "continuous" - smoother
self.mode_vec[:, m, i] = np.exp(f * tau)
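The essential np.exp step above builds, per frequency bin, a complex phase term exp(j*2*pi*(k/nfft)*tau) for a propagation delay tau expressed in samples. A standalone sketch of just that step with illustrative numbers, detached from the class attributes:

import numpy as np

fs, nfft, c = 16000, 512, 343.0            # sample rate, FFT size, speed of sound (illustrative)
max_bin = nfft // 2 + 1
f = 1.0 / nfft * np.linspace(0, nfft / 2, max_bin) * 1j * 2 * np.pi
dist = 0.05                                # extra path length to one microphone, in metres
tau = fs * dist / c                        # "continuous" delay in samples, as in the code above
mode_vec = np.exp(f * tau)                 # unit-magnitude phasors, one per bin
print(mode_vec.shape, np.abs(mode_vec[:3]))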
Example 5: mtx_updated_G
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
"""
Update the linear transformation matrix that links the FRI sequence to the
visibilities by using the reconstructed Dirac locations.
:param phi_recon: the reconstructed Dirac locations (azimuths)
:param M: the Fourier series expansion is limited from -M to M
:param mtx_amp2visi_ri: the real-valued linear mapping from Dirac amplitudes to visibilities
:param mtx_fri2visi_ri: the real-valued linear mapping from the FRI sequence to visibilities
:return:
"""
L = 2 * M + 1
ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
phi_recon = np.reshape(phi_recon, (1, -1), order='F')
mtx_amp2freq = np.exp(-1j * ms_half * phi_recon) # size: (M + 1) x K
mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :])) # size: (2M + 1) x K
mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
# projection mtx_freq2visi to the null space of mtx_fri2amp
mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
np.dot(mtx_fri2visi_ri, mtx_null_proj)
return G_updated
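The np.exp call above fills the (M+1) x K matrix whose (m, k) entry is exp(-1j*m*phi_k) for the non-positive orders m = -M, ..., 0. A minimal standalone sketch of just that construction, with illustrative azimuths:

import numpy as np

M = 3
phi_recon = np.array([0.4, 2.1])                   # K = 2 reconstructed azimuths (illustrative)
ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
mtx_amp2freq = np.exp(-1j * ms_half * np.reshape(phi_recon, (1, -1), order='F'))
print(mtx_amp2freq.shape)                          # (M + 1, K) = (4, 2)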
Example 6: convert_image
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def convert_image(self, filename):
pic = img.imread(filename)
# Set FFT size to be double the image size so that the edge of the spectrum stays clear
# preventing some bandfilter artifacts
self.NFFT = 2*pic.shape[1]
# Repeat image lines until each one comes often enough to reach the desired line time
ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.
# Embed image in center bins of the FFT
fftall = np.zeros((ffts.shape[0], self.NFFT))
startbin = int(self.NFFT/4)
fftall[:, startbin:(startbin+pic.shape[1])] = ffts
# Generate random phase vectors for the FFT bins, this is important to prevent high peaks in the output
# The phases won't be visible in the spectrum
phases = 2*np.pi*np.random.rand(*fftall.shape)
rffts = fftall * np.exp(1j*phases)
# Perform the FFT per image line, then concatenate them to form the final signal
timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
linear = timedata.flatten()
linear = linear / np.max(np.abs(linear))
return linear
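The key np.exp usage here is attaching uniformly random phases to a magnitude spectrum (magnitude * exp(1j*phase)) before the inverse FFT; as the comments above note, this keeps the time-domain signal free of large peaks. A standalone sketch of that step with a synthetic spectrum:

import numpy as np

NFFT = 64
magnitude = np.zeros(NFFT)
magnitude[24:40] = 1.0                            # occupy some centre bins (illustrative)
phases = 2 * np.pi * np.random.rand(NFFT)         # uniform random phases
spectrum = magnitude * np.exp(1j * phases)        # complex spectrum with random phase
timedata = np.fft.ifft(np.fft.ifftshift(spectrum)) / np.sqrt(float(NFFT))
print(timedata.shape, np.max(np.abs(timedata)))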
Example 7: predict_on_batch
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def predict_on_batch(self, x):
# run feature collection pipeline for the batch
soi = x.astype(str) # make sure the type is right
for i in range(len(soi)):
if len(soi[i]) < 94:
soi[i] = elongate_intron(soi[i])
parameters_batch = self._construct_features_array(soi)
don_cleavage_time = self.don_model.predict(parameters_batch)
acc_cleavage_time = self.acc_model.predict(parameters_batch)
cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time), 'don_cleavage_time': np.exp(don_cleavage_time)}
return cleavage_time
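np.exp here maps the two models' outputs back to the original scale, presumably because the cleavage-time targets were log-transformed before training (the training code is not shown, so this is an assumption). A generic sketch of that inverse transform, without the model objects:

import numpy as np

log_cleavage_time = np.array([1.2, 0.7, 2.3])  # illustrative predictions on the log scale
cleavage_time = np.exp(log_cleavage_time)      # back to the original time scale
print(cleavage_time)                           # [3.32011692 2.01375271 9.97418245]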
Example 8: predict_on_batch
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def predict_on_batch(self, x):
# run feature collection pipeline for the batch
soi = x["soi"].astype(str) # make sure the type is right
self.bp_indexes = x["bp_index"]
for i in range(len(soi)):
if len(soi[i]) < 94:
soi[i] = elongate_intron(soi[i])
parameters_batch = self._construct_features_array(soi)
don_cleavage_time = self.don_model.predict(parameters_batch)
acc_cleavage_time = self.acc_model.predict(parameters_batch)
cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time), 'don_cleavage_time': np.exp(don_cleavage_time)}
return cleavage_time
Example 9: apply_cmap
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
'''
apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
they are used to scale z.
Note that this function can automatically rescale data into log-space if the colormap is a
neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
optional argument logrescale=True.
'''
zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
zs = np.asarray(zs, dtype='float')
if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
if logrescale:
if vmin is None: vmin = np.log(np.nanmin(zs))
if vmax is None: vmax = np.log(np.nanmax(zs))
mn = np.exp(vmin)
u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
else:
if vmin is None: vmin = np.nanmin(zs)
if vmax is None: vmax = np.nanmax(zs)
u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
u[np.isnan(u)] = -np.inf
return cmap(u)
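The np.exp(vmin) call recovers the linear-scale minimum so that values can be shifted before the logarithm is applied. A simplified standalone sketch of the log-rescaling branch, using plain numpy in place of the neuropythy helpers zdivide and nanlog:

import numpy as np

zs = np.array([1.0, 2.0, 4.0, 8.0])       # illustrative strictly positive data
vmin, vmax = np.log(zs.min()), np.log(zs.max())
mn = np.exp(vmin)                          # equals zs.min() on the linear scale
u = (np.log(zs + mn) - vmin) / (vmax - vmin)
print(u)                                   # monotonically increasing values fed to the colormap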
Example 10: angle_to_cortex
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def angle_to_cortex(self, theta, rho):
'See help(neuropythy.registration.RetinotopyModel.angle_to_cortex).'
#TODO: This should be made to work correctly with visual area boundaries: this could be done
# by, for each area (e.g., V2) looking at its boundaries (with V1 and V3) and flipping the
# adjacent triangles so that there is complete coverage of each hemifield, guaranteed.
if not pimms.is_vector(theta): return self.angle_to_cortex([theta], [rho])[0]
theta = np.asarray(theta)
rho = np.asarray(rho)
zs = np.asarray(rho * np.exp(1j * (90.0 - theta) / 180.0 * np.pi),
                dtype=complex)
coords = np.asarray([zs.real, zs.imag]).T
if coords.shape[0] == 0: return np.zeros((0, len(self.visual_meshes), 2))
# we step through each area in the forward model and return the appropriate values
tx = self.transform
res = np.transpose(
[self.visual_meshes[area].interpolate(coords, 'cortical_coordinates', method='linear')
for area in sorted(self.visual_meshes.keys())],
(1,0,2))
if tx is not None:
res = np.asarray(
[np.dot(tx, np.vstack((area_xy.T, np.ones(len(area_xy)))))[0:2].T
for area_xy in res])
return res
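The np.exp call converts polar visual-field coordinates into complex (and then Cartesian) points via rho * exp(1j*angle); the (90 - theta) shift adapts the angle convention used here (degrees measured from the vertical meridian) to the standard mathematical one. A minimal standalone sketch of just that conversion:

import numpy as np

theta = np.array([0.0, 45.0, 90.0])   # illustrative polar angles in degrees
rho = np.array([1.0, 2.0, 3.0])       # illustrative eccentricities
zs = rho * np.exp(1j * (90.0 - theta) / 180.0 * np.pi)
coords = np.asarray([zs.real, zs.imag]).T
print(coords)                          # approximately [[0, 1], [1.41, 1.41], [3, 0]]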
Example 11: __call__
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def __call__(self, x, y=None):
if y is not None: x = (x,y)
x = np.asarray(x)
if len(x.shape) == 1: return self([x])[0]
x = np.transpose(x) if x.shape[0] == 2 else x
if not x.flags['WRITEABLE']: x = np.array(x)
crd = self.coordinates
sig = self.sigma
wts = self._weight
res = np.zeros(x.shape[0])
for (sh, qd, bi) in zip(self.spatial_hashes, self.bin_query_distances, self.sigma_bins):
neis = sh.query_ball_point(x, qd)
res += [
np.sum(w * np.exp(-0.5 * d2/s**2))
for (ni,pt) in zip(neis,x)
for ii in [bi[ni]]
for (w,s,d2) in [(wts[ii], sig[ii], np.sum((crd[ii] - pt)**2, axis=1))]]
return res
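The inner np.exp expression is a weighted Gaussian kernel sum: for each query point it accumulates sum_i w_i * exp(-0.5 * d_i^2 / sigma_i^2) over nearby stored points. A standalone sketch of that accumulation for a single query point, with illustrative data and without the spatial-hash lookup:

import numpy as np

crd = np.random.randn(10, 2)     # stored 2-D points
wts = np.random.rand(10)         # their weights
sig = np.full(10, 0.5)           # per-point Gaussian widths
pt = np.array([0.1, -0.2])       # a single query point
d2 = np.sum((crd - pt) ** 2, axis=1)
val = np.sum(wts * np.exp(-0.5 * d2 / sig ** 2))
print(val)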
Example 12: kernel_matrix
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def kernel_matrix(svm_model, original_X):
if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
K = (svm_model.zeta + svm_model.gamma * np.dot(original_X, original_X.T)) ** svm_model.Q
elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
pairwise_dists = squareform(pdist(original_X, 'euclidean'))
K = np.exp(-svm_model.gamma * (pairwise_dists ** 2))
'''
K = np.zeros((svm_model.data_num, svm_model.data_num))
for i in range(svm_model.data_num):
for j in range(svm_model.data_num):
if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
K[i, j] = Kernel.polynomial_kernel(svm_model, original_X[i], original_X[j])
elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
K[i, j] = Kernel.gaussian_kernel(svm_model, original_X[i], original_X[j])
'''
return K
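A usage sketch for the Gaussian branch of kernel_matrix, assuming the definition above; a SimpleNamespace stands in for svm_model (its real class is not shown here), and the scipy names referenced inside the function are imported at module level:

import numpy as np
from types import SimpleNamespace
from scipy.spatial.distance import pdist, squareform  # referenced inside kernel_matrix

svm_model = SimpleNamespace(svm_kernel='gaussian_kernel', gamma=0.5,
                            zeta=1.0, Q=2)            # zeta and Q only matter for the polynomial branch
original_X = np.random.randn(5, 3)                    # 5 samples, 3 features
K = kernel_matrix(svm_model, original_X)
print(K.shape)                                        # (5, 5), symmetric, with ones on the diagonal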
Example 13: kernel_matrix_xX
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def kernel_matrix_xX(svm_model, original_x, original_X):
if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q
elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel()
'''
K = np.zeros((svm_model.data_num, svm_model.data_num))
for i in range(svm_model.data_num):
for j in range(svm_model.data_num):
if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j])
elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j])
'''
return K
Example 14: pred_test
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def pred_test(testing_data, exe, param_list=None, save_path=""):
ret = numpy.zeros((testing_data.shape[0], 2))
if param_list is None:
for i in range(testing_data.shape[0]):
exe.arg_dict['data'][:] = testing_data[i, 0]
exe.forward(is_train=False)
ret[i, 0] = exe.outputs[0].asnumpy()
ret[i, 1] = numpy.exp(exe.outputs[1].asnumpy())
numpy.savetxt(save_path, ret)
else:
for i in range(testing_data.shape[0]):
pred = numpy.zeros((len(param_list),))
for j in range(len(param_list)):
exe.copy_params_from(param_list[j])
exe.arg_dict['data'][:] = testing_data[i, 0]
exe.forward(is_train=False)
pred[j] = exe.outputs[0].asnumpy()
ret[i, 0] = pred.mean()
ret[i, 1] = pred.std()**2
numpy.savetxt(save_path, ret)
mse = numpy.square(ret[:, 0] - testing_data[:, 0] **3).mean()
return mse, ret
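numpy.exp is applied to the network's second output, presumably to turn a predicted log-variance into a strictly positive variance (the training code is not shown, so this interpretation is an assumption). A generic sketch of that step without the MXNet executor:

import numpy

raw_log_variance = numpy.array([-1.5, 0.0, 2.0])  # illustrative unconstrained network outputs
variance = numpy.exp(raw_log_variance)            # strictly positive after exponentiation
print(variance)                                   # [0.22313016 1.         7.3890561 ]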
Example 15: synthetic_grad
# Required module: import numpy [as alias]
# Or: from numpy import exp [as alias]
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta2 / v2
grad[:] = grad_npy
return grad
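The exponentials above implement the gradient of the negative log-posterior of a two-component Gaussian mixture (a data term scaled by rescale_grad plus Gaussian priors on theta1 and theta2). A pure-numpy sketch of the corresponding energy with a finite-difference check, treating theta as a plain array instead of an MXNet NDArray:

import numpy

def neg_log_posterior(X, theta, sigma1, sigma2, sigmax):
    # negative log-posterior whose gradient matches synthetic_grad with rescale_grad=1.0
    v1, v2, vx = sigma1 ** 2, sigma2 ** 2, sigmax ** 2
    lik = numpy.exp(-(X - theta[0]) ** 2 / (2 * vx)) \
        + numpy.exp(-(X - theta[0] - theta[1]) ** 2 / (2 * vx))
    return -numpy.log(lik).sum() + theta[0] ** 2 / (2 * v1) + theta[1] ** 2 / (2 * v2)

X = numpy.random.randn(20)
theta = numpy.array([0.3, -0.5])
eps = 1e-6
num_grad = numpy.zeros(2)
for i in range(2):
    e = numpy.zeros(2)
    e[i] = eps
    num_grad[i] = (neg_log_posterior(X, theta + e, 1.0, 2.0, 1.5)
                   - neg_log_posterior(X, theta - e, 1.0, 2.0, 1.5)) / (2 * eps)
print(num_grad)   # numerically approximates the analytic gradient above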