This article collects and summarizes typical usage examples of the scipy.argmin function in Python. If you have been wondering how exactly Python's argmin is used, how to call it, or where to find examples of it in real code, the curated examples below may help.
The following shows 15 code examples of the argmin function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
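A quick refresher before the examples. In older SciPy releases, scipy.argmin was simply a re-export of numpy.argmin (the top-level alias has since been deprecated in modern SciPy), so the minimal sketch below uses numpy.argmin; the behaviour is identical:

import numpy as np  # scipy.argmin was a re-export of numpy.argmin

a = np.array([[4.0, 1.0, 3.0],
              [2.0, 5.0, 0.5]])

print(np.argmin(a))          # 5: index of the minimum in the flattened array
print(np.argmin(a, axis=0))  # [1 0 1]: row index of the minimum in each column
print(np.argmin(a, axis=1))  # [1 2]: column index of the minimum in each row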
Example 1: calc_probability_matrix
def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
""" Calculates the probability matrix that one spike train from stimulus X
    will be classified as a spike train from stimulus Y.
:param list trains_a: Spike trains of stimulus A.
:param list trains_b: Spike trains of stimulus B.
:param str metric: Metric to base the classification on. Has to be a key in
:const:`metrics.metrics`.
:param tau: Time scale parameter for the metric.
:type tau: Quantity scalar.
:param float z: Exponent parameter for the classifier.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "divide by zero")
dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
dist_mat[sp.diag_indices_from(dist_mat)] = 0.0
assert len(trains_a) == len(trains_b)
l = len(trains_a)
classification_of_a = sp.argmin(sp.vstack((
sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
classification_of_b = sp.argmin(sp.vstack((
sp.sum(dist_mat[:l, l:], axis=0) / l,
sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
confusion = sp.empty((2, 2))
confusion[0, 0] = sp.sum(classification_of_a == 0)
confusion[1, 0] = sp.sum(classification_of_a == 1)
confusion[0, 1] = sp.sum(classification_of_b == 0)
confusion[1, 1] = sp.sum(classification_of_b == 1)
return confusion / 2.0 / l
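The indexing above is dense, so here is a minimal, self-contained sketch of the same leave-one-out average-distance rule for the z = 1 case, using numpy and a fabricated symmetric distance matrix (all names are illustrative, not from the original module):

import numpy as np

l = 3  # hypothetical: 3 spike trains per stimulus
rng = np.random.default_rng(0)
d = rng.random((2 * l, 2 * l))
dist_mat = (d + d.T) / 2.0                      # fabricated symmetric distances
dist_mat[np.diag_indices_from(dist_mat)] = 0.0  # zero self-distance

# Column j (a train of A): mean distance to the other trains of A (l - 1 of
# them) vs. mean distance to the trains of B (l of them); row 0 wins -> class 0.
classification_of_a = np.argmin(np.vstack((
    dist_mat[:l, :l].sum(axis=0) / (l - 1),
    dist_mat[l:, :l].sum(axis=0) / l)), axis=0)
print(classification_of_a)  # one 0/1 label per train of stimulus A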
Example 2: cosine_coefficient
def cosine_coefficient(self, target_FV):
    # Cosine coefficient of each node's weight vector with target_FV:
    #   (a * b).sum(axis=2) / (((a ** 2).sum(axis=2) * (b ** 2).sum()) ** 0.5)
    # Note: the original step-by-step version applied the square root to
    # (target_FV ** 2).sum() only; here the root covers the whole product,
    # matching the reference formula above.
    numerator = (self.nodes * target_FV).sum(axis=2)
    denominator = ((self.nodes ** 2).sum(axis=2) * (target_FV ** 2).sum()) ** 0.5
    # Flat index of the node with the smallest cosine coefficient.
    return scipy.argmin(numerator / denominator)
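For reference, a standalone version of the same computation, assuming self.nodes is a 3-D grid of weight vectors as in a self-organizing map (the shapes and data below are fabricated for illustration):

import numpy as np

rng = np.random.default_rng(1)
nodes = rng.random((4, 5, 8))  # hypothetical SOM grid: 4x5 nodes, 8-dim weights
target = rng.random(8)

# (a . b) / sqrt(|a|^2 * |b|^2) per node, computed over the feature axis
num = (nodes * target).sum(axis=2)
den = np.sqrt((nodes ** 2).sum(axis=2) * (target ** 2).sum())
cos = num / den

print(np.argmin(cos))                               # flat index of the minimum
print(np.unravel_index(np.argmin(cos), cos.shape))  # its (row, col) on the grid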
Example 3: kmeans
def kmeans(data,K, clusterType = "MeansPointRepresentative", distancePointToPoint = L2NormDistance, distancePointToSet = AveragePointSetDistance): # could also input tolerance
clusters = []
if clusterType == 'MeansPointRepresentative':
for k in range(K):
clusters.append(MeansCluster(distancePointToPoint,data)) #initializes the clusters as MeansPointRepresentative
elif clusterType == 'SetRepresentative':
for k in range(K):
clusters.append(SetCluster(distancePointToPoint,distancePointToSet,data)) #initializes the cluster as set representative
else:
print "Unknown type of cluster"
return None
hasConverged = False
iterations = 0
while not hasConverged: #continues to run until clusters converge
conv = []
for d in data:
distanceFromCluster = scipy.array([c.distanceToPointOrSet(d) for c in clusters])
            indexCluster = scipy.argmin(distanceFromCluster)  # index of the nearest cluster
clusters[indexCluster].assign(d)
for c in clusters:
c.update()
conv.append(c.reachedTolerance()) #tests to see if epsilon is below tolerance
iterations = iterations + 1
hasConverged = all(conv)
print "The number of iterations is: ", iterations
clusterID =[]
for d in data: #creates cluster ID's
distanceFromCluster = scipy.array([c.distanceToPointOrSet(d) for c in clusters])
indexCluster = scipy.argmin(distanceFromCluster)
clusterID.append(indexCluster)
return [clusterID, clusters]
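Stripped of the cluster classes, the assignment step is a distance computation followed by argmin. A minimal sketch with plain arrays (hypothetical points and centroids, squared Euclidean distance):

import numpy as np

points = np.array([[0.0, 0.0], [0.2, 0.1], [5.0, 5.1], [4.9, 5.0]])
centroids = np.array([[0.0, 0.0], [5.0, 5.0]])

# Squared distance of every point to every centroid; argmin along the
# centroid axis gives each point's cluster index.
dists = ((points[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
labels = np.argmin(dists, axis=1)
print(labels)  # [0 0 1 1]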
Example 4: __convolveSphinx
def __convolveSphinx(self,star):
'''
Convolve the Sphinx output with the SPIRE resolution. The convolution
is done in wave number (cm^-1).
@param star: The Star() object for which Sphinx profiles are loaded
@type star: Star()
'''
#- Get sphinx model output and merge, for all star models in star_grid
if not self.resolution:
        print('* Resolution is undefined. Cannot convolve Sphinx.')
return
    print('* Reading Sphinx model and merging.')
sphinx_wav,sphinx_flux = star['LAST_GASTRONOOM_MODEL'] \
and self.mergeSphinx(star) \
or [[],[]]
if not sphinx_wav:
        print('* No Sphinx data found.')
return
sphinx_wav = 1./array(sphinx_wav)*10**(4)
sphinx_flux = array(sphinx_flux)
sphinx_wav = sphinx_wav[::-1]
sphinx_flux = sphinx_flux[::-1]
#-- eliminate some of the zeroes in the grid to reduce calculation time
# (can reduce the array by a factor up to 100!!)
s = self.sigma
lcs = array(sorted([1./line.wavelength
for line in star['GAS_LINES']]))
new_wav, new_flux = [sphinx_wav[0]],[sphinx_flux[0]]
for w,f in zip(sphinx_wav[1:],sphinx_flux[1:]):
if f != 0 or (w < 5*s+lcs[argmin(abs(lcs-w))] \
and w > lcs[argmin(abs(lcs-w))]-5*s):
new_wav.append(w)
new_flux.append(f)
new_wav, new_flux = array(new_wav), array(new_flux)
#-- convolve the model fluxes with a gaussian and constant sigma(spire)
    print('* Convolving Sphinx model for SPIRE.')
convolution = Data.convolveArray(new_wav,new_flux,s)
for data_wav,fn in zip(self.data_wave_list,self.data_filenames):
rebinned = []
#-- Convert wavelengths to wave number for integration, and reverse
data_cm = data_wav[::-1]
data_cm = 1./data_cm*10**4
rebinned = [trapz(y=convolution[abs(new_wav-wavi)<=self.resolution/self.oversampling],\
x=new_wav[abs(new_wav-wavi)<=self.resolution/self.oversampling])\
/(self.resolution/self.oversampling)
for wavi in data_cm]
#-- Reverse the rebinned fluxes so they match up with the
# wavelength grid.
rebinned = array(rebinned)[::-1]
self.sphinx_convolution[star['LAST_SPIRE_MODEL']][fn] = rebinned
Example 5: getModel
def getModel(self,teff,logg):
"""
Return the model atmosphere for given effective temperature and log g.
Not yet scaled to the distance!
Units returned are (micron,Jy)
@param teff: the stellar effective temperature
@type teff: float
@param logg: the log g value
@type logg: float
@return: The model spectrum in (micron,Jy)
@rtype: recarray
"""
c = 2.99792458e18 #in angstrom/s
if self.modelgrid is None:
self.readModelGrid()
mg = self.modelgrid
#- Find the closest temperature in the grid
teff_prox = mg['TEFF'][argmin(abs(mg['TEFF']-teff))]
#- Select all models with that temperature
mgsel = mg[mg['TEFF']==teff_prox]
#- Select the closest log g in the selection
logg_prox = mgsel['LOGG'][argmin(abs(mgsel['LOGG']-logg))]
#- Get the index of the model closest to teff and logg
imodel = mgsel[mgsel['LOGG']==logg_prox]['INDEX'][0]
self.teff_actual = teff_prox
self.logg_actual = logg_prox
wave = self.ff[imodel].data.field('wavelength')
flux = self.ff[imodel].data.field('flux')
if self.header['FLXUNIT'] == 'erg/s/cm2/A':
#- Go to erg/s/cm2/Hz, lFl = nFn, then to Jy (factor 10**(23))
flux = flux * wave**2 / c * 10**(23)
else:
raise Error('Flux unit unknown in atmosphere model fits file.')
if self.header['WAVUNIT'] == 'angstrom':
wave = wave * 10**(-4)
else:
raise Error('Wavelength unit unknown in atmosphere model fits file.')
model = rec.fromarrays([wave,flux],names=['wave','flux'])
return model
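The two-stage nearest-grid-point lookup in getModel reduces to two argmin calls over absolute differences. A sketch with a fabricated model grid (the values are illustrative only):

import numpy as np

# Hypothetical grid: one entry per model atmosphere
teff_grid = np.array([3500., 3500., 4000., 4000., 4500.])
logg_grid = np.array([0.5, 1.0, 0.5, 1.0, 0.5])

teff, logg = 3800., 0.8
teff_prox = teff_grid[np.argmin(np.abs(teff_grid - teff))]  # closest teff: 4000.0
sel = logg_grid[teff_grid == teff_prox]
logg_prox = sel[np.argmin(np.abs(sel - logg))]              # closest logg there: 1.0
print(teff_prox, logg_prox)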
Example 6: decode
def decode(file_name):
border.rotate(file_name)
image = Image.open("temp.png")
q = border.find("temp.png")
ind = sp.argmin(sp.sum(q, 1), 0)
up_left = q[ind, 0] + 2
up_top = q[ind, 1] + 2
d_right = q[ind+1, 0] - 3
d_bottom = q[ind-1, 1] - 3
box = (up_left, up_top, d_right, d_bottom)
region = image.crop(box)
h_sum = sp.sum(region, 0)
m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
s = sp.average(sp.diff(m))
m = int(round(d_right - up_left)/s)
if m % 3 != 0:
m += 3 - m % 3
n = int(round(d_bottom - up_top)/s)
if n % 4 != 0:
n += 4 - n % 4
s = int(round(s))+1
region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
region.save("0.png")
pix = region.load()
matrix = mix.off(rec.matrix(pix, s, m, n))
str2 = hamming.decode(array_to_str(matrix))
return hamming.bin_to_str(str2)
Example 7: getclosest
def getclosest(self,coords,timelist=None):
"""This method will get the closest set of parameters in the coordinate space. It will return
the parameters from all times.
Input
coords - A list of x,y and z coordinates.
Output
paramout - A NtxNp array from the closes output params
sphereout - A Nc length array The sphereical coordinates of the closest point.
cartout - Cartisian coordinates of the closes point.
"""
X_vec = self.Cart_Coords[:,0]
Y_vec = self.Cart_Coords[:,1]
Z_vec = self.Cart_Coords[:,2]
xdiff = X_vec -coords[0]
ydiff = Y_vec -coords[1]
zdiff = Z_vec -coords[2]
distall = xdiff**2+ydiff**2+zdiff**2
minidx = np.argmin(distall)
paramout = self.Param_List[minidx]
velout = self.Velocity[minidx]
datatime = self.Time_Vector
if sp.ndim(self.Time_Vector)>1:
datatime = datatime[:,0]
if timelist is not None:
timeindx = []
for itime in timelist:
timeindx.append(sp.argmin(sp.absolute(itime-datatime)))
paramout=paramout[timeindx]
velout=velout[timeindx]
sphereout = self.Sphere_Coords[minidx]
cartout = self.Cart_Coords[minidx]
return (paramout,velout,sphereout,cartout,np.sqrt(distall[minidx]))
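The closest-point search itself is an argmin over squared distances; the square root is only needed for the distance that is returned, not for the comparison. A minimal sketch with made-up coordinates:

import numpy as np

cart_coords = np.array([[0., 0., 0.],
                        [1., 1., 1.],
                        [2., 0., 1.]])
coords = [0.9, 1.1, 0.8]

diff = cart_coords - np.asarray(coords)
distall = (diff ** 2).sum(axis=1)        # squared distances suffice for argmin
minidx = np.argmin(distall)
print(minidx, np.sqrt(distall[minidx]))  # 1, and the actual distance to it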
Example 8: _init_params
def _init_params(self, X):
init = self.init
n_samples, n_features = X.shape
n_components = self.n_components
if (init == 'kmeans'):
km = Kmeans(n_components)
clusters, mean, cov = km.cluster(X)
coef = sp.array([c.shape[0] / n_samples for c in clusters])
comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
for i in range(n_components)]
elif (init == 'rand'):
coef = sp.absolute(sprand.randn(n_components))
coef = coef / coef.sum()
means = X[sprand.permutation(n_samples)[0: n_components]]
clusters = [[] for i in range(n_components)]
for x in X:
idx = sp.argmin([spla.norm(x - mean) for mean in means])
clusters[idx].append(x)
comps = []
for k in range(n_components):
mean = means[k]
cov = sp.cov(clusters[k], rowvar=0, ddof=0)
comps.append(multivariate_normal(mean, cov, allow_singular=True))
self.coef = coef
self.comps = comps
Example 9: fit
def fit(self, X):
n_samples, n_features = X.shape
n_classes = self.n_classes
max_iter = self.max_iter
tol = self.tol
rand_center_idx = sprand.permutation(n_samples)[0:n_classes]
center = X[rand_center_idx].T
    responsibility = sp.zeros((n_samples, n_classes))
for iter in range(max_iter):
# E step
dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
dist = spla.norm(dist, axis=1)**2
min_idx = sp.argmin(dist, axis=1)
        responsibility.fill(0)
        responsibility[sp.arange(n_samples), min_idx] = 1
# M step
        center_new = sp.dot(X.T, responsibility) / sp.sum(responsibility, axis=0)
diff = center_new - center
print('K-Means: {0:5d} {1:4e}'.format(iter, spla.norm(diff) / spla.norm(center)))
if (spla.norm(diff) < tol * spla.norm(center)):
break
center = center_new
self.center = center.T
    self.responsibility = responsibility
return self
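The E step's hard assignment amounts to building one-hot responsibilities from a distance matrix. A standalone sketch with a fabricated n_samples x n_classes array:

import numpy as np

dist = np.array([[0.2, 1.5],
                 [2.0, 0.3],
                 [0.9, 0.8]])          # n_samples x n_classes distances
min_idx = np.argmin(dist, axis=1)      # [0 1 1]: nearest center per sample
responsibility = np.zeros_like(dist)
responsibility[np.arange(dist.shape[0]), min_idx] = 1
print(responsibility)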
Example 10: brute_force_2ref
def brute_force_2ref(ref1,ref2,data,res):
[a,b,c] = data.shape[0],data.shape[1],data.shape[2]
    print(a, b, c)
matrix_ref1 = np.copy(data)
matrix_ref2 = np.copy(data)
for i in range(c):
matrix_ref1[:, :, i] = ref1[i]
matrix_ref2[:, :, i] = ref2[i]
total = 100/res + 1
total = int(total)
factor = (np.linspace(0,1,total))
fRGB = np.zeros((3,total), dtype=np.float16)
fRGB[0,:] = factor
fRGB[1,:] = 1-factor
sum_sqdata = np.sum(np.square(data),axis=2)
R_ref = np.empty((a,b,total),dtype=np.float16)
for i in range(total):
        print(i)
matrix_ref_com = fRGB[0,i]*matrix_ref1 + fRGB[1,i]*matrix_ref2
sqr = np.square(data - matrix_ref_com)
R_ref[:, :, i] = np.sum(sqr, axis=2) / sum_sqdata
min_R = np.amin(R_ref, axis=2)
index = scipy.argmin(R_ref, axis=2)
return min_R, index, fRGB
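The selection at the end is a per-pixel argmin along the trial axis (axis=2). A sketch with a tiny fabricated residual stack:

import numpy as np

# Hypothetical residuals: 2x2 image, 3 trial mixing fractions
R_ref = np.array([[[0.9, 0.2, 0.5], [0.1, 0.4, 0.3]],
                  [[0.6, 0.6, 0.1], [0.8, 0.05, 0.9]]])
index = np.argmin(R_ref, axis=2)  # best fraction index per pixel: [[1 0] [2 1]]
min_R = np.amin(R_ref, axis=2)    # the corresponding residual values
print(index)
print(min_R)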
Example 11: __init__
def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None, strategy=("rand", 2, "bin"), eps=1e-6):
self.func = func
self.population = sp.array(pop0)
# added by Minh-Tri Pham
    for n in range(len(self.population)):
self.refine(self.population[n])
self.npop, self.ndim = self.population.shape
self.args = args
self.crossover_rate = crossover_rate
self.strategy = strategy
self.eps = eps
self.pop_values = [self.func(m, *args) for m in self.population]
bestidx = sp.argmin(self.pop_values)
self.best_vector = self.population[bestidx]
self.best_value = self.pop_values[bestidx]
if scale is None:
self.scale = self.calculate_scale()
else:
self.scale = scale
self.generations = 0
self.best_val_history = []
self.best_vec_history = []
self.jump_table = {
("rand", 1, "bin"): (self.choose_rand, self.diff1, self.bin_crossover),
("rand", 2, "bin"): (self.choose_rand, self.diff2, self.bin_crossover),
("best", 1, "bin"): (self.choose_best, self.diff1, self.bin_crossover),
("best", 2, "bin"): (self.choose_best, self.diff2, self.bin_crossover),
("rand-to-best", 1, "bin"): (self.choose_rand_to_best, self.diff1, self.bin_crossover),
}
Example 12: fit2D_2ref
def fit2D_2ref(ref1,ref2,data,res):
[a,b,c] = data.shape[0],data.shape[1],data.shape[2]
    print(a, b, c)
matrix_ref1 = np.copy(data)
matrix_ref2 = np.copy(data)
for i in range(c):
matrix_ref1[:, :, i] = ref1[i]
matrix_ref2[:, :, i] = ref2[i]
total = 100/res + 1
total = int(total)
factor = (np.linspace(0,1,total))
fRGB = np.zeros((3,total), dtype=np.float16)
fRGB[0,:] = factor
fRGB[1,:] = 1-factor
sum_sqdata = np.sum(np.square(data),axis=2)
R_ref = np.empty((a,b,total),dtype=np.float16)
for i in range(total):
        print(i)
matrix_ref_com = fRGB[0,i]*matrix_ref1 + fRGB[1,i]*matrix_ref2
sqr = np.square(data - matrix_ref_com)
R_ref[:, :, i] = np.sum(sqr, axis=2) / sum_sqdata
min_R = np.amin(R_ref, axis=2)
index = scipy.argmin(R_ref, axis=2)
save_dir = 'D:/Research/BNL_2014_Summer_Intern/xanes_PyQT'
f = open(save_dir+'/index.txt','w')
for i in range(a):
f.write("%14.5f\n"%( index[i,500]))
f.close()
return min_R, index, fRGB
Example 13: predict_gmm
def predict_gmm(self, testSamples, tau=0):
"""
Function that predict the label for testSamples using the learned model
Inputs:
testSamples: the samples to be classified
tau: regularization parameter
Outputs:
predLabels: the class
scores: the decision value for each class
"""
# Get information from the data
nbTestSpl = testSamples.shape[0] # Number of testing samples
# Initialization
scores = sp.empty((nbTestSpl,self.C))
# Start the prediction for each class
    for c in range(self.C):
testSamples_c = testSamples - self.mean[c,:]
regvp = self.vp[c,:] + tau
logdet = sp.sum(sp.log(regvp))
cst = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant term
# compute ||lambda^{-0.5}q^T(x-mu)||^2 + cst for all samples
scores[:,c] = sp.sum( sp.square( sp.dot( (self.Q[c,:,:][:,:]/sp.sqrt(regvp)).T, testSamples_c.T ) ), axis=0 ) + cst
del testSamples_c
# Assign the label to the minimum value of scores
predLabels = sp.argmin(scores,1)+1
return predLabels,scores
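The final labeling is an argmin across the class axis; the +1 merely shifts to 1-based class labels. A sketch with a fabricated score matrix:

import numpy as np

scores = np.array([[3.2, 1.1, 2.0],
                   [0.4, 5.0, 0.9]])        # n_samples x n_classes decision values
predLabels = np.argmin(scores, axis=1) + 1  # 1-based labels: [2 1]
print(predLabels)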
Example 14: _set_reach_dist
def _set_reach_dist(setofobjects, point_index, epsilon):
# Assumes that the query returns ordered (smallest distance first)
# entries. This is the case for the balltree query...
dists, indices = setofobjects.query(setofobjects.data[point_index],
setofobjects._nneighbors[point_index])
    # Check whether there is more than one member in the neighborhood ##
if sp.iterable(dists):
# Masking processed values ##
# n_pr is 'not processed'
n_pr = indices[(setofobjects._processed[indices] < 1)[0].T]
rdists = sp.maximum(dists[(setofobjects._processed[indices] < 1)[0].T],
setofobjects.core_dists_[point_index])
new_reach = sp.minimum(setofobjects.reachability_[n_pr], rdists)
setofobjects.reachability_[n_pr] = new_reach
# Checks to see if everything is already processed;
# if so, return control to main loop ##
if n_pr.size > 0:
# Define return order based on reachability distance ###
return n_pr[sp.argmin(setofobjects.reachability_[n_pr])]
else:
return point_index
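The return statement shows a common pattern: argmin over a subset of values, then mapping the subset-relative index back to the original indices. A sketch with fabricated reachability values:

import numpy as np

reachability = np.array([0.9, 0.2, 0.7, 0.4])
n_pr = np.array([0, 2, 3])  # indices of points not yet processed
next_point = n_pr[np.argmin(reachability[n_pr])]
print(next_point)  # 3: the unprocessed point with the smallest reachability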
Example 15: __init__
def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None,
strategy=('rand', 2, 'bin'), eps=1e-6):
self.func = func
self.population = sp.array(pop0)
self.npop, self.ndim = self.population.shape
self.args = args
self.crossover_rate = crossover_rate
self.strategy = strategy
self.eps = eps
self.pop_values = [self.func(m, *args) for m in self.population]
bestidx = sp.argmin(self.pop_values)
self.best_vector = self.population[bestidx]
self.best_value = self.pop_values[bestidx]
if scale is None:
self.scale = self.calculate_scale()
else:
self.scale = scale
self.generations = 0
self.best_val_history = []
self.best_vec_history = []
self.jump_table = {
('rand', 1, 'bin'): (self.choose_rand, self.diff1, self.bin_crossover),
('rand', 2, 'bin'): (self.choose_rand, self.diff2, self.bin_crossover),
('best', 1, 'bin'): (self.choose_best, self.diff1, self.bin_crossover),
('best', 2, 'bin'): (self.choose_best, self.diff2, self.bin_crossover),
('rand-to-best', 1, 'bin'):
(self.choose_rand_to_best, self.diff1, self.bin_crossover),
}
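Picking the best population member is a direct argmin over the objective values; note that argmin accepts a plain Python list as well as an array. A minimal sketch with a made-up objective (sphere function, random population):

import numpy as np

def sphere(x):
    return float((x ** 2).sum())

rng = np.random.default_rng(2)
population = rng.normal(size=(6, 3))          # 6 candidate vectors in 3-D
pop_values = [sphere(m) for m in population]  # objective value per member

bestidx = np.argmin(pop_values)
print(bestidx, population[bestidx], pop_values[bestidx])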