This article collects typical usage examples of the Python class sklearn.mixture.GaussianMixture. If you have been wondering what GaussianMixture is for, or how to use it, the curated class examples below may help.
The following 15 code examples of the GaussianMixture class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: fit
def fit(self, data, ngauss, n_iter=5000, min_covar=1.0e-6,
        doplot=False, **keys):
    """
    data is shape [npoints, ndim]
    """
    from sklearn.mixture import GaussianMixture

    if len(data.shape) == 1:
        data = data[:, numpy.newaxis]

    print("ngauss:   ", ngauss)
    print("n_iter:   ", n_iter)
    print("min_covar:", min_covar)

    gmm = GaussianMixture(
        n_components=ngauss,
        max_iter=n_iter,
        reg_covar=min_covar,
        covariance_type='full',
    )
    gmm.fit(data)

    if not gmm.converged_:
        print("DID NOT CONVERGE")

    self._gmm = gmm
    self.set_mixture(gmm.weights_, gmm.means_, gmm.covariances_)

    if doplot:
        plt = self.plot_components(data=data, **keys)
        return plt
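For context, a minimal standalone sketch of the fitting pattern this wrapper uses, on hypothetical random 2-D data (the data, component count, and iteration settings are assumptions, not from the original project):

# minimal standalone sketch of the same fitting pattern (hypothetical data)
import numpy
from sklearn.mixture import GaussianMixture

data = numpy.random.randn(1000, 2)  # [npoints, ndim]
gmm = GaussianMixture(n_components=3, max_iter=5000,
                      reg_covar=1.0e-6, covariance_type='full')
gmm.fit(data)
print(gmm.converged_, gmm.weights_)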
Example 2: create_random_gmm
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
    prng = check_random_state(prng)
    g = GaussianMixture(n_mix, covariance_type=covariance_type)
    g.means_ = prng.randint(-20, 20, (n_mix, n_features))
    # GaussianMixture uses `covariances_`; `covars_` was the old GMM attribute
    g.covariances_ = make_covar_matrix(covariance_type, n_mix, n_features)
    g.weights_ = normalized(prng.rand(n_mix))
    return g
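One thing to watch with hand-assembled models like this: since fit is never called, sklearn never computes precisions_cholesky_, which score_samples and predict require. A hedged, self-contained sketch of the same pattern for the 'diag' case, where that attribute is easy to derive (all parameter values here are made up):

# hedged sketch: a hand-assembled mixture lacks precisions_cholesky_,
# which score_samples/predict need before any scoring can happen
import numpy as np
from sklearn.mixture import GaussianMixture

g = GaussianMixture(n_components=2, covariance_type='diag')
g.means_ = np.array([[0.0, 0.0], [5.0, 5.0]])
g.covariances_ = np.array([[1.0, 1.0], [2.0, 0.5]])
g.weights_ = np.array([0.4, 0.6])
g.precisions_cholesky_ = 1.0 / np.sqrt(g.covariances_)  # valid for 'diag'
print(g.score_samples(np.zeros((1, 2))))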
Example 3: fit_mixtures
def fit_mixtures(X, mag, mbins, binwidth=0.2, seed=None,
                 keepscore=False, keepbic=False, **kwargs):
    kwargs.setdefault('n_components', 25)
    kwargs.setdefault('covariance_type', 'full')
    fits = []
    if keepscore:
        scores = []
    if keepbic:
        bics = []
    if seed:
        np.random.seed(seed)
    for bincenter in mbins:
        # this is not an efficient way to assign bins, but the time
        # is negligible compared to the GMM fitting anyway
        ii = np.where(np.abs(mag - bincenter) < binwidth)[0]
        if False:  # debugging output, disabled
            print('{:.2f}: {} qsos'.format(bincenter, len(ii)))
        gmm = GaussianMixture(**kwargs)
        gmm.fit(X[ii])
        fits.append(gmm)
        if keepscore:
            scores.append(gmm.score(X[ii]))
        if keepbic:
            bics.append(gmm.bic(X[ii]))
    rv = (fits,)
    if keepscore:
        rv += (scores,)
    if keepbic:
        rv += (bics,)
    return rv
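A hedged usage sketch of the function above, with made-up feature and magnitude arrays (the data shapes and bin grid are assumptions for illustration):

# hypothetical call: columns of X are features, mag the magnitude used for binning
import numpy as np

X = np.random.randn(5000, 4)
mag = np.random.uniform(17.0, 22.0, 5000)
mbins = np.arange(17.2, 22.0, 0.2)
fits, bics = fit_mixtures(X, mag, mbins, keepbic=True, n_components=5)
# one fitted GaussianMixture and one BIC value per magnitude bin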
Example 4: learn_subset
def learn_subset(self, search_space):
    # Mask undesired features
    current_array = self.vectors[:, search_space]

    GM = GaussianMixture(
        n_components=2,
        covariance_type="full",
        tol=0.001,
        reg_covar=1e-06,
        max_iter=1000,
        n_init=25,
        init_params="kmeans",
        weights_init=None,
        means_init=None,
        precisions_init=None,
        random_state=None,
        warm_start=False,
        verbose=0,
        verbose_interval=10,
    )
    GM.fit(current_array)

    labels = GM.predict(current_array)
    unique, counts = np.unique(labels, return_counts=True)
    count_dict = dict(zip(unique, counts))

    return count_dict, labels
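Most of the constructor arguments above are sklearn defaults spelled out; the meaningful non-defaults are max_iter=1000 and n_init=25, which add EM restarts for a more stable two-way split. Since learn_subset is a method, here is a self-contained sketch of the same idea on a hypothetical feature matrix:

# hypothetical standalone version of the two-component split
import numpy as np
from sklearn.mixture import GaussianMixture

vectors = np.random.rand(500, 20)
search_space = [0, 3, 5, 7]          # columns to keep
current_array = vectors[:, search_space]
labels = GaussianMixture(n_components=2, n_init=25).fit_predict(current_array)
unique, counts = np.unique(labels, return_counts=True)
print(dict(zip(unique, counts)))     # cluster sizes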
Example 5: GaussianMixture
def GaussianMixture(V, **kwargs):
    """Performs clustering on *V* by using Gaussian mixture models. The function
    uses :func:`sklearn.mixture.GaussianMixture`. See the sklearn documentation
    for details.

    :arg V: row-normalized eigenvectors for the purpose of clustering.
    :type V: :class:`numpy.ndarray`

    :arg n_clusters: specifies the number of clusters.
    :type n_clusters: int
    """
    try:
        from sklearn.mixture import GaussianMixture
    except ImportError:
        raise ImportError('Use of this function (GaussianMixture) requires the '
                          'installation of sklearn.')

    n_components = kwargs.pop('n_components', None)
    if n_components is None:
        n_components = kwargs.pop('n_clusters', None)
    if n_components is None:
        n_components = 1

    n_init = kwargs.pop('n_init', 1)

    mixture = GaussianMixture(n_init=n_init, n_components=n_components, **kwargs).fit(V)
    return mixture.fit_predict(V)
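One detail worth flagging: fit(V) trains the mixture, and fit_predict(V) then trains it again from scratch, so the model is fitted twice. If the double fit is unintentional, a sketch that fits once and predicts, with essentially the same labels (aliasing the sklearn class, since the wrapper above shadows the name; V here is a stand-in array):

# sketch: the same labels with a single EM run
import numpy as np
from sklearn.mixture import GaussianMixture as SKGaussianMixture

V = np.random.rand(100, 3)          # stand-in for row-normalized eigenvectors
mixture = SKGaussianMixture(n_components=4, n_init=1).fit(V)
labels = mixture.predict(V)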
Example 6: gmm
def gmm(nclusters, coords, n_init=50, n_iter=500):
    if USE_GAUSSIAN_MIXTURE:
        est = GaussianMixture(n_components=nclusters, n_init=n_init, max_iter=n_iter)
    else:
        # legacy pre-0.18 sklearn class, which used `n_iter` instead of `max_iter`
        est = GMM(n_components=nclusters, n_init=n_init, n_iter=n_iter)
    est.fit(coords)
    return Partition(est.predict(coords))
Example 7: fit_gmm
def fit_gmm(samples, ncomponents=2):
    """Given a numpy array of floating point samples, fit a Gaussian mixture model."""
    # assume samples is of shape (NSAMPLES,); reshape to (NSAMPLES, 1) and train a GMM:
    gmm = GaussianMixture(n_components=ncomponents)
    gmm.fit(samples.reshape(-1, 1))
    # return params of the GMM in [(coeff, mu, var)] format
    # (covariances_, so the last entry is a variance, not a standard deviation):
    params = [(gmm.weights_[c], gmm.means_[c][0], gmm.covariances_[c][0][0])
              for c in range(ncomponents)]
    return params
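One caveat with the snippet above: with the default covariance_type='full', gmm.covariances_[c][0][0] is a variance, so the third tuple element is sigma squared rather than sigma. A hedged variant (the name fit_gmm_std is hypothetical) that returns true standard deviations:

# sketch: return (weight, mu, sigma) with sigma a standard deviation
import numpy as np
from sklearn.mixture import GaussianMixture

def fit_gmm_std(samples, ncomponents=2):
    gmm = GaussianMixture(n_components=ncomponents).fit(samples.reshape(-1, 1))
    return [(gmm.weights_[c], gmm.means_[c][0], np.sqrt(gmm.covariances_[c][0][0]))
            for c in range(ncomponents)]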
Example 8: main
def main():
    X, Y = get_data(10000)
    print("Number of data points:", len(Y))

    model = GaussianMixture(n_components=10)
    model.fit(X)
    M = model.means_
    R = model.predict_proba(X)

    print("Purity:", purity(Y, R))  # max is 1, higher is better
    print("DBI:", DBI(X, M, R))  # lower is better
Example 9: fit_conditional_parameters
def fit_conditional_parameters(self, j):
    class_wise_scores = self.get_class_wise_scores(j)

    class_wise_parameters = dict()
    for label in self._labels:
        gmm = GaussianMixture(n_components=1)
        gmm.fit(class_wise_scores[label].reshape(-1, 1))

        class_wise_parameters[label] = \
            self.Gaussian(mu=gmm.means_.flatten()[0],
                          std=np.sqrt(gmm.covariances_.flatten()[0]))
    return class_wise_parameters
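Since each mixture here has only one component, the fit essentially reduces to computing the sample mean and variance of the class-wise scores. A quick sanity-check sketch (the scores array is made up):

# sketch: a 1-component GMM essentially recovers the sample moments
import numpy as np
from sklearn.mixture import GaussianMixture

scores = np.random.randn(1000)
gmm = GaussianMixture(n_components=1).fit(scores.reshape(-1, 1))
print(gmm.means_.flatten()[0], np.mean(scores))                # identical
print(np.sqrt(gmm.covariances_.flatten()[0]), np.std(scores))  # differ only by reg_covar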
Example 10: GaussianMixture1D
class GaussianMixture1D(object):
    """
    Simple class to work with 1D mixtures of Gaussians

    Parameters
    ----------
    means : array_like
        means of component distributions (default = 0)
    sigmas : array_like
        standard deviations of component distributions (default = 1)
    weights : array_like
        weights of component distributions (default = 1)
    """
    def __init__(self, means=0, sigmas=1, weights=1):
        data = np.array([t for t in np.broadcast(means, sigmas, weights)])
        components = data.shape[0]

        # build the model and set its parameters by hand instead of fitting
        self._gmm = GaussianMixture(components, covariance_type='spherical')
        self._gmm.means_ = data[:, :1]
        self._gmm.weights_ = data[:, 2] / data[:, 2].sum()
        self._gmm.covariances_ = data[:, 1] ** 2
        self._gmm.precisions_cholesky_ = 1 / np.sqrt(self._gmm.covariances_)
        self._gmm.fit = None  # disable the fit method for safety

    def sample(self, size):
        """Draw a random sample"""
        return self._gmm.sample(size)

    def pdf(self, x):
        """Compute the probability density"""
        if x.ndim == 1:
            x = x[:, np.newaxis]
        logprob = self._gmm.score_samples(x)
        return np.exp(logprob)

    def pdf_individual(self, x):
        """Compute the probability density of each component"""
        if x.ndim == 1:
            x = x[:, np.newaxis]
        logprob = self._gmm.score_samples(x)
        responsibilities = self._gmm.predict_proba(x)
        return responsibilities * np.exp(logprob[:, np.newaxis])
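A short usage sketch of the class above (the parameter values are hypothetical), checking that the per-component densities sum to the total:

# hypothetical usage of GaussianMixture1D, assuming the class is in scope
import numpy as np

mix = GaussianMixture1D(means=[-1.0, 2.0], sigmas=[0.5, 1.0], weights=[0.3, 0.7])
x = np.linspace(-4, 6, 201)
total = mix.pdf(x)                   # overall density
parts = mix.pdf_individual(x)        # per-component densities, shape (201, 2)
assert np.allclose(total, parts.sum(axis=1))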
Example 11: fit
def fit(self, X, Y=None):
    if self.method == 'random':
        N = len(X)
        idx = np.random.randint(N, size=self.M)
        self.samples = X[idx]
    elif self.method == 'normal':
        # just sample from N(0,1)
        D = X.shape[1]
        self.samples = np.random.randn(self.M, D) / np.sqrt(D)
    elif self.method == 'kmeans':
        X, Y = self._subsample_data(X, Y)

        print("Fitting kmeans...")
        t0 = datetime.now()
        kmeans = KMeans(n_clusters=len(set(Y)))
        kmeans.fit(X)
        print("Finished fitting kmeans, duration:", datetime.now() - t0)

        # find the most ambiguous points: compute the distance between each
        # point and all cluster centers, and keep the points whose distances
        # have the smallest variance
        dists = kmeans.transform(X)  # returns an N x K matrix
        variances = dists.var(axis=1)
        idx = np.argsort(variances)  # smallest to largest
        idx = idx[:self.M]
        self.samples = X[idx]
    elif self.method == 'gmm':
        X, Y = self._subsample_data(X, Y)

        print("Fitting GMM")
        t0 = datetime.now()
        gmm = GaussianMixture(
            n_components=len(set(Y)),
            covariance_type='spherical',
            reg_covar=1e-6)
        gmm.fit(X)
        print("Finished fitting GMM, duration:", datetime.now() - t0)

        # the most ambiguous points are those with the highest
        # responsibility entropy
        probs = gmm.predict_proba(X)
        ent = stats.entropy(probs.T)  # N-length vector of entropies
        idx = np.argsort(-ent)  # negate since we want biggest first
        idx = idx[:self.M]
        self.samples = X[idx]
    return self
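The 'gmm' branch hinges on one idiom: scipy.stats.entropy operates column-wise, so passing the transposed N x K responsibility matrix yields one entropy per sample, and the points where the mixture is least decided rank highest. A self-contained sketch of just that selection step (data and component count are made up):

# sketch: pick the M most ambiguous points by responsibility entropy
import numpy as np
from scipy import stats
from sklearn.mixture import GaussianMixture

X = np.random.randn(500, 2)
gmm = GaussianMixture(n_components=3, covariance_type='spherical').fit(X)
probs = gmm.predict_proba(X)         # (500, 3) responsibilities
ent = stats.entropy(probs.T)         # one entropy value per sample
most_ambiguous = X[np.argsort(-ent)[:10]]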
Example 12: finish
def finish(self):
    print("Calculating mean ToT for each PMT from gaussian fits...")
    gmm = GaussianMixture()
    xs, ys = [], []
    for (dom_id, channel_id), tots in self.tot_data.items():
        dom = self.db.doms.via_dom_id(dom_id)
        gmm.fit(np.array(tots)[:, np.newaxis])
        mean_tot = gmm.means_[0][0]
        xs.append(31 * (dom.floor - 1) + channel_id + 600 * (dom.du - 1))
        ys.append(mean_tot)
    fig, ax = plt.subplots()
    ax.scatter(xs, ys, marker="+")
    ax.set_xlabel(r"31$\cdot$(floor - 1) + channel_id + 600$\cdot$(DU - 1)")
    ax.set_ylabel("ToT [ns]")
    plt.title("Mean ToT per PMT")
    plt.savefig(self.plotfilename)
Example 13: Recognize
def Recognize(self, fn):
    im = Image.open(fn)
    im = util.CenterExtend(im, radius=20)

    # collect the coordinates of all dark pixels
    vec = np.asarray(im.convert('L')).copy()
    Y = []
    for i in range(vec.shape[0]):
        for j in range(vec.shape[1]):
            if vec[i][j] <= 200:
                Y.append([i, j])

    # cluster the dark pixels into 7 character centers
    gmm = GaussianMixture(n_components=7, covariance_type='tied',
                          reg_covar=1e2, tol=1e3, n_init=9)
    gmm.fit(Y)
    centers = gmm.means_

    points = []
    for i in range(7):
        scoring = 0.0
        for w_i in range(3):
            for w_j in range(3):
                p_x = centers[i][0] - 1 + w_i
                p_y = centers[i][1] - 1 + w_j

                cr = util.crop(im, p_x, p_y, radius=20)
                cr = cr.resize((40, 40), Image.ANTIALIAS)  # Image.LANCZOS in newer Pillow

                X = np.asarray(cr.convert('L'), dtype='float')
                X = (X.astype("float") - 180) / 200

                x0 = np.expand_dims(X, axis=0)
                x1 = np.expand_dims(x0, axis=3)

                if self.model.predict(x1)[0][0] < 0.5:
                    scoring += 1

        if scoring > 4:
            points.append((centers[i][0] - 20, centers[i][1] - 20))

    return points
Example 14: __init__
def __init__(self, means=0, sigmas=1, weights=1):
    data = np.array([t for t in np.broadcast(means, sigmas, weights)])
    components = data.shape[0]

    # build the model and set its parameters by hand instead of fitting
    self._gmm = GaussianMixture(components, covariance_type='spherical')
    self._gmm.means_ = data[:, :1]
    self._gmm.weights_ = data[:, 2] / data[:, 2].sum()
    self._gmm.covariances_ = data[:, 1] ** 2
    self._gmm.precisions_cholesky_ = 1 / np.sqrt(self._gmm.covariances_)
    self._gmm.fit = None  # disable the fit method for safety
Example 15: fit
def fit(self, X_train, y_train):
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)

    # from sklearn.mixture import GMM as GaussianMixture
    from sklearn.mixture import GaussianMixture

    unlabels = range(0, np.max(y_train) + 1)
    for lab in unlabels:
        # pick per-class parameters, shared parameters, or defaults
        if self.each_class_params is not None:
            model = GaussianMixture(**self.each_class_params[lab])
        elif len(self.same_params) > 0:
            model = GaussianMixture(**self.same_params)
        else:
            model = GaussianMixture()

        # fit one mixture on the samples of this class only
        X_train_lab = X_train[y_train == lab]
        # logger.debug('xtr lab shape ' + str(X_train_lab))
        model.fit(X_train_lab)

        self.models.insert(lab, model)
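For completeness, a hedged sketch of how a predict step for such per-class models typically looks (not shown in the excerpt; the helper below is hypothetical): score each sample under every class model and take the argmax of the log-likelihoods.

# hypothetical predict companion: argmax of per-class log-likelihoods
import numpy as np

def predict(models, X_test):
    # models: list of fitted GaussianMixture objects, ordered by class label
    X_test = np.asarray(X_test)
    loglik = np.column_stack([m.score_samples(X_test) for m in models])
    return np.argmax(loglik, axis=1)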