This article collects typical usage examples of the TSNE.fit method from Python's sklearn.manifold module. If you have been wondering what exactly TSNE.fit does and how to use it in practice, the curated code examples below may help. You can also explore the class this method belongs to, sklearn.manifold.TSNE, for further usage examples.
Seven code examples of TSNE.fit are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
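Most of the examples below share the same basic calling pattern: construct a TSNE estimator, call fit on a feature matrix, and read the low-dimensional coordinates from the fitted embedding_ attribute. A minimal sketch of that pattern, using made-up toy data, looks like this:

import numpy as np
from sklearn.manifold import TSNE

X = np.random.rand(200, 50)   # toy data: 200 samples, 50 features
tsne = TSNE(n_components=2, random_state=0)
tsne.fit(X)                   # fit() returns the estimator and stores the result on it
coords = tsne.embedding_      # array of shape (200, 2)
print(coords.shape)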
Example 1: performDimensionalityReduction
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
from collections import defaultdict, OrderedDict

import numpy as np
from sklearn.manifold import TSNE


def performDimensionalityReduction(context_vector, n_component, perplexity):
    '''
    Applies TSNE to the feature vector of each word instance and creates
    one model per word type.
    '''
    feature_vector_data = defaultdict(dict)
    word_type_model = {}
    for word_type, word_type_data in context_vector.items():
        feature_vector_word_type = OrderedDict()
        # Read in all the feature vectors for the given word type
        for data_type, instance_details in word_type_data.items():
            for instance, context_details in instance_details.items():
                # Training data carries sense ids, while test data carries ['<UNKNOWN>']
                senses = context_details.get('Sense')
                for sense in senses:
                    feature_vector_word_type[(instance, sense, data_type)] = context_details["Feature_Vector"]
        # Apply TSNE to all the feature vectors of this word type
        feature_vector_array = np.array(list(feature_vector_word_type.values()))
        model = TSNE(n_components=n_component, random_state=0, perplexity=perplexity, metric="cosine")
        model.fit(feature_vector_array)
        # Store the model, since it will be needed to fit the test data
        word_type_model[word_type] = model
        # Convert to a structure of {WordType: {(instanceID, senseID, dataType): embedded vector, ...}}
        keys = list(feature_vector_word_type.keys())
        for i in range(len(keys)):
            feature_vector_data[word_type][keys[i]] = list(model.embedding_[i])
    return feature_vector_data, word_type_model
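A hypothetical call of this function might look like the following; the nested structure of context_vector (word type, data split, instance id, then a dict with 'Sense' and 'Feature_Vector') is inferred from the loops above, and the toy vectors and names are invented for illustration.

import numpy as np

rng = np.random.default_rng(0)
context_vector = {
    "bank": {
        "train": {
            f"bank.train.{i}": {"Sense": [f"sense_{i % 2}"],
                                "Feature_Vector": rng.random(10)}
            for i in range(8)
        },
        "test": {
            "bank.test.0": {"Sense": ["<UNKNOWN>"],
                            "Feature_Vector": rng.random(10)},
        },
    }
}

# perplexity must stay below the number of instances per word type (9 here)
embedded, models = performDimensionalityReduction(context_vector, n_component=2, perplexity=3)
print(embedded["bank"])   # {(instance, sense, split): [x, y], ...}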
Example 2: labtest_TSNE
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
from sklearn import preprocessing as pp
from sklearn.manifold import TSNE


def labtest_TSNE(PID):
    # `patients` is a module-level dict of the form {patient_id: {'tests': [...]}, ...}
    data = [patients[pid]['tests'] for pid in PID]
    X = pp.scale(data)
    # Note: n_iter was renamed to max_iter in scikit-learn 1.5
    tsne = TSNE(n_components=2, perplexity=30.0, learning_rate=1000.0, n_iter=1000,
                n_iter_without_progress=30, min_grad_norm=1e-07, angle=0.5)
    pos = tsne.fit(X).embedding_
    return pos
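A hedged usage sketch, assuming patients is a dict keyed by patient id with a 'tests' vector per patient, and an older scikit-learn release that still accepts the n_iter keyword:

import numpy as np

rng = np.random.default_rng(0)
patients = {f"p{i:03d}": {'tests': rng.random(12).tolist()} for i in range(100)}

pos = labtest_TSNE(PID=list(patients.keys()))
print(pos.shape)   # (100, 2)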
Example 3: data_embedding
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
from sklearn.manifold import MDS, TSNE


def data_embedding(self, type='TSNE'):
    '''
    Fit the distance matrix into a two-dimensional embedded space using
    either the TSNE or the MDS model.
    '''
    if type == 'TSNE':
        model = TSNE(n_components=2, metric='precomputed')
    elif type == 'MDS':
        model = MDS(n_components=2, max_iter=3000, eps=1e-9,
                    dissimilarity="precomputed", n_jobs=1)
    # Positions of the points in the embedding space
    pos = model.fit(self.distance_matrix).embedding_
    return pos
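The method relies on a distance matrix stored on self. As a standalone sketch of the same pattern, one can build a precomputed distance matrix with sklearn.metrics.pairwise_distances and fit TSNE on it directly; the toy data is made up, and note that recent scikit-learn versions require init='random' when metric='precomputed'.

import numpy as np
from sklearn.manifold import TSNE
from sklearn.metrics import pairwise_distances

X = np.random.rand(100, 16)                                   # toy data
distance_matrix = pairwise_distances(X, metric='euclidean')   # (100, 100)

model = TSNE(n_components=2, metric='precomputed', init='random')
pos = model.fit(distance_matrix).embedding_
print(pos.shape)   # (100, 2)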
Example 4: TSNE
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
import matplotlib.pyplot as plt   # matplotlib 1.4.3
from sklearn.manifold import TSNE # scikit-learn 0.17
import pandas                     # pandas 0.16.2

# Read data
data = pandas.read_csv("data.csv", sep=",")

# Fit model; the transpose makes each *column* of the CSV one embedded point
model = TSNE(n_components=2, perplexity=10, verbose=2, method='barnes_hut', init='pca', n_iter=1000)
model.fit(data.values.T)

# Plot results, labelling each point with its column name
hFig, hAx = plt.subplots()
hAx.scatter(model.embedding_[:, 0], model.embedding_[:, 1], 20, color="grey")
for i, txt in enumerate(data.keys()):
    hAx.annotate(txt, (model.embedding_[i, 0], model.embedding_[i, 1]))
plt.show()
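The script reads an unspecified data.csv. A hypothetical file with the shape it expects (observations in rows, named variables in columns, where the .T above turns each column into one embedded point) could be generated like this:

import numpy as np
import pandas as pd

# Hypothetical toy CSV: 500 observations of 40 named variables.
rng = np.random.default_rng(0)
toy = pd.DataFrame(rng.normal(size=(500, 40)),
                   columns=[f"var_{j:02d}" for j in range(40)])
toy.to_csv("data.csv", sep=",", index=False)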
Example 5: imread
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
import numpy as np
import pandas as pd
from skimage import color
from skimage.feature import hog
from skimage.io import imread

# `df` is a DataFrame built earlier in the script with a 'local_path' column of image file paths.
# Compute a HOG descriptor for every image that is at least 200x200 pixels.
vector_list = []
for i in df.index:
    img = imread(df.local_path.loc[i])
    if img.shape[0] < 200 or img.shape[1] < 200:
        df = df.drop(i)
    else:
        img_gray = color.rgb2gray(img)
        fd = hog(img_gray, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(4, 4))
        vector_list.append(fd)
        print(i, len(fd), df.local_path.loc[i])
X = np.vstack(vector_list)

# Embed the HOG vectors in 2D and bin the embedding into a 64x64 grid
from sklearn.manifold import TSNE as tsne
tsne = tsne(n_components=2)
tsne.fit(X)
subspace_tsne = pd.DataFrame(tsne.fit_transform(X), columns=["x", "y"])
num_bins = 64
subspace_tsne['grid_x'] = pd.cut(subspace_tsne['x'], num_bins, labels=False)
subspace_tsne['grid_y'] = pd.cut(subspace_tsne['y'], num_bins, labels=False)
subspace_tsne['local_path'] = df.local_path.values[:len(subspace_tsne)]
# I should save the dataframe here, so later maybe I can use full images
thumb_side = 128
from PIL import Image
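The snippet ends right after choosing a thumbnail size and importing PIL. A plausible continuation, which pastes each image as a thumbnail into its (grid_x, grid_y) cell of a large montage, might look like the following; the montage layout and output filename are assumptions, not part of the original example.

# Hypothetical continuation: place each thumbnail at its (grid_x, grid_y) cell.
montage = Image.new('RGB', (num_bins * thumb_side, num_bins * thumb_side), 'white')
for _, row in subspace_tsne.iterrows():
    thumb = Image.open(row['local_path']).convert('RGB')
    thumb.thumbnail((thumb_side, thumb_side))   # shrink in place, keeping aspect ratio
    x = int(row['grid_x']) * thumb_side
    y = int(row['grid_y']) * thumb_side
    montage.paste(thumb, (x, y))
montage.save('tsne_grid.jpg')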
Example 6: __plot_samples__
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.manifold import MDS, TSNE


def __plot_samples__(self, dfs, fold):
    """
    :type dfs: List[pandas DataFrame]  # [training df, testing df]
    :type fold: int
    :rtype: None
    """
    mds = MDS(n_components=2, max_iter=3000, eps=1e-9, dissimilarity='euclidean', n_jobs=-1)
    tsne = TSNE(n_components=2)
    # Recode the labels as colour indices:
    #   author 1 train (0 = light blue),  author 1 test (1 = dark blue)
    #   author 2 train (2 = light green), author 2 test (3 = dark green)
    df0_copy = dfs[0].copy()
    df0_copy.loc[(df0_copy.label == 1).values, 'label'] = 0
    df0_copy.loc[(df0_copy.label == -1).values, 'label'] = 2
    df1_copy = dfs[1].copy()
    df1_copy.loc[(df1_copy.label == 1).values, 'label'] = 1
    df1_copy.loc[(df1_copy.label == -1).values, 'label'] = 3
    df_all = pd.concat([df0_copy, df1_copy])
    legend = {0: 'Author 1 Training Sample',
              1: 'Author 1 Test Sample',
              2: 'Author 2 Training Sample',
              3: 'Author 2 Test Sample'}
    # Fit both embeddings on the combined training and test samples
    pos_lst = [('Multi-Dimensional Scaling (MDS)',
                mds.fit(df_all.drop('label', axis=1)).embedding_),
               ('t-Distributed Stochastic Neighbor Embedding (TSNE)',
                tsne.fit(df_all.drop('label', axis=1)).embedding_)]
    # Plot the two embeddings side by side, coloured by sample group
    colors = sns.color_palette('Paired', 4)
    fig = plt.figure(figsize=(16, 7))
    for k, (title, pos) in enumerate(pos_lst, 1):
        ## fig.add_subplot() works in ipython notebook but creates a
        ## mysterious 3rd axes in python...
        # ax = fig.add_subplot(1, 2, k)
        ax = plt.subplot(1, 2, k)
        ax.set_title(title)
        for i in range(len(colors)):
            samples = pos[(df_all.label == i).values, :]
            ax.scatter(samples[:, 0], samples[:, 1],
                       color=colors[i], edgecolor='none',
                       label=legend[i])
        ax.legend()
    plt.savefig('../figs/' +
                self.__PG_STATS_TBL__[self.__PG_STATS_TBL__.find("_") + 1:] +
                'fold' + str(fold) + '.png',
                dpi=300, transparent=True)
    plt.close(fig)
Example 7: print
# Required import: from sklearn.manifold import TSNE [as alias]
# Or: from sklearn.manifold.TSNE import fit [as alias]
# prefilter_train / prefilter_test are the auto-encoded (compressed) feature matrices
# produced earlier in the script; pca, plotScatter and plotTSNE are helper functions
# defined there as well.
print("prefilter_train: ", prefilter_train.shape)
print("prefilter_test: ", prefilter_test.shape)

print("Performing PCA")
X_pca = pca(prefilter_train)
plotScatter(X_pca, y_train, title="6_PCA reduction (2d) of auto-encoded data (%dd)" % prefilter_train.shape[1])

print("Performing TSNE")
model = TSNE(n_components=2, random_state=0, init="pca")
toPlot = model.fit_transform(prefilter_train[:1000])
plotTSNE(toPlot, y_train[:1000], nb_classes, "7_t-SNE embedding of auto-encoded data ")

print("Classifying and comparing")
# Classify results from the autoencoder
print("Building classical fully connected layer for classification")
# Note: the Sequential/Dense calls below follow an old Keras API (positional
# input/output dims, nb_epoch, show_accuracy); current Keras uses a different signature.
model = Sequential()
model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(prefilter_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(prefilter_test, Y_test))
score = model.evaluate(prefilter_test, Y_test, verbose=0, show_accuracy=True)
print('\nscore:', score)
print('Loss change:', 100 * (score[0] - classical_score[0]) / classical_score[0], '%')
print('Accuracy change:', 100 * (score[1] - classical_score[1]) / classical_score[1], '%')