This article collects typical usage examples of the MiniSom class from Python's minisom module. If you are unsure what the MiniSom class does or how to use it, the curated examples below may help.
The following shows 15 code examples of the MiniSom class, sorted by popularity by default.
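Before the individual examples, a minimal end-to-end sketch of the workflow most of them follow may help: construct the map, seed the weights from the data, train, and query the winning node. The map size and random toy data below are made up purely for illustration.

import numpy as np
from minisom import MiniSom

data = np.random.rand(100, 4)                           # toy data: 100 samples, 4 features
som = MiniSom(7, 7, 4, sigma=1.0, learning_rate=0.5)    # 7x7 map, input length 4
som.random_weights_init(data)                           # seed the weights with random samples
som.train_random(data, 1000)                            # 1000 random training iterations
print(som.winner(data[0]))                              # (row, col) of the best matching unit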
Example 1: SomModel
class SomModel(Model):
    def __init__(self,input_length):
        from minisom import MiniSom
        self.som = MiniSom(10, 10, input_length,sigma=0.3,learning_rate=0.1,normalize=True)
    def run(self,inp):
        self.som.train_single_instance(inp.flatten())  # feed one flattened sample at a time
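Note that the normalize keyword and train_single_instance method used here are not part of the stock minisom API; they appear to come from a modified fork. Assuming that fork, a hypothetical driver for this model might look like:

import numpy as np

model = SomModel(input_length=64)        # hypothetical 8x8 flattened input
for frame in np.random.rand(5, 8, 8):    # made-up sensory frames
    model.run(frame)                     # each frame is flattened and fed to the SOM one at a time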
Example 2: SOM
def SOM(data,leninput,lentarget):
    som = MiniSom(16,16,leninput,sigma=1.0,learning_rate=0.5)
    som.random_weights_init(data)
    print("Training...")
    som.train_random(data,10000) # training with 10000 iterations
    print("\n...ready!")
    numpy.save('weight_som',som.weights)
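The weights saved here are loaded back in Example 4 below. A minimal sketch of that round trip, assuming the same 16x16 geometry and an older minisom release that exposes the weights attribute directly (newer releases use get_weights()):

import numpy
from minisom import MiniSom

leninput = 25                                   # hypothetical input length; must match training
som = MiniSom(16, 16, leninput, sigma=1.0, learning_rate=0.5)
som.weights = numpy.load('weight_som.npy')      # numpy.save added the .npy extension
sample = numpy.random.rand(leninput)            # stand-in for a real feature vector
print(som.winner(sample))                       # (row, col) of the best matching unit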
Example 3: init
class Som:
    def init(self):
        self.core = MiniSom(50,50,6,sigma=.8,learning_rate=.5) # needs to match generating minisom command (specifically the load_map)
        self.core.load_map()
        self.callme = rospy.Service("mapping", Compute, self.callback)
        print "SOM setup complete"
    def callback(self, data):
        vector = np.array([data.fx, data.fy, data.fz, data.tx, data.ty, data.tz]) # format as needed
        print vector
        w = self.core.winner(vector)
        return w[0],w[1]
Example 4: test_som
def test_som():
    print "Clustering.."
    session_log_db = db.session_log
    allTopic = articles.distinct("topic")
    lentopic = len(allTopic)
    uniqueTopic = []
    for t in allTopic:
        uniqueTopic.append("Topik " + str(t).strip())
    lebarSOM = lentopic*lentopic + lentopic*2 + 1
    panjangSOM = session_log_db.find({"data_uji":no_uji}).count()
    #somInput = zeros((panjangSOM,lebarSOM),dtype=int16)
    somInput = []
    oriSess = []
    for s in session_log_db.find({"data_uji":no_uji}):
        somInput.append(getPresedenceMatrix(convertSession(s["session"],uniqueTopic),uniqueTopic,1))
        oriSess.append(s["session"])
    som = MiniSom(16,16,lentopic,sigma=1.0,learning_rate=0.5)
    som.weights = numpy.load('weight_som.npy')
    #print som.weights
    outfile = open('cluster-result.csv','w')
    seq_number = 0
    cluster_mongo = db.cluster_result
    cluster_mongo.remove({"data_uji":no_uji})
    for cnt,xx in enumerate(somInput):
        w = som.winner(xx) # getting the winner
        #print cnt
        #print xx
        #print w
        #for z in xx:
        # outfile.write("%s " % str(z))
        outfile.write("%s " % str(("|".join(oriSess[seq_number]))))
        outfile.write("%s-%s \n" % (str(w[0]),str(w[1])))
        cluster_mongo.insert({"topik":"|".join(oriSess[seq_number]),"cluster":(str(w[0])+"-"+str(w[1])),"data_uji":no_uji})
        seq_number = seq_number + 1
        #outfile.write("%s %s\n" % str(xx),str(w))
        # place a marker on the winning position for the sample xx
        #plot(w[0]+.5,w[1]+.5,markers[t[cnt]],markerfacecolor='None',
        # markeredgecolor=colors[t[cnt]],markersize=12,markeredgewidth=2)
    outfile.close()
    #TopikCluster()
    html = '<div role="alert" class="alert alert-success alert-dismissible fade in">'
    html = html + ' <button aria-label="Close" data-dismiss="alert" class="close" type="button"><span aria-hidden="true">Close</span></button>'
    html = html + 'Berhasil Melakukan Clustering</div>'
    return html
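The loop above writes each session next to the coordinates of its winning node. Stock minisom can also do this grouping in a single call with win_map, which returns a dictionary keyed by winner coordinates; a small sketch on toy data:

import numpy as np
from minisom import MiniSom

data = np.random.rand(30, 9)       # toy stand-in for the somInput vectors
som = MiniSom(16, 16, 9, sigma=1.0, learning_rate=0.5)
som.train_random(data, 1000)
clusters = som.win_map(data)       # {(row, col): [samples mapped to that node], ...}
for node, members in clusters.items():
    print(node, len(members))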
Example 5: test_recommendation
def test_recommendation():
    uji_profil = db.uji_profil
    current_seq = []
    for t in uji_profil.find({}):
        current_seq.append("Topik " + str(t['topic']))
    '''
    APPLY SOM
    '''
    allTopic = articles.distinct("topic")
    lentopic = len(allTopic)
    uniqueTopic = []
    for t in allTopic:
        uniqueTopic.append("Topik " + str(t).strip())
    lebarSOM = lentopic*lentopic + lentopic*2 + 1
    somInput = []
    somInput.append(getPresedenceMatrix(convertSession(current_seq,uniqueTopic),uniqueTopic,1))
    som = MiniSom(16,16,lentopic,sigma=1.0,learning_rate=0.5)
    som.weights = numpy.load('weight_som.npy')
    cluster_winner = ""
    for cnt,xx in enumerate(somInput):
        w = som.winner(xx) # getting the winner
        cluster_winner = (str(w[0])+"-"+str(w[1]))
    '''
    SEARCH FOR THE PATTERN IN PARTICULAR CLUSTER
    '''
    print cluster_winner
    print current_seq
    prefix_result = db.prefix_result
    prefix_cluster = prefix_result.find({"cluster":cluster_winner,"data_uji":no_uji}).sort("min_sup",pymongo.DESCENDING)
    topik_rekomendasi = getTopikRekomendasi(current_seq,prefix_cluster)
    if topik_rekomendasi == "":
        prefix_cluster = prefix_result.find({"data_uji":no_uji}).sort("min_sup",pymongo.DESCENDING)
        topik_rekomendasi = getTopikRekomendasi(current_seq,prefix_cluster)
    html = "--tidak ada topik rekomendasi--"
    if(topik_rekomendasi!=""):
        the_topik = topik_rekomendasi.replace("Topik","").strip()
        html = getTestArticle(the_topik,"Rekomendasi 1","accordion_recommendation",'col_rek1',"")
        html += getTestArticle(the_topik,"Rekomendasi 2","accordion_recommendation",'col_rek2',"")
        html += getTestArticle(the_topik,"Rekomendasi 3","accordion_recommendation",'col_rek3',"")
    return html
Example 6: KuKuModel
class KuKuModel(Model):
    def __init__(self,proprioception_input_length,sensory_input_length,reservoir_size):
        # Build the Reservoir
        tau = .1 # execution timestep for the cortical rate model
        sigma = .001 # intra-reservoir weights
        eps = .1 # learning rate
        som_size = 10*10
        self.sensory_input_length = sensory_input_length
        self.proprioception_input_length = proprioception_input_length
        full_reservoir_input_length = proprioception_input_length+som_size
        # Nodes: units, tau, method
        self.reservoir_input = esn.Node((full_reservoir_input_length,), 0, esn._load )
        self.reservoir = esn.Node((reservoir_size,), tau, esn._reservoir )
        self.reservoir_output = esn.Node((som_size,), 0, esn._load )
        # Arcs: target, source, weight, eps
        # input from som
        self.d_P = esn.Arc( self.reservoir, self.reservoir_input, sigma, 0 )
        self.d_P.initConnections( numpy.random.randn, self.reservoir.shape+self.reservoir_input.shape ) # type of init numpy func
        #print d_P.connections
        # recurrent connections intra node
        self.r_P = esn.Arc( self.reservoir, self.reservoir, sigma, 0 )
        self.r_P.initConnections( numpy.random.randn, self.reservoir.shape+self.reservoir.shape ) # type of init numpy func
        #print r_P.connections
        # input from som
        self.d_out = esn.Arc( self.reservoir_output, self.reservoir, 0, eps )
        self.d_out.initConnections( numpy.random.randn, self.reservoir_output.shape+self.reservoir.shape ) # type of init numpy func
        #print d_out.connections
        from minisom import MiniSom
        self.som = MiniSom(10, 10, sensory_input_length,sigma=0.3,learning_rate=0.1,normalize=True)
        self.previous_som_activation = numpy.zeros((10,10))
    def run(self,inp):
        self.som.train_single_instance(inp[:self.sensory_input_length])
        self.reservoir_input.update(numpy.append(self.previous_som_activation.flatten().copy(),inp[-self.proprioception_input_length:])) # 3
        self.reservoir.update(self.d_P.read())
        self.reservoir_output.update(self.d_out.read())
        print "error:",self.som.activation_map.flatten() - self.reservoir_output.state
        self.d_out.learn(self.som.activation_map.flatten() - self.reservoir_output.state )
        self.previous_som_activation = self.som.activation_map.flatten().copy()
Example 7: make_treeview
def make_treeview(self, data, liststore):
    #i = 0
    cols = self.columns[self.combobox.get_active()]
    #print type(cols)
    #print len(cols)
    for d in data:
        #i += 1
        tmp = d.tolist()
        #print 'tmp', tmp
        #while len(tmp) < cols:
        #tmp.append(False)
        #print 'tmp', tmp
        #cols = cols - 1
        Qe = MiniSom.quantization_error_subset(self.som,d,len(cols))
        #print tmp
        tmp.append(Qe)
        tmp.append(4 * Qe ** 0.5)
        liststore.append(tmp)
    treeview = gtk.TreeView(model=liststore)
    #i = 0
    for d in range(len(self.test_data[0])):
        #print i
        #i += 1
        renderer_text = gtk.CellRendererText()
        column_text = gtk.TreeViewColumn(self.pattern_labels[d], renderer_text, text=d)
        treeview.append_column(column_text)
    column_text = gtk.TreeViewColumn('Qe', renderer_text, text=d+1)
    treeview.append_column(column_text)
    column_text = gtk.TreeViewColumn('NLT', renderer_text, text=d+2)
    treeview.append_column(column_text)
    return treeview
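quantization_error_subset is not part of the standard minisom distribution and presumably comes from a customized copy bundled with this project. For reference, the stock API provides quantization_error, which averages the distance between each sample and its best matching unit; a minimal sketch:

import numpy as np
from minisom import MiniSom

data = np.random.rand(50, 4)       # toy data for illustration
som = MiniSom(6, 6, 4, sigma=1.0, learning_rate=0.5)
som.random_weights_init(data)
som.train_random(data, 500)
print("quantization error:", som.quantization_error(data))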
Example 8: setUp
def setUp(self):
    self.som = MiniSom(5, 5, 1)
    for w in self.som.weights: # checking weights normalization
        assert_almost_equal(1.0, np.linalg.norm(w))
    self.som.weights = np.zeros((5, 5)) # fake weights
    self.som.weights[2, 3] = 5.0
    self.som.weights[1, 1] = 2.0
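With the fake weights above, the node at (2, 3) is the only one whose weight matches an input of 5.0, so a follow-up test in the same suite would plausibly assert the winner as in this sketch (not the verbatim upstream test):

def test_winner(self):
    # distance to node (2, 3) is zero, so it must be the best matching unit
    assert self.som.winner([5.0]) == (2, 3)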
Example 9: __init__
def __init__(self,proprioception_input_length,sensory_input_length,reservoir_size):
    # Build the Reservoir
    tau = .1 # execution timestep for the cortical rate model
    sigma = .001 # intra-reservoir weights
    eps = .1 # learning rate
    som_size = 10*10
    self.sensory_input_length = sensory_input_length
    self.proprioception_input_length = proprioception_input_length
    full_reservoir_input_length = proprioception_input_length+som_size
    # Nodes: units, tau, method
    self.reservoir_input = esn.Node((full_reservoir_input_length,), 0, esn._load )
    self.reservoir = esn.Node((reservoir_size,), tau, esn._reservoir )
    self.reservoir_output = esn.Node((som_size,), 0, esn._load )
    # Arcs: target, source, weight, eps
    # input from som
    self.d_P = esn.Arc( self.reservoir, self.reservoir_input, sigma, 0 )
    self.d_P.initConnections( numpy.random.randn, self.reservoir.shape+self.reservoir_input.shape ) # type of init numpy func
    #print d_P.connections
    # recurrent connections intra node
    self.r_P = esn.Arc( self.reservoir, self.reservoir, sigma, 0 )
    self.r_P.initConnections( numpy.random.randn, self.reservoir.shape+self.reservoir.shape ) # type of init numpy func
    #print r_P.connections
    # input from som
    self.d_out = esn.Arc( self.reservoir_output, self.reservoir, 0, eps )
    self.d_out.initConnections( numpy.random.randn, self.reservoir_output.shape+self.reservoir.shape ) # type of init numpy func
    #print d_out.connections
    from minisom import MiniSom
    self.som = MiniSom(10, 10, sensory_input_length,sigma=0.3,learning_rate=0.1,normalize=True)
    self.previous_som_activation = numpy.zeros((10,10))
Example 10: SOM
def SOM(data,leninput,lentarget):
    som = MiniSom(5,5,leninput,sigma=1.0,learning_rate=0.5)
    som.random_weights_init(data)
    print("Training...")
    som.train_batch(data,10000) # training with 10000 iterations
    print("\n...ready!")
    numpy.save('weight_som.txt',som.weights)
    bone()
    pcolor(som.distance_map().T) # distance map as background
    colorbar()
    t = zeros(lentarget,dtype=int)
    # use different colors and markers for each label
    markers = ['o','s','D']
    colors = ['r','g','b']
    outfile = open('cluster-result.csv','w')
    for cnt,xx in enumerate(data):
        w = som.winner(xx) # getting the winner
        #print cnt
        #print xx
        #print w
        for z in xx:
            outfile.write("%s " % str(z))
        outfile.write("%s-%s \n" % (str(w[0]),str(w[1])))
        #outfile.write("%s %s\n" % str(xx),str(w))
        # place a marker on the winning position for the sample xx
        #plot(w[0]+.5,w[1]+.5,markers[t[cnt]],markerfacecolor='None',
        # markeredgecolor=colors[t[cnt]],markersize=12,markeredgewidth=2)
    outfile.close()
Example 11: testSOMs
def testSOMs():
    from sklearn import datasets
    from minisom import MiniSom
    d = datasets.load_iris()
    data = np.apply_along_axis(lambda x: x/np.linalg.norm(x), 1, d['data']) # data normalization
    som = MiniSom(7, 7, 4, sigma=1.0, learning_rate=0.5)
    som.random_weights_init(data)
    print("Training...")
    som.train_random(data, 1000) # random training
    print("\n...ready!")
    ### Plotting the response for each pattern in the iris dataset ###
    from pylab import plot,axis,show,pcolor,colorbar,bone
    bone()
    pcolor(som.distance_map().T) # plotting the distance map as background
    colorbar()
    t = d['target']
    # use different colors and markers for each label
    markers = ['o','s','D']
    colors = ['r','g','b']
    for cnt,xx in enumerate(data):
        w = som.winner(xx) # getting the winner
        # place a marker on the winning position for the sample xx
        plot(w[0]+.5,w[1]+.5,markers[t[cnt]],markerfacecolor='None',
             markeredgecolor=colors[t[cnt]],markersize=12,markeredgewidth=2)
    axis([0,som.weights.shape[0],0,som.weights.shape[1]])
    show() # show the figure
Example 12: SOM
def SOM(data,leninput,lentarget,alpha_som,omega_som):
    som = MiniSom(16,16,leninput,sigma=omega_som,learning_rate=alpha_som)
    som.random_weights_init(data)
    print("Training...")
    som.train_batch(data,20000) # training with 20000 iterations
    print("\n...ready!")
    numpy.save('weight_som',som.weights)
    bone()
    pcolor(som.distance_map().T) # distance map as background
    colorbar()
    t = zeros(lentarget,dtype=int)
    # use different colors and markers for each label
    markers = ['o','s','D']
    colors = ['r','g','b']
    outfile = open('cluster-result.csv','w')
    for cnt,xx in enumerate(data):
        w = som.winner(xx) # getting the winner
        for z in xx:
            outfile.write("%s " % str(z))
        outfile.write("%s-%s \n" % (str(w[0]),str(w[1])))
    outfile.close()
Example 13: train_som
def train_som(self):
    training_data = [v[0] for v in self.vectors]
    from minisom import MiniSom
    size = len(training_data[0])
    self.som = MiniSom(10, 10, size, sigma=0.3, learning_rate=0.5)
    print "Training SOM..."
    self.som.train_random(training_data, 100)
    print "...ready!"
Example 14: init_som
def init_som(self, widget=None, data=None):
    ##print self.data
    ### Initialization and training ###
    cols = self.columns[self.combobox.get_active()]
    data = self.data[:, 0:len(cols)]
    #print len(cols)
    self.som = MiniSom(self.width_spin_button.get_value_as_int(), self.height_spin_button.get_value_as_int(), len(cols),sigma=1.2,learning_rate=0.5)
    # self.som.weights_init_gliozzi(data)
    self.som.random_weights_init(data)
Example 15: train_som
def train_som(data, offset=None):
    """
    offset: offset between points used for training
    """
    if offset:
        data = data[::offset, :]
    som = MiniSom(
        param['nr_rows'],
        param['nr_cols'],
        data.shape[1],
        data,
        sigma=param['sigma'],
        learning_rate=param['learning_rate'],
        norm='minmax')
    #som.random_weights_init() # choose initial nodes from data points
    som.train_random(param['nr_epochs']) # random training
    return som
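The constructor signature in this last example (passing data positionally along with norm='minmax', and calling train_random without the data argument) does not match the stock minisom release, so it presumably relies on a customized fork. A rough equivalent against the standard API, with the min-max scaling done by hand, might look like the sketch below; the param dictionary is the caller's configuration, exactly as in the example above.

import numpy as np
from minisom import MiniSom

def train_som_standard(data, param, offset=None):
    """Roughly equivalent training routine using the stock minisom API."""
    if offset:
        data = data[::offset, :]
    # manual min-max normalization, column by column
    mins, maxs = data.min(axis=0), data.max(axis=0)
    data = (data - mins) / np.where(maxs > mins, maxs - mins, 1.0)
    som = MiniSom(param['nr_rows'], param['nr_cols'], data.shape[1],
                  sigma=param['sigma'], learning_rate=param['learning_rate'])
    som.random_weights_init(data)              # choose initial nodes from data points
    som.train_random(data, param['nr_epochs'])
    return som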