This article collects typical usage examples of the dat['mat'] attribute of clustergrammer.Network in Python. If you are unsure how Network.dat['mat'] is used in practice, or are looking for concrete examples of it, the curated code examples here may help. You can also explore further usage examples of the containing class, clustergrammer.Network.
Two code examples involving Network.dat['mat'] are shown below, sorted by popularity by default.
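Before the full examples, a minimal sketch of the pattern both of them rely on may help: net.dat['nodes']['row'] and net.dat['nodes']['col'] hold the row and column labels, while net.dat['mat'] holds the matching data matrix. The gene and term names below, and the use of NumPy to allocate the matrix, are illustrative placeholders rather than code taken from the examples.

import numpy as np
from clustergrammer import Network

net = Network()

# row and column labels; their lengths define the matrix shape
net.dat['nodes']['row'] = ['gene-A', 'gene-B']
net.dat['nodes']['col'] = ['term-1', 'term-2', 'term-3']

# allocate a 2 x 3 matrix of zeros and set one membership value
net.dat['mat'] = np.zeros([2, 3])
net.dat['mat'][0, 1] = 1.0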
Example 1: clust_from_response
# Required module import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import dat['mat'] [as alias]
def clust_from_response(response_list):
    from clustergrammer import Network
    import scipy
    import json
    import pandas as pd
    import math
    from copy import deepcopy

    print('----------------------')
    print('enrichr_clust_from_response')
    print('----------------------')

    ini_enr = transfer_to_enr_dict( response_list )

    enr = []
    scores = {}

    score_types = ['combined_score', 'pval', 'zscore']

    # one Series of term -> score per score type
    for score_type in score_types:
        scores[score_type] = pd.Series()

    for inst_enr in ini_enr:
        if inst_enr['combined_score'] > 0:

            # make series of enriched terms with scores
            for score_type in score_types:
                # collect the scores of the enriched terms
                if score_type == 'combined_score':
                    scores[score_type][inst_enr['name']] = inst_enr[score_type]
                if score_type == 'pval':
                    scores[score_type][inst_enr['name']] = -math.log(inst_enr[score_type])
                if score_type == 'zscore':
                    scores[score_type][inst_enr['name']] = -inst_enr[score_type]

            # keep enrichment values
            enr.append(inst_enr)
    # sort and normalize the scores
    for score_type in score_types:
        scores[score_type] = scores[score_type] / scores[score_type].max()
        scores[score_type] = scores[score_type].sort_values(ascending=False)
    number_of_enriched_terms = len(scores['combined_score'])

    enr_score_types = ['combined_score', 'pval', 'zscore']

    if number_of_enriched_terms < 10:
        num_dict = {'ten': 10}
    elif number_of_enriched_terms < 20:
        num_dict = {'ten': 10, 'twenty': 20}
    else:
        num_dict = {'ten': 10, 'twenty': 20, 'thirty': 30}

    # gather lists of top scores
    top_terms = {}
    for enr_type in enr_score_types:
        top_terms[enr_type] = {}
        for num_terms in num_dict.keys():
            inst_num = num_dict[num_terms]
            top_terms[enr_type][num_terms] = scores[enr_type].index.tolist()[:inst_num]

    # gather the terms that should be kept - they are at the top of the score list
    keep_terms = []
    for inst_enr_score in top_terms:
        for tmp_num in num_dict.keys():
            keep_terms.extend( top_terms[inst_enr_score][tmp_num] )

    keep_terms = list(set(keep_terms))

    # keep enriched terms that are in the top 10 based on at least one score
    keep_enr = []
    for inst_enr in enr:
        if inst_enr['name'] in keep_terms:
            keep_enr.append(inst_enr)

    # fill in full matrix
    #######################

    # genes
    row_node_names = []
    # enriched terms
    col_node_names = []

    # gather information from the list of enriched terms
    for inst_enr in keep_enr:
        col_node_names.append(inst_enr['name'])
        row_node_names.extend(inst_enr['int_genes'])

    row_node_names = sorted(list(set(row_node_names)))

    net = Network()
    net.dat['nodes']['row'] = row_node_names
    net.dat['nodes']['col'] = col_node_names
    net.dat['mat'] = scipy.zeros([len(row_node_names), len(col_node_names)])

    for inst_enr in keep_enr:
        inst_term = inst_enr['name']
#......... the rest of this code is omitted .........
Example 2: main
# Required module import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import dat['mat'] [as alias]
def main(mongo_address, viz_id, vect_post):
    from bson.objectid import ObjectId
    from pymongo import MongoClient
    from clustergrammer import Network

    # set up database connection
    client = MongoClient(mongo_address)
    db = client.clustergrammer

    viz_id = ObjectId(viz_id)

    # get placeholder viz data
    found_viz = db.networks.find_one({'_id': viz_id})

    # initialize export_dat
    export_dat = {}
    export_viz = {}

    # try to make a clustergram using vect_post
    try:
        # ini network obj
        net = Network()

        # vector endpoint
        net.load_vect_post_to_net(vect_post)

        # swap nans for zeros
        net.swap_nan_for_zero()

        # deprecated clustering modules
        ####################################
        # cluster g2e using pandas
        # net.fast_mult_views()
        # # calculate top views rather than percentage views
        # net.N_top_views()
        ####################################

        net.make_filtered_views(dist_type='cosine', dendro=True,
                                views=['N_row_sum'], linkage_type='average')

        # export dat
        try:
            # convert data to list
            net.dat['mat'] = net.dat['mat'].tolist()
            net.dat['mat_up'] = net.dat['mat_up'].tolist()
            net.dat['mat_dn'] = net.dat['mat_dn'].tolist()

            export_dat['dat'] = net.export_net_json('dat')
            export_dat['source'] = 'g2e_enr_vect'

            dat_id = db.network_data.insert( export_dat )
            print('G2E: network data successfully uploaded')
        except:
            export_dat['dat'] = 'data-too-large'
            export_dat['source'] = 'g2e_enr_vect'

            dat_id = db.network_data.insert( export_dat )
            print('G2E: network data too large to be uploaded')

        update_viz = net.viz
        update_dat = dat_id

    # if there is an error, update the json with the error
    except:
        print('\n--------------------------------')
        print('G2E clustering error')
        print('----------------------------------\n')
        update_viz = 'error'
        update_dat = 'error'

    # export viz to database
    found_viz['viz'] = update_viz
    found_viz['dat'] = update_dat

    # update the viz data
    try:
        db.networks.update_one( {"_id": viz_id}, {"$set": found_viz} )
        print('\n\n---------------------------------------------------')
        print('G2E Successfully made and uploaded clustergram')
        print('---------------------------------------------------\n\n')
    except:
        print('\n--------------------------------')
        print('G2E error in loading viz into database')
        print('----------------------------------\n')

    # close database connection
    client.close()