Python Network.load_file Method Code Examples

This article collects typical usage examples of the Python method clustergrammer.Network.load_file. If you are wondering what exactly Network.load_file does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the containing class, clustergrammer.Network.


The sections below show 10 code examples of the Network.load_file method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
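
Before the individual examples, here is a minimal sketch (not taken from any single project below) of the workflow most of these snippets share: load a tab-separated matrix with load_file, cluster it, and write out the visualization JSON consumed by the clustergrammer.js front end. The file names my_matrix.tsv and my_matrix.json are placeholders, not part of any example below.

from clustergrammer import Network

net = Network()
net.load_file('my_matrix.tsv')    # placeholder TSV: rows x columns data matrix
net.swap_nan_for_zero()           # optional: replace missing values before clustering
net.cluster()                     # hierarchical clustering with default parameters
net.write_json_to_file('viz', 'my_matrix.json')  # JSON for the front end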

Example 1: main

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
def main():
  from clustergrammer import Network

  net = Network()

  net.load_file('txt/rc_two_cats.txt')

  tmp_size = 50

  inst_dm = make_distance_matrix(net, tmp_size)

  randomly_sample_rows(net, inst_dm, tmp_size)
Developer: ErwanDavid, Project: clustergrammer.js, Lines: 14, Source: cat_arrangement_pval.py

Example 2: make_plex_matrix

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
def make_plex_matrix():
  '''
  Make a cell line matrix with plex rows and cell line columns.
  This will be used as a negative control that should show worsening correlation
  as data is normalized/filtered.
  '''
  import numpy as np
  import pandas as pd
  from clustergrammer import Network

  # load cl_info
  net = Network()
  cl_info = net.load_json_to_dict('../cell_line_info/cell_line_info_dict.json')

  # load cell line expression
  net.load_file('../CCLE_gene_expression/CCLE_NSCLC_all_genes.txt')
  tmp_df = net.dat_to_df()
  df = tmp_df['mat']

  cols = df.columns.tolist()

  rows = range(9)
  rows = [i+1 for i in rows]
  print(rows)

  mat = np.zeros((len(rows), len(cols)))

  for inst_col in cols:

    for inst_cl in cl_info:

      if inst_col in inst_cl:
        inst_plex = int(cl_info[inst_cl]['Plex'])

        if inst_plex != -1:
          # print(inst_col + ' in ' + inst_cl + ': ' + str(inst_plex))

          row_index = rows.index(inst_plex)
          col_index = cols.index(inst_col)

          mat[row_index, col_index] = 1


  df_plex = pd.DataFrame(data=mat, columns=cols, index=rows)

  filename = '../lung_cellline_3_1_16/lung_cl_all_ptm/precalc_processed/' + \
            'exp-plex.txt'
  df_plex.to_csv(filename, sep='\t')
Developer: MaayanLab, Project: cst_drug_treatment, Lines: 50, Source: precalc_PTM_norm.py

Example 3: main

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
def main():
  import numpy as np
  import pandas as pd
  from clustergrammer import Network

  rtk_list = load_rtks()

  net = Network()
  net.load_file('txt/tmp_cst_drug_treat_cl.txt')
  df_dict = net.dat_to_df()

  inst_df = df_dict['mat']

  inst_df = inst_df.loc[rtk_list]  # .loc replaces the removed .ix indexer for label-based row selection

  inst_df.to_csv('txt/RTK_exp_in_drug_treat_cl.txt', sep='\t')
Developer: MaayanLab, Project: cst_drug_treatment, Lines: 18, Source: get_RTK_CCLE.py

Example 4: prepare_heatmap

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
# (this snippet also relies on os and jinja2's Environment / FileSystemLoader, imported in the enclosing module)
def prepare_heatmap(matrix_input, html_file, html_dir, tools_dir, categories, distance, linkage):
    # prepare directory and html
    os.mkdir(html_dir)

    env = Environment(loader=FileSystemLoader(tools_dir + "/templates"))
    template = env.get_template("clustergrammer.template")
    overview = template.render()
    with open(html_file, "w") as outf:
        outf.write(overview)

    json_output = html_dir + "/mult_view.json"

    net = Network()
    net.load_file(matrix_input)
    if (categories['row']):
        net.add_cats('row', categories['row'])
    if (categories['col']):
        net.add_cats('col', categories['col'])
    net.cluster(dist_type=distance, linkage_type=linkage)
    net.write_json_to_file('viz', json_output)
Developer: ImmPortDB, Project: immport-galaxy, Lines: 22, Source: clustergrammerIPG.py

Example 5: make_json_from_tsv

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
def make_json_from_tsv(name):
  '''
  make a clustergrammer json from a tsv file
  '''
  from clustergrammer import Network

  print('\n' + name)

  net = Network()

  filename = 'txt/'+ name + '.txt'

  net.load_file(filename)

  df = net.dat_to_df()

  net.swap_nan_for_zero()

  # Z-score first so that the column distributions are similar
  net.normalize(axis='col', norm_type='zscore', keep_orig=True)

  # filter the rows to keep the perts with the largest normalized values
  net.filter_N_top('row', 1000)

  num_rows = net.dat['mat'].shape[0]
  num_cols = net.dat['mat'].shape[1]

  print('num_rows ' + str(num_rows))
  print('num_cols ' + str(num_cols))

  if num_cols < 50 or num_rows < 1000:

    views = ['N_row_sum']
    net.make_clust(dist_type='cos', views=views)
    export_filename = 'json/' + name + '.json'
    net.write_json_to_file('viz', export_filename)

  else:
    print('did not cluster, too many columns ')
Developer: MaayanLab, Project: LINCS_GCT, Lines: 41, Source: process_gct_and_make_jsons.py

Example 6: reproduce_Mark_correlation_matrix

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
def reproduce_Mark_correlation_matrix():
  import pandas as pd
  from scipy.spatial.distance import squareform
  from clustergrammer import Network
  from copy import deepcopy

  dist_vect = calc_custom_dist(data_type='ptm_none', dist_metric='correlation',
                              pairwise='True')


  dist_mat = squareform(dist_vect)

  # make similarity matrix
  dist_mat = 1 - dist_mat

  net = Network()

  data_type = 'ptm_none'

  filename = '../lung_cellline_3_1_16/lung_cl_all_ptm/precalc_processed/' + \
             data_type + '.txt'

  # load file and export dataframe
  net = deepcopy(Network())
  net.load_file(filename)
  net.swap_nan_for_zero()
  tmp_df = net.dat_to_df()
  df = tmp_df['mat']

  cols = df.columns.tolist()
  rows = cols

  mark_df = pd.DataFrame(data=dist_mat, columns=cols, index=rows)

  save_filename = '../lung_cellline_3_1_16/lung_cl_all_ptm/precalc_processed/' \
             + 'Mark_corr_sim_mat' + '.txt'
  mark_df.to_csv(save_filename, sep='\t')
Developer: MaayanLab, Project: cst_drug_treatment, Lines: 39, Source: compare_cl_distances.py

Example 7: Network

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
# make network object and load file
from clustergrammer import Network
net = Network()
net.load_file('mult_view.tsv')

# Z-score normalize the rows
# net.normalize(axis='row', norm_type='zscore', keep_orig=True)

# calculate clustering using default parameters
net.cluster()

# save visualization JSON to file for use by front end
net.write_json_to_file('viz', 'mult_view.json')

# needs pandas and sklearn as well
# pip install --user --upgrade clustergrammer pandas sklearn
Developer: unreno, Project: observations, Lines: 27, Source: create_clustergrammer_json.py

Example 8: Network

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
import time
# import StringIO

start_time = time.time()

# import network class from Network.py

from clustergrammer import Network
net = Network()

net.load_file('txt/rc_two_cats.txt')
# net.load_file('txt/example_tsv.txt')
# net.load_file('txt/col_categories.txt')
# net.load_file('txt/mat_cats.tsv')
# net.load_file('txt/mat_1mb.Txt')
# net.load_file('txt/mnist.txt')
# net.load_file('txt/sim_mat_4_cats.txt')

views = ['N_row_sum','N_row_var']

# # filtering rows and cols by sum
# net.filter_sum('row', threshold=20)
# net.filter_sum('col', threshold=30)

# # keep top rows based on sum
# net.filter_N_top('row', 10, 'sum')

net.make_clust(dist_type='cos', views=views, dendro=True,
               sim_mat=True, filter_sim=0.1)

# net.produce_view({'N_row_sum':10,'dist':'euclidean'})
Developer: jjdblast, Project: clustergrammer.js, Lines: 33, Source: make_clustergrammer.py

Example 9: Network

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
import time
start_time = time.time()

from clustergrammer import Network
net = Network()

# choose tsv file
####################
inst_name = 'Tyrosine'
# net.load_file('txt/phos_ratios_all_treat_no_geld_ST.txt')
net.load_file('txt/phos_ratios_all_treat_no_geld_Tyrosine.txt')


net.swap_nan_for_zero()

# net.normalize(axis='row', norm_type='zscore', keep_orig=True)

print(net.dat.keys())

views = ['N_row_sum', 'N_row_var']

net.make_clust(dist_type='cos', views=views, dendro=True,
               sim_mat=True, filter_sim=0.1, calc_cat_pval=False)
               # run_enrichr=['KEA_2015'])
               # run_enrichr=['ENCODE_TF_ChIP-seq_2014'])
               # run_enrichr=['GO_Biological_Process_2015'])

net.write_json_to_file('viz', 'json/'+inst_name+'.json', 'no-indent')
net.write_json_to_file('sim_row', 'json/'+inst_name+'_sim_row.json', 'no-indent')
net.write_json_to_file('sim_col', 'json/'+inst_name+'_sim_col.json', 'no-indent')
Developer: MaayanLab, Project: cst_drug_treatment, Lines: 32, Source: make_drug_treatment_figures.py

Example 10: StringIO

# Required import: from clustergrammer import Network [as alias]
# Or: from clustergrammer.Network import load_file [as alias]
		# Format index/headers for clustergrammer
		gene_attribute_matrix.index = gene_attribute_matrix.index.map(lambda s: '%s: %s' % (gene_attribute_matrix.index.name, s))
		gene_attribute_matrix.columns = gene_attribute_matrix.columns.map(lambda s: '%s: %s' % (gene_attribute_matrix.columns.name, s))
		# Remove names for clustergrammer
		gene_attribute_matrix.index.name = ""
		gene_attribute_matrix.columns.name = ""
		# Write to file
		# fp = StringIO()
		# gene_attribute_matrix.to_csv(fp, sep='\t')
		gene_attribute_matrix.to_csv('tmp.txt', sep='\t')

		# Clustergrammer
		from clustergrammer import Network
		net = Network()
		# net.load_tsv_to_net(fp, name) # StringIO
		net.load_file('tmp.txt')
		net.swap_nan_for_zero()
		# Generate
		net.make_clust(dist_type='cos',views=['N_row_sum', 'N_row_var'], dendro=True,
					   sim_mat=True, filter_sim=0.1, calc_cat_pval=False)

		# Insert into database
		cur.execute('insert into `datasets` (`Name`, `prot_att`, `att_att`, `prot_prot`) values (?, ?, ?, ?)',
			(name,
			 net.export_net_json('viz', indent='no-indent'),
			 net.export_net_json('sim_col', indent='no-indent'),
			 net.export_net_json('sim_row', indent='no-indent')))
		con.commit()
	except Exception as e:
		print "Couldn't process %s (%s)" % (name, e)
		continue
Developer: MaayanLab, Project: adhesome, Lines: 33, Source: process_matrix.py


Note: The clustergrammer.Network.load_file method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's License; do not republish without permission.