This page collects typical usage examples of the Python class formating.FormatingDataSets.FormatingDataSets. If you are unsure what the FormatingDataSets class does, how to use it, or simply want working examples, the curated class examples below may help.
The following lists 15 code examples of the FormatingDataSets class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
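All of the examples below lean on two static helpers of FormatingDataSets: get_abs_file_path, which resolves file paths taken from a configuration file, and printProgressofEvents, which reports loop progress. The class itself is not reproduced on this page, so the following is only a minimal sketch of what those helpers plausibly do, inferred from their call sites; it is not the project's actual implementation.

import os
import sys

class FormatingDataSets(object):

    @staticmethod
    def get_abs_file_path(relativePath):
        # Assumed behaviour: turn a (possibly relative) path from the config file
        # into an absolute path on disk.
        return os.path.abspath(relativePath)

    @staticmethod
    def printProgressofEvents(current, total, message):
        # Assumed behaviour: print a one-line "message current/total" progress update.
        sys.stdout.write("\r" + message + str(current) + "/" + str(total))
        sys.stdout.flush()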
Example 1: __init__
def __init__(self, preparedParameters, filePathResults, filePathAnalyseResult, topRank):
    print "Starting Analysing the results", datetime.today()
    absFilePath = filePathResults
    absfilePathAnalyseResult = filePathAnalyseResult #FormatingDataSets.get_abs_file_path(filePathAnalyseResult)
    fResult = open(absFilePath, 'r')
    with open(absfilePathAnalyseResult, 'w') as fnodes:
        self.success = 0
        element = 0
        for line in fResult:
            element = element + 1
            FormatingDataSets.printProgressofEvents(element, topRank, "Analysing the results: ")
            cols = line.strip().replace('\n', '').split('\t')
            if len(list(networkx.common_neighbors(preparedParameters.testGraph, cols[len(cols)-2], cols[len(cols)-1]))) != 0:
                self.success = self.success + 1
                fnodes.write(cols[len(cols)-2] + '\t' + cols[len(cols)-1] + '\t' + 'SUCCESS \r\n')
            else:
                fnodes.write(cols[len(cols)-2] + '\t' + cols[len(cols)-1] + '\t' + 'FAILED \r\n')
            if element == topRank:
                break
        result = float(self.success) / float(topRank) * 100
        strResult = 'Final Result: \t' + str(result) + '%'
        fnodes.write(strResult)
        fnodes.write('\n#\t' + str(self.success))
        fnodes.close()
    print "Analysing the results finished", datetime.today()
Example 2: analise
def analise(calcDb, topRank, TestGraph, util, method):
    order = sorted(list({'node1': r['node1'], 'node2': r['node2'], 'value': r[method]} for r in calcDb), key=lambda value: value['value'], reverse=True)
    BD = None
    if not os.path.exists(FormatingDataSets.get_abs_file_path(util.calculated_file + '.' + method + '.base.pdl')):
        BD = generate_finalResult(order, topRank, TestGraph, FormatingDataSets.get_abs_file_path(util.calculated_file + '.' + method + '.base.pdl'))
    else:
        BD = reading_Database(FormatingDataSets.get_abs_file_path(util.calculated_file + '.' + method + '.base.pdl'))
    return get_results(BD, method)
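Example 2 (like Examples 11 and 14 further down) keeps intermediate results in .pdl files handled through a Base object, which matches the PyDbLite API. The reading_Database helper it calls is not shown on this page; assuming PyDbLite really is the backend, a hypothetical version could look like this sketch.

from pydblite import Base   # older releases use: from PyDbLite import Base

def reading_Database(absFilePath):
    # Hypothetical helper: open an existing .pdl database created on a previous run.
    db = Base(absFilePath)
    db.open()
    return db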
Example 3: readingOrginalDataset
def readingOrginalDataset(self):
    print "Starting Reading Original Dataset", datetime.today()
    with open(self.OriginalDataSet) as f:
        self.OrignalContent = f.readlines()
        f.close()
    articleid = 0
    articles = []
    authornames = []
    authorofArticles = []
    authors = []
    article = None
    element = 0
    for line in self.OrignalContent:
        element = element + 1
        FormatingDataSets.printProgressofEvents(
            element, len(self.OrignalContent), "Reading File Content to Generate Graph: "
        )
        line = line.strip()
        if line.startswith("#*"):
            articleid = articleid + 1
            article = Article("p_" + str(articleid))
            article.articlename = line.replace("#*", "").replace("\r\n", "")
        if line.startswith("#t"):
            article.time = line.replace("#t", "").replace("\r\n", "")
        if line.startswith("#@"):
            authorsofArticle = line.replace("#@", "").replace("\r\n", "").split(",")
            for author in authorsofArticle:
                author = author.strip()
                if not author in authornames:
                    authornames.append(author)
                articleauthor = AuthorInArticle(article.articleid, authornames.index(author) + 1)
                authorofArticles.append(articleauthor)
        if line.startswith("#!"):
            articles.append(article)
    for index in range(len(authornames)):
        author = Author(index + 1, authornames[index])
        authors.append(author)
    self.Graph = networkx.Graph()
    for item_article in articles:
        self.Graph.add_node(
            item_article.articleid,
            {"node_type": "E", "title": item_article.articlename.decode("latin_1"), "time": int(item_article.time)},
        )
    for item_author in authors:
        self.Graph.add_node(
            int(item_author.authorid), {"node_type": "N", "name": item_author.name.decode("latin_1")}
        )
    for item_edge in authorofArticles:
        self.Graph.add_edge(item_edge.articleid, int(item_edge.authorid))
    print "Reading Original Dataset finished", datetime.today()
Example 4: calculatingInputToFuzzy
def calculatingInputToFuzzy(graph, nodesnotLinked, params):
    result = []
    #pdb = Base(calculatingFile)
    #pdb.create('node1', 'node2', 'IntensityNode1', 'IntencityNode2' ,'Similarity','AgesNode1', 'AgesNode2')
    #pdb.create_index('node1', 'node2')
    element = 0
    qtyofNodesToProcess = len(nodesnotLinked)
    for pair in nodesnotLinked:
        element = element + 1
        FormatingDataSets.printProgressofEvents(element, qtyofNodesToProcess, "Calculating features for nodes not liked: ")
        neighbors_node1 = all_neighbors(graph, pair[0])
        neighbors_node2 = all_neighbors(graph, pair[1])
        len_neihbors_node1 = len(neighbors_node1)
        len_neihbors_node2 = len(neighbors_node2)
        CommonNeigbors = neighbors_node1.intersection(neighbors_node2)
        IntensityNode1 = 0
        IntensityNode2 = 0
        Similarities = 0
        Similarity = 0
        AgesNode1 = 0
        AgesNode2 = 0
        for cn in CommonNeigbors:
            infoNode1 = list(edge for n1, n2, edge in graph.edges([pair[0], cn], data=True) if ((n1 == pair[0] and n2 == cn) or (n1 == cn and n2 == pair[0])))
            infoNode2 = list(edge for n1, n2, edge in graph.edges([pair[1], cn], data=True) if ((n1 == pair[1] and n2 == cn) or (n1 == cn and n2 == pair[1])))
            IntensityNode1 = IntensityNode1 + len(infoNode1)
            IntensityNode2 = IntensityNode2 + len(infoNode2)
            MaxTimeNode1 = max(info['time'] for info in infoNode1)
            MaxTimeNode2 = max(info['time'] for info in infoNode2)
            AgesNode1 = max(AgesNode1, MaxTimeNode1)
            AgesNode2 = max(AgesNode2, MaxTimeNode2)
            bagofWordsNode1 = list(info['keywords'] for info in infoNode1)
            bagofWordsNode2 = list(info['keywords'] for info in infoNode2)
            Similarities = Similarities + get_jacard_domain(bagofWordsNode1, bagofWordsNode2)
        AgesNode1 = abs(params.t0_ - AgesNode1)
        AgesNode2 = abs(params.t0_ - AgesNode2)
        if len(CommonNeigbors) > 0:
            Similarity = (Similarities / len(CommonNeigbors)) * 100
        result.append({'no1': str(pair[0]), 'no2': str(pair[1]), 'intensityno1': IntensityNode1, 'intensityno2': IntensityNode2, 'similarity': Similarity, 'ageno1': AgesNode1, 'ageno2': AgesNode2})
    return result
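Example 4 also depends on a get_jacard_domain helper that is not listed on this page. Judging from its name and the keyword bags passed to it, it presumably computes a Jaccard coefficient between the two keyword collections; the sketch below is written under that assumption and is not the project's own implementation.

def get_jacard_domain(bagofWordsNode1, bagofWordsNode2):
    # Hypothetical helper: plain Jaccard coefficient over the two keyword bags.
    set1 = set(bagofWordsNode1)
    set2 = set(bagofWordsNode2)
    if not set1 and not set2:
        return 0.0
    return float(len(set1 & set2)) / float(len(set1 | set2))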
Example 5: readingOrginalDataset
def readingOrginalDataset(self):
    print "Starting Reading Original Dataset", datetime.today()
    con = None
    try:
        con = psycopg2.connect(database='projetomestrado', user='postgres', password='123456')
        curPublicacao = con.cursor()
        curPublicacao.execute("select distinct p.idpublicacao, p.titulo, p.ano from projetomestrado.publicacao p inner join projetomestrado.autorpublicacao a on a.idpublicacao = p.idpublicacao where a.idautor in (select idautor from projetomestrado.autor where afiliacao = 'Instituto Militar de Engenharia')")
        curPublicacaoData = curPublicacao.fetchall()
        element = 0
        qty = len(curPublicacaoData)
        print qty
        for linha in curPublicacaoData:
            element = element + 1
            FormatingDataSets.printProgressofEvents(element, qty, "Adding paper to new graph: ")
            idpublicacao = linha[0]
            curPublicacaoPalavras = con.cursor()
            curPublicacaoPalavras.execute("select k.keyword from projetomestrado.keyword k inner join projetomestrado.publicacaokeyword pk on pk.idkeyword = k.idkeyword where pk.idpublicacao =" + str(idpublicacao))
            palavras = []
            for palavra in curPublicacaoPalavras.fetchall():
                palavras.append(palavra[0].strip())
            curAutores = con.cursor()
            curAutores.execute("select a.idautor, a.primeironome, a.ultimonome from projetomestrado.autorpublicacao ap inner join projetomestrado.autor a on a.idautor = ap.idautor where ap.idpublicacao = " + str(idpublicacao))
            autores = []
            for autor in curAutores.fetchall():
                autores.append([autor[0], autor[1] + "," + autor[2]])
            self.Publications.append([idpublicacao, linha[1], linha[2], palavras, autores])
        self.Graph = networkx.Graph()
        for item_article in self.Publications:
            self.Graph.add_node('P_' + str(item_article[0]), {'node_type': 'E', 'title': item_article[1].decode("latin_1"), 'time': int(item_article[2]), 'keywords': str(item_article[3])})
            for item_autor in item_article[4]:
                self.Graph.add_node(int(item_autor[0]), {'node_type': 'N', 'name': item_autor[1].decode("latin_1")})
                self.Graph.add_edge('P_' + str(item_article[0]), int(item_autor[0]))
        print "Reading Original Dataset finished", datetime.today()
    except psycopg2.DatabaseError, e:
        print 'Error %s' % e
Example 6: execution
def execution(configFile):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'T.EXPERIMENTO_ATUAL_CORE03.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodesSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    #GET THE AUTHORS THAT PUBLISH AT TRAINING AND TEST
    #A NUMBER OF PAPERS DEFINED AT MIN_EDGES IN CONFIG FILE
    nodes = nodesSelection.get_NowellAuthorsCore()
    #GET A PAIR OF AUTHORS THAT PUBLISH AT LEAST ONE ARTICLE AT TRAINING AND TEST.
    #DID NOT SEE ANY NEED
    collaborations = nodesSelection.get_NowellColaboration()
    #GET THE FIRST EDGES MADE BY THE COMBINATION OF NODES IN TRAINING GRAPH
    eOld = nodesSelection.get_NowellE(nodes, myparams.trainnigGraph)
    #GET THE FIRST EDGES MADE BY THE COMBINATION OF NODES IN TEST GRAPH THAT DO NOT HAVE EDGES IN TRAINING
    eNew = nodesSelection.get_NowellE2(nodes, eOld, myparams.testGraph)
    #GET THE NODES NOT LINKED OVER THE COMBINATION NODES.
    nodesNotLinked = nodesSelection.get_PairsofNodesNotinEold(nodes)
    #CREATING CALCULATION OBJECT
    calc = CalculateInMemory(myparams, nodesNotLinked)
    #CALCULATING THE SCORES.
    resultsofCalculation = calc.executingCalculate()
    #ORDERING THE RESULTS AND RETURNING THE TOP N
    orderingResults = calc.ordering(len(eNew), resultsofCalculation)
    #SAVING THE ORDERED RESULTS.
    calc.saving_orderedResult(util.ordered_file, orderingResults)
    #ANALYSE THE ORDERED RESULTS AND CHECK THE FUTURE.
    ScoresResults = Analyse.AnalyseNodesWithScoresInFuture(orderingResults, myparams.testGraph)
    #SAVING THE RESULTS.
    for index in range(len(ScoresResults)):
        Analyse.saving_analyseResult(ScoresResults[index], util.analysed_file + str(myparams.ScoresChoiced[index][0]) + '.txt')
        resultFile.write("TOTAL OF SUCESSS USING METRIC " + str(myparams.ScoresChoiced[index][0]) + " = " + str(Analyse.get_TotalSucess(ScoresResults[index])))
        resultFile.write("\n")
    resultFile.write("\n")
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(collaborations)*2) + "\t\t" + str(len(nodes)) + "\t" + str(len(eOld)) + "\t" + str(len(eNew)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()
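Example 6 (and the other execution drivers below) takes the path of a configuration file and writes its summary report next to it. A typical call would presumably look like the following, where the config path is only a placeholder, not a file from the project.

if __name__ == '__main__':
    # 'data/config/experiment.txt' is a hypothetical path; point it at your own config file.
    execution('data/config/experiment.txt')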
Example 7: step05
def step05(paramFile):
    #util = ParameterUtil(parameter_file = 'data/formatado/arxiv/nowell_example_1994_1999.txt')
    util = ParameterUtil(parameter_file = paramFile)
    myparams = Parameterization(util.keyword_decay, util.lengthVertex, util.t0, util.t0_, util.t1, util.t1_, util.FeaturesChoiced, util.graph_file, util.trainnig_graph_file, util.test_graph_file, util.decay)
    calc = Calculate(myparams, util.nodes_notlinked_file, util.calculated_file, util.ordered_file, util.maxmincalculated_file)
    myparams.generating_Test_Graph()
    analise = Analyse(myparams, FormatingDataSets.get_abs_file_path(util.calculated_file), FormatingDataSets.get_abs_file_path(util.analysed_file) + '.random.analised.txt', calc.qtyDataCalculated)
Example 8: execution
def execution(configFile):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'core03.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodeSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    #if not os.path.exists(FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.fuzzyinputy.txt')):
    data = calculatingInputToFuzzy(myparams.trainnigGraph, nodeSelection.nodesNotLinked, myparams)
    saving_files_calculting_input(FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.inputFuzzy.txt'), data)
    for item in data:
        calc = FuzzyCalculation(item['intensityno1'], item['intensityno2'], item['similarity'], item['ageno1'], item['ageno2'])
        print item['no1'], item['no2'], calc.potencial_ligacao, calc.grau_potencial_ligacao
    resultFile.write("\n")
    #
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(nodeSelection.get_NowellColaboration())*2) + "\t\t" + str(len(nodeSelection.nodes)) + "\t" + str(len(nodeSelection.eOld)) + "\t" + str(len(nodeSelection.eNeW)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()
Example 9: execution
def execution(configFile, metricas):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'core03.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodeSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    #CREATING CALCULATION OBJECT
    weights = {'cn': 1, 'aas': 1, 'pa': 1, 'jc': 1, 'ts08': 1, 'ts05': 1, 'ts02': 1}
    calc = CalculatingCombinationOnlyNowell(myparams, nodeSelection.nodesNotLinked, weights, False)
    saving_files_calculting(FormatingDataSets.get_abs_file_path(util.calculated_file), calc.results, metricas)
    Analise = nodeSelection.AnalyseAllNodesNotLinkedInFuture(nodeSelection.nodesNotLinked, myparams.testGraph)
    salvar_analise(FormatingDataSets.get_abs_file_path(util.analysed_file) + '.allNodes.csv', Analise)
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(nodeSelection.get_NowellColaboration())*2) + "\t\t" + str(len(nodeSelection.nodes)) + "\t" + str(len(nodeSelection.eOld)) + "\t" + str(len(nodeSelection.eNeW)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()
Example 10: execution
def execution(configFile):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'core03.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodeSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    #if not os.path.exists(FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.fuzzyinputy.txt')):
    data = calculatingInputToFuzzy(myparams.trainnigGraph, nodeSelection.nodesNotLinked, myparams)
    dataSorted = sorted(data, key=lambda value: value['result'], reverse=True)
    topRank = len(nodeSelection.eNeW)
    totalCalculated = len(dataSorted)
    dataToAnalysed = []
    if (topRank >= totalCalculated):
        for item in range(totalCalculated):
            dataToAnalysed.append({'no1': dataSorted[item]['no1'], 'no2': dataSorted[item]['no2'], 'result': dataSorted[item]['result']})
    else:
        for item in range(topRank):
            dataToAnalysed.append({'no1': dataSorted[item]['no1'], 'no2': dataSorted[item]['no2'], 'result': dataSorted[item]['result']})
    analise = AnalyseNodesInFuture(dataToAnalysed, myparams.testGraph)
    resultFile.write(repr(get_TotalSucess(analise)))
    resultFile.write("\n")
    #
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(nodeSelection.get_NowellColaboration())*2) + "\t\t" + str(len(nodeSelection.nodes)) + "\t" + str(len(nodeSelection.eOld)) + "\t" + str(len(nodeSelection.eNeW)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()
Example 11: calculatingWeights
def calculatingWeights(graph, nodesnotLinked, database, calculatingFile):
    pdb = Base(calculatingFile)
    pdb.create('node1', 'node2', 'cnWTS02', 'cnWTS05', 'cnWTS08', 'aaWTS02', 'aaWTS05', 'aaWTS08')
    pdb.create_index('node1', 'node2')
    element = 0
    qtyofNodesToProcess = len(nodesnotLinked)
    for pair in nodesnotLinked:
        element = element + 1
        FormatingDataSets.printProgressofEvents(element, qtyofNodesToProcess, "Calculating features for nodes not liked: ")
        neighbors_node1 = all_neighbors(graph, pair[0])
        neighbors_node2 = all_neighbors(graph, pair[1])
        len_neihbors_node1 = len(neighbors_node1)
        len_neihbors_node2 = len(neighbors_node2)
        CommonNeigbors = neighbors_node1.intersection(neighbors_node2)
        CNWts02Feature = 0
        CNWts05Feature = 0
        CNWts08Feature = 0
        AAWts02Feature = 0
        AAWts05Feature = 0
        AAWts08Feature = 0
        CNWJCFeature = 0
        AAWJCFeature = 0
        for cn in CommonNeigbors:
            item = get_partOfWeightCalculating(graph, database, pair, cn)
            CNWts02Feature = CNWts02Feature + item['cnWts02']
            CNWts05Feature = CNWts05Feature + item['cnWts05']
            CNWts08Feature = CNWts08Feature + item['cnWts08']
            AAWts02Feature = AAWts02Feature + item['aaWts02']
            AAWts05Feature = AAWts05Feature + item['aaWts05']
            AAWts08Feature = AAWts08Feature + item['aaWts08']
            #CNWJCFeature = CNWJCFeature + item['cnWJC']
            #AAWJCFeature = AAWJCFeature + item['aaWJC']
        pdb.insert(str(pair[0]), str(pair[1]), CNWts02Feature, CNWts05Feature, CNWts08Feature, AAWts02Feature, AAWts05Feature, AAWts08Feature)
        pdb.commit()
    return pdb
Example 12: get_pair_nodes_not_linked
def get_pair_nodes_not_linked(self):
    print "Starting getting pair of nodes that is not liked", datetime.today()
    results = []
    nodesinGraph = self.graph.nodes()
    nodesOrdered = sorted(nodesinGraph)
    totalnodesOrdered = len(nodesOrdered)
    element = 0
    for node in nodesOrdered:
        element = element + 1
        FormatingDataSets.printProgressofEvents(element, totalnodesOrdered, "Checking Node not liked: ")
        publicacoes = self.graph.edges(node, data=False)
        qtdepublicacoes = len(publicacoes)
        #print "O autor e seus papers ", node, qtdepublicacoes, publicacoes
        if (qtdepublicacoes >= self.min_papers):
            others = set(n for n in nodesOrdered if n > node)
            for otherNode in others:
                other_publicacoes = self.graph.edges(otherNode, data=False)
                other_qtdepublicacoes = len(other_publicacoes)
                if (other_qtdepublicacoes >= self.min_papers):
                    if (not self.graph.has_edge(node, otherNode)):
                        if self.USE_MAX_NUMBER_OF_PEOPLE_BETWEEN == True:
                            if networkx.has_path(self.graph, node, otherNode):
                                shortestPathResult = networkx.shortest_path(self.graph, node, otherNode)
                                #print shortestPathResult
                                tamanho_caminho = len(shortestPathResult) - 1
                                #print "%s ate %s: %s" % (node1, other_node, tamanho_caminho)
                                #print repr(networkx.shortest_path(graph, node1, other_node))
                                if (tamanho_caminho > 0) and (tamanho_caminho <= self.MAX_NUMBER_OF_PEOPLE_BETWEEN):  # -2 because the path includes the start and end nodes
                                    #print "adicionando %s - %s" % (node, otherNode)
                                    results.append([node, otherNode])
                        else:
                            results.append([node, otherNode])
    print "getting pair of nodes that is not liked finished", datetime.today()
    return results
Example 13: get_pair_nodes_not_linked
def get_pair_nodes_not_linked(self, graph, file, min_papers):
    print "Starting getting pair of nodes that is not liked", datetime.today()
    results = []
    nodesinGraph = set(n for n, d in graph.nodes(data=True) if d['node_type'] == 'N')
    currentNodes = set()
    for n in nodesinGraph:
        papers = set(networkx.all_neighbors(graph, n))
        print papers
        if (len(papers) >= min_papers):
            currentNodes.add(n)
    print 'qty of authors: ', len(currentNodes)
    nodesOrdered = sorted(currentNodes)
    element = 0
    totalnodesOrdered = len(nodesOrdered)
    for node1 in nodesOrdered:
        element = element + 1
        FormatingDataSets.printProgressofEvents(element, totalnodesOrdered, "Checking Node not liked: ")
        others = set(n for n in nodesOrdered if n > node1)
        notLinked = set()
        for other_node in others:
            if len(set(networkx.common_neighbors(graph, node1, other_node))) == 0:
                notLinked.add(other_node)
        results.append([node1, notLinked])
        if element % 2000 == 0:
            for item in results:
                file.write(str(item[0]) + '\t' + repr(item[1]) + '\n')
            results = []
    for item in results:
        file.write(str(item[0]) + '\t' + repr(item[1]) + '\n')
    results = []
    print "getting pair of nodes that is not liked finished", datetime.today()
Example 14: execution
def execution(configFile):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'wTScore03_010304.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodeSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    db = None
    if not os.path.exists(FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.base.pdl')):
        db = generateWeights(myparams.trainnigGraph, FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.base.pdl'), myparams)
    else:
        db = reading_Database(FormatingDataSets.get_abs_file_path(util.trainnig_graph_file + '.base.pdl'))
    calcDb = None
    if not os.path.exists(FormatingDataSets.get_abs_file_path(util.calculated_file + '.base.pdl')):
        calcDb = calculatingWeights(myparams.trainnigGraph, nodeSelection.nodesNotLinked, db, FormatingDataSets.get_abs_file_path(util.calculated_file) + '.base.pdl')
    else:
        calcDb = reading_Database(FormatingDataSets.get_abs_file_path(util.calculated_file + '.base.pdl'))
    ordering = get_ordering(calcDb, len(nodeSelection.eNeW))
    result = get_analyseNodesInFuture(ordering, myparams.testGraph)
    resultFile.write(repr(result))
    resultFile.write("\n")
    #
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(nodeSelection.get_NowellColaboration())*2) + "\t\t" + str(len(nodeSelection.nodes)) + "\t" + str(len(nodeSelection.eOld)) + "\t" + str(len(nodeSelection.eNeW)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()
Example 15: execution
def execution(configFile):
    #DEFINE THE FILE THAT WILL KEEP THE RESULT DATA
    resultFile = open(FormatingDataSets.get_abs_file_path(configFile + 'core03_execucaoFinal_cstT02.txt'), 'w')
    resultFile.write("Inicio da operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.write("\n")
    #READING THE CONFIG FILE
    util = ParameterUtil(parameter_file = configFile)
    #CREATING PARAMETRIZATION OBJECT WITH THE INFORMATION FROM THE CONFIG FILE.
    myparams = Parameterization(t0 = util.t0, t0_ = util.t0_, t1 = util.t1, t1_ = util.t1_, linear_combination=util.linear_combination,
                                filePathGraph = util.graph_file, filePathTrainingGraph = util.trainnig_graph_file, filePathTestGraph = util.test_graph_file, decay = util.decay, domain_decay = util.domain_decay, min_edges = util.min_edges, scoreChoiced = util.ScoresChoiced, weightsChoiced = util.WeightsChoiced, weightedScoresChoiced = util.WeightedScoresChoiced, FullGraph = None, result_random_file=util.result_random_file)
    #GENERATING TRAINING GRAPH BASED ON CONFIG FILE T0 AND T0_
    myparams.generating_Training_Graph()
    #GENERATING TEST GRAPH BASED ON CONFIG FILE T1 AND T1_
    myparams.generating_Test_Graph()
    nodeSelection = NodeSelection(myparams.trainnigGraph, myparams.testGraph, util)
    #CREATING CALCULATION OBJECT
    calc = CalculatingTogether(myparams, nodeSelection.nodesNotLinked)
    ordering = calc.ordering(len(nodeSelection.eNeW))
    #calc.saving_orderedResult(util.ordered_file, ordering)
    calc.AnalyseNodesInFuture(ordering, myparams.testGraph)
    resultFile.write(repr(calc.get_TotalSucess()))
    resultFile.write("\n")
    #
    resultFile.write("Authors\tArticles\tCollaborations\tAuthors\tEold\tEnew\n")
    resultFile.write(str(myparams.get_nodes(myparams.trainnigGraph)) + "\t" + str(myparams.get_edges(myparams.trainnigGraph)) + "\t\t" + str(len(nodeSelection.get_NowellColaboration())*2) + "\t\t" + str(len(nodeSelection.nodes)) + "\t" + str(len(nodeSelection.eOld)) + "\t" + str(len(nodeSelection.eNeW)))
    resultFile.write("\n")
    resultFile.write("Fim da Operacao\n")
    resultFile.write(str(datetime.datetime.now()))
    resultFile.close()