

Python client.GraphDatabase Class Code Examples

This article collects typical usage examples of the GraphDatabase class from neo4jrestclient.client in Python. If you have been wondering what exactly the Python GraphDatabase class does, how to use it, or what it looks like in real code, the curated class code examples below may help.


The following presents 15 code examples of the GraphDatabase class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
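Before diving into the examples, here is a minimal usage sketch of the class: connect to a Neo4j REST endpoint, create a node, and run a Cypher query. The server URL and credentials are placeholders; substitute the values for your own database.

from neo4jrestclient.client import GraphDatabase

# placeholder URL and credentials; point these at your own server
gdb = GraphDatabase("http://localhost:7474/db/data/", username="neo4j", password="secret")

# create a node with a property
alice = gdb.nodes.create(name="Alice")

# run a Cypher query; each result row is a list of column values
results = gdb.query("MATCH (n) RETURN n.name LIMIT 5")
for row in results:
    print(row[0])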

Example 1: obj_get

    def obj_get(self, bundle, **kwargs):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        document = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
        
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = document.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        new_obj.__dict__['_data']['user'] = str(document.relationships.incoming(types=["owns"])[0].start.properties['username'])
        
        sentences = gdb.query("""MATCH (u:`User`)-[:owns]->(d:`UserDocument`)-[:sentences]->(s:`UserSentence`) WHERE d.CTS='""" +document.properties['CTS']+ """' RETURN DISTINCT s ORDER BY ID(s)""")
        sentenceArray = []
        for s in sentences:
            sent = s[0]
            url = sent['self'].split('/')
            # this might seem a little hacky, but the API resources are highly decoupled,
            # which buys performance by avoiding relations amongst objects and foreign-key (de)referencing
            sent['data']['resource_uri'] = API_PATH + 'user_sentence/' + url[len(url)-1] + '/'
            sentenceArray.append(sent['data'])
                
        new_obj.__dict__['_data']['sentences'] = sentenceArray

        # get a dictionary of related translations of this document
        relatedDocuments = gdb.query("""MATCH (d:`UserDocument`)-[:sentences]->(s:`UserSentence`)-[:words]->(w:`Word`)-[:translation]->(t:`Word`)<-[:words]-(s1:`Sentence`)<-[:sentences]-(d1:`Document`) WHERE HAS (d.CTS) AND d.CTS='""" + document.properties['CTS'] + """' RETURN DISTINCT d1 ORDER BY ID(d1)""")
        
        new_obj.__dict__['_data']['translations']={}
        for rd in relatedDocuments:
            doc = rd[0]
            url = doc['self'].split('/')
            if doc['data']['lang'] in CTS_LANG:
                new_obj.__dict__['_data']['translations'][doc['data']['lang']] = doc['data']
                new_obj.__dict__['_data']['translations'][doc['data']['lang']]['resource_uri']= API_PATH + 'document/' + url[len(url)-1] +'/'


        return new_obj
Developer: OpenPhilology, Project: phaidra, Lines: 35, Source: contribute.py

Example 2: buildings

def buildings(request):
    status={}
    if request.method == 'GET':
        gdb = GraphDatabase(NEO4J_HOST,NEO4J_USERNAME,NEO4J_PASSWORD)
        #gdb = GraphDatabase("http://localhost:7474", username="neo4j", password="papageno1")
        #building = gdb.labels.get('Building')
        #building.all()
        q = """MATCH (n:Building) return n.name, n.address, n.certification, n.leed_id"""
        results = gdb.query(q=q)
    
        buildings = []
        for building in results:
            building_info = {}
            building_info['name'] = building[0]
            building_info['address'] = building[1]
            building_info['certification'] = building[2]
            building_info['leed_id'] = building[3]
            buildings.append(building_info)
        
        status.update({'buildings': buildings})
        status.update({'status': 'Success'})
        return HttpResponse(json.dumps(status),content_type="application/json")
    else:
        status.update({'buildings': ''})
        status.update({'status': 'Invalid Request'})
        
    return HttpResponse(json.dumps(status))
Developer: ekta-yogaan, Project: eb_django_app, Lines: 27, Source: views.py

Example 3: obj_get

    def obj_get(self, bundle, **kwargs):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        lemma = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')
        
        # get the data of the lemma
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = lemma.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        
        # get the values    
        values = lemma.relationships.outgoing(types=["values"])            
        valuesArray = []
        for v in range(len(values)):
            val = values[v].end
            val.properties['resource_uri'] = API_PATH + 'word/' + str(val.id) + '/'
            val.properties['translations'] = []

            # get the full translations; force the API into a full representation if caching is enabled
            if bundle.request.GET.get('full'):    
                
                translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + val.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
                translationArray = []
                for t in translations:
                    trans = t[0]
                    transurl = trans['self'].split('/')
                    trans['data']['resource_uri'] = API_PATH + 'word/' + transurl[len(transurl)-1] + '/'
                    translationArray.append(trans['data'])
                val.properties['translations'] = translationArray
            
            valuesArray.append(val.properties)
            
        new_obj.__dict__['_data']['values'] = valuesArray

        return new_obj
Developer: OpenPhilology, Project: phaidra, Lines: 35, Source: lemma.py

Example 4: obj_get

    def obj_get(self, bundle, **kwargs):

        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        word = gdb.nodes.get(GRAPH_DATABASE_REST_URL + "node/" + kwargs['pk'] + '/')

        # get the data of the word
        new_obj = DataObject(kwargs['pk'])
        new_obj.__dict__['_data'] = word.properties
        new_obj.__dict__['_data']['id'] = kwargs['pk']
        new_obj.__dict__['_data']['sentence_resource_uri'] = API_PATH + 'sentence/' + str(word.relationships.incoming(types=["words"])[0].start.id) + '/'

        # get the lemma
        lemmaRels = word.relationships.incoming(types=["values"])
        if len(lemmaRels) > 0:
            new_obj.__dict__['_data']['lemma_resource_uri'] = API_PATH + 'lemma/' + str(lemmaRels[0].start.id) + '/'

        translations = gdb.query("""MATCH (d:`Word`)-[:translation]->(w:`Word`) WHERE d.CTS='""" + word.properties['CTS'] + """' RETURN DISTINCT w ORDER BY ID(w)""")
        translationArray = []
        for t in translations:
            trans = t[0]
            url = trans['self'].split('/')
            trans['data']['resource_uri'] = API_PATH + 'word/' + url[len(url)-1] + '/'
            translationArray.append(trans['data'])

        new_obj.__dict__['_data']['translations'] = translationArray

        return new_obj
Developer: ThomasK81, Project: phaidra, Lines: 27, Source: word.py

Example 5: getAllNodesAndRelations

def getAllNodesAndRelations():
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''START n=node(*) MATCH n-[r]->m RETURN n,r,m'''
	# the r column is cast to str; m, although it is a node, is cast to
	# client.Relationship, and the type checks below depend on those names
	results = db1.query(q, returns=(client.Node, str, client.Relationship))
	print(len(results))
	graph = {}
	startnode = []
	endnode = []
	rel = []
	for i in range(len(results)):
		for word in results[i]:
			if word.__class__.__name__ == 'str':
				json1_str = str(word)
				rel.append(getRelType(json1_str))
			if word.__class__.__name__ == 'Node':
				startnode.append(str(word.properties['name']))
			if word.__class__.__name__ == 'Relationship':
				endnode.append(str(word.properties['name']))

	for i in range(len(startnode)):
		graph[(startnode[i],endnode[i])] = rel[i]

	for word in graph:
		print(word, graph[word])

	return graph
Developer: vinaykola, Project: jarvis, Lines: 26, Source: neo4japi.py

Example 6: getSubgraph

def getSubgraph():
    op={"nodes":[],"links":[]}
    nodes=[]

    db1 = GraphDatabase("http://localhost:7474/")
    q1 = ' '.join(['MATCH n-[r]->m','WHERE n.name="batman"','RETURN n,r,m;'])
    q2 = ' '.join(['MATCH n-[r]->m WHERE n.name="batman"','WITH n,r,m MATCH q-[r2]->p','WHERE n-[r]->q AND n-[r]->p','RETURN q,r2,p limit 200;'])
    print "starting"
    results1=db1.query(q1,returns=(client.Node, client.Relationship, client.Node))
    print "HERE"
    for result in results1:
        n1=result[0].properties['name']
        n2=result[2].properties['name']
        try:
            i1 = nodes.index(n1)
        except ValueError:
            nodes.append(n1)
            i1 = nodes.index(n1)
            op["nodes"].append({"name": n1})
        try:
            i2 = nodes.index(n2)
        except ValueError:
            nodes.append(n2)
            i2 = nodes.index(n2)
            op["nodes"].append({"name": n2})
        
        r = result[1].type
        op["links"].append({"source":i1,"target":i2,"type":r})
        
    print(op)


    results2 = db1.query(q2, returns=(client.Node, client.Relationship, client.Node))
    print("THERE!")
    for result in results2:
        n1=result[0].properties['name']
        n2=result[2].properties['name']
        # every node returned by q2 is assumed to have been indexed
        # already by the q1 pass, so the lookups are unguarded here
        i1 = nodes.index(n1)
        i2 = nodes.index(n2)
        r = result[1].type
        op["links"].append({"source":i1,"target":i2,"type":r})
        
    print(op)
    json.dump(op,open('subgraph.json','w'))
Developer: vinaykola, Project: jarvis, Lines: 58, Source: getSubgraph.py

Example 7: least_recently

    def least_recently(self, request, **kwargs):
        
        data = {}
        data['time_ranking'] = []
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)
        
        time = {}

        # processing time of a user's grammar submissions
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '""" + request.user.username + """' RETURN s""")            
        
        # the Unix epoch, used as the reference point for the timestamps
        unix = datetime(1970,1,1)
                                    
        # collect the submission times (seconds since the epoch) per ref key
        for sub in submissions.elements:
            
            try:
                if len(sub[0]['data']['ref']) == 0:
                    return self.error_response(request, {'error': 'Reference keys are necessary for calculating averaged lesson progress.'}, response_class=HttpBadRequest)
                
                t = dateutil.parser.parse(sub[0]['data']['timestamp'])
                t = t.replace(tzinfo=None)
                diff = (t-unix).total_seconds()
                try:
                    time[sub[0]['data']['ref']].append(diff)
                except KeyError as k:
                    time[sub[0]['data']['ref']] = []
                    time[sub[0]['data']['ref']].append(diff)
            except KeyError as k:
                continue
                
        
        # calculate the per-ref averages and sort by them
        average = {}
        for ref in time.keys():
            average[ref] = 0.0
            for value in time[ref]:
                average[ref] = average[ref] + value
                
            av = average[ref]/len(time[ref])
            av = datetime.fromtimestamp(int(av)).strftime('%Y-%m-%d %H:%M:%S')
            av = av.replace(' ', 'T')
            average[ref] = av
        
        sorted_dict = sorted(average.items(), key=operator.itemgetter(1))
                
        for entry in sorted_dict:
            data['time_ranking'].append({'ref': entry[0],
                                         'average': average[entry[0]],
                                         'title': Grammar.objects.filter(ref=entry[0])[0].title,
                                         'query': Grammar.objects.filter(ref=entry[0])[0].query})
    
        # return the json
        return self.create_response(request, data)
Developer: ThomasK81, Project: phaidra, Lines: 57, Source: visualization.py

Example 8: execute

def execute():

	url = 'http://ec2-54-211-27-90.compute-1.amazonaws.com:8080/db/data/'

	gdb = GraphDatabase(url)

	query = "start n=node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() and n.postedTime + 3600000 > timestamp() - 24*60*60*1000 return \"0\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 2*24*60*60*1000 return \"-1\" as Day, n as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 2*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 3*24*60*60*1000 return \"-2\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 3*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 4*24*60*60*1000 return \"-3\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 4*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 5*24*60*60*1000  return \"-4\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 5*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 6*24*60*60*1000 return \"-5\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 6*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 7*24*60*60*1000 return \"-6\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 7*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 8*24*60*60*1000 return \"-7\" as Day, count(n) as Tweets UNION start n = node(*) where n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 8*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 9*24*60*60*1000 return \"-8\" as Day, count(n) as Tweets UNION start n = node(*) where  n.type = \"tweet\" and n.postedTime + 3600000 < timestamp() - 9*24*60*60*1000 and n.postedTime + 3600000 > timestamp() - 10*24*60*60*1000 return \"-9\" as Day, count(n) as Tweets"	
	results = gdb.query(query).get_response()
	print results
Developer: capoitas, Project: GnipIngestor, Lines: 9, Source: tests.py

Example 9: extract_node_features

def extract_node_features(nodes, multiclass=False):
	X = []
	Y = []
	index_map = {}
	gdb = GraphDatabase('http://ec2-54-187-76-157.us-west-2.compute.amazonaws.com:7474/db/data/')
	for i, node in enumerate(nodes):
		# phi = [handle_length, num_non_alpha in handle, belief, num_links, |indicators for source urls|]
		phi = []
		node_handle = node['node_handle']
		# handle_length
		phi.append(len(node_handle))
		# num_non_alpha characters
		phi.append(len([c for c in node_handle if not c.isalpha()]))
		# node_handle appears to carry its own quoting (cf. the quoted
		# action_type values below), as it is interpolated directly here
		q = 'MATCH (n{handle:' + node_handle + '})-[r]-(x) RETURN r, n, x'
		links = gdb.query(q=q)
		source_urls = set()
		belief = 0
		neighbor_beliefs = []
		for link in links:
			s_url = link[0]['data']['source_url']
			source_urls.add(s_url)
			try:
				belief = link[1]['data']['belief']
			except KeyError:
				pass
		# belief
		phi.append(belief)
		# num_links
		phi.append(len(links))
		# indicator variables for urls
		for source in GRAPH_SOURCES:
			if source in source_urls:
				phi.append(1)
			else:
				phi.append(0)
		action_type = node['action_type']
		if not multiclass:
			# binary classification, 'GOOD_NODE' = 1
			if action_type == "'GOOD_NODE'":
				Y.append(1)
			else:
				Y.append(2)
		else:
			# multiclass classification
			if action_type == "'GOOD_NODE'":
				Y.append(1)
			elif action_type == "'REMOVE_NODE'":
				Y.append(2)
			elif action_type == "'SPLIT_NODE'":
				Y.append(3)
			elif action_type == "'RENAME_NODE'":
				Y.append(4)
			else:
				print(action_type)
		index_map[node['id_node']] = i
		X.append(phi)
	return X, Y, index_map
Developer: AlaRuba, Project: CS229Project, Lines: 57, Source: graph_clf_features.py

Example 10: get

    def get(self, params={}):
        depth = params.get("depth", 3)
        q = params.get("q", "artist_10165")
        query = u"""start n=node:node_auto_index(id="{}") match n-[r*1..{}]-m  return m,r""".format(q, depth)
        print(query)
        from neo4jrestclient.client import GraphDatabase, Node, Relationship

        gdb = GraphDatabase("http://localhost:7474/db/data")
        res = gdb.query(q=query, returns=(Node, Relationship))
        return res
Developer: vindurriel, Project: nest, Lines: 10, Source: neo.py

Example 11: getAllNodes

def getAllNodes():
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''START n=node(*) RETURN n LIMIT 5'''
	results = db1.query(q, returns=client.Node)
	nodes = []
	print(len(results))
	for i in range(len(results)):
		for word in results[i]:
			nodes.append(str(word.properties['name']))
	return nodes
Developer: vinaykola, Project: jarvis, Lines: 10, Source: neo4japi.py

Example 12: UpsertItem

def UpsertItem(itemid):
    db = GraphDatabase("http://localhost:7474/db/data/")
    item_query = "START ee=node(*) WHERE ee.itemid! = \"" + itemid + "\" RETURN ee;"

    result = db.query(q=item_query, returns=client.Node)
    if len(result) == 0:
        item = db.nodes.create(itemid=itemid)
    else:
        for node in result:
            item = node.pop()

    return item
Developer: the-hof, Project: graph-recommender, Lines: 12, Source: api.py

Example 13: UpsertUser

def UpsertUser(userid):
    db = GraphDatabase("http://localhost:7474/db/data/")
    user_query = "START ee=node(*) WHERE ee.userid! = \"" + userid + "\" RETURN ee;"

    result = db.query(q=user_query, returns=client.Node)
    if len(result) == 0:
        user = db.nodes.create(userid=userid)
    else:
        for node in result:
            user = node.pop()

    return user
Developer: the-hof, Project: graph-recommender, Lines: 12, Source: api.py

Example 14: getFoes

def getFoes(name):
	db1 = GraphDatabase("http://localhost:7474/db/data/")
	q = '''MATCH (n { name: \''''+name+'''\'})-[r]->m WHERE type(r) = 'FOE' RETURN n,r,m'''
	results = db1.query(q, returns=(client.Node, str, client.Relationship))
	endnode = []
	for i in range(len(results)):
		for word in results[i]:
			# m, although a node, is cast to client.Relationship above,
			# so the 'Relationship' instances here are the FOE end nodes
			if word.__class__.__name__ == 'Relationship':
				endnode.append(str(word.properties['name']))

	print(endnode)
	return endnode
Developer: vinaykola, Project: jarvis, Lines: 12, Source: neo4japi.py

Example 15: calculateKnowledgeMap

    def calculateKnowledgeMap(self, user):
        
        gdb = GraphDatabase(GRAPH_DATABASE_REST_URL)    
        submissions = gdb.query("""MATCH (n:`User`)-[:submits]->(s:`Submission`) WHERE HAS (n.username) AND n.username =  '""" + user + """' RETURN s""")    
                    
        #filename = os.path.join(os.path.dirname(__file__), '../static/json/ref.json')
        #fileContent = {}
        #with open(filename, 'r') as json_data:
            #fileContent = json.load(json_data); json_data.close()                                  
                        
        vocab = {}
        ref = {}        
        lemmas = {}
        lemmaFreq = 0
        # flatten the ref and collect the vocab knowledge
        for sub in submissions.elements:            
            
            try:     
                for word in sub[0]['data']['encounteredWords']:
                        
                    try:
                        vocab[word] = vocab[word]+1
                    except KeyError as k:
                        vocab[word] = 1
                        # if the word appears for the first time, get its lemma's frequency (two words can share a lemma, so the lemma is saved as the key)
                        try:
                            lemma = gdb.query("""MATCH (l:`Lemma`)-[:values]->(n:`Word`) WHERE n.CTS = '""" + word + """' RETURN l.value, l.frequency""")
                            if lemma.elements[0][0] is not None and lemma.elements[0][0] != "":
                                lemmas[lemma.elements[0][0]] = lemma.elements[0][1]
                        # in case of weird submission test data for encounteredWords
                        except IndexError as i:
                            continue
                    if sub[0]['data']['ref'] not in ref:
                        # get the morph params from the submission's ref key, saved so they can be tested against the words of the work
                        #ref[sub[0]['data']['ref']] = grammar[sub[0]['data']['ref']]
                        try:
                            params = {}
                            grammar = Grammar.objects.filter(ref=sub[0]['data']['ref'])[0].query.split('&')
                            for pair in grammar:
                                params[pair.split('=')[0]] = pair.split('=')[1] 
                            ref[sub[0]['data']['ref']] = params
                        except IndexError as k:
                            continue                        
            except KeyError as k:
                continue
        
        # sum the lemma frequencies for the overall count
        for lemma_value in lemmas:
            lemmaFreq += int(lemmas[lemma_value])

        return [vocab, ref, lemmas, lemmaFreq]
Developer: ThomasK81, Project: phaidra, Lines: 51, Source: visualization.py


Note: The neo4jrestclient.client.GraphDatabase class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not repost without permission.