本文整理汇总了Python中tangelo.log函数的典型用法代码示例。如果您正苦于以下问题:Python log函数的具体用法?Python log怎么用?Python log使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了log函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getRankedAddresses
def getRankedAddresses(*args, **kwargs):
    """Return the top-ranked email addresses for a data set, each with a
    sub-graph of its associated email traffic.

    Expects the usual datetime/size parameters in *kwargs*; responds as JSON.
    """
    tangelo.content_type("application/json")
    tangelo.log("getRankedAddresses(args: %s kwargs: %s)" % (str(args), str(kwargs)))

    data_set_id, start_datetime, end_datetime, size = parseParamDatetime(**kwargs)
    # TODO - reminder no 'qs' here set to ''
    # qs = parseParamTextQuery(**kwargs)
    qs = ''

    # TODO this needs to come from UI
    if size <= 500:
        size = 2500

    ranked = get_ranked_email_address_from_email_addrs_index(
        data_set_id, start_datetime, end_datetime, size)

    top_address_list = []
    for entry in ranked["emails"]:
        addr = entry[0]
        graph = es_get_all_email_by_address(
            data_set_id, addr, qs, start_datetime, end_datetime, size)
        summary = {
            "mail_sent_count": entry[6],
            "mail_received_count": entry[5],
            "mail_attachment_count": entry[7],
            "query_matched_count": graph["query_hits"],
            "associated_count": len(graph["graph"]["nodes"]),
        }
        top_address_list.append({
            "address_search_url_path": addr,
            "parameters": kwargs,
            "search_results": summary,
            "TEMPORARY_GRAPH": graph,
        })

    return {"top_address_list": top_address_list}
示例2: _build_graph_for_emails
def _build_graph_for_emails(index, emails, query_hits):
    """Build a d3-style node/link graph from a list of email documents.

    Nodes are unique sender/receiver addresses (looked up in
    _EMAIL_ADDR_CACHE); links are (sender, receiver) pairs weighted by the
    number of emails between them.  Addresses missing from the cache are
    logged and skipped.

    Fixes vs. original: removed a leftover debug statement (`print total`)
    that wrote to stdout on every call, and the stray `continue;` semicolons.
    """
    nodes = []
    edge_map = {}
    addr_index = {}  # address -> position in `nodes`
    total = count(index, "email_address")
    for email in emails:
        from_addr = email["from"]
        if from_addr not in _EMAIL_ADDR_CACHE[index]:
            tangelo.log("WARNING: From email address not found in cache <%s>" % email)
            continue
        if from_addr not in addr_index:
            nodes.append(_map_node(_EMAIL_ADDR_CACHE[index][from_addr], total))
            addr_index[from_addr] = len(nodes) - 1
        for rcvr_addr in email["to"] + email["cc"] + email["bcc"]:
            if rcvr_addr not in _EMAIL_ADDR_CACHE[index]:
                tangelo.log("WARNING: RCVR email address not found in cache <%s>" % rcvr_addr)
                continue
            if rcvr_addr not in addr_index:
                nodes.append(_map_node(_EMAIL_ADDR_CACHE[index][rcvr_addr], total))
                addr_index[rcvr_addr] = len(nodes) - 1
            # TODO reduce by key instead of mapping? src->target and sum on value
            edge_key = from_addr + "#" + rcvr_addr
            edge = edge_map.get(edge_key)
            if edge is None:
                edge_map[edge_key] = {"source": addr_index[from_addr],
                                      "target": addr_index[rcvr_addr],
                                      "value": 1}
            else:
                edge["value"] += 1
    return {"graph": {"nodes": nodes, "links": edge_map.values()},
            "rows": [_map_emails_to_row(email) for email in emails],
            "query_hits": query_hits}
示例3: es_get_sender_locations
def es_get_sender_locations(data_set_id, size):
    """Return sender locations derived from X-Originating-IP (XOIP) data.

    Runs the geo XOIP query against the data set's `emails` doc type and
    maps each hit through _map_geo_response.
    """
    tangelo.log("es_geo.es_get_sender_locations()")
    resp = es().search(index=data_set_id, doc_type="emails", size=size,
                       body=_geo_xoip_query())
    hits = resp["hits"]
    tangelo.log("es_geo.es_get_sender_locations(total document hits = %s)" % hits["total"])
    locations = [_map_geo_response(h["_source"]) for h in hits["hits"]]
    return {"total": hits["total"], "XOIP_locations": locations}
示例4: run
def run(usertoken):
# Create an empty response object.
response = {}
collectionNames = []
# build custom girder header for authenticated access
girderheader = {'Girder-Token': usertoken}
print 'girderheader:',girderheader
# look through the collections in girder. Return a list of collections that are in this local # Arbor instance
girderlocation = 'http://localhost:9000'
resp = requests.get(girderlocation+'/api/v1/collection',headers=girderheader)
# nothing particularly interesting here
#print resp.headers
#print requests.utils.dict_from_cookiejar(resp.cookies)
for entry in resp.json():
collname = entry['name']
print "found collection:", collname
collectionNames.append(entry['name'])
# Pack the results into the response object, and return it.
response['result'] = collectionNames
# Return the response object.
tangelo.log(str(response))
return json.dumps(response)
示例5: module_reload_changed
def module_reload_changed(key):
    """
    Reload a module if it has changed since we last imported it. This is
    necessary if module a imports script b, script b is changed, and then
    module c asks to import script b.
    :param key: our key used in the WatchList.
    :returns: True if reloaded.
    """
    # Serialize all import machinery access while we inspect/reload.
    imp.acquire_lock()
    try:
        modkey = module_sys_modules_key(key)
        if not modkey:
            return False
        found = None
        # NOTE(review): this guard is redundant — modkey is always truthy here
        # (the falsy case returned above).
        if modkey:
            # Find the WatchList entry whose sys.modules object is the same
            # module as ours (several keys can alias one module).
            for second in WatchList:
                secmodkey = module_sys_modules_key(second)
                if secmodkey and sys.modules[modkey] == sys.modules[secmodkey]:
                    found = second
                    foundmodkey = secmodkey
                    break
        if not found:
            # NOTE(review): bare return yields None (falsy) rather than the
            # explicit False used above — callers treating the result as a
            # boolean are unaffected, but the docstring's "True if reloaded"
            # is only loosely honored.
            return
        # Newest mtime of the watched file or any of its submodules.
        filemtime = module_getmtime(WatchList[found]["file"])
        filemtime = latest_submodule_time(found, filemtime)
        if filemtime > WatchList[found]["time"]:
            tangelo.log("Reloaded %s" % found)
            reload_including_local(sys.modules[foundmodkey])
            # Update the recorded time on every alias of the same file so we
            # don't reload it again for a sibling key.
            for second in WatchList:
                if WatchList[second]["file"] == WatchList[found]["file"]:
                    WatchList[second]["time"] = filemtime
    finally:
        imp.release_lock()
    # NOTE(review): True is returned even when no reload was needed.
    return True
示例6: parseParamAllSenderAllRecipient
def parseParamAllSenderAllRecipient( **kwargs ):
    """Parse the 'sender' and 'recipient' query parameters.

    :param kwargs: request parameters; 'sender' and 'recipient' are
        comma-separated address lists (both optional).
    :returns: (sender, recipients) where sender is the first address from
        the 'sender' parameter ('' if absent) and recipients is the list of
        non-empty addresses from the 'recipient' parameter.
    """
    tangelo.log("parseParamAllSenderAllRecipient(kwargs[%s] %s)" % (len(kwargs), str(kwargs)))
    sender = kwargs.get('sender', '').split(",")[0]
    recipient = kwargs.get('recipient', '').split(",")
    # Fix: the original filtered with `x is not ''`/`x is not None` — an
    # identity test that only works by CPython string-interning accident.
    # split() never yields None, so a plain truthiness test is correct.
    recipient = [x for x in recipient if x]
    return sender, recipient
示例7: parseParamEmailAddress
def parseParamEmailAddress( **kwargs ):
    """Return the kwargs keys that look like email addresses.

    :param kwargs: request parameters; keys of the form local@domain.tld
        are treated as email-address selectors.
    :returns: list of matching key strings.
    """
    tangelo.log("parseParamEmailAddress(kwargs[%s] %s)" % (len(kwargs), str(kwargs)))
    # Fix: the published snippet's pattern was corrupted by an email-address
    # obfuscator ("[^@][email protected][^@]+\\.[^@]+"); restored to the
    # standard loose local@domain.tld shape.
    email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
    key_list = [k for k in kwargs.keys() if email_regex.match(k)]
    tangelo.log("\tkey_list[] = %s" % str(key_list))
    return key_list
示例8: run
def run(host, database):
    """Return (as a JSON string) the mongo collections in *database* whose
    names begin with 'seeds_', prefixed with a UI placeholder entry.
    """
    client = MongoClient(host, 27017)
    db = client[database]

    # first entry is a placeholder prompt for the UI dropdown
    names = ['select a dataset']

    # get a list of all collections (excluding system collections) and keep
    # only those matching the seeds_* naming profile
    for coll in db.collection_names(False):
        if coll.startswith('seeds_'):
            names.append(coll)
    client.close()

    # Pack the results into the response object, and return it.
    response = {}
    response['result'] = names
    tangelo.log(str(response))
    return json.dumps(response)
示例9: query
def query(data):
    """Run a match_phrase search for each search term against an ES index.

    :param data: dict with 'url' (ES host), 'mrpn' (max results per node),
        'index', and 'search_terms' (list of dicts with 'type', 'id', and
        optionally 'data').
    :returns: JSON string {'num': total hit count, 'hits': [hit records]}.
    """
    url = data['url']
    max_results_per_node = int(data['mrpn'])
    index_name = data['index']
    search_terms = data['search_terms']

    cred = esauth.get(url)
    if cred is not None:
        # SECURITY fix: the original logged the full basic-auth URL,
        # writing the credential in cleartext into the logs.
        tangelo.log('using stored credentials for ' + url)
        es = Elasticsearch(['http://' + cred + '@' + url])
    else:
        es = Elasticsearch([url])

    rr = []
    num = 0
    for t in search_terms:
        if t['type'] not in ('selection', 'phone', 'email', 'info'):
            continue
        # default search value is the node id; some types override it
        num_to_search = t['id']
        if t['type'] == 'selection':
            num_to_search = t['data']
        elif t['type'] == 'info':
            num_to_search = t['id'].split('->')[1].strip()
        results = es.search(index=index_name,
                            body={"size": max_results_per_node,
                                  "fields": ["_index", "_type", "_id"],
                                  "query": {"match_phrase": {"_all": num_to_search}}})
        num += results['hits']['total']
        for hit in results['hits']['hits']:
            rr.append({'nid': t['id'], 'search_term': num_to_search,
                       'eid': hit['_id'], 'itype': hit['_type'],
                       'jindex': index_name, 'url': url})
    return json.dumps({'num': num, 'hits': rr})
示例10: createResults
def createResults(field, args_array):
    """Build the {'rows': emails, 'graph': {nodes, links}} result payload
    for a search on *field* with the given arguments.

    For free-text fields (anything other than "email"/"entity") the first
    argument is run as an ES text query and its hits are ingested first.
    """
    ## is text search
    if not field.lower() in ["email", "entity"]:
        text = head(args_array)
        if text:
            tangelo.log("text search : %s" % text)
            es = Elasticsearch()
            # q=text does the actual matching; the body only restricts fields
            res = es.search(index="newman", doc_type="emails", size=1000, q=text, body= {"fields": ["_id"], "query": {"match_all": {}}})
            ingestESTextResults(jsonGet(['hits','hits'], res, []))
    node_vals = getNodeVals(field, args_array)
    # color nodes by their group id
    colors = {k:v.get("group_id") for k,v in node_vals.iteritems()}
    for k,v in node_vals.iteritems():
        node_vals[k]["color"] = colors.get(k)
    # rows sorted chronologically (string sort on the datetime field)
    emails = sorted(getEmails(colors, field, args_array), key=lambda x: str(x.get('datetime')))
    idx_lookup = {}
    nodes = []
    for i, o in enumerate(node_vals.iteritems()):
        k,v = o
        # remember each node's index so edges can reference positions
        idx_lookup[k]=i
        #nodes.append({"name": k, "num": v.get("num"), "rank": v.get("rank"), "group": v.get("color"), "community": colors.get(v.get("comm"))})
        nodes.append({"name": k, "num": v.get("num"), "rank": v.get("rank"), "group": v.get("color"), "community": v.get("comm_id")})
    edges = getEdges(idx_lookup, field, args_array)
    results = { 'rows': emails, 'graph': { 'nodes': nodes, 'links': edges }}
    return results
示例11: upload_file
def upload_file(*args, **kwargs):
    """Create a new domain from an uploaded CSV of type,value rows.

    :param kwargs: expects 'file_upload' (uploaded file object), 'name'
        (domain name) and 'description'.
    :returns: JSON string {'success': bool}; False when the domain already
        exists or no file was supplied.
    """
    def _strip_quotes(s):
        # drop one pair of surrounding double quotes, if present.
        # (Fix: the original indexed s[0]/s[len(s)-1] directly, which raises
        # IndexError on an empty field and mangles a lone '"'.)
        if len(s) >= 2 and s[0] == '"' and s[-1] == '"':
            return s[1:-1]
        return s

    domain_content_connector = factory.get_entity_data_connector()
    try:
        domain_file = kwargs.get("file_upload")
        domain_name = kwargs.get("name")
        domain_description = kwargs.get("description")
        if db.domain_exists(domain_name) or domain_file is None:
            return json.dumps(dict(success=False))

        tangelo.log("read domain file")
        lines = [l.strip().replace('\0', '') for l in domain_file.file.readlines()]
        db.add_new_domain(domain_name, domain_description)

        rowkeys = []
        for line in lines:
            # split on the first comma only; values may contain commas
            item_type, item_value = line.split(',', 1)
            item_type = _strip_quotes(item_type)
            item_value = _strip_quotes(item_value)
            # rowkey format: domain \0 type \0 value
            rowkeys.append(domain_name + '\0' + item_type + '\0' + item_value)
        result = domain_content_connector.add_new_domain_items(rowkeys)
        return json.dumps(dict(success=result))
    finally:
        domain_content_connector.close()
示例12: get_entities
def get_entities(trail_id):
    """Aggregate the entities extracted from every URL in a trail.

    :param trail_id: browse-path trail identifier.
    :returns: JSON list of {'name', 'type', 'pages'} records, where 'pages'
        counts how many of the trail's URLs mention the entity.
    """
    tangelo.log('Getting entities for trail: %s' % trail_id)

    urls = [row['url'] for row in db.getBrowsePathUrls(trail_id)]
    connector = factory.get_entity_data_connector()
    extracted = connector.get_extracted_entities_from_urls(urls)
    tangelo.log('Got entities')

    # tally per-entity page counts across all urls
    tally = {}
    for url in extracted:
        for entity_type in extracted[url]:
            for entity_name in extracted[url][entity_type]:
                if entity_name in tally:
                    tally[entity_name]['pages'] += 1
                else:
                    tally[entity_name] = {'type': entity_type, 'pages': 1}

    # TODO either figure out how to map the data or do this differently
    entity_list = [{'name': name, 'type': info['type'], 'pages': info['pages']}
                   for name, info in tally.items()]
    return json.dumps(entity_list)
示例13: getAttachFileType
def getAttachFileType(*args, **kwargs):
    """Return the top attachment file types for a data set as JSON.

    The first positional arg optionally names an attachment type filter
    (defaults to 'all').
    """
    tangelo.content_type("application/json")
    tangelo.log("getAttachFileType(args: %s kwargs: %s)" % (str(args), str(kwargs)))

    data_set_id, start_datetime, end_datetime, size = parseParamDatetime(**kwargs)
    top_count = int(size)

    attach_type = urllib.unquote(nth(args, 0, '')) or 'all'  # hack for now

    email_address_list = parseParamEmailAddress(**kwargs)
    # TODO: implement populating the attachment file-types under individual
    # email-accounts; until then the per-address case falls through to the
    # same data-set-wide query (the original's two branches were identical).
    file_types = get_top_attachment_types(
        data_set_id,
        date_bounds=(start_datetime, end_datetime),
        num_top_attachments=top_count)[:top_count]

    return {
        "account_id": data_set_id,
        "data_set_id": data_set_id,
        "account_start_datetime": start_datetime,
        "account_end_datetime": end_datetime,
        "types": file_types,
    }
示例14: queryEmail
def queryEmail(email):
    """Fetch a single email row by id.

    :param email: email id passed to the stmt_email_by_id query.
    :returns: the matching row, or [] when nothing matches.
    """
    with newman_connector() as read_cnx:
        with execute_query(read_cnx.conn(), stmt_email_by_id, email) as qry:
            tangelo.log("node-vals: %s" % qry.stmt)
            row = qry.cursor().fetchone()
    tangelo.content_type("application/json")
    if row:
        return row
    return []
示例15: export
def export(cdr):
    """POST a CDR document to the configured DeepDive endpoint.

    :param cdr: the document payload sent as the request body.
    """
    # endpoint: <deepdive_url>/<user>/<repo>/
    dd_url = '%s/%s/%s/'%(conf.get_deepdive_url(), conf.get_deepdive_user(), conf.get_deepdive_repo())
    # token-based auth header
    headers = {'Authorization': 'Token %s' % conf.get_deepdive_token()}
    r = requests.post(dd_url, headers=headers, data=cdr)
    # NOTE(review): logged after the request completes; r.url is the final
    # (possibly redirected) URL. The response status is not checked here —
    # the function may continue beyond this excerpt.
    tangelo.log('Sending page to deepdive at: %s' % r.url)