本文整理汇总了Python中elasticsearch.client.IndicesClient.get_mapping方法的典型用法代码示例。如果您正苦于以下问题:Python IndicesClient.get_mapping方法的具体用法?Python IndicesClient.get_mapping怎么用?Python IndicesClient.get_mapping使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类elasticsearch.client.IndicesClient
的用法示例。
在下文中一共展示了IndicesClient.get_mapping方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from elasticsearch.client import IndicesClient [as 别名]
# 或者: from elasticsearch.client.IndicesClient import get_mapping [as 别名]
def main():
    """Create sample indices, register mappings, print them, and optionally clean up.

    Relies on module-level names defined elsewhere in the file:
    ES_HOSTS, mappings_body, types, CLEANUP, generate_indices(),
    cleanup_indices().  Exits the process with status 1 if the
    Elasticsearch cluster cannot be reached.
    """
    # Globals shared with the helper functions defined elsewhere in the file.
    global index_names
    global STARTED_TIMESTAMP
    global es
    global es_indices

    try:
        # Initiate the elasticsearch session using the ES low-level client.
        # By default nodes are randomized before being passed into the pool
        # and a round-robin strategy is used for load balancing.
        es = Elasticsearch(ES_HOSTS, timeout=30)
        es_indices = IndicesClient(es)
    except Exception as exc:  # narrowed from a bare except; report the cause
        print("Could not connect to elasticsearch! ({0})".format(exc))
        sys.exit(1)

    print("Creating indices.. ")
    indices = generate_indices()
    print("Done!\n")

    # Register the _default_ mapping first, then one mapping per concrete type.
    print("Put Mapping ")
    es_indices.put_mapping(doc_type="_default_",
                           body=mappings_body["_default_"],
                           index="_all")
    for type_name in types:
        es_indices.put_mapping(doc_type=type_name,
                               body=mappings_body[type_name],
                               index="_all")
    print("Done!\n")

    # Retrieve and pretty-print the mapping of the two sample indices.
    print("GET Mapping ")
    print(json.dumps(es_indices.get_mapping(index=["metrics_0", "metrics_1"],
                                            doc_type=types),
                     sort_keys=True,
                     indent=4,
                     separators=(',', ': ')))
    print("Done!\n")

    # Clean up the created indices unless disabled (CLEANUP defaults to True).
    if CLEANUP:
        print("Cleaning up created indices.. ")
        cleanup_indices()
        print("Done!\n")
示例2: datetime
# 需要导入模块: from elasticsearch.client import IndicesClient [as 别名]
# 或者: from elasticsearch.client.IndicesClient import get_mapping [as 别名]
# Snippet (example 2): tighten a geographic bounding box when it is small,
# make sure an Elasticsearch mapping exists for the requested doc_type, and
# compute the harvesting date window.
# NOTE(review): this fragment relies on names defined outside the listing
# (minlat/maxlat/minlon/maxlon, ic, direct, sdate, args, datetime) and is
# truncated at the trailing `if` — it is not runnable as-is.
lat_dist = maxlat - minlat
lon_dist = maxlon - minlon
spread = 0.085
distance_in_meters = 5000
# Small bounding box: use a tighter grid spread / search radius and shrink
# the box slightly on every side.
if lat_dist < spread or lon_dist < spread:
    spread = 0.015
    distance_in_meters = 1000
    minlat = minlat + (0.01)
    maxlat = maxlat - (0.01)
    minlon = minlon + (0.01)
    maxlon = maxlon - (0.01)
realtime = True
# If the target doc_type has no mapping yet, clone the "baltimore" mapping
# into it so documents of this type index consistently.
if not ic.get_mapping(index="instagram_remap",doc_type=direct):
    body = ic.get_mapping(index="instagram_remap",doc_type="baltimore")["instagram_remap"]["mappings"]["baltimore"]
    ic.put_mapping(index="instagram_remap",doc_type=direct,body=body)
# Harvest window: from the sdate string (YYYYMMDDHH) up to now, unless an
# explicit end date was supplied on the command line.
start_date = datetime(int(sdate[0:4]),int(sdate[4:6]),int(sdate[6:8]),int(sdate[8:10]))
end_date = datetime(datetime.now().year,datetime.now().month,datetime.now().day,datetime.now().hour,datetime.now().minute)
if args.end_date:
    end_date = datetime(int(args.end_date[0:4]),int(args.end_date[4:6]),int(args.end_date[6:8]),int(args.end_date[8:10]))
max_secs = 460800 # 128 hours
max_images = 40 # this is the artificial max limit instagram sets...for now we'll just make it something low
min_images = 10 # increase the time window for any calls netting less than 10 images
# (listing truncated here — the body of this `if` is missing)
if args.images:
示例3: print
# 需要导入模块: from elasticsearch.client import IndicesClient [as 别名]
# 或者: from elasticsearch.client.IndicesClient import get_mapping [as 别名]
# Snippet (example 3): tail of importCSV(indexName, typeName, fileName).
# NOTE(review): the `def` line and the file-existence check that guards the
# first two lines were lost when this listing was extracted, so the fragment
# is not runnable as-is.  It bulk-loads a CSV file into index/type and
# returns the number of documents indexed.
print ("file not found")
return
actions=[]
# Create the target index if it does not exist yet; ignore=400 suppresses
# the "index already exists" error from a racing creation.
if not es.indices.exists(index=indexName,allow_no_indices=True):
    es.indices.create(index=indexName,body={},ignore=400)
# Build one bulk-index action per CSV row, then ship them in chunks of 100.
for item in csv.DictReader(open(fileName, 'r')):
    actions.append({"_index":indexName,"_type":typeName,"_source":item})
res = helpers.bulk(es,actions,chunk_size=100)
# Flush so the freshly indexed documents are durable/searchable.
es.indices.flush(index=[indexName])
return len(actions)
# Example invocation: import demo_test.csv into platform/facebook.
result=importCSV("platform","facebook","demo_test.csv")
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
# Demo script: connect to a local Elasticsearch node, inspect the
# platform/facebook mapping, run a phrase search, and save the hits to disk.
# NOTE(review): Python 2 style (u"" literals, `print (y)` spacing); relies on
# a live cluster at localhost:9200.
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
es_index=IndicesClient(es)
es_index.get_mapping(index="platform",doc_type="facebook")
# Phrase-match all fields for the query term, sorted by the comment-count
# field descending.
need_save = es.search(index='platform',doc_type='facebook', body={"query":{"match_phrase":{"_all":u"蔡英文"}},"sort":{u"留言數":{"order":"desc"}}})
es.count(index='platform',doc_type='facebook', body={"query":{"match_phrase":{"_all":u"蔡英文"}},"sort":{u"留言數":{"order":"desc"}}})
len(need_save['hits']['hits'])
import json
# Serialize the hits to UTF-8 encoded JSON and write them out.
# NOTE(review): the output file is named .csv but contains JSON, so the
# pd.read_csv call below cannot parse it meaningfully, and `pd` (pandas) is
# never imported anywhere in this listing.
need_save = json.dumps(need_save['hits']['hits'],ensure_ascii=False).encode('utf8')
g = open('save_the_file.csv','wb+')
g.write(need_save)
g.close()
pd.read_csv('save_the_file.csv')
# Open the saved file and load it back as JSON.
g = open('save_the_file.csv','r+')
y = json.load(g)
g.close()
print (y)
# (listing truncated here — this handle is opened but never used in view)
g = open('save_the_file.json','wb+')