本文整理汇总了Python中pyes.ES.flush方法的典型用法代码示例。如果您正苦于以下问题:Python ES.flush方法的具体用法?Python ES.flush怎么用?Python ES.flush使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyes.ES
的用法示例。
在下文中一共展示了ES.flush方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: SampleMaker
# 需要导入模块: from pyes import ES [as 别名]
# 或者: from pyes.ES import flush [as 别名]
class SampleMaker(object):
    """Build a randomized sample dataset in a local ElasticSearch test index.

    On construction this opens *name* as a curl-dump log, connects to the
    ES node at 127.0.0.1:9200, drops any pre-existing "test-index" and
    recreates it with a fixed document mapping.  ``generate_datafile`` then
    fills the index with random documents and ``close`` flushes and releases
    the log file.

    NOTE(review): relies on module-level helpers ``get_names`` and ``words``
    defined elsewhere in this file, and on the Python 2 ``xrange`` builtin.
    """

    def __init__(self, name):
        # The log captures every curl command pyes issues (log_curl/dump_curl).
        log = open(name, "wb")
        self.log = log
        self.conn = ES(("http", "127.0.0.1", 9200), timeout=300.0, log_curl=True, dump_curl=log)
        self.index_name = "test-index"
        self.document_type = "test-type"
        # Always start from a clean slate.
        self.conn.delete_index_if_exists(self.index_name)
        self.init_default_index()

    def init_default_index(self):
        """Create the test index with an explicit mapping for the test type."""
        from pyes.helpers import SettingsBuilder
        from pyes.mappings import (DocumentObjectField, IntegerField,
                                   NestedObject, StringField, DateField,
                                   BooleanField, GeoPointField, FloatField)

        doc = DocumentObjectField(name=self.document_type)
        # Full-text fields share the same analyzed/term-vector options.
        analyzed_opts = dict(store=True, term_vector="with_positions_offsets",
                             index="analyzed")
        doc.add_property(StringField(name="description", **analyzed_opts))
        doc.add_property(StringField(name="name", **analyzed_opts))
        doc.add_property(StringField(name="tag", store=True, index="not_analyzed"))
        doc.add_property(IntegerField(name="age", store=True))
        doc.add_property(FloatField(name="price"))
        doc.add_property(DateField(name="date", store=True))
        doc.add_property(BooleanField(name="in_stock", store=True, index="not_analyzed"))
        doc.add_property(GeoPointField(name="position"))

        # "metadata" is a nested object holding name/value/num triples.
        meta = NestedObject(name="metadata")
        meta.add_property(StringField(name="name", store=True))
        meta.add_property(StringField(name="value", store=True))
        meta.add_property(IntegerField(name="num", store=True))
        doc.add_property(meta)

        settings = SettingsBuilder()
        settings.add_mapping(doc)
        self.conn.ensure_index(self.index_name, settings)

    def generate_datafile(self, number_items=1000):
        """
        Generate a dataset with number_items elements.
        """
        names = get_names()
        totalnames = len(names)
        # Seed from system entropy; the dataset is intentionally nondeterministic.
        random.seed()

        def pick_name():
            return names[random.randint(0, totalnames - 1)]

        def sign():
            return random.choice([1, -1])

        for i in xrange(number_items):
            doc = {
                "name": pick_name(),
                "age": random.randint(1, 100),
                "price": random.random() * 100.0,
                "tag": [words(1, False) for _ in xrange(random.randint(1, 5))],
                "in_stock": random.choice([True, False]),
                # Random date within +/- 1000 days of now.
                "date": datetime.now() + timedelta(days=sign() * random.randint(0, 1000)),
                "position": {
                    "lat": sign() * random.random() * 90.0,
                    "lon": sign() * random.random() * 180.0
                },
                "description": words(random.randint(1, 100), False),
                "metadata": [{"name": pick_name(),
                              "value": str(random.randint(1, 5)),
                              "num": random.randint(1, 50)}
                             for _ in xrange(random.randint(1, 5))]
            }
            # Document ids are 1-based strings.
            self.conn.index(doc, self.index_name, self.document_type, id=str(i + 1))

    def close(self):
        """Flush the index to disk and close the curl-dump log file."""
        self.conn.flush(self.index_name)
        self.log.close()
开发者ID:Hafizirshaid,项目名称:elasticsearch-cookbook-second-edition,代码行数:74,代码来源:facets_data_generation.py
示例2: ESIndexerBase
# 需要导入模块: from pyes import ES [as 别名]
# 或者: from pyes.ES import flush [as 别名]
class ESIndexerBase(object):
    """Base helper for managing and loading an ElasticSearch gene index via pyes.

    Provides index lifecycle operations (create/open, mapping management),
    single-document add/delete, bulk index building, and a simple query
    wrapper.  Python 2 code: uses ``print`` statements.

    NOTE(review): ``ES_HOST``, ``ES_INDEX_NAME``, ``ask``, ``timesofar``,
    ``StringQuery``, the pyes exception classes and the ``dataload`` module
    are defined/imported elsewhere in this file — not visible in this chunk.
    """

    # Copy module-level configuration constants onto the class.
    ES_HOST = ES_HOST
    ES_INDEX_NAME = ES_INDEX_NAME
    ES_INDEX_TYPE = 'gene'

    def __init__(self):
        # Single shared connection; the index name is the default target
        # for subsequent operations.
        self.conn = ES(self.ES_HOST, default_indexes=[self.ES_INDEX_NAME],
                       timeout=10.0)
        # Batch size — presumably consumed by subclasses/callers when
        # chunking bulk loads; not read within this class. TODO confirm.
        self.step = 10000

    def create_index(self):
        """Open the index, creating it first when it does not yet exist."""
        try:
            print self.conn.open_index(self.ES_INDEX_NAME)
        except IndexMissingException:
            print self.conn.create_index(self.ES_INDEX_NAME)

    def delete_index_type(self, index_type):
        '''Delete all indexes for a given index_type.'''
        index_name = self.ES_INDEX_NAME
        # index_type = self.ES_INDEX_TYPE
        #Check if index_type exists
        mapping = self.conn.get_mapping(index_type, index_name)
        if index_name not in mapping or index_type not in mapping[index_name]:
            print 'Error: index type "%s" does not exist in index "%s".' % (index_type, index_name)
            return
        path = '/%s/%s' % (index_name, index_type)
        # Destructive: require explicit interactive confirmation before
        # dropping the whole mapping (and its documents).
        if ask('Confirm to delete all data under "%s":' % path) == 'Y':
            return self.conn.delete_mapping(index_name, index_type)

    def index(self, doc, index_type, id=None):
        '''add a doc to the index. If id is not None, the existing doc will be
           updated.
        '''
        # index_type = self.ES_INDEX_TYPE
        return self.conn.index(doc, self.ES_INDEX_NAME, index_type, id=id)

    def delete_index(self, index_type, id):
        '''delete a doc from the index based on passed id.'''
        # index_type = self.ES_INDEX_TYPE
        return self.conn.delete(self.ES_INDEX_NAME, index_type, id)

    def optimize(self):
        """Optimize (merge segments of) the index, waiting for completion."""
        return self.conn.optimize(self.ES_INDEX_NAME, wait_for_merge=True)

    def get_field_mapping(self):
        """Build the field mapping from the registered data sources.

        Reloads the ``dataload`` module so mapping changes are picked up
        without restarting the process.
        """
        import dataload
        reload(dataload)
        dataload.register_sources()
        return dataload.get_mapping()

    def build_index(self, doc_d, update_mapping=False, bulk=True):
        """Index every (id, doc) pair from the mapping *doc_d*.

        The index must already exist (returns -1 otherwise).  When
        *update_mapping* is true — or when no mapping exists yet — the
        type mapping is (re)created from ``get_field_mapping`` first.
        Flushes and refreshes the index when done.
        """
        index_name = self.ES_INDEX_NAME
        index_type = self.ES_INDEX_TYPE
        #Test if index exists
        try:
            print "Opening index...", self.conn.open_index(index_name)
        except NotFoundException:
            print 'Error: index "%s" does not exist. Create it first.' % index_name
            return -1
        try:
            cur_mapping = self.conn.get_mapping(index_type, index_name)
            empty_mapping = False
        except ElasticSearchException:
            #if no existing mapping available for index_type
            #force update_mapping to True
            empty_mapping = True
            update_mapping = True
        # empty_mapping = not cur_mapping[index_name].get(index_type, {})
        # if empty_mapping:
        #     #if no existing mapping available for index_type
        #     #force update_mapping to True
        #     update_mapping = True
        if update_mapping:
            print "Updating mapping...",
            # An existing mapping must be dropped before it can be replaced.
            if not empty_mapping:
                print "\n\tRemoving existing mapping...",
                print self.conn.delete_mapping(index_name, index_type)
            _mapping = self.get_field_mapping()
            print self.conn.put_mapping(index_type,
                                        _mapping,
                                        [index_name])
        print "Building index..."
        t0 = time.time()
        for doc_id, doc in doc_d.items():
            self.conn.index(doc, index_name, index_type, doc_id, bulk=bulk)
        # Make the freshly indexed documents durable and searchable.
        print self.conn.flush()
        print self.conn.refresh()
        print "Done[%s]" % timesofar(t0)

    def query(self, qs, fields='symbol,name', **kwargs):
        """Run a query-string search *qs*, returning the given stored fields."""
        _q = StringQuery(qs)
        res = self.conn.search(_q, fields=fields, **kwargs)
        return res