This article collects typical usage examples of the Python method mkt.webapps.models.WebappIndexer.get_analysis. If you are unsure what WebappIndexer.get_analysis does or how to call it, the curated examples below should help. You can also look at the containing class, mkt.webapps.models.WebappIndexer, for more context.
Two code examples of the WebappIndexer.get_analysis method are shown below, sorted by popularity by default.
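Both examples pass the method's return value straight into the settings used to create a new Elasticsearch index, so get_analysis() evidently returns the index's "analysis" block: a dict of analyzer, tokenizer, and filter definitions. A minimal sketch of that usage follows; the shard count is an illustrative value, not one taken from the examples:

from mkt.webapps.models import WebappIndexer

# get_analysis() supplies the analyzer/tokenizer/filter definitions the
# new index is created with; the shard count here is illustrative.
index_settings = {
    'analysis': WebappIndexer.get_analysis(),
    'number_of_shards': 5,
}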
Example 1: handle
# Required import: from mkt.webapps.models import WebappIndexer [as alias]
# Or: from mkt.webapps.models.WebappIndexer import get_analysis [as alias]
def handle(self, *args, **kwargs):
    """Set up reindexing tasks.

    Creates a chain of tasks that creates a new index and indexes all
    objects, then points the alias to this new index when finished.
    """
    force = kwargs.get('force', False)
    prefix = kwargs.get('prefix', '')

    if is_reindexing_mkt() and not force:
        raise CommandError('Indexing is already occurring - use --force '
                           'to bypass')
    elif force:
        unflag_database()

    # The list of indexes that is currently aliased by `ALIAS`.
    try:
        aliases = ES.aliases(ALIAS).keys()
    except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
        aliases = []
    old_index = aliases[0] if aliases else None

    # Create a new index, using the index name with a timestamp.
    new_index = timestamp_index(prefix + ALIAS)

    # See how the index is currently configured.
    if old_index:
        try:
            s = (ES.get_settings(old_index).get(old_index, {})
                 .get('settings', {}))
        except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
            s = {}
    else:
        s = {}

    num_replicas = s.get('number_of_replicas',
                         settings.ES_DEFAULT_NUM_REPLICAS)
    num_shards = s.get('number_of_shards', settings.ES_DEFAULT_NUM_SHARDS)

    # Flag the database.
    chain = flag_database.si(new_index, old_index, ALIAS)

    # Create the index and mapping.
    #
    # Note: We set num_replicas=0 here to decrease load while re-indexing.
    # In a later step we increase it, which results in a more efficient
    # bulk copy in Elasticsearch.
    # For ES < 0.90 we manually enable compression.
    chain |= create_index.si(new_index, ALIAS, {
        'analysis': WebappIndexer.get_analysis(),
        'number_of_replicas': 0, 'number_of_shards': num_shards,
        'store.compress.tv': True, 'store.compress.stored': True,
        'refresh_interval': '-1'})

    # Index all the things!
    chain |= run_indexing.si(new_index)

    # After indexing we optimize the index, adjust settings, and point the
    # alias to the new index.
    chain |= update_alias.si(new_index, old_index, ALIAS, {
        'number_of_replicas': num_replicas, 'refresh_interval': '5s'})

    # Unflag the database.
    chain |= unflag_database.si()

    # Delete the old index, if any.
    if old_index:
        chain |= delete_index.si(old_index)

    chain |= output_summary.si()

    self.stdout.write('\nNew index and indexing tasks all queued up.\n')
    os.environ['FORCE_INDEXING'] = '1'
    try:
        chain.apply_async()
    finally:
        del os.environ['FORCE_INDEXING']
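The workflow above is built with Celery canvas primitives: task.si(...) creates an immutable signature, meaning the task receives only the arguments bound here and ignores the return value of the previous step, and the | operator composes signatures into a chain that runs them in order. A self-contained sketch of the same pattern, with made-up task names for illustration:

from celery import shared_task

@shared_task
def create(index):
    print('create %s' % index)

@shared_task
def populate(index):
    print('populate %s' % index)

# si() binds the arguments up front and ignores upstream results; the
# | operator chains the two steps so populate runs after create.
workflow = create.si('idx-20140101') | populate.si('idx-20140101')
workflow.apply_async()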
Example 2: handle
# Required import: from mkt.webapps.models import WebappIndexer [as alias]
# Or: from mkt.webapps.models.WebappIndexer import get_analysis [as alias]
def handle(self, *args, **kwargs):
    """Set up reindexing tasks.

    Creates a chain of tasks that creates a new index and indexes all
    objects, then points the alias to this new index when finished.
    """
    if not settings.MARKETPLACE:
        raise CommandError("This command affects only marketplace and "
                           "should be run under Marketplace settings.")

    force = kwargs.get("force", False)
    prefix = kwargs.get("prefix", "")

    if database_flagged() and not force:
        raise CommandError("Indexing is already occurring - use --force "
                           "to bypass")
    elif force:
        unflag_database()

    # The list of indexes that is currently aliased by `ALIAS`.
    try:
        aliases = ES.aliases(ALIAS).keys()
    except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
        aliases = []
    old_index = aliases[0] if aliases else None

    # Create a new index, using the index name with a timestamp.
    new_index = timestamp_index(prefix + ALIAS)

    # See how the index is currently configured.
    if old_index:
        try:
            s = ES.get_settings(old_index).get(old_index, {}).get("settings", {})
        except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
            s = {}
    else:
        s = {}

    num_replicas = s.get("number_of_replicas", settings.ES_DEFAULT_NUM_REPLICAS)
    num_shards = s.get("number_of_shards", settings.ES_DEFAULT_NUM_SHARDS)

    # Flag the database.
    chain = flag_database.si(new_index, old_index, ALIAS)

    # Create the index and mapping.
    #
    # Note: We set num_replicas=0 here to decrease load while re-indexing.
    # In a later step we increase it, which results in a more efficient
    # bulk copy in Elasticsearch.
    # For ES < 0.90 we manually enable compression.
    chain |= create_index.si(
        new_index,
        ALIAS,
        {
            "analysis": WebappIndexer.get_analysis(),
            "number_of_replicas": 0,
            "number_of_shards": num_shards,
            "store.compress.tv": True,
            "store.compress.stored": True,
            "refresh_interval": "-1",
        },
    )

    # Index all the things!
    chain |= run_indexing.si(new_index)

    # After indexing we optimize the index, adjust settings, and point the
    # alias to the new index.
    chain |= update_alias.si(
        new_index, old_index, ALIAS,
        {"number_of_replicas": num_replicas, "refresh_interval": "5s"}
    )

    # Unflag the database.
    chain |= unflag_database.si()

    # Delete the old index, if any.
    if old_index:
        chain |= delete_index.si(old_index)

    chain |= output_summary.si()

    self.stdout.write("\nNew index and indexing tasks all queued up.\n")
    os.environ["FORCE_INDEXING"] = "1"
    try:
        chain.apply_async()
    finally:
        del os.environ["FORCE_INDEXING"]
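The update_alias task itself is not shown on this page, but the Elasticsearch side of that step is a standard pattern: restore the replica count and refresh interval that were lowered for the bulk copy, then atomically repoint the alias. A hypothetical sketch using the same ES client; update_settings and update_aliases are assumptions about the pyelasticsearch client version in use, while the 'actions' body follows Elasticsearch's standard _aliases API:

# Hypothetical sketch: the client method names are assumptions, but the
# 'actions' body matches Elasticsearch's standard _aliases endpoint.
ES.update_settings(new_index, {'number_of_replicas': num_replicas,
                               'refresh_interval': '5s'})
actions = [{'add': {'index': new_index, 'alias': ALIAS}}]
if old_index:
    actions.insert(0, {'remove': {'index': old_index, 'alias': ALIAS}})
ES.update_aliases({'actions': actions})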