

Python elasticsearch.ElasticsearchException Code Examples

This article collects typical usage examples of elasticsearch.ElasticsearchException in Python, the base class for the exceptions raised by the Elasticsearch client. If you are unsure what elasticsearch.ElasticsearchException is, how to catch it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples from the elasticsearch package it belongs to.


The sections below present 15 code examples of elasticsearch.ElasticsearchException, sorted by popularity by default.
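
Before the project examples, here is a minimal, self-contained sketch of the common pattern they share: wrap a client call in try/except and catch elasticsearch.ElasticsearchException, the base class of the exceptions raised by the official Python client (client versions 7.x and earlier). The host URL, index name, and function name below are illustrative placeholders, not taken from any of the listed projects.

import logging

import elasticsearch

log = logging.getLogger(__name__)

def count_documents(index_name, hosts=("http://localhost:9200",)):
    """Return the number of documents in an index, or None on any client error."""
    client = elasticsearch.Elasticsearch(hosts=list(hosts))
    try:
        # count() raises subclasses of ElasticsearchException (e.g. ConnectionError,
        # TransportError) on connection failures, timeouts, and non-2xx responses.
        return client.count(index=index_name)["count"]
    except elasticsearch.ElasticsearchException:
        log.exception("Count request against index [%s] failed", index_name)
        return None

In a long-running job you would usually retry or re-raise instead of returning None; Example 2 below shows that variant with an exponential back-off loop.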

Example 1: extract_mappings_and_corpora

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def extract_mappings_and_corpora(client, output_path, indices_to_extract):
    indices = []
    corpora = []
    # first extract index metadata (which is cheap) and defer extracting data to reduce the potential for
    # errors due to invalid index names late in the process.
    for index_name in indices_to_extract:
        try:
            indices += index.extract(client, output_path, index_name)
        except ElasticsearchException:
            logging.getLogger(__name__).exception("Failed to extract index [%s]", index_name)

    # That list only contains valid indices (with index patterns already resolved)
    for i in indices:
        c = corpus.extract(client, output_path, i["name"])
        if c:
            corpora.append(c)

    return indices, corpora 
Author: elastic, Project: rally, Lines: 20, Source: tracker.py

Example 2: _bulk_upload

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def _bulk_upload(self, es_batch):
        max_attempts = 4
        attempts = 0
        # Initial time to wait between indexing attempts
        # Grows exponentially
        cooloff = 5
        while True:
            try:
                deque(helpers.parallel_bulk(self.es, es_batch, chunk_size=400))
            except elasticsearch.ElasticsearchException:
                # Something went wrong during indexing.
                log.warning(
                    f"Elasticsearch rejected bulk query. We will retry in"
                    f" {cooloff}s. Attempt {attempts}. Details: ",
                    exc_info=True
                )
                time.sleep(cooloff)
                cooloff *= 2
                if attempts >= max_attempts:
                    raise ValueError('Exceeded maximum bulk index retries')
                attempts += 1
                continue
            break 
Author: creativecommons, Project: cccatalog-api, Lines: 25, Source: indexer.py

Example 3: register_metadata_dashboard

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def register_metadata_dashboard(event, context):
    if event['RequestType'] != 'Create':
        return send_cfnresponse(event, context, CFN_SUCCESS, {})
    quickstart_bucket = s3_resource.Bucket(event['ResourceProperties']['QSS3BucketName'])
    kibana_dashboards_key = os.path.join(
        event['ResourceProperties']['QSS3KeyPrefix'],
        'assets/kibana/kibana_metadata_visualizations.json'
    )
    elasticsearch_endpoint = event['ResourceProperties']['ElasticsearchEndpoint']
    try:
        quickstart_bucket.download_file(kibana_dashboards_key, TMP_KIBANA_JSON_PATH)
        create_metadata_visualizations(elasticsearch_endpoint)
        return send_cfnresponse(event, context, CFN_SUCCESS, {})
    except (ClientError, ElasticsearchException) as e:
        print(e)
        return send_cfnresponse(event, context, CFN_FAILED, {}) 
Author: aws-quickstart, Project: quickstart-datalake-47lining, Lines: 18, Source: lambdas.py

Example 4: bulk_sync_wrapper

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def bulk_sync_wrapper(self, actions):
        """
        Wrapper to publish events.
        Workaround for elasticsearch_async not supporting bulk operations
        """
        from elasticsearch import ElasticsearchException
        from elasticsearch.helpers import bulk

        try:
            bulk_response = bulk(self._gateway.get_sync_client(), actions)
            LOGGER.debug("Elasticsearch bulk response: %s",
                         str(bulk_response))
            LOGGER.info("Publish Succeeded")
        except ElasticsearchException as err:
            LOGGER.exception(
                "Error publishing documents to Elasticsearch: %s", err) 
Author: legrego, Project: homeassistant-elasticsearch, Lines: 18, Source: es_doc_publisher.py

Example 5: queryAlerts

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def queryAlerts(maxAlerts, clientDomain, relevantIndex):
    """ Get IP addresses from alerts in elasticsearch """

    esquery="""{
            "query": {
                "terms": {
                    "clientDomain": [ %s ]
                }
            },
            "sort": {
                "recievedTime": {
                    "order": "desc"
                    }
                },
            "size": %s,
            "_source": [
                "createTime",
                "recievedTime",
                "peerIdent",
                "peerType",
                "country",
                "targetCountry",
                "originalRequestString",
                "location",
                "sourceEntryIp"
                ]
            }""" % (clientDomain, maxAlerts)
    try:
        res = es.search(index=relevantIndex, body=esquery)
        return res["hits"]["hits"]
    except ElasticsearchException as err:
        app.logger.error('ElasticSearch error: %s' %  err)

    return False 
Author: dtag-dev-sec, Project: PEBA, Lines: 36, Source: peba.py

Example 6: queryAlertsWithoutIP

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def queryAlertsWithoutIP(maxAlerts, clientDomain, relevantIndex):
    """ Get IP addresses from alerts in elasticsearch """

    esquery="""
    {
            "query": {
                "terms": {
                    "clientDomain": [ %s ]
                }
            },
            "sort": {
                "recievedTime": {
                    "order": "desc"
                    }
                },
            "size": %s,
            "_source": [
                "createTime",
                "peerType",
                "country",
                "originalRequestString",
                "location",
                "targetCountry",
                "countryName",
                "locationDestination",
                "recievedTime",
                "username",
                "password",
                "login",
                "clientDomain"
                ]
            }""" % (clientDomain, maxAlerts)

    try:
        res = es.search(index=relevantIndex, body=esquery)
        return res["hits"]["hits"]
    except ElasticsearchException as err:
        app.logger.error('ElasticSearch error: %s' % err)

    return False 
Author: dtag-dev-sec, Project: PEBA, Lines: 42, Source: peba.py

Example 7: getNumberAlerts

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def getNumberAlerts(timeframe, clientDomain):
    ''' retrieves number of alerts from index in timeframe (minutes)'''
    try:
        res = es.search(index=esindex, body={
            "query": {
                "bool": {
                    "must": [
                        {
                            "match": {
                                "clientDomain": clientDomain
                            }
                        }
                    ],
                    "filter": [
                        {
                            "range": {
                                "createTime": {
                                    "gte": "now-"+str(timeframe)+"m"
                                }
                            }
                        }
                    ]
                }
            },
            "size": 0
        })
        return res['hits']['total']
    except ElasticsearchException as err:
        print('ElasticSearch error: %s' % err)

    return False 
Author: dtag-dev-sec, Project: PEBA, Lines: 33, Source: analysis.py

Example 8: getHPStats

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def getHPStats(peerIdent):
    for i in range(days):

        esquery = """
           {
             "query": {
               "bool": 
                   {
                       "must":
                       [
                           {"term":
                               {"clientDomain" : "false" }
                           }
                       ]
                   }
             },
             "from": 0,
             "size": 0,
             "sort": [],
             "aggs": {
               "peers": {
                 "terms": {
                   "field": "peerIdent",
                   "size": 10000
                 }
               }
               }}
               """

        try:
            res = es.search(index=getRelevantIndex(i), body=esquery)
            print(res["aggregations"]["peers"]["buckets"])


        except ElasticsearchException as err:
            print('ElasticSearch error: %s' % err) 
Author: dtag-dev-sec, Project: PEBA, Lines: 38, Source: analysis.py

Example 9: exists

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def exists(self):
        """Check if this task has already run successfully in the past."""
        try:
            self.elasticsearch_client.get(
                index=self.marker_index,
                doc_type=self.marker_doc_type,
                id=self.marker_index_document_id()
            )
            return True
        except elasticsearch.NotFoundError:
            log.debug('Marker document not found.')
        except elasticsearch.ElasticsearchException as err:
            log.warn(err)
        return False 
Author: edx, Project: edx-analytics-pipeline, Lines: 16, Source: elasticsearch_target.py

Example 10: handle_bucket_event

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def handle_bucket_event(event, context):
    sns_message = json.loads(event["Records"][0]["Sns"]["Message"])
    bucket = sns_message["Records"][0]["s3"]["bucket"]["name"]
    key = urllib.parse.unquote_plus(sns_message["Records"][0]["s3"]["object"]["key"])
    print(bucket, key)
    try:
        response = s3.head_object(Bucket=bucket, Key=key)
    except ClientError as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist, your bucket is in the same region as this function and necessary permissions have been granted.'.format(key, bucket))
        raise e

    metadata = {
        'key': key,
        'ContentLength': response['ContentLength'],
        'SizeMiB': response['ContentLength'] / 1024**2,
        'LastModified': response['LastModified'].isoformat(),
        'ContentType': response['ContentType'],
        'ETag': response['ETag'],
        'Dataset': key.split('/')[0]
    }
    print("METADATA: " + str(metadata))

    es_client = make_elasticsearch_client(os.environ['ELASTICSEARCH_ENDPOINT'])

    try:
        es_client.index(index=es_index, doc_type=bucket, body=json.dumps(metadata))
    except ElasticsearchException as e:
        print(e)
        print("Could not index in Elasticsearch")
        raise e 
Author: aws-quickstart, Project: quickstart-datalake-47lining, Lines: 33, Source: lambdas.py

Example 11: more_like_this

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def more_like_this(elastic_url, fields: list, like: list, size: int, filters: list, aggregations: list, include: bool, if_agg_only: bool, dataset: Dataset, return_fields=None):
        # Create the base query creator and unite with ES gateway.
        search = Search(using=Elasticsearch(elastic_url)).index(dataset.index).doc_type(dataset.mapping)
        mlt = MoreLikeThis(like=like, fields=fields, min_term_freq=1, max_query_terms=12, include=include)  # Prepare the MLT part of the query.

        paginated_search = search[0:size]  # Set how many documents to return.
        limited_search = paginated_search.source(return_fields) if return_fields else paginated_search  # If added, choose which FIELDS to return.
        finished_search = limited_search.query(mlt)  # Add the premade MLT into the query.

        # Apply all the user-set filters, if they didn't add any this value will be [] and it quits.
        for filter_dict in filters:
            finished_search = finished_search.filter(Q(filter_dict))

        # Apply all the user-set aggregations, if they didn't add any this value will be [] and it quits.
        for aggregation_dict in aggregations:
            # aggs.bucket() does not return a Search object but changes it instead.
            if aggregation_dict["agg_type"] == "composite":
                after = aggregation_dict.get("after_key", None)
                finished_search = ES_Manager.handle_composition_aggregation(finished_search.to_dict(), aggregation_dict, after)
            else:
                field_name = aggregation_dict["field"]
                index = like[0]["_index"]
                field = "{}.keyword".format(field_name) if ES_Manager.is_field_text_field(field_name=field_name, index_name=index) else field_name
                finished_search.aggs.bucket(name=aggregation_dict["bucket_name"], agg_type=aggregation_dict["agg_type"], field=field)

        # Choose if you want to return only the aggregations in {"bucket_name": {results...}} format.
        if if_agg_only:
            finished_search = finished_search.params(size=0)
            response = finished_search.execute()
            return response.aggs.to_dict()

        try:
            response = finished_search.execute()
            result = {"hits": [hit.to_dict() for hit in response]}  # Throw out all metadata and keep only the documents.
            if response.aggs: result.update({"aggregations": response.aggs.to_dict()})  # IF the aggregation query returned anything, THEN add the "aggregations" key with results.
            return result

        except ElasticsearchException as e:
            logging.getLogger(ERROR_LOGGER).exception(e)
            return {"elasticsearch": [str(e)]} 
Author: texta-tk, Project: texta, Lines: 42, Source: es_manager.py

Example 12: elasticsearch_connection

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def elasticsearch_connection(hosts, sniff=False, sniffer_timeout=60):
    """Crea una conexión a Elasticsearch.

    Args:
        hosts (list): Lista de nodos Elasticsearch a los cuales conectarse.
        sniff (bool): Activa la función de sniffing, la cual permite descubrir
            nuevos nodos en un cluster y conectarse a ellos.

    Raises:
        DataConnectionException: si la conexión no pudo ser establecida.

    Returns:
        Elasticsearch: Conexión a Elasticsearch.

    """
    try:
        options = {
            'hosts': hosts
        }

        if sniff:
            options['sniff_on_start'] = True
            options['sniff_on_connection_fail'] = True
            options['sniffer_timeout'] = sniffer_timeout

        return elasticsearch.Elasticsearch(**options)
    except elasticsearch.ElasticsearchException as e:
        raise DataConnectionException from e 
Author: datosgobar, Project: georef-ar-api, Lines: 30, Source: data.py

Example 13: _run_multisearch

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def _run_multisearch(es, searches):
    """Ejecuta una lista de búsquedas Elasticsearch utilizando la función
    MultiSearch. La cantidad de búsquedas que se envían a la vez es
    configurable vía la variable ES_MULTISEARCH_MAX_LEN.

    Args:
        es (Elasticsearch): Conexión a Elasticsearch.
        searches (list): Lista de elasticsearch_dsl.Search.

    Raises:
        DataConnectionException: Si ocurrió un error al ejecutar las búsquedas.

    Returns:
        list: Lista de respuestas a cada búsqueda.

    """
    step_size = constants.ES_MULTISEARCH_MAX_LEN
    responses = []

    # Split the searches into several batches if necessary.
    for i in range(0, len(searches), step_size):
        end = min(i + step_size, len(searches))
        ms = MultiSearch(using=es)

        for j in range(i, end):
            ms = ms.add(searches[j])

        try:
            responses.extend(ms.execute(raise_on_error=True))
        except elasticsearch.ElasticsearchException as e:
            raise DataConnectionException() from e

    return responses 
Author: datosgobar, Project: georef-ar-api, Lines: 35, Source: data.py

Example 14: test_elasticsearch_connection_error

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def test_elasticsearch_connection_error(self):
        """Se debería devolver un error 500 cuando falla la conexión a
        Elasticsearch."""
        self.es.side_effect = elasticsearch.ElasticsearchException()
        self.assert_500_error(random.choice(ENDPOINTS)) 
Author: datosgobar, Project: georef-ar-api, Lines: 7, Source: test_mock_normalizer.py

Example 15: test_elasticsearch_msearch_error

# Required import: import elasticsearch [as alias]
# Or: from elasticsearch import ElasticsearchException [as alias]
def test_elasticsearch_msearch_error(self):
        """Se debería devolver un error 500 cuando falla la query
        MultiSearch."""
        self.es.return_value.msearch.side_effect = \
            elasticsearch.ElasticsearchException()
        self.assert_500_error(random.choice(ENDPOINTS)) 
Author: datosgobar, Project: georef-ar-api, Lines: 8, Source: test_mock_normalizer.py


Note: The elasticsearch.ElasticsearchException examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; please consult each project's License before distributing or reusing it. Do not republish this article without permission.