This article collects typical usage examples of the QuerySpec.IsDropSeriesQuery method from the Go package github.com/influxdb/influxdb/parser. If you are wondering what QuerySpec.IsDropSeriesQuery does or how to call it, the selected examples below should help. You can also look up further usage examples for the enclosing type, github.com/influxdb/influxdb/parser.QuerySpec.
Three code examples of the QuerySpec.IsDropSeriesQuery method are shown below, sorted by popularity by default.
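Before the examples, a minimal sketch of the shared pattern may help: all three methods below branch on the QuerySpec predicate methods (IsListSeriesQuery, IsDeleteFromSeriesQuery, IsDropSeriesQuery) before falling through to the normal select path. The sketch is not taken from the InfluxDB source; the helper name queryKind is invented for illustration, while the predicate methods are the ones used in the examples.

package sketch

import "github.com/influxdb/influxdb/parser"

// queryKind labels a QuerySpec the same way the Query methods in the
// examples below dispatch on it. The function name is hypothetical; only
// the predicate methods come from the parser package.
func queryKind(spec *parser.QuerySpec) string {
	switch {
	case spec.IsListSeriesQuery():
		return "list series"
	case spec.IsDeleteFromSeriesQuery():
		return "delete from series"
	case spec.IsDropSeriesQuery():
		return "drop series"
	default:
		return "select"
	}
}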
Example 1: Query
func (self *LevelDbShard) Query(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	if querySpec.IsListSeriesQuery() {
		return self.executeListSeriesQuery(querySpec, processor)
	} else if querySpec.IsDeleteFromSeriesQuery() {
		return self.executeDeleteQuery(querySpec, processor)
	} else if querySpec.IsDropSeriesQuery() {
		return self.executeDropSeriesQuery(querySpec, processor)
	}

	seriesAndColumns := querySpec.SelectQuery().GetReferencedColumns()

	if !self.hasReadAccess(querySpec) {
		return errors.New("User does not have access to one or more of the series requested.")
	}

	for series, columns := range seriesAndColumns {
		if regex, ok := series.GetCompiledRegex(); ok {
			seriesNames := self.getSeriesForDbAndRegex(querySpec.Database(), regex)
			for _, name := range seriesNames {
				if !querySpec.HasReadAccess(name) {
					continue
				}
				err := self.executeQueryForSeries(querySpec, name, columns, processor)
				if err != nil {
					return err
				}
			}
		} else {
			err := self.executeQueryForSeries(querySpec, series.Name, columns, processor)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
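Note how this LevelDbShard version dispatches list, delete, and drop queries before touching SelectQuery(): the referenced-columns lookup and the read-access check only make sense for selects. When the FROM clause is a regular expression, the shard expands it against the database's series names and checks read access per matched series before executing the query for each one.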
Example 2: Query
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan<- *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryStringWithTimeCondition())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryStringWithTimeCondition(), func(err interface{}) {
		response <- &p.Response{
			Type:         p.Response_ERROR.Enum(),
			ErrorMessage: p.String(fmt.Sprintf("%s", err)),
		}
	})
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor engine.Processor = NewResponseChannelProcessor(NewResponseChannelWrapper(response))
		var err error

		processor = NewShardIdInserterProcessor(self.Id(), processor)

		processor, err = self.getProcessor(querySpec, processor)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while creating engine: %s", err)
			return
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		log.Debug("Processor chain: %s\n", engine.ProcessorChain(processor))

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			return
		}
		processor.Close()
		response <- &p.Response{Type: p.Response_END_STREAM.Enum()}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{
		Type:         p.Response_ERROR.Enum(),
		ErrorMessage: &message,
	}
	log.Error(message)
}
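Example 2 streams its results over a channel of protobuf responses and signals completion with Response_END_STREAM. As a rough illustration of the receiving side (an assumption, not code from the InfluxDB repository; drainResponses is an invented name, and the protobuf package is assumed to be the one imported as p in the examples, github.com/influxdb/influxdb/protocol), a caller might drain the channel like this:

package sketch

import (
	"fmt"

	p "github.com/influxdb/influxdb/protocol"
)

// drainResponses reads responses produced by ShardData.Query until the
// stream ends or an error response arrives. This is a sketch of a
// plausible caller, not the actual coordinator code.
func drainResponses(response <-chan *p.Response) error {
	for resp := range response {
		if resp.Type == nil {
			continue
		}
		switch *resp.Type {
		case p.Response_ERROR:
			return fmt.Errorf("query failed: %s", resp.GetErrorMessage())
		case p.Response_END_STREAM:
			return nil // query finished cleanly
		default:
			// a data response carrying series points; hand it off to the caller
		}
	}
	return nil
}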
Example 3: Query
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryString())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryString(), func(err interface{}) {
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(fmt.Sprintf("%s", err))}
	})
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor QueryProcessor
		var err error

		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}
			if query.GetFromClause().Type != parser.FromClauseInnerJoin {
				// Joins do their own filtering since we need to get all
				// points before filtering. This is due to the fact that some
				// where expressions will be difficult to compute before the
				// points are joined together, think where clause with
				// left.column = 'something' or right.column =
				// 'something_else'. We can't filter the individual series
				// separately. The filtering happens in merge.go:55
				processor = engine.NewFilteringEngine(query, processor)
			}
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			return
		}
		processor.Close()
		response <- &p.Response{Type: &endStreamResponse}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
	log.Error(message)
}
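Examples 2 and 3 are two generations of the same ShardData.Query method. The older version (example 3) builds the processor chain inline: a ListSeriesEngine for list queries, a PassthroughEngine for delete, drop, and single-point queries, and for selects either a QueryEngine (when ShouldAggregateLocally is true) or a passthrough/limited passthrough engine, with non-join queries additionally wrapped in a FilteringEngine. The newer version (example 2) moves that selection into getProcessor and reports failures with a dedicated Response_ERROR type instead of reusing endStreamResponse. In both, delete and drop handling is gated on RunAgainstAllServersInShard so those queries fan out to every server holding the shard, which is where IsDropSeriesQuery comes into play.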