This article collects typical usage examples of the Golang cluster.QueryProcessor type. If you have been wondering what exactly cluster.QueryProcessor is, how to use it, or how it looks in real code, the curated examples below may help.
Five code examples of the QueryProcessor type are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
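All five examples drive the same small surface of cluster.QueryProcessor: a shard or coordinator pushes data into the processor one point at a time (YieldPoint) or one series at a time (YieldSeries), then closes it when the stream ends. For orientation, here is a minimal sketch of that interface as it can be inferred from the call sites in the examples; the method set is an assumption based only on these snippets, and the real interface in InfluxDB's cluster package may declare additional methods:

// QueryProcessor (sketch): method set inferred from the example call
// sites below; the actual interface may be larger.
type QueryProcessor interface {
	// YieldPoint hands over a single point. Returning false tells the
	// caller to stop producing (e.g. a LIMIT has been satisfied).
	YieldPoint(seriesName *string, columnNames []string, point *protocol.Point) bool
	// YieldSeries hands over a whole batch of points. The examples
	// discard its result, so the bool return here is an assumption.
	YieldSeries(series *protocol.Series) bool
	// Close flushes any buffered state once the producer is done.
	Close()
}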
Example 1: executeListSeriesQuery
func (self *LevelDbShard) executeListSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	it := self.db.NewIterator(self.readOptions)
	defer it.Close()

	database := querySpec.Database()
	seekKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(querySpec.Database()+"~")...)
	it.Seek(seekKey)
	dbNameStart := len(DATABASE_SERIES_INDEX_PREFIX)
	// scan index keys of the form PREFIX + "<db>~<series>" until we run
	// off the end of this database's key range
	for ; it.Valid(); it.Next() {
		key := it.Key()
		if len(key) < dbNameStart || !bytes.Equal(key[:dbNameStart], DATABASE_SERIES_INDEX_PREFIX) {
			break
		}
		dbSeries := string(key[dbNameStart:])
		parts := strings.Split(dbSeries, "~")
		if len(parts) > 1 {
			if parts[0] != database {
				break
			}
			name := parts[1]
			shouldContinue := processor.YieldPoint(&name, nil, nil)
			if !shouldContinue {
				return nil
			}
		}
	}
	return nil
}
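To see the same contract from the implementer's side, here is a hypothetical processor that merely collects series names. The type is illustrative, not part of the InfluxDB codebase; it satisfies the sketch above (reusing the same protocol types) and could be passed as the processor argument to executeListSeriesQuery:

// seriesNameCollector is a toy QueryProcessor: it records every yielded
// series name and never asks the producer to stop early.
type seriesNameCollector struct {
	names []string
}

func (c *seriesNameCollector) YieldPoint(name *string, _ []string, _ *protocol.Point) bool {
	c.names = append(c.names, *name)
	return true // keep iterating
}

func (c *seriesNameCollector) YieldSeries(_ *protocol.Series) bool { return true }

func (c *seriesNameCollector) Close() {}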
Example 2: readFromResposneChannels
func (self *CoordinatorImpl) readFromResposneChannels(processor cluster.QueryProcessor,
	writer SeriesWriter,
	isExplainQuery bool,
	errors chan<- error,
	channels <-chan (<-chan *protocol.Response)) {

	defer close(errors)
	for responseChan := range channels {
		for response := range responseChan {
			//log.Debug("GOT RESPONSE: ", response.Type, response.Series)
			log.Debug("GOT RESPONSE: ", response.Type)
			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				if response.ErrorMessage == nil {
					break
				}

				err := common.NewQueryError(common.InvalidArgument, *response.ErrorMessage)
				log.Error("Error while executing query: %s", err)
				errors <- err
				return
			}

			if response.Series == nil || len(response.Series.Points) == 0 {
				log.Debug("Series has no points, continue")
				continue
			}

			// if we don't have a processor, yield the series to the writer;
			// this happens if the shard took care of the query. otherwise the
			// client will get its points from the passthrough engine
			if processor != nil {
				// if the data wasn't aggregated at the shard level, aggregate
				// the data here
				log.Debug("YIELDING: %d points with %d columns", len(response.Series.Points), len(response.Series.Fields))
				processor.YieldSeries(response.Series)
				continue
			}

			// for an EXPLAIN query we don't write the actual points (of
			// response.Type Query) to the client
			if !(*response.Type == queryResponse && isExplainQuery) {
				writer.Write(response.Series)
			}
		}

		// once we're done with a response channel, signal queryShards to
		// start querying a new shard
		errors <- nil
	}
}
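The channels parameter, a channel of response channels, is an ordered fan-in: shards stream concurrently, but their responses are drained strictly in the order the inner channels were handed over, and the errors <- nil after each drained channel is the cue for the producer to start querying the next shard. A stripped-down, self-contained sketch of the same pattern with toy types (no InfluxDB dependencies):

package main

import "fmt"

func main() {
	// each inner channel plays the role of one shard's response stream
	channels := make(chan (<-chan int), 3)
	for i := 0; i < 3; i++ {
		ch := make(chan int)
		go func(id int, out chan<- int) {
			defer close(out)
			out <- id * 10 // producers run concurrently...
		}(i, ch)
		channels <- ch
	}
	close(channels)

	// ...but the consumer drains them in submission order
	for ch := range channels {
		for v := range ch {
			fmt.Println(v) // prints 0, 10, 20, in that order
		}
	}
}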
Example 3: executeListSeriesQuery
func (self *Shard) executeListSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	return self.yieldSeriesNamesForDb(querySpec.Database(), func(_name string) bool {
		name := _name
		return processor.YieldPoint(&name, nil, nil)
	})
}
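This is the same ListSeries operation as Example 1, with the LevelDB iteration factored out into a yieldSeriesNamesForDb helper; the name := _name copy mirrors Example 1's pattern of yielding a pointer to a local string.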
Example 4: executeQueryForSeries
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, seriesName string, columns []string, processor cluster.QueryProcessor) error {
	startTimeBytes := self.byteArrayForTime(querySpec.GetStartTime())
	endTimeBytes := self.byteArrayForTime(querySpec.GetEndTime())

	fields, err := self.getFieldsForSeries(querySpec.Database(), seriesName, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we
		// don't have this series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			log.Debug("Cannot find fields %v", columns)
			return nil
		default:
			log.Error("Error looking up fields for %s: %s", seriesName, err)
			return fmt.Errorf("Error looking up fields for %s: %s", seriesName, err)
		}
	}

	fieldCount := len(fields)
	rawColumnValues := make([]rawColumnValue, fieldCount)
	query := querySpec.SelectQuery()

	aliases := query.GetTableAliases(seriesName)
	if querySpec.IsSinglePointQuery() {
		series, err := self.fetchSinglePoint(querySpec, seriesName, fields)
		if err != nil {
			log.Error("Error reading a single point: %s", err)
			return err
		}
		if len(series.Points) > 0 {
			processor.YieldPoint(series.Name, series.Fields, series.Points[0])
		}
		return nil
	}

	fieldNames, iterators := self.getIterators(fields, startTimeBytes, endTimeBytes, query.Ascending)
	defer func() {
		for _, it := range iterators {
			it.Close()
		}
	}()

	seriesOutgoing := &protocol.Series{Name: protocol.String(seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, self.pointBatchSize)}

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	buffer := bytes.NewBuffer(nil)
	valueBuffer := proto.NewBuffer(nil)
	for {
		isValid := false
		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount)}

		for i, it := range iterators {
			if rawColumnValues[i].value != nil || !it.Valid() {
				if err := it.Error(); err != nil {
					return err
				}
				continue
			}

			key := it.Key()
			if len(key) < 16 {
				continue
			}

			if !isPointInRange(fields[i].Id, startTimeBytes, endTimeBytes, key) {
				continue
			}

			value := it.Value()
			sequenceNumber := key[16:]
			rawTime := key[8:16]
			rawColumnValues[i] = rawColumnValue{time: rawTime, sequence: sequenceNumber, value: value}
		}

		var pointTimeRaw []byte
		var pointSequenceRaw []byte
		// choose the highest (or lowest, in the case of ascending queries)
		// timestamp and sequence number. that will become the timestamp and
		// sequence of the next point.
		for _, value := range rawColumnValues {
			if value.value == nil {
				continue
			}

			pointTimeRaw, pointSequenceRaw = value.updatePointTimeAndSequence(pointTimeRaw,
				pointSequenceRaw, query.Ascending)
		}

		for i, iterator := range iterators {
			// if the value is nil, or doesn't match the point's timestamp and
			// sequence number, then skip it
			if rawColumnValues[i].value == nil ||
				!bytes.Equal(rawColumnValues[i].time, pointTimeRaw) ||
				!bytes.Equal(rawColumnValues[i].sequence, pointSequenceRaw) {

				point.Values[i] = &protocol.FieldValue{IsNull: &TRUE}
				continue
			}
//......... remainder of this example omitted .........
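Even with the tail of the function omitted, the shape of the loop is clear: each column of a series is stored under its own key range, so the code performs a k-way merge across the per-column iterators. On every pass it pulls the next raw (time, sequence, value) triple from each column, picks the winning timestamp and sequence number for the query's direction, and fills in a NULL for any column that has no value at exactly that instant.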
Example 5: runQuerySpec
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	shards := self.clusterConfiguration.GetShards(querySpec)

	shouldAggregateLocally := true
	var processor cluster.QueryProcessor
	var responseChan chan *protocol.Response
	var seriesClosed chan bool

	for _, s := range shards {
		// If the aggregation is done at the shard level, we don't need to
		// do it here at the coordinator level.
		if !s.ShouldAggregateLocally(querySpec) {
			seriesClosed = make(chan bool)
			shouldAggregateLocally = false
			responseChan = make(chan *protocol.Response)

			if querySpec.SelectQuery() != nil {
				processor = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
			} else {
				bufferSize := 100
				processor = engine.NewPassthroughEngine(responseChan, bufferSize)
			}

			go func() {
				for {
					res := <-responseChan
					if *res.Type == endStreamResponse || *res.Type == accessDeniedResponse {
						seriesWriter.Close()
						seriesClosed <- true
						return
					}
					if res.Series != nil && len(res.Series.Points) > 0 {
						seriesWriter.Write(res.Series)
					}
				}
			}()
			break
		}
	}

	responses := make([]chan *protocol.Response, 0)
	for _, shard := range shards {
		responseChan := make(chan *protocol.Response, self.config.QueryShardBufferSize)
		go shard.Query(querySpec, responseChan)
		responses = append(responses, responseChan)
	}

	for i, responseChan := range responses {
		log.Debug("READING: shard: ", shards[i].String())
		for {
			response := <-responseChan
			log.Debug("GOT RESPONSE: ", response.Type, response.Series)
			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				break
			}

			if shouldAggregateLocally {
				log.Debug("WRITING: ", len(response.Series.Points))
				seriesWriter.Write(response.Series)
				log.Debug("WRITING (done)")
				continue
			}

			// if the data wasn't aggregated at the shard level, aggregate
			// the data here
			log.Debug("YIELDING: ", len(response.Series.Points))
			if response.Series != nil {
				for _, p := range response.Series.Points {
					processor.YieldPoint(response.Series.Name, response.Series.Fields, p)
				}
			}
		}
		log.Debug("DONE: shard: ", shards[i].String())
	}

	if !shouldAggregateLocally {
		processor.Close()
		<-seriesClosed
		return nil
	}

	seriesWriter.Close()
	return nil
}
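Example 5 shows both halves of the trade-off in one place: when every shard can aggregate locally, responses are written straight through to seriesWriter; otherwise the coordinator builds a processor (a full query engine for SELECT queries, a passthrough engine otherwise), feeds it the raw points, and relies on the goroutine reading responseChan to write the aggregated output and close the writer once the end-of-stream response arrives.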