This article collects typical usage examples of the Golang QuerySpec class from github.com/influxdb/influxdb/parser, for anyone wondering what QuerySpec is for and how it is used in practice.
Fifteen code examples of the QuerySpec class are shown below, ordered by popularity.
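Most of the examples receive an already-constructed *parser.QuerySpec. For orientation, a spec is typically built from a parsed query plus the requesting user and target database. The following is only a minimal setup sketch: the ParseQuery and NewQuerySpec helpers and their signatures are assumptions inferred from how QuerySpec is used below, not shown in these examples.

// Hypothetical setup sketch; ParseQuery/NewQuerySpec shapes are assumed, not taken from the examples below.
func buildQuerySpec(user common.User, db, queryString string) (*parser.QuerySpec, error) {
	queries, err := parser.ParseQuery(queryString)
	if err != nil {
		return nil, err
	}
	// A QuerySpec wraps one parsed query together with the user and database it runs against.
	return parser.NewQuerySpec(user, db, queries[0]), nil
}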
Example 1: getShardsAndProcessor
func (self *Coordinator) getShardsAndProcessor(querySpec *parser.QuerySpec, writer engine.Processor) ([]*cluster.ShardData, engine.Processor, error) {
	shards, err := self.clusterConfiguration.GetShardsForQuery(querySpec)
	if err != nil {
		return nil, nil, err
	}
	shouldAggregateLocally := shards.ShouldAggregateLocally(querySpec)
	q := querySpec.SelectQuery()
	if q == nil {
		return shards, writer, nil
	}
	if !shouldAggregateLocally {
		// if we should aggregate in the coordinator (i.e. aggregation
		// isn't happening locally at the shard level), create an engine
		shardIds := make([]uint32, len(shards))
		for i, s := range shards {
			shardIds[i] = s.Id()
		}
		writer, err = engine.NewQueryEngine(writer, q, shardIds)
		if err != nil {
			log.Error(err)
			log.Debug("Coordinator processor chain: %s", engine.ProcessorChain(writer))
		}
		return shards, writer, err
	}
	// if we have a query with limit, then create an engine, or we can
	// make the passthrough limit aware
	writer = engine.NewPassthroughEngineWithLimit(writer, 100, q.Limit)
	return shards, writer, nil
}
Example 2: QueryResponseBufferSize
func (self *ShardData) QueryResponseBufferSize(querySpec *parser.QuerySpec, batchPointSize int) int {
	groupByTime := querySpec.GetGroupByInterval()
	if groupByTime == nil {
		// If the group by time is nil, we shouldn't have to use a buffer since the shards should be queried sequentially.
		// However, set this to something high just to be safe.
		log.Debug("BUFFER SIZE: 1000")
		return 1000
	}
	tickCount := int(self.shardNanoseconds / uint64(*groupByTime))
	if tickCount < 10 {
		tickCount = 100
	} else if tickCount > 1000 {
		// cap this because each response should have up to this number of points in it.
		tickCount = tickCount / batchPointSize
		// but make sure it's at least 1k
		if tickCount < 1000 {
			tickCount = 1000
		}
	}
	columnCount := querySpec.GetGroupByColumnCount()
	if columnCount > 1 {
		// we don't really know the cardinality for any column up front. This is just a multiplier so we'll see how this goes.
		// each response can have many points, so having a buffer of the ticks * 100 should be safe, but we'll see.
		tickCount = tickCount * 100
	}
	log.Debug("BUFFER SIZE: %d", tickCount)
	return tickCount
}
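To make the sizing rules concrete, here is a small hypothetical walk-through; the numbers are illustrative and simply mirror the branches above, they are not taken from the library.

// Illustrative calculation only: a 7-day shard queried with "group by time(1h), host".
func exampleBufferSize() int {
	shardNanoseconds := uint64((7 * 24 * time.Hour).Nanoseconds()) // 604800e9 ns covered by the shard
	groupByInterval := uint64(time.Hour)                           // 3600e9 ns per tick
	tickCount := int(shardNanoseconds / groupByInterval)           // 168: neither < 10 nor > 1000, so kept as-is
	tickCount = tickCount * 100                                    // more than one group-by column
	return tickCount                                               // 16800 buffered responses
}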
Example 3: runListSeriesQuery
func (self *Coordinator) runListSeriesQuery(querySpec *parser.QuerySpec, p engine.Processor) error {
	allSeries := self.clusterConfiguration.MetaStore.GetSeriesForDatabase(querySpec.Database())
	matchingSeries := allSeries
	if q := querySpec.Query().GetListSeriesQuery(); q.HasRegex() {
		matchingSeries = nil
		regex := q.GetRegex()
		for _, s := range allSeries {
			if !regex.MatchString(s) {
				continue
			}
			matchingSeries = append(matchingSeries, s)
		}
	}
	name := "list_series_result"
	fields := []string{"name"}
	points := make([]*protocol.Point, len(matchingSeries), len(matchingSeries))
	for i, s := range matchingSeries {
		fieldValues := []*protocol.FieldValue{{StringValue: proto.String(s)}}
		points[i] = &protocol.Point{Values: fieldValues}
	}
	seriesResult := &protocol.Series{Name: &name, Fields: fields, Points: points}
	_, err := p.Yield(seriesResult)
	return err
}
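Example 3 pushes its result into an engine.Processor via Yield. As a usage illustration, a custom processor can be chained in front of the final writer. This is a hypothetical sketch that only assumes the Yield(series) (bool, error) shape implied by the calls above, and embeds the downstream processor to satisfy the rest of the interface.

// countingProcessor is a hypothetical wrapper used only for illustration.
type countingProcessor struct {
	engine.Processor       // downstream processor; satisfies the remaining interface methods
	points           int64 // total points seen so far
}

func (c *countingProcessor) Yield(s *protocol.Series) (bool, error) {
	c.points += int64(len(s.Points))
	return c.Processor.Yield(s) // pass the series through unchanged
}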
Example 4: getShardsToMatchQuery
func (self *ClusterConfiguration) getShardsToMatchQuery(querySpec *parser.QuerySpec) ([]*ShardData, error) {
	self.shardLock.RLock()
	defer self.shardLock.RUnlock()
	seriesNames, fromRegex := querySpec.TableNamesAndRegex()
	db := querySpec.Database()
	if fromRegex != nil {
		seriesNames = self.MetaStore.GetSeriesForDatabaseAndRegex(db, fromRegex)
	}
	uniqueShards := make(map[uint32]*ShardData)
	for _, name := range seriesNames {
		if fs := self.MetaStore.GetFieldsForSeries(db, name); len(fs) == 0 {
			return nil, fmt.Errorf("Couldn't find series: %s", name)
		}
		space := self.getShardSpaceToMatchSeriesName(db, name)
		if space == nil {
			continue
		}
		for _, shard := range space.shards {
			uniqueShards[shard.id] = shard
		}
	}
	shards := make([]*ShardData, 0, len(uniqueShards))
	for _, shard := range uniqueShards {
		shards = append(shards, shard)
	}
	SortShardsByTimeDescending(shards)
	return shards, nil
}
Example 5: executeListSeriesQuery
func (self *LevelDbShard) executeListSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	it := self.db.NewIterator(self.readOptions)
	defer it.Close()
	database := querySpec.Database()
	seekKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(querySpec.Database()+"~")...)
	it.Seek(seekKey)
	dbNameStart := len(DATABASE_SERIES_INDEX_PREFIX)
	for ; it.Valid(); it.Next() {
		key := it.Key()
		if len(key) < dbNameStart || !bytes.Equal(key[:dbNameStart], DATABASE_SERIES_INDEX_PREFIX) {
			break
		}
		dbSeries := string(key[dbNameStart:])
		parts := strings.Split(dbSeries, "~")
		if len(parts) > 1 {
			if parts[0] != database {
				break
			}
			name := parts[1]
			shouldContinue := processor.YieldPoint(&name, nil, nil)
			if !shouldContinue {
				return nil
			}
		}
	}
	return nil
}
Example 6: Query
func (self *Shard) Query(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	if querySpec.IsListSeriesQuery() {
		return self.executeListSeriesQuery(querySpec, processor)
	} else if querySpec.IsDeleteFromSeriesQuery() {
		return self.executeDeleteQuery(querySpec, processor)
	}
	seriesAndColumns := querySpec.SelectQuery().GetReferencedColumns()
	if !self.hasReadAccess(querySpec) {
		return errors.New("User does not have access to one or more of the series requested.")
	}
	for series, columns := range seriesAndColumns {
		if regex, ok := series.GetCompiledRegex(); ok {
			seriesNames := self.metaStore.GetSeriesForDatabaseAndRegex(querySpec.Database(), regex)
			for _, name := range seriesNames {
				if !querySpec.HasReadAccess(name) {
					continue
				}
				err := self.executeQueryForSeries(querySpec, name, columns, processor)
				if err != nil {
					return err
				}
			}
		} else {
			err := self.executeQueryForSeries(querySpec, series.Name, columns, processor)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
Example 7: GetShardsForQuery
func (self *ClusterConfiguration) GetShardsForQuery(querySpec *parser.QuerySpec) Shards {
	shards := self.getShardsToMatchQuery(querySpec)
	shards = self.getShardRange(querySpec, shards)
	if querySpec.IsAscending() {
		SortShardsByTimeAscending(shards)
	}
	return shards
}
Example 8: runDeleteQuery
func (self *CoordinatorImpl) runDeleteQuery(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	user := querySpec.User()
	db := querySpec.Database()
	if ok, err := self.permissions.AuthorizeDeleteQuery(user, db); !ok {
		return err
	}
	querySpec.RunAgainstAllServersInShard = true
	return self.runQuerySpec(querySpec, seriesWriter)
}
Example 9: runQuerySpec
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter)
	if err != nil {
		return err
	}
	if len(shards) == 0 {
		return fmt.Errorf("Couldn't look up columns")
	}
	defer func() {
		if processor != nil {
			processor.Close()
			<-seriesClosed
		} else {
			seriesWriter.Close()
		}
	}()
	shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
	if self.shouldQuerySequentially(shards, querySpec) {
		log.Debug("Querying shards sequentially")
		shardConcurrentLimit = 1
	}
	log.Debug("Shard concurrent limit: %d", shardConcurrentLimit)
	errors := make(chan error, shardConcurrentLimit)
	for i := 0; i < shardConcurrentLimit; i++ {
		errors <- nil
	}
	responseChannels := make(chan (<-chan *protocol.Response), shardConcurrentLimit)
	go self.readFromResponseChannels(processor, seriesWriter, querySpec.IsExplainQuery(), errors, responseChannels)
	err = self.queryShards(querySpec, shards, errors, responseChannels)
	// make sure we read the rest of the errors and responses
	for _err := range errors {
		if err == nil {
			err = _err
		}
	}
	for responsechan := range responseChannels {
		for response := range responsechan {
			if response.GetType() != endStreamResponse {
				continue
			}
			if response.ErrorMessage != nil && err == nil {
				err = common.NewQueryError(common.InvalidArgument, *response.ErrorMessage)
			}
			break
		}
	}
	return err
}
Example 10: CheckQueryPermissions
func (self *Permissions) CheckQueryPermissions(user common.User, db string, querySpec *parser.QuerySpec) (ok bool, err common.AuthorizationError) {
	switch querySpec.Query().Type() {
	case parser.Delete:
		return self.AuthorizeDeleteQuery(user, db)
	case parser.Select:
		return self.AuthorizeSelectQuery(user, db, querySpec)
	default:
		return true, ""
	}
}
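For context, a coordinator would typically run this check before dispatching the query. The following is a minimal call-site sketch, not library code; it assumes a permissions field like the one seen in Example 8 and that common.AuthorizationError satisfies the error interface (Example 10 returns an empty one on success).

// Hypothetical call site; the surrounding coordinator fields are assumed.
func (self *CoordinatorImpl) checkQueryPermissions(querySpec *parser.QuerySpec) error {
	user := querySpec.User()
	db := querySpec.Database()
	if ok, authErr := self.permissions.CheckQueryPermissions(user, db, querySpec); !ok {
		return authErr // reject the query with the authorization error
	}
	return nil
}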
Example 11: hasReadAccess
func (self *Shard) hasReadAccess(querySpec *parser.QuerySpec) bool {
	for series := range querySpec.SeriesValuesAndColumns() {
		if _, isRegex := series.GetCompiledRegex(); !isRegex {
			if !querySpec.HasReadAccess(series.Name) {
				return false
			}
		}
	}
	return true
}
Example 12: executeDropSeriesQuery
func (self *LevelDbShard) executeDropSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
	database := querySpec.Database()
	series := querySpec.Query().DropSeriesQuery.GetTableName()
	err := self.dropSeries(database, series)
	if err != nil {
		return err
	}
	self.compact()
	return nil
}
Example 13: expandRegex
func (self *Coordinator) expandRegex(spec *parser.QuerySpec) {
	q := spec.SelectQuery()
	if q == nil {
		return
	}
	f := func(r *regexp.Regexp) []string {
		return self.clusterConfiguration.MetaStore.GetSeriesForDatabaseAndRegex(spec.Database(), r)
	}
	parser.RewriteMergeQuery(q, f)
}
Example 14: GetShardsForQuery
func (self *ClusterConfiguration) GetShardsForQuery(querySpec *parser.QuerySpec) (Shards, error) {
	shards, err := self.getShardsToMatchQuery(querySpec)
	if err != nil {
		return nil, err
	}
	log.Debug("Querying %d shards for query", len(shards))
	shards = self.getShardRange(querySpec, shards)
	if querySpec.IsAscending() {
		SortShardsByTimeAscending(shards)
	}
	return shards, nil
}
Example 15: executeQueryForSeries
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, name string, columns []string, processor engine.Processor) error {
	if querySpec.IsSinglePointQuery() {
		log.Debug("Running single query for series %s", name)
		return self.executeSinglePointQuery(querySpec, name, columns, processor)
	}
	var pi *PointIterator
	var err error
	columns, pi, err = self.getPointIteratorForSeries(querySpec, name, columns)
	if err != nil {
		return err
	}
	defer pi.Close()
	query := querySpec.SelectQuery()
	aliases := query.GetTableAliases(name)
	seriesOutgoing := &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
	for pi.Valid() {
		p := pi.Point()
		seriesOutgoing.Points = append(seriesOutgoing.Points, p)
		if len(seriesOutgoing.Points) >= self.pointBatchSize {
			ok, err := yieldToProcessor(seriesOutgoing, processor, aliases)
			if !ok || err != nil {
				log.Debug("Stopping processing.")
				if err != nil {
					log.Error("Error while processing data: %v", err)
					return err
				}
				return nil
			}
			seriesOutgoing = &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
		}
		pi.Next()
	}
	if err := pi.Error(); err != nil {
		return err
	}
	// Yield remaining data
	if ok, err := yieldToProcessor(seriesOutgoing, processor, aliases); !ok || err != nil {
		log.Debug("Stopping processing remaining points...")
		if err != nil {
			log.Error("Error while processing data: %v", err)
			return err
		}
	}
	log.Debug("Finished running query %s", query.GetQueryString())
	return nil
}