

Golang config.SystemConfig Code Examples

This article collects typical usage examples of the Go type config.SystemConfig from github.com/cockroachdb/cockroach. If you are wondering what SystemConfig is for, or how it is used in practice, the curated examples below may help.


A total of 15 code examples of the SystemConfig type are shown below, sorted by popularity by default.

Example 1: shouldQueue

// shouldQueue determines whether a replica should be queued for garbage
// collection, and if so, at what priority. Returns true for shouldQ
// in the event that the cumulative ages of GC'able bytes or extant
// intents exceed thresholds.
func (*gcQueue) shouldQueue(now hlc.Timestamp, repl *Replica,
	sysCfg config.SystemConfig) (shouldQ bool, priority float64) {
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Errorf(context.TODO(), "could not find zone config for range %s: %s", repl, err)
		return
	}

	ms := repl.GetMVCCStats()
	// GC score is the total GC'able bytes age normalized by 1 MB * the replica's TTL in seconds.
	gcScore := float64(ms.GCByteAge(now.WallTime)) / float64(zone.GC.TTLSeconds) / float64(gcByteCountNormalization)

	// Intent score. This computes the average age of outstanding intents
	// and normalizes.
	intentScore := ms.AvgIntentAge(now.WallTime) / float64(intentAgeNormalization.Nanoseconds()/1E9)

	// Compute priority.
	if gcScore >= considerThreshold {
		priority += gcScore
	}
	if intentScore >= considerThreshold {
		priority += intentScore
	}
	shouldQ = priority > 0
	return
}
Contributor: yangxuanjia, Project: cockroach, Lines: 31, Source: gc_queue.go
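
To make the scoring comment in the example above concrete, here is a small, self-contained sketch of the same GC-score arithmetic. The input values and the 24-hour TTL are illustrative assumptions for this sketch, not values read from a real zone config; only the normalization formula mirrors shouldQueue.

package main

import "fmt"

func main() {
	// Illustrative inputs (assumed for this sketch, not taken from a cluster):
	// gcByteAge mirrors ms.GCByteAge(now.WallTime) in byte-seconds,
	// ttlSeconds mirrors zone.GC.TTLSeconds, and the 1 MB constant mirrors
	// gcByteCountNormalization.
	const (
		gcByteAge     = 2 << 40 // byte-seconds of GC'able data (illustrative)
		ttlSeconds    = 86400   // assumed 24h TTL
		normalization = 1 << 20 // 1 MB
	)

	// Same normalization as in shouldQueue: byte-age / TTL / 1 MB.
	gcScore := float64(gcByteAge) / float64(ttlSeconds) / float64(normalization)
	fmt.Printf("gcScore = %.2f\n", gcScore) // prints a score of roughly 24.27
}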

Example 2: shouldQueue

func (rq *replicateQueue) shouldQueue(now roachpb.Timestamp, repl *Replica,
	sysCfg config.SystemConfig) (shouldQ bool, priority float64) {

	if repl.needsSplitBySize() {
		// If the range exceeds the split threshold, let that finish
		// first. Ranges must fit in memory on both sender and receiver
		// nodes while being replicated. This supplements the check
		// provided by acceptsUnsplitRanges, which looks at zone config
		// boundaries rather than data size.
		return
	}

	// Find the zone config for this range.
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	action, priority := rq.allocator.ComputeAction(*zone, desc)
	if action != AllocatorNoop {
		return true, priority
	}
	// See if there is a rebalancing opportunity present.
	shouldRebalance := rq.allocator.ShouldRebalance(repl.store.StoreID())
	return shouldRebalance, 0
}
Contributor: GitGoldie, Project: cockroach, Lines: 28, Source: replicate_queue.go

Example 3: GetZoneConfig

// GetZoneConfig returns the zone config for the object with 'id'.
func GetZoneConfig(cfg config.SystemConfig, id uint32) (config.ZoneConfig, bool, error) {
	// Look in the zones table.
	if zoneVal := cfg.GetValue(sqlbase.MakeZoneKey(sqlbase.ID(id))); zoneVal != nil {
		var zone config.ZoneConfig
		// We're done.
		return zone, true, zoneVal.GetProto(&zone)
	}

	// No zone config for this ID. We need to figure out if it's a database
	// or table. Lookup its descriptor.
	if descVal := cfg.GetValue(sqlbase.MakeDescMetadataKey(sqlbase.ID(id))); descVal != nil {
		// Determine whether this is a database or table.
		var desc sqlbase.Descriptor
		if err := descVal.GetProto(&desc); err != nil {
			return config.ZoneConfig{}, false, err
		}
		if tableDesc := desc.GetTable(); tableDesc != nil {
			// This is a table descriptor. Lookup its parent database zone config.
			return GetZoneConfig(cfg, uint32(tableDesc.ParentID))
		}
	}

	// Retrieve the default zone config, but only as long as that wasn't the ID
	// we were trying to retrieve (avoid infinite recursion).
	if id != keys.RootNamespaceID {
		return GetZoneConfig(cfg, keys.RootNamespaceID)
	}

	// No descriptor or not a table.
	return config.ZoneConfig{}, false, nil
}
Contributor: yangxuanjia, Project: cockroach, Lines: 32, Source: config.go
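
As a hedged usage sketch for the function above: a caller typically obtains a SystemConfig from gossip, as in example 14 below, and then resolves the zone for an object ID. The helper name, its placement in the same package as GetZoneConfig, and the fallback behavior in the comments are assumptions for illustration, not part of the original source.

// resolveZone is an illustrative helper, not part of the original example.
// It assumes it lives in the same package as GetZoneConfig and that cfg was
// obtained elsewhere, e.g. via s.Gossip().GetSystemConfig() as in example 14.
func resolveZone(cfg config.SystemConfig, objectID uint32) (config.ZoneConfig, error) {
	zone, found, err := GetZoneConfig(cfg, objectID)
	if err != nil {
		return config.ZoneConfig{}, err
	}
	if !found {
		// Neither the object nor the root namespace carried an explicit zone
		// entry; a caller would typically substitute its default zone config.
		return config.ZoneConfig{}, nil
	}
	// Fields such as zone.GC.TTLSeconds and zone.RangeMaxBytes (used by the
	// GC and split queues in the other examples) are now available.
	return zone, nil
}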

Example 4: shouldQueue

func (rq *replicateQueue) shouldQueue(
	now hlc.Timestamp,
	repl *Replica,
	sysCfg config.SystemConfig,
) (shouldQ bool, priority float64) {
	if !repl.store.splitQueue.Disabled() && repl.needsSplitBySize() {
		// If the range exceeds the split threshold, let that finish first.
		// Ranges must fit in memory on both sender and receiver nodes while
		// being replicated. This supplements the check provided by
		// acceptsUnsplitRanges, which looks at zone config boundaries rather
		// than data size.
		//
		// This check is ignored if the split queue is disabled, since in that
		// case, the split will never come.
		return
	}

	// Find the zone config for this range.
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	action, priority := rq.allocator.ComputeAction(*zone, desc)
	if action != AllocatorNoop {
		return true, priority
	}
	// See if there is a rebalancing opportunity present.
	shouldRebalance := rq.allocator.ShouldRebalance(repl.store.StoreID())
	return shouldRebalance, 0
}
Contributor: YuleiXiao, Project: cockroach, Lines: 33, Source: replicate_queue.go

Example 5: shouldQueue

// shouldQueue determines whether a replica should be queued for garbage
// collection, and if so, at what priority. Returns true for shouldQ
// in the event that the cumulative ages of GC'able bytes or extant
// intents exceed thresholds.
func (gcq *gcQueue) shouldQueue(now roachpb.Timestamp, repl *Replica,
	sysCfg *config.SystemConfig) (shouldQ bool, priority float64) {

	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Errorf("could not find GC policy for range %s: %s", repl, err)
		return
	}
	policy := zone.GC

	// GC score is the total GC'able bytes age normalized by 1 MB * the replica's TTL in seconds.
	gcScore := float64(repl.stats.GetGCBytesAge(now.WallTime)) / float64(policy.TTLSeconds) / float64(gcByteCountNormalization)

	// Intent score. This computes the average age of outstanding intents
	// and normalizes.
	intentScore := repl.stats.GetAvgIntentAge(now.WallTime) / float64(intentAgeNormalization.Nanoseconds()/1E9)

	// Compute priority.
	if gcScore > 1 {
		priority += gcScore
	}
	if intentScore > 1 {
		priority += intentScore
	}
	shouldQ = priority > 0
	return
}
Contributor: GokulSrinivas, Project: cockroach, Lines: 32, Source: gc_queue.go

Example 6: GetZoneConfig

// GetZoneConfig returns the zone config for the object with 'id'.
func GetZoneConfig(cfg config.SystemConfig, id uint32) (*config.ZoneConfig, error) {
	// Look in the zones table.
	if zoneVal := cfg.GetValue(MakeZoneKey(ID(id))); zoneVal != nil {
		zone := &config.ZoneConfig{}
		if err := zoneVal.GetProto(zone); err != nil {
			return nil, err
		}
		// We're done.
		return zone, nil
	}

	// No zone config for this ID. We need to figure out if it's a database
	// or table. Lookup its descriptor.
	if descVal := cfg.GetValue(MakeDescMetadataKey(ID(id))); descVal != nil {
		// Determine whether this is a database or table.
		desc := &Descriptor{}
		if err := descVal.GetProto(desc); err != nil {
			return nil, err
		}
		if tableDesc := desc.GetTable(); tableDesc != nil {
			// This is a table descriptor. Lookup its parent database zone config.
			return GetZoneConfig(cfg, uint32(tableDesc.ParentID))
		}
	}

	// No descriptor or not a table. This table/db could have been deleted, just
	// return the default config.
	return config.DefaultZoneConfig, nil
}
Contributor: mbertschler, Project: cockroach, Lines: 30, Source: config.go

Example 7: process

// process synchronously invokes admin split for each proposed split key.
func (sq *splitQueue) process(now roachpb.Timestamp, rng *Replica,
	sysCfg *config.SystemConfig) error {

	// First handle case of splitting due to zone config maps.
	desc := rng.Desc()
	splitKeys := sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)
	if len(splitKeys) > 0 {
		log.Infof("splitting %s at keys %v", rng, splitKeys)
		for _, splitKey := range splitKeys {
			if err := sq.db.AdminSplit(splitKey.AsRawKey()); err != nil {
				return util.Errorf("unable to split %s at key %q: %s", rng, splitKey, err)
			}
		}
		return nil
	}

	// Next handle case of splitting due to size.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return err
	}
	// FIXME: why is this implementation not the same as the one above?
	if float64(rng.stats.GetSize())/float64(zone.RangeMaxBytes) > 1 {
		log.Infof("splitting %s size=%d max=%d", rng, rng.stats.GetSize(), zone.RangeMaxBytes)
		if _, pErr := client.SendWrapped(rng, rng.context(), &roachpb.AdminSplitRequest{
			Span: roachpb.Span{Key: desc.StartKey.AsRawKey()},
		}); pErr != nil {
			return pErr.GoError()
		}
	}
	return nil
}
Contributor: billhongs, Project: cockroach, Lines: 33, Source: split_queue.go

Example 8: requiresSplit

func (bq *baseQueue) requiresSplit(cfg config.SystemConfig, repl *Replica) bool {
	// If there's no store (as is the case in some narrow unit tests), or if
	// the store's split queue is disabled, the "required" split will never
	// come. In that case, pretend we don't require the split.
	if store := repl.store; store == nil || store.splitQueue.Disabled() {
		return false
	}
	desc := repl.Desc()
	return !bq.acceptsUnsplitRanges && cfg.NeedsSplit(desc.StartKey, desc.EndKey)
}
Contributor: YuleiXiao, Project: cockroach, Lines: 10, Source: queue.go

Example 9: GetTableDesc

// GetTableDesc returns the table descriptor for the table with 'id'.
// Returns nil if the descriptor is not present, or is present but is not a
// table.
func GetTableDesc(cfg config.SystemConfig, id sqlbase.ID) (*sqlbase.TableDescriptor, error) {
	if descVal := cfg.GetValue(sqlbase.MakeDescMetadataKey(id)); descVal != nil {
		desc := &sqlbase.Descriptor{}
		if err := descVal.GetProto(desc); err != nil {
			return nil, err
		}
		return desc.GetTable(), nil
	}
	return nil, nil
}
Contributor: yangxuanjia, Project: cockroach, Lines: 13, Source: config.go
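
A brief usage sketch for GetTableDesc (illustrative only; the wrapper name and error message are assumptions): since the function returns nil, nil both when no descriptor exists and when the ID names a database rather than a table, callers have to check for a nil result explicitly.

// lookupTableDesc is an illustrative wrapper, assumed to live in the same
// package as GetTableDesc above; it is not part of the original example.
func lookupTableDesc(cfg config.SystemConfig, id sqlbase.ID) (*sqlbase.TableDescriptor, error) {
	tableDesc, err := GetTableDesc(cfg, id)
	if err != nil {
		return nil, err
	}
	if tableDesc == nil {
		// GetTableDesc returns (nil, nil) both when the descriptor is missing
		// and when the ID refers to a database, so distinguish that case here.
		return nil, errors.Errorf("no table descriptor found for id %d", id)
	}
	return tableDesc, nil
}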

Example 10: isDeleted

func isDeleted(tableID sqlbase.ID, cfg config.SystemConfig) bool {
	descKey := sqlbase.MakeDescMetadataKey(tableID)
	val := cfg.GetValue(descKey)
	if val == nil {
		return false
	}
	var descriptor sqlbase.Descriptor
	if err := val.GetProto(&descriptor); err != nil {
		panic("unable to unmarshal table descriptor")
	}
	table := descriptor.GetTable()
	return table.Deleted()
}
Contributor: JKhawaja, Project: cockroach, Lines: 13, Source: lease_test.go

Example 11: expectDescriptor

func expectDescriptor(systemConfig config.SystemConfig, idKey roachpb.Key, desc *Descriptor) error {
	descValue := systemConfig.GetValue(idKey)
	if descValue == nil {
		return errStaleMetadata
	}
	var cachedDesc Descriptor
	if err := descValue.GetProto(&cachedDesc); err != nil {
		return err
	}
	if !proto.Equal(&cachedDesc, desc) {
		return errStaleMetadata
	}
	return nil
}
Contributor: mbertschler, Project: cockroach, Lines: 14, Source: verify.go

Example 12: expectDescriptorID

func expectDescriptorID(systemConfig config.SystemConfig, idKey roachpb.Key, id ID) error {
	idValue := systemConfig.GetValue(idKey)
	if idValue == nil {
		return errStaleMetadata
	}
	cachedID, err := idValue.GetInt()
	if err != nil {
		return err
	}
	if ID(cachedID) != id {
		return errStaleMetadata
	}
	return nil
}
Contributor: mbertschler, Project: cockroach, Lines: 14, Source: verify.go

Example 13: process

// process iterates through all keys in a replica's range, calling the garbage
// collector for each key and associated set of values. GC'd keys are batched
// into GC calls. Extant intents are resolved if intents are older than
// intentAgeThreshold. The transaction and abort cache records are also
// scanned and old entries evicted. During normal operation, both of these
// records are cleaned up when their respective transaction finishes, so the
// amount of work done here is expected to be small.
//
// Some care needs to be taken to avoid cyclic recreation of entries during GC:
// * a Push initiated due to an intent may recreate a transaction entry
// * resolving an intent may write a new abort cache entry
// * obtaining the transaction for a abort cache entry requires a Push
//
// The following order is taken below:
// 1) collect all intents with sufficiently old txn record
// 2) collect these intents' transactions
// 3) scan the transaction table, collecting abandoned or completed txns
// 4) push all of these transactions (possibly recreating entries)
// 5) resolve all intents (unless the txn is still PENDING), which will recreate
//    abort cache entries (but with the txn timestamp; i.e. likely gc'able)
// 6) scan the abort cache table for old entries
// 7) push these transactions (again, recreating txn entries).
// 8) send a GCRequest.
func (gcq *gcQueue) process(
	ctx context.Context,
	now hlc.Timestamp,
	repl *Replica,
	sysCfg config.SystemConfig,
) error {
	snap := repl.store.Engine().NewSnapshot()
	desc := repl.Desc()
	defer snap.Close()

	// Lookup the GC policy for the zone containing this key range.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("could not find zone config for range %s: %s", repl, err)
	}

	gcKeys, info, err := RunGC(ctx, desc, snap, now, zone.GC,
		func(now hlc.Timestamp, txn *roachpb.Transaction, typ roachpb.PushTxnType) {
			pushTxn(gcq.store.DB(), now, txn, typ)
		},
		func(intents []roachpb.Intent, poison bool, wait bool) error {
			return repl.store.intentResolver.resolveIntents(ctx, intents, poison, wait)
		})

	if err != nil {
		return err
	}

	gcq.eventLog.VInfof(true, "completed with stats %+v", info)

	var ba roachpb.BatchRequest
	var gcArgs roachpb.GCRequest
	// TODO(tschottdorf): This is one of these instances in which we want
	// to be more careful that the request ends up on the correct Replica,
	// and we might have to worry about mixing range-local and global keys
	// in a batch which might end up spanning Ranges by the time it executes.
	gcArgs.Key = desc.StartKey.AsRawKey()
	gcArgs.EndKey = desc.EndKey.AsRawKey()
	gcArgs.Keys = gcKeys
	gcArgs.Threshold = info.Threshold

	// Technically not needed since we're talking directly to the Range.
	ba.RangeID = desc.RangeID
	ba.Timestamp = now
	ba.Add(&gcArgs)
	if _, pErr := repl.Send(ctx, ba); pErr != nil {
		return pErr.GoError()
	}
	return nil
}
Contributor: yangxuanjia, Project: cockroach, Lines: 73, Source: gc_queue.go

Example 14: waitForConfigChange

func waitForConfigChange(t *testing.T, s *server.TestServer) (*config.SystemConfig, error) {
	var foundDesc sql.DatabaseDescriptor
	var cfg *config.SystemConfig
	return cfg, util.IsTrueWithin(func() bool {
		if cfg = s.Gossip().GetSystemConfig(); cfg != nil {
			if val := cfg.GetValue(configDescKey); val != nil {
				if err := val.GetProto(&foundDesc); err != nil {
					t.Fatal(err)
				}
				return foundDesc.ID == configID
			}
		}

		return false
	}, 10*time.Second)
}
Contributor: JonathanHub, Project: cockroach, Lines: 16, Source: descriptor_cache_test.go

Example 15: TestGet

func TestGet(t *testing.T) {
	defer leaktest.AfterTest(t)

	emptyKeys := []proto.KeyValue{}
	someKeys := []proto.KeyValue{
		plainKV("a", "vala"),
		plainKV("c", "valc"),
		plainKV("d", "vald"),
	}

	testCases := []struct {
		values []proto.KeyValue
		key    string
		found  bool
		value  string
	}{
		{emptyKeys, "a", false, ""},
		{emptyKeys, "b", false, ""},
		{emptyKeys, "c", false, ""},
		{emptyKeys, "d", false, ""},
		{emptyKeys, "e", false, ""},

		{someKeys, "", false, ""},
		{someKeys, "b", false, ""},
		{someKeys, "e", false, ""},
		{someKeys, "a0", false, ""},

		{someKeys, "a", true, "vala"},
		{someKeys, "c", true, "valc"},
		{someKeys, "d", true, "vald"},
	}

	cfg := config.SystemConfig{}
	for tcNum, tc := range testCases {
		cfg.Values = tc.values
		val, found := cfg.GetValue([]byte(tc.key))
		if found != tc.found {
			t.Errorf("#%d: expected found=%t", tcNum, tc.found)
			continue
		}
		if string(val) != tc.value {
			t.Errorf("#%d: expected value=%s, found %s", tcNum, tc.value, string(val))
		}
	}
}
Contributor: kumarh1982, Project: cockroach, Lines: 45, Source: config_test.go


Note: The github.com/cockroachdb/cockroach/config.SystemConfig examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and the source code remains under the original authors' copyright; consult each project's license before distributing or using it. Do not reproduce this article without permission.