本文整理汇总了Golang中github.com/couchbase/clog.Printf函数的典型用法代码示例。如果您正苦于以下问题:Golang Printf函数的具体用法?Golang Printf怎么用?Golang Printf使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Printf函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: LoadDataDir
// LoadDataDir walks the data dir and registers pindexes for a
// Manager instance.  Entries whose names don't parse as pindex
// paths are skipped; pindexes that fail to open are logged and
// skipped so the rest of the dataDir still loads.
func (mgr *Manager) LoadDataDir() error {
	log.Printf("manager: loading dataDir...")

	dirEntries, err := ioutil.ReadDir(mgr.dataDir)
	if err != nil {
		return fmt.Errorf("manager: could not read dataDir: %s, err: %v",
			mgr.dataDir, err)
	}

	for _, dirInfo := range dirEntries {
		path := mgr.dataDir + string(os.PathSeparator) + dirInfo.Name()
		_, ok := mgr.ParsePIndexPath(path)
		if !ok {
			continue // Skip the entry that doesn't match the naming pattern.
		}

		log.Printf("manager: opening pindex: %s", path)
		pindex, err := OpenPIndex(mgr, path)
		if err != nil {
			log.Printf("manager: could not open pindex: %s, err: %v",
				path, err)
			continue
		}

		// NOTE: this error was previously dropped silently; log it
		// and keep loading the remaining entries (best-effort).
		err = mgr.registerPIndex(pindex)
		if err != nil {
			log.Printf("manager: could not register pindex: %s, err: %v",
				path, err)
		}
	}

	log.Printf("manager: loading dataDir... done")
	return nil
}
示例2: MainUUID
// MainUUID is a helper function for cmd-line tool developers, that
// reuses a previous "baseName.uuid" file from the dataDir if it
// exists, or generates a brand new UUID (and persists it).
func MainUUID(baseName, dataDir string) (string, error) {
	uuid := cbgt.NewUUID()

	uuidPath := dataDir + string(os.PathSeparator) + baseName + ".uuid"

	uuidBuf, err := ioutil.ReadFile(uuidPath)
	if err == nil {
		// Reuse the previously persisted uuid, if non-empty.
		uuid = strings.TrimSpace(string(uuidBuf))
		if uuid == "" {
			return "", fmt.Errorf("error: could not parse uuidPath: %s",
				uuidPath)
		}

		log.Printf("main: manager uuid: %s", uuid)
		log.Printf("main: manager uuid was reloaded")
	} else {
		log.Printf("main: manager uuid: %s", uuid)
		log.Printf("main: manager uuid was generated")
	}

	err = ioutil.WriteFile(uuidPath, []byte(uuid), 0600)
	if err != nil {
		// Include the underlying write error, which was previously
		// dropped from the message.
		return "", fmt.Errorf("error: could not write uuidPath: %s\n"+
			" Please check that your -data/-dataDir parameter (%q)\n"+
			" is to a writable directory where %s can store\n"+
			" index data, err: %v",
			uuidPath, dataDir, baseName, err)
	}

	return uuid, nil
}
示例3: Start
// Start kicks off the TAP feed, unless it is disabled.  The actual
// feeding runs in a background goroutine driven by
// ExponentialBackoffLoop, which retries t.feed() with backoff.
func (t *TAPFeed) Start() error {
	if t.disable {
		log.Printf("feed_tap: disable, name: %s", t.Name())
		return nil
	}

	log.Printf("feed_tap: start, name: %s", t.Name())

	// Substitute defaults for any unset/invalid backoff parameters.
	backoffFactor := t.params.BackoffFactor
	sleepInitMS := t.params.SleepInitMS
	sleepMaxMS := t.params.SleepMaxMS
	if backoffFactor <= 0.0 {
		backoffFactor = FEED_BACKOFF_FACTOR
	}
	if sleepInitMS <= 0 {
		sleepInitMS = FEED_SLEEP_INIT_MS
	}
	if sleepMaxMS <= 0 {
		sleepMaxMS = FEED_SLEEP_MAX_MS
	}

	// One feed attempt; errors are logged and the returned progress
	// lets the backoff loop decide whether to retry.
	feedOnce := func() int {
		progress, err := t.feed()
		if err != nil {
			log.Printf("feed_tap: name: %s, progress: %d, err: %v",
				t.Name(), progress, err)
		}
		return progress
	}

	go ExponentialBackoffLoop(t.Name(), feedOnce,
		sleepInitMS, backoffFactor, sleepMaxMS)

	return nil
}
示例4: GetCurrentTopology
// GetCurrentTopology returns the current topology as a
// service.Topology, obtained via m.ctl.WaitGetTopology (which takes
// the caller's known revision and a cancel channel — presumably it
// blocks until the topology differs from haveTopologyRev; confirm
// against Ctl's implementation).  Warnings and errors from the
// previous activity are folded into rv.Messages.
func (m *CtlMgr) GetCurrentTopology(haveTopologyRev service.Revision,
	cancelCh service.Cancel) (*service.Topology, error) {
	ctlTopology, err :=
		m.ctl.WaitGetTopology(string(haveTopologyRev), cancelCh)
	if err != nil {
		// Cancellation is an expected outcome; only log other errors.
		if err != service.ErrCanceled {
			log.Printf("ctl/manager, GetCurrenTopology, haveTopologyRev: %s,"+
				" err: %v", haveTopologyRev, err)
		}
		return nil, err
	}

	rv := &service.Topology{
		Rev:   service.Revision([]byte(ctlTopology.Rev)),
		Nodes: []service.NodeID{},
	}

	for _, ctlNode := range ctlTopology.MemberNodes {
		rv.Nodes = append(rv.Nodes, service.NodeID(ctlNode.UUID))
	}

	// TODO: Need a proper IsBalanced computation.
	rv.IsBalanced =
		len(ctlTopology.PrevWarnings) <= 0 && len(ctlTopology.PrevErrs) <= 0

	// Surface previous warnings as messages, collapsing the many
	// "could not meet constraints..." variants into one entry per
	// resource via the aggregate set.
	for resourceName, resourceWarnings := range ctlTopology.PrevWarnings {
		aggregate := map[string]bool{}
		for _, resourceWarning := range resourceWarnings {
			if strings.HasPrefix(resourceWarning, "could not meet constraints") {
				aggregate["could not meet replication constraints"] = true
			} else {
				aggregate[resourceWarning] = true
			}
		}

		for resourceWarning := range aggregate {
			rv.Messages = append(rv.Messages,
				fmt.Sprintf("warning: resource: %q -- %s",
					resourceName, resourceWarning))
		}
	}

	for _, err := range ctlTopology.PrevErrs {
		rv.Messages = append(rv.Messages, fmt.Sprintf("error: %v", err))
	}

	// Track the last topology returned so the "changed" log line only
	// fires on an actual change.  Rev is copied into lastTopology
	// before the DeepEqual so revision-only differences don't count.
	m.mu.Lock()
	m.lastTopology.Rev = rv.Rev
	same := reflect.DeepEqual(&m.lastTopology, rv)
	m.lastTopology = *rv
	m.mu.Unlock()

	if !same {
		log.Printf("ctl/manager, GetCurrenTopology, haveTopologyRev: %s,"+
			" changed, rv: %+v", haveTopologyRev, rv)
	}

	return rv, nil
}
示例5: RESTProfileCPU
// RESTProfileCPU starts a CPU profile for the requested number of
// seconds, writing the result to ./run-cpu.pprof.
//
// To start a cpu profiling...
//    curl -X POST http://127.0.0.1:9090/api/runtime/profile/cpu -d secs=5
// To analyze a profiling...
//    go tool pprof [program-binary] run-cpu.pprof
func RESTProfileCPU(w http.ResponseWriter, r *http.Request) {
	secs, err := strconv.Atoi(r.FormValue("secs"))
	if err != nil || secs <= 0 {
		http.Error(w, "incorrect or missing secs parameter", 400)
		return
	}

	fname := "./run-cpu.pprof"
	os.Remove(fname) // Drop any profile left over from a previous run.

	f, err := os.Create(fname)
	if err != nil {
		http.Error(w, fmt.Sprintf("profileCPU:"+
			" couldn't create file: %s, err: %v",
			fname, err), 500)
		return
	}

	log.Printf("profileCPU: start, file: %s", fname)

	err = pprof.StartCPUProfile(f)
	if err != nil {
		f.Close() // Fix: don't leak the file handle on failure.
		http.Error(w, fmt.Sprintf("profileCPU:"+
			" couldn't start CPU profile, file: %s, err: %v",
			fname, err), 500)
		return
	}

	// Stop the profile and close the file after the requested
	// duration; the 204 response returns immediately.
	go func() {
		time.Sleep(time.Duration(secs) * time.Second)
		pprof.StopCPUProfile()
		f.Close()
		log.Printf("profileCPU: end, file: %s", fname)
	}()

	w.WriteHeader(204)
}
示例6: CalcPlan
// CalcPlan splits logical indexes into PIndexes and assigns PIndexes
// to nodes, returning the new PlanPIndexes.  Returns (nil, nil) when
// there are no index or node definitions to plan against.  Planning
// errors on an individual indexDef are logged and that indexDef is
// skipped, so one bad index doesn't abort planning for the rest.
func CalcPlan(mode string, indexDefs *IndexDefs, nodeDefs *NodeDefs,
	planPIndexesPrev *PlanPIndexes, version, server string) (
	*PlanPIndexes, error) {
	// This simple planner assigns at most MaxPartitionsPerPIndex
	// number of partitions onto a PIndex.  And then uses blance to
	// assign the PIndex to 1 or more nodes (based on NumReplicas).
	if indexDefs == nil || nodeDefs == nil {
		return nil, nil
	}

	nodeUUIDsAll, nodeUUIDsToAdd, nodeUUIDsToRemove,
		nodeWeights, nodeHierarchy :=
		CalcNodesLayout(indexDefs, nodeDefs, planPIndexesPrev)

	planPIndexes := NewPlanPIndexes(version)

	// Examine every indexDef...
	for _, indexDef := range indexDefs.IndexDefs {
		// If the plan is frozen, CasePlanFrozen clones the previous
		// plan for this index.
		if CasePlanFrozen(indexDef, planPIndexesPrev, planPIndexes) {
			continue
		}

		// Skip indexDef's with no instantiatable pindexImplType, such
		// as index aliases.
		pindexImplType, exists := PIndexImplTypes[indexDef.Type]
		if !exists ||
			pindexImplType == nil ||
			pindexImplType.New == nil ||
			pindexImplType.Open == nil {
			continue
		}

		// Split each indexDef into 1 or more PlanPIndexes.
		planPIndexesForIndex, err :=
			SplitIndexDefIntoPlanPIndexes(indexDef, server, planPIndexes)
		if err != nil {
			log.Printf("planner: could not SplitIndexDefIntoPlanPIndexes,"+
				" indexDef.Name: %s, server: %s, err: %v",
				indexDef.Name, server, err)
			continue // Keep planning the other IndexDefs.
		}

		// Once we have a 1 or more PlanPIndexes for an IndexDef, use
		// blance to assign the PlanPIndexes to nodes.  Any warnings
		// from blance are recorded on the plan and logged.
		warnings := BlancePlanPIndexes(mode, indexDef,
			planPIndexesForIndex, planPIndexesPrev,
			nodeUUIDsAll, nodeUUIDsToAdd, nodeUUIDsToRemove,
			nodeWeights, nodeHierarchy)
		planPIndexes.Warnings[indexDef.Name] = warnings

		for _, warning := range warnings {
			log.Printf("planner: indexDef.Name: %s,"+
				" PlanNextMap warning: %s", indexDef.Name, warning)
		}
	}

	return planPIndexes, nil
}
示例7: OnFeedError
// OnFeedError handles a feed error callback.  For a couchbase DCP
// feed whose error is BucketNotFoundError, it verifies the source
// bucket is really gone and, if so, deletes all indexes that were
// based on that bucket.
func (meh *MainHandlers) OnFeedError(srcType string, r cbgt.Feed, err error) {
	log.Printf("main: meh.OnFeedError, srcType: %s, err: %v", srcType, err)

	_, isBucketNotFound := err.(*couchbase.BucketNotFoundError)
	if !isBucketNotFound || srcType != "couchbase" || r == nil {
		return
	}

	dcpFeed, isDCPFeed := r.(*cbgt.DCPFeed)
	if !isDCPFeed {
		return
	}

	// Double-check with the cluster that the bucket no longer exists
	// before taking the destructive index-deletion step.
	gone, err := dcpFeed.VerifyBucketNotExists()
	log.Printf("main: meh.OnFeedError, VerifyBucketNotExists,"+
		" srcType: %s, gone: %t, err: %v", srcType, gone, err)
	if !gone {
		return
	}

	bucketName, bucketUUID := dcpFeed.GetBucketDetails()
	if bucketName == "" {
		return
	}

	log.Printf("main: meh.OnFeedError, DeleteAllIndexFromSource,"+
		" srcType: %s, bucketName: %s, bucketUUID: %s",
		srcType, bucketName, bucketUUID)

	meh.mgr.DeleteAllIndexFromSource(srcType, bucketName, bucketUUID)
}
示例8: main
// main drives this cmd-line tool: parse flags, connect to the cfg,
// optionally run the rebalance step, then run the planner steps.
// log.Fatalf exits the process, so the `return`s after it are
// defensive/unreachable.
func main() {
	flag.Parse()

	if flags.Help {
		flag.Usage()
		os.Exit(2)
	}

	if flags.Version {
		fmt.Printf("%s main: %s, data: %s\n",
			path.Base(os.Args[0]), cbgt.VERSION, cbgt.VERSION)
		os.Exit(0)
	}

	cmd.MainCommon(cbgt.VERSION, flagAliases)

	cfg, err := cmd.MainCfgClient(path.Base(os.Args[0]), flags.CfgConnect)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	if flags.IndexTypes != "" {
		cmd.RegisterIndexTypes(strings.Split(flags.IndexTypes, ","))
	}

	// nil means "no nodes to remove".
	nodesToRemove := []string(nil)
	if len(flags.RemoveNodes) > 0 {
		nodesToRemove = strings.Split(flags.RemoveNodes, ",")
	}

	// A nil steps map means "run all steps" (see the checks below).
	var steps map[string]bool
	if flags.Steps != "" {
		steps = cbgt.StringsToMap(strings.Split(flags.Steps, ","))
	}

	// ------------------------------------------------

	if steps == nil || steps["rebalance"] {
		log.Printf("main: step rebalance")

		err := runRebalance(cfg, flags.Server, nodesToRemove,
			flags.FavorMinNodes, flags.DryRun, flags.Verbose)
		if err != nil {
			log.Fatalf("%v", err)
			return
		}
	}

	// ------------------------------------------------

	err = cmd.PlannerSteps(steps, cfg, cbgt.VERSION,
		flags.Server, nodesToRemove, flags.DryRun)
	if err != nil {
		log.Fatalf("%v", err)
		return
	}

	log.Printf("main: done")
}
示例9: LogFlags
// LogFlags logs the values of all flags that have registered
// aliases, plus the current GOMAXPROCS setting.
func LogFlags(flagAliases map[string][]string) {
	flag.VisitAll(func(f *flag.Flag) {
		if flagAliases[f.Name] != nil {
			// Fix: dropped the trailing "\n", which the sibling
			// Printf call below doesn't have either.
			log.Printf(" -%s=%q", f.Name, f.Value)
		}
	})

	log.Printf(" GOMAXPROCS=%d", runtime.GOMAXPROCS(-1))
}
示例10: StartTopologyChange
// StartTopologyChange begins a topology change (e.g., rebalance).
// It conflicts-out if the caller's topology revision is stale or if
// a rebalance task is already running; otherwise it converts any
// "prepared" task into a started topology-change task.
func (m *CtlMgr) StartTopologyChange(change service.TopologyChange) error {
	log.Printf("ctl/manager, StartTopologyChange, change: %v", change)

	m.mu.Lock()
	defer m.mu.Unlock()

	// Possible for caller to not care about current topology, but
	// just wants to impose or force a topology change.
	if len(change.CurrentTopologyRev) > 0 &&
		string(change.CurrentTopologyRev) != m.ctl.GetTopology().Rev {
		log.Printf("ctl/manager, StartTopologyChange, rev check, err: %v",
			service.ErrConflict)
		return service.ErrConflict
	}

	var err error

	started := false

	// Rebuild the task-handle list: an in-flight rebalance is a
	// conflict; a prepared task is replaced (via th) with the newly
	// started topology-change task handle.
	var taskHandlesNext []*taskHandle

	for _, th := range m.tasks.taskHandles {
		if th.task.Type == service.TaskTypeRebalance {
			log.Printf("ctl/manager, StartTopologyChange,"+
				" task rebalance check, err: %v",
				service.ErrConflict)
			return service.ErrConflict
		}

		if th.task.Type == service.TaskTypePrepared {
			// NOTE: th is reassigned here, so the appended handle
			// below is the new task, not the prepared one.
			th, err = m.startTopologyChangeTaskHandleLOCKED(change)
			if err != nil {
				log.Printf("ctl/manager, StartTopologyChange,"+
					" prepared, err: %v", err)
				return err
			}

			started = true
		}

		taskHandlesNext = append(taskHandlesNext, th)
	}

	if !started {
		return service.ErrNotFound
	}

	m.updateTasksLOCKED(func(s *tasks) {
		s.taskHandles = taskHandlesNext
	})

	log.Printf("ctl/manager, StartTopologyChange, started")

	return nil
}
示例11: JanitorLoop
// JanitorLoop is the main loop for the janitor.  A helper goroutine
// subscribes to cfg changes (plan pindexes and wanted node defs) and
// turns each cfg event into a janitor kick.  The loop itself services
// janitorCh work items until mgr.stopCh is closed, replying on each
// item's resCh (when non-nil) with any error before closing it.
func (mgr *Manager) JanitorLoop() {
	if mgr.cfg != nil { // Might be nil for testing.
		go func() {
			ec := make(chan CfgEvent)
			mgr.cfg.Subscribe(PLAN_PINDEXES_KEY, ec)
			mgr.cfg.Subscribe(CfgNodeDefsKey(NODE_DEFS_WANTED), ec)
			for {
				select {
				case <-mgr.stopCh:
					return
				case e := <-ec:
					atomic.AddUint64(&mgr.stats.TotJanitorSubscriptionEvent, 1)
					mgr.JanitorKick("cfg changed, key: " + e.Key)
				}
			}
		}()
	}

	for {
		select {
		case <-mgr.stopCh:
			return
		case m := <-mgr.janitorCh:
			log.Printf("janitor: awakes, reason: %s", m.msg)

			var err error
			if m.op == WORK_KICK {
				atomic.AddUint64(&mgr.stats.TotJanitorKickStart, 1)
				err = mgr.JanitorOnce(m.msg)
				if err != nil {
					// Keep looping as perhaps it's a transient issue.
					// TODO: Perhaps need a rescheduled janitor kick.
					log.Printf("janitor: JanitorOnce, err: %v", err)
					atomic.AddUint64(&mgr.stats.TotJanitorKickErr, 1)
				} else {
					atomic.AddUint64(&mgr.stats.TotJanitorKickOk, 1)
				}
			} else if m.op == WORK_NOOP {
				atomic.AddUint64(&mgr.stats.TotJanitorNOOPOk, 1)
			} else if m.op == JANITOR_CLOSE_PINDEX {
				// false: close without removing the pindex's files.
				mgr.stopPIndex(m.obj.(*PIndex), false)
				atomic.AddUint64(&mgr.stats.TotJanitorClosePIndex, 1)
			} else if m.op == JANITOR_REMOVE_PINDEX {
				// true: close and remove the pindex's files.
				mgr.stopPIndex(m.obj.(*PIndex), true)
				atomic.AddUint64(&mgr.stats.TotJanitorRemovePIndex, 1)
			} else {
				err = fmt.Errorf("janitor: unknown op: %s, m: %#v", m.op, m)
				atomic.AddUint64(&mgr.stats.TotJanitorUnknownErr, 1)
			}

			// Reply to a synchronous requester, if any; closing resCh
			// signals completion even when there was no error.
			if m.resCh != nil {
				if err != nil {
					m.resCh <- err
				}
				close(m.resCh)
			}
		}
	}
}
示例12: Start
// Start kicks off the DCP feed by delegating to the underlying
// bucket datasource, unless the feed is disabled.
func (t *DCPFeed) Start() error {
	if t.disable {
		log.Printf("feed_dcp: disable, name: %s", t.Name())
		return nil
	}

	name := t.Name()
	log.Printf("feed_dcp: start, name: %s", name)

	return t.bds.Start()
}
示例13: DumpOnSignal
// DumpOnSignal writes goroutine and heap profiles to stderr each
// time one of the given signals is received.  It loops forever, so
// it is intended to be run in its own goroutine.
func DumpOnSignal(signals ...os.Signal) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, signals...)

	// Idiom fix: "for range" instead of "for _ = range"
	// (gofmt -s simplification).
	for range c {
		log.Printf("dump: goroutine...")
		pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
		log.Printf("dump: heap...")
		pprof.Lookup("heap").WriteTo(os.Stderr, 1)
	}
}
示例14: startPIndex
// startPIndex opens (or creates) and registers the pindex described
// by planPIndex.  An existing path is reopened first; if opening
// fails or the on-disk pindex doesn't match the plan, the path is
// cleaned up and a fresh pindex is created via NewPIndex.
func (mgr *Manager) startPIndex(planPIndex *PlanPIndex) error {
	var pindex *PIndex
	var err error

	path := mgr.PIndexPath(planPIndex.Name)

	// First, try reading the path with OpenPIndex().  An
	// existing path might happen during a case of rollback.
	_, err = os.Stat(path)
	if err == nil {
		pindex, err = OpenPIndex(mgr, path)
		if err != nil {
			log.Printf("janitor: startPIndex, OpenPIndex error,"+
				" cleaning up and trying NewPIndex,"+
				" path: %s, err: %v", path, err)
			os.RemoveAll(path)
		} else {
			if !PIndexMatchesPlan(pindex, planPIndex) {
				// Fix: err is nil on this branch, so the previous
				// ", err: %v" in the log line was always nil noise.
				log.Printf("janitor: startPIndex, pindex does not match plan,"+
					" cleaning up and trying NewPIndex, path: %s",
					path)
				pindex.Close(true)
				pindex = nil
			}
		}
	}

	if pindex == nil {
		pindex, err = NewPIndex(mgr, planPIndex.Name, NewUUID(),
			planPIndex.IndexType,
			planPIndex.IndexName,
			planPIndex.IndexUUID,
			planPIndex.IndexParams,
			planPIndex.SourceType,
			planPIndex.SourceName,
			planPIndex.SourceUUID,
			planPIndex.SourceParams,
			planPIndex.SourcePartitions,
			path)
		if err != nil {
			return fmt.Errorf("janitor: NewPIndex, name: %s, err: %v",
				planPIndex.Name, err)
		}
	}

	err = mgr.registerPIndex(pindex)
	if err != nil {
		pindex.Close(true)
		return err
	}

	return nil
}
示例15: MainWelcome
// MainWelcome logs startup information: the aliased flag values and
// the registered bleve KVStore types and instances.
func MainWelcome(flagAliases map[string][]string) {
	cmd.LogFlags(flagAliases)

	log.Printf("main: registered bleve stores")
	types, instances := bleveRegistry.KVStoreTypesAndInstances()

	// Log types first, then instances, one per line.
	for _, name := range append(append([]string(nil), types...), instances...) {
		log.Printf("  %s", name)
	}
}