This page collects typical usage examples of the Golang function github.com/influxdata/influxdb/services/meta.NewClient. If you are wondering what exactly NewClient does, how to call it, or what it looks like in real code, the hand-picked examples below may help.
A total of 15 code examples of the NewClient function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
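Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: build a meta.Config, construct the client with meta.NewClient, open it, and close it when done. The single-argument form shown here matches the single-node OSS releases used in Examples 1, 5, 14 and 15; the data directory is a placeholder, and the release mapping is an assumption rather than something stated in the examples.

package main

import (
	"log"

	"github.com/influxdata/influxdb/services/meta"
)

func main() {
	// Build a default meta config and point it at a local directory
	// (the path is a placeholder for illustration).
	cfg := meta.NewConfig()
	cfg.Dir = "/tmp/influxdb-meta"

	// Construct and open the client; Open loads or creates the local meta store.
	c := meta.NewClient(cfg)
	if err := c.Open(); err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// The client now exposes cluster metadata, e.g. the persisted cluster ID
	// that Examples 1 and 7 assert on.
	log.Printf("cluster id: %d", c.ClusterID())
}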
Example 1: TestMetaClient_PersistClusterIDAfterRestart
func TestMetaClient_PersistClusterIDAfterRestart(t *testing.T) {
t.Parallel()
cfg := newConfig()
defer os.RemoveAll(cfg.Dir)
c := meta.NewClient(cfg)
if err := c.Open(); err != nil {
t.Fatal(err)
}
id := c.ClusterID()
if id == 0 {
t.Fatal("cluster ID can't be zero")
}
c = meta.NewClient(cfg)
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
idAfter := c.ClusterID()
if idAfter == 0 {
t.Fatal("cluster ID can't be zero")
} else if idAfter != id {
t.Fatalf("cluster id not the same: %d, %d", idAfter, id)
}
}
Example 2: TestMetaService_NameChangeSingleNode
// Ensures that everything works after a host name change. This is
// skipped by default. To enable it, add the hosts foobar and asdf to your
// /etc/hosts file and point them to 127.0.0.1.
func TestMetaService_NameChangeSingleNode(t *testing.T) {
t.Skip("not enabled")
t.Parallel()
cfg := newConfig()
defer os.RemoveAll(cfg.Dir)
cfg.BindAddress = "foobar:0"
cfg.HTTPBindAddress = "foobar:0"
s := newService(cfg)
if err := s.Open(); err != nil {
t.Fatal(err)
}
defer s.Close()
c := meta.NewClient()
c.SetMetaServers([]string{s.HTTPAddr()})
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
if _, err := c.CreateDatabase("foo"); err != nil {
t.Fatal(err)
}
s.Close()
time.Sleep(time.Second)
cfg.BindAddress = "asdf" + ":" + strings.Split(s.RaftAddr(), ":")[1]
cfg.HTTPBindAddress = "asdf" + ":" + strings.Split(s.HTTPAddr(), ":")[1]
s = newService(cfg)
if err := s.Open(); err != nil {
t.Fatal(err)
}
defer s.Close()
c2 := meta.NewClient()
c2.SetMetaServers([]string{s.HTTPAddr()})
if err := c2.Open(); err != nil {
t.Fatal(err)
}
defer c2.Close()
db, err := c2.Database("foo")
if db == nil || err != nil {
t.Fatal(err)
}
nodes, err := c2.MetaNodes()
if err != nil {
t.Fatal(err)
}
exp := []meta.NodeInfo{{ID: 1, Host: cfg.HTTPBindAddress, TCPHost: cfg.BindAddress}}
time.Sleep(10 * time.Second)
if !reflect.DeepEqual(nodes, exp) {
t.Fatalf("nodes don't match: %v", nodes)
}
}
Example 3: initializeMetaClient
// initializeMetaClient will set the MetaClient and join the node to the cluster if needed
func (s *Server) initializeMetaClient() error {
// if the node ID is > 0 then we just need to initialize the metaclient
if s.Node.ID > 0 {
s.MetaClient = meta.NewClient(s.Node.MetaServers, s.metaUseTLS)
if err := s.MetaClient.Open(); err != nil {
return err
}
go s.updateMetaNodeInformation()
s.MetaClient.WaitForDataChanged()
return nil
}
// It's the first time starting up and we need to either join
// the cluster or initialize this node as the first member
if len(s.joinPeers) == 0 {
// start up a new single node cluster
if s.MetaService == nil {
return fmt.Errorf("server not set to join existing cluster must run also as a meta node")
}
s.MetaClient = meta.NewClient([]string{s.MetaService.HTTPAddr()}, s.metaUseTLS)
} else {
// join this node to the cluster
s.MetaClient = meta.NewClient(s.joinPeers, s.metaUseTLS)
}
if err := s.MetaClient.Open(); err != nil {
return err
}
n, err := s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
for err != nil {
log.Printf("Unable to create data node. retry in 1s: %s", err.Error())
time.Sleep(time.Second)
n, err = s.MetaClient.CreateDataNode(s.httpAPIAddr, s.tcpAddr)
}
s.Node.ID = n.ID
metaNodes, err := s.MetaClient.MetaNodes()
if err != nil {
return err
}
for _, n := range metaNodes {
s.Node.AddMetaServers([]string{n.Host})
}
if err := s.Node.Save(); err != nil {
return err
}
go s.updateMetaNodeInformation()
return nil
}
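The part of Example 3 readers most often lift out is the registration loop: CreateDataNode is retried once per second until the meta service accepts the node, and only then is the returned ID persisted. A hedged sketch of that loop in isolation follows; it assumes the clustered-era client used in the example and imports of log, time and the meta package, and the function name is illustrative rather than part of the InfluxDB code.

// registerDataNode retries CreateDataNode once per second until the meta
// service accepts the registration, mirroring the loop in Example 3, and
// returns the ID assigned to this data node.
func registerDataNode(c *meta.Client, httpAddr, tcpAddr string) uint64 {
	n, err := c.CreateDataNode(httpAddr, tcpAddr)
	for err != nil {
		log.Printf("unable to create data node, retrying in 1s: %s", err)
		time.Sleep(time.Second)
		n, err = c.CreateDataNode(httpAddr, tcpAddr)
	}
	return n.ID
}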
Example 4: newClient
func newClient(s *testService) *meta.Client {
c := meta.NewClient([]string{s.HTTPAddr()}, false)
if err := c.Open(); err != nil {
panic(err)
}
return c
}
Example 5: newClient
func newClient() (string, *meta.Client) {
cfg := newConfig()
c := meta.NewClient(cfg)
if err := c.Open(); err != nil {
panic(err)
}
return cfg.Dir, c
}
Example 6: newClient
func newClient(s *testService) *meta.Client {
c := meta.NewClient()
c.SetMetaServers([]string{s.HTTPAddr()})
if err := c.Open(); err != nil {
panic(err)
}
return c
}
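Examples 4, 5 and 6 look interchangeable but were captured from different InfluxDB versions, which is why NewClient appears with three shapes across this page: a two-argument form taking the meta server addresses and a TLS flag (Examples 3, 4, 10 and 11), a zero-argument form followed by SetMetaServers (Examples 2, 6, 7, 8, 9, 12 and 13), and a single-argument form taking a *meta.Config in the single-node OSS releases (Examples 1, 5, 14 and 15). The shapes themselves are taken directly from the snippets; mapping them to exact release numbers is an assumption, so check the vendored revision of the influxdb packages in your project before copying one of them.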
Example 7: TestMetaService_PersistClusterIDAfterRestart
func TestMetaService_PersistClusterIDAfterRestart(t *testing.T) {
t.Parallel()
cfg := newConfig()
defer os.RemoveAll(cfg.Dir)
s := newService(cfg)
if err := s.Open(); err != nil {
t.Fatal(err)
}
defer s.Close()
c := meta.NewClient()
c.SetMetaServers([]string{s.HTTPAddr()})
if err := c.Open(); err != nil {
t.Fatal(err)
}
id := c.ClusterID()
if id == 0 {
t.Fatal("cluster ID can't be zero")
}
s.Close()
s = newService(cfg)
if err := s.Open(); err != nil {
t.Fatal(err)
}
c = meta.NewClient()
c.SetMetaServers([]string{s.HTTPAddr()})
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
idAfter := c.ClusterID()
if idAfter == 0 {
t.Fatal("cluster ID can't be zero")
} else if idAfter != id {
t.Fatalf("cluster id not the same: %d, %d", idAfter, id)
}
}
Example 8: TestMetaService_Ping
func TestMetaService_Ping(t *testing.T) {
cfgs := make([]*meta.Config, 3)
srvs := make([]*testService, 3)
joinPeers := freePorts(len(cfgs))
var swg sync.WaitGroup
swg.Add(len(cfgs))
for i := range cfgs {
c := newConfig()
c.HTTPBindAddress = joinPeers[i]
c.JoinPeers = joinPeers
cfgs[i] = c
srvs[i] = newService(c)
go func(i int, srv *testService) {
defer swg.Done()
if err := srv.Open(); err != nil {
t.Fatalf("error opening server %d: %s", i, err)
}
}(i, srvs[i])
defer srvs[i].Close()
defer os.RemoveAll(c.Dir)
}
swg.Wait()
c := meta.NewClient()
c.SetMetaServers(joinPeers)
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
if err := c.Ping(false); err != nil {
t.Fatalf("ping false all failed: %s", err)
}
if err := c.Ping(true); err != nil {
t.Fatalf("ping false true failed: %s", err)
}
srvs[1].Close()
// give the server time to close
time.Sleep(time.Second)
if err := c.Ping(false); err != nil {
t.Fatalf("ping false some failed: %s", err)
}
if err := c.Ping(true); err == nil {
t.Fatal("expected error on ping")
}
}
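The two Ping calls in Example 8 differ only in their boolean argument, which is easy to misread: Ping(false) succeeds as long as the client can reach at least one meta node, while Ping(true) requires every configured meta server to respond, which is why it fails after srvs[1] is closed. A hedged helper built on that behaviour is sketched below; the function name and return values are illustrative, and an import of the meta package is assumed (only the Ping calls come from the example).

// checkMetaCluster reports whether any meta server is reachable and whether
// every configured meta server answered, using the two Ping modes
// demonstrated in Example 8.
func checkMetaCluster(c *meta.Client) (reachable, allHealthy bool) {
	reachable = c.Ping(false) == nil  // at least one meta server responded
	allHealthy = c.Ping(true) == nil  // every configured meta server responded
	return reachable, allHealthy
}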
Example 9: TestMetaService_CommandAgainstNonLeader
// Ensure that if we attempt to create a database and the client
// is pointed at a server that isn't the leader, it automatically
// hits the leader and finishes the command
func TestMetaService_CommandAgainstNonLeader(t *testing.T) {
t.Parallel()
cfgs := make([]*meta.Config, 3)
srvs := make([]*testService, 3)
joinPeers := freePorts(len(cfgs))
var wg sync.WaitGroup
wg.Add(len(cfgs))
for i := range cfgs {
c := newConfig()
c.HTTPBindAddress = joinPeers[i]
c.JoinPeers = joinPeers
cfgs[i] = c
srvs[i] = newService(c)
go func(srv *testService) {
defer wg.Done()
if err := srv.Open(); err != nil {
t.Fatal(err)
}
}(srvs[i])
defer srvs[i].Close()
defer os.RemoveAll(c.Dir)
}
wg.Wait()
for i := range cfgs {
c := meta.NewClient()
c.SetMetaServers([]string{joinPeers[i]})
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
metaNodes, _ := c.MetaNodes()
if len(metaNodes) != 3 {
t.Fatalf("node %d - meta nodes wrong: %v", i, metaNodes)
}
if _, err := c.CreateDatabase(fmt.Sprintf("foo%d", i)); err != nil {
t.Fatalf("node %d: %s", i, err)
}
if db, err := c.Database(fmt.Sprintf("foo%d", i)); db == nil || err != nil {
t.Fatalf("node %d: database foo wasn't created: %s", i, err)
}
}
}
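Examples 9 and 11 both verify the same property: the client can be pointed at any meta node, and a write command such as CreateDatabase is forwarded to the current raft leader, so application code never has to locate the leader itself. A hedged sketch of relying on that behaviour from application code follows; the function name and address are placeholders, an import of the meta package is assumed, and the zero-argument constructor plus SetMetaServers matches the client version used in Example 9.

// ensureDatabase creates a database through whichever meta node the caller
// happens to know about; the meta service forwards the command to the raft
// leader, as Examples 9 and 11 demonstrate.
func ensureDatabase(metaAddr, name string) error {
	c := meta.NewClient()
	c.SetMetaServers([]string{metaAddr}) // any meta node, not necessarily the leader
	if err := c.Open(); err != nil {
		return err
	}
	defer c.Close()
	if _, err := c.CreateDatabase(name); err != nil {
		return err
	}
	// Read it back to confirm the command was applied.
	_, err := c.Database(name)
	return err
}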
Example 10: TestMetaService_Ping
func TestMetaService_Ping(t *testing.T) {
cfgs := make([]*meta.Config, 3)
srvs := make([]*testService, 3)
for i := range cfgs {
c := newConfig()
cfgs[i] = c
if i > 0 {
c.JoinPeers = []string{srvs[0].HTTPAddr()}
}
srvs[i] = newService(c)
if err := srvs[i].Open(); err != nil {
t.Fatal(err.Error())
}
c.HTTPBindAddress = srvs[i].HTTPAddr()
c.BindAddress = srvs[i].RaftAddr()
c.JoinPeers = nil
defer srvs[i].Close()
defer os.RemoveAll(c.Dir)
}
c := meta.NewClient([]string{srvs[0].HTTPAddr(), srvs[1].HTTPAddr()}, false)
if err := c.Open(); err != nil {
t.Fatal(err.Error())
}
defer c.Close()
if err := c.Ping(false); err != nil {
t.Fatal(err.Error())
}
if err := c.Ping(true); err != nil {
t.Fatal(err.Error())
}
srvs[1].Close()
if err := c.Ping(false); err != nil {
t.Fatal(err.Error())
}
if err := c.Ping(true); err == nil {
t.Fatal("expected error on ping")
}
}
Example 11: TestMetaService_CommandAgainstNonLeader
// Ensure that if we attempt to create a database and the client
// is pointed at a server that isn't the leader, it automatically
// hits the leader and finishes the command
func TestMetaService_CommandAgainstNonLeader(t *testing.T) {
t.Parallel()
cfgs := make([]*meta.Config, 3)
srvs := make([]*testService, 3)
for i := range cfgs {
c := newConfig()
cfgs[i] = c
if i > 0 {
c.JoinPeers = []string{srvs[0].HTTPAddr()}
}
srvs[i] = newService(c)
if err := srvs[i].Open(); err != nil {
t.Fatal(err.Error())
}
defer srvs[i].Close()
defer os.RemoveAll(c.Dir)
}
c := meta.NewClient([]string{srvs[2].HTTPAddr()}, false)
if err := c.Open(); err != nil {
t.Fatal(err.Error())
}
defer c.Close()
metaNodes, _ := c.MetaNodes()
if len(metaNodes) != 3 {
t.Fatalf("meta nodes wrong: %v", metaNodes)
}
if _, err := c.CreateDatabase("foo"); err != nil {
t.Fatal(err)
}
if db, err := c.Database("foo"); db == nil || err != nil {
t.Fatalf("database foo wasn't created: %s", err.Error())
}
}
Example 12: TestMetaService_FailureAndRestartCluster
// Ensure that the client will fail over to another server if the leader goes
// down. Also ensure that the cluster will come back up successfully after restart
func TestMetaService_FailureAndRestartCluster(t *testing.T) {
t.Parallel()
cfgs := make([]*meta.Config, 3)
srvs := make([]*testService, 3)
joinPeers := freePorts(len(cfgs))
raftPeers := freePorts(len(cfgs))
var swg sync.WaitGroup
swg.Add(len(cfgs))
for i := range cfgs {
c := newConfig()
c.HTTPBindAddress = joinPeers[i]
c.BindAddress = raftPeers[i]
c.JoinPeers = joinPeers
cfgs[i] = c
srvs[i] = newService(c)
go func(i int, srv *testService) {
defer swg.Done()
if err := srv.Open(); err != nil {
t.Logf("opening server %d", i)
t.Fatal(err)
}
}(i, srvs[i])
defer srvs[i].Close()
defer os.RemoveAll(c.Dir)
}
swg.Wait()
c := meta.NewClient()
c.SetMetaServers(joinPeers)
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
// check to see we were assigned a valid clusterID
c1ID := c.ClusterID()
if c1ID == 0 {
t.Fatalf("invalid cluster id: %d", c1ID)
}
if _, err := c.CreateDatabase("foo"); err != nil {
t.Fatal(err)
}
if db, err := c.Database("foo"); db == nil || err != nil {
t.Fatalf("database foo wasn't created: %s", err)
}
if err := srvs[0].Close(); err != nil {
t.Fatal(err)
}
if _, err := c.CreateDatabase("bar"); err != nil {
t.Fatal(err)
}
if db, err := c.Database("bar"); db == nil || err != nil {
t.Fatalf("database bar wasn't created: %s", err)
}
if err := srvs[1].Close(); err != nil {
t.Fatal(err)
}
if err := srvs[2].Close(); err != nil {
t.Fatal(err)
}
// give them a second to shut down
time.Sleep(time.Second)
// need to start them all at once so they can discover the bind addresses for raft
var wg sync.WaitGroup
wg.Add(len(cfgs))
for i, cfg := range cfgs {
srvs[i] = newService(cfg)
go func(srv *testService) {
if err := srv.Open(); err != nil {
panic(err)
}
wg.Done()
}(srvs[i])
defer srvs[i].Close()
}
wg.Wait()
time.Sleep(time.Second)
c2 := meta.NewClient()
c2.SetMetaServers(joinPeers)
if err := c2.Open(); err != nil {
t.Fatal(err)
}
defer c2.Close()
c2ID := c2.ClusterID()
//......... remainder of this code omitted .........
Example 13: TestMetaService_CreateRemoveMetaNode
func TestMetaService_CreateRemoveMetaNode(t *testing.T) {
t.Parallel()
joinPeers := freePorts(4)
raftPeers := freePorts(4)
cfg1 := newConfig()
cfg1.HTTPBindAddress = joinPeers[0]
cfg1.BindAddress = raftPeers[0]
defer os.RemoveAll(cfg1.Dir)
cfg2 := newConfig()
cfg2.HTTPBindAddress = joinPeers[1]
cfg2.BindAddress = raftPeers[1]
defer os.RemoveAll(cfg2.Dir)
var wg sync.WaitGroup
wg.Add(2)
cfg1.JoinPeers = joinPeers[0:2]
s1 := newService(cfg1)
go func() {
defer wg.Done()
if err := s1.Open(); err != nil {
t.Fatal(err)
}
}()
defer s1.Close()
cfg2.JoinPeers = joinPeers[0:2]
s2 := newService(cfg2)
go func() {
defer wg.Done()
if err := s2.Open(); err != nil {
t.Fatal(err)
}
}()
defer s2.Close()
wg.Wait()
cfg3 := newConfig()
joinPeers[2] = freePort()
cfg3.HTTPBindAddress = joinPeers[2]
raftPeers[2] = freePort()
cfg3.BindAddress = raftPeers[2]
defer os.RemoveAll(cfg3.Dir)
cfg3.JoinPeers = joinPeers[0:3]
s3 := newService(cfg3)
if err := s3.Open(); err != nil {
t.Fatal(err)
}
defer s3.Close()
c1 := meta.NewClient()
c1.SetMetaServers(joinPeers[0:3])
if err := c1.Open(); err != nil {
t.Fatal(err)
}
defer c1.Close()
metaNodes, _ := c1.MetaNodes()
if len(metaNodes) != 3 {
t.Fatalf("meta nodes wrong: %v", metaNodes)
}
c := meta.NewClient()
c.SetMetaServers([]string{s1.HTTPAddr()})
if err := c.Open(); err != nil {
t.Fatal(err)
}
defer c.Close()
if err := c.DeleteMetaNode(3); err != nil {
t.Fatal(err)
}
metaNodes, _ = c.MetaNodes()
if len(metaNodes) != 2 {
t.Fatalf("meta nodes wrong: %v", metaNodes)
}
cfg4 := newConfig()
cfg4.HTTPBindAddress = freePort()
cfg4.BindAddress = freePort()
cfg4.JoinPeers = []string{joinPeers[0], joinPeers[1], cfg4.HTTPBindAddress}
defer os.RemoveAll(cfg4.Dir)
s4 := newService(cfg4)
if err := s4.Open(); err != nil {
t.Fatal(err)
}
defer s4.Close()
c2 := meta.NewClient()
c2.SetMetaServers(cfg4.JoinPeers)
if err := c2.Open(); err != nil {
t.Fatal(err)
}
defer c2.Close()
metaNodes, _ = c2.MetaNodes()
if len(metaNodes) != 3 {
//......... remainder of this code omitted .........
Example 14: NewServer
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
// We need to ensure that a meta directory always exists even if
// we don't start the meta store. node.json is always stored under
// the meta directory.
if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
return nil, fmt.Errorf("mkdir all: %s", err)
}
// 0.10-rc1 and prior would sometimes put the node.json at the root
// dir which breaks backup/restore and restarting nodes. This moves
// the file from the root so it's always under the meta dir.
oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
newPath := filepath.Join(c.Meta.Dir, "node.json")
if _, err := os.Stat(oldPath); err == nil {
if err := os.Rename(oldPath, newPath); err != nil {
return nil, err
}
}
_, err := influxdb.LoadNode(c.Meta.Dir)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
}
// Check to see if there is a raft db, if so, error out with a message
// to downgrade, export, and then import the meta data
raftFile := filepath.Join(c.Meta.Dir, "raft.db")
if _, err := os.Stat(raftFile); err == nil {
return nil, fmt.Errorf("detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/", raftFile)
}
// In 0.10.0 bind-address was moved to the top level. Check
// the old location to keep things backwards compatible.
bind := c.BindAddress
s := &Server{
buildInfo: *buildInfo,
err: make(chan error),
closing: make(chan struct{}),
BindAddress: bind,
MetaClient: meta.NewClient(c.Meta),
Monitor: monitor.New(c.Monitor),
reportingDisabled: c.ReportingDisabled,
httpAPIAddr: c.HTTPD.BindAddress,
httpUseTLS: c.HTTPD.HTTPSEnabled,
tcpAddr: bind,
config: c,
}
if err := s.MetaClient.Open(); err != nil {
return nil, err
}
s.TSDBStore = tsdb.NewStore(c.Data.Dir)
s.TSDBStore.EngineOptions.Config = c.Data
// Copy TSDB configuration.
s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
// Create the Subscriber service
s.Subscriber = subscriber.NewService(c.Subscriber)
// Initialize points writer.
s.PointsWriter = cluster.NewPointsWriter()
s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
s.PointsWriter.TSDBStore = s.TSDBStore
s.PointsWriter.Subscriber = s.Subscriber
// Initialize query executor.
s.QueryExecutor = influxql.NewQueryExecutor()
s.QueryExecutor.StatementExecutor = &cluster.StatementExecutor{
MetaClient: s.MetaClient,
TSDBStore: cluster.LocalTSDBStore{Store: s.TSDBStore},
Monitor: s.Monitor,
PointsWriter: s.PointsWriter,
MaxSelectPointN: c.Cluster.MaxSelectPointN,
MaxSelectSeriesN: c.Cluster.MaxSelectSeriesN,
MaxSelectBucketsN: c.Cluster.MaxSelectBucketsN,
}
s.QueryExecutor.QueryTimeout = time.Duration(c.Cluster.QueryTimeout)
s.QueryExecutor.MaxConcurrentQueries = c.Cluster.MaxConcurrentQueries
if c.Data.QueryLogEnabled {
s.QueryExecutor.LogOutput = os.Stderr
}
// Initialize the monitor
s.Monitor.Version = s.buildInfo.Version
s.Monitor.Commit = s.buildInfo.Commit
s.Monitor.Branch = s.buildInfo.Branch
s.Monitor.BuildTime = s.buildInfo.Time
//......... remainder of this code omitted .........
Example 15: NewServer
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
// We need to ensure that a meta directory always exists even if
// we don't start the meta store. node.json is always stored under
// the meta directory.
if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {
return nil, fmt.Errorf("mkdir all: %s", err)
}
// 0.10-rc1 and prior would sometimes put the node.json at the root
// dir which breaks backup/restore and restarting nodes. This moves
// the file from the root so it's always under the meta dir.
oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json")
newPath := filepath.Join(c.Meta.Dir, "node.json")
if _, err := os.Stat(oldPath); err == nil {
if err := os.Rename(oldPath, newPath); err != nil {
return nil, err
}
}
_, err := influxdb.LoadNode(c.Meta.Dir)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
}
// In 0.10.0 bind-address was moved to the top level. Check
// the old location to keep things backwards compatible.
bind := c.BindAddress
s := &Server{
buildInfo: *buildInfo,
err: make(chan error),
closing: make(chan struct{}),
BindAddress: bind,
MetaClient: meta.NewClient(c.Meta),
Monitor: monitor.New(c.Monitor),
reportingDisabled: c.ReportingDisabled,
httpAPIAddr: c.HTTPD.BindAddress,
httpUseTLS: c.HTTPD.HTTPSEnabled,
tcpAddr: bind,
config: c,
}
if err := s.MetaClient.Open(); err != nil {
return nil, err
}
s.TSDBStore = tsdb.NewStore(c.Data.Dir)
s.TSDBStore.EngineOptions.Config = c.Data
// Copy TSDB configuration.
s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine
s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)
// Create the Subscriber service
s.Subscriber = subscriber.NewService(c.Subscriber)
// Initialize points writer.
s.PointsWriter = cluster.NewPointsWriter()
s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
s.PointsWriter.TSDBStore = s.TSDBStore
s.PointsWriter.Subscriber = s.Subscriber
// Initialize query executor.
s.QueryExecutor = cluster.NewQueryExecutor()
s.QueryExecutor.MetaClient = s.MetaClient
s.QueryExecutor.TSDBStore = s.TSDBStore
s.QueryExecutor.Monitor = s.Monitor
s.QueryExecutor.PointsWriter = s.PointsWriter
if c.Data.QueryLogEnabled {
s.QueryExecutor.LogOutput = os.Stderr
}
// Initialize the monitor
s.Monitor.Version = s.buildInfo.Version
s.Monitor.Commit = s.buildInfo.Commit
s.Monitor.Branch = s.buildInfo.Branch
s.Monitor.BuildTime = s.buildInfo.Time
s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)
return s, nil
}
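To round out Examples 14 and 15, here is a hedged sketch of how NewServer is typically driven from the influxd run package: build the run configuration, set the data directories, fill in the build metadata, construct the server and open it. Everything outside the NewServer call itself (run.NewConfig, the directory fields, the BuildInfo values) is an assumption based on the influxd code of that era rather than something shown in the listing above, so treat it as illustrative only.

// A usage sketch for NewServer; helper and field names outside the listing
// above are assumptions (see the note preceding this block).
cfg := run.NewConfig()
cfg.Meta.Dir = "/var/lib/influxdb/meta" // placeholder paths
cfg.Data.Dir = "/var/lib/influxdb/data"
cfg.Data.WALDir = "/var/lib/influxdb/wal"

buildInfo := &run.BuildInfo{Version: "0.12.0", Commit: "unknown", Branch: "unknown"}

s, err := run.NewServer(cfg, buildInfo)
if err != nil {
	log.Fatal(err)
}
if err := s.Open(); err != nil {
	log.Fatal(err)
}
defer s.Close()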