This article collects typical usage examples of the Golang function github.com/tsuru/docker-cluster/cluster.New. If you are wondering what the New function does, how to call it, or where to find it used in real code, the curated examples below should help.
The listing below shows 15 code examples of the New function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
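Before the full examples, here is a minimal sketch of how cluster.New is called, inferred from the call sites below: it takes a scheduler (several examples pass nil), a storage implementation such as cluster.MapStorage, and zero or more cluster.Node values, and returns a *cluster.Cluster and an error. The node address and pool metadata in this sketch are placeholders, not values taken from the examples.
package main

import (
    "log"

    "github.com/tsuru/docker-cluster/cluster"
)

func main() {
    // In-memory storage, as used by most of the test examples below.
    storage := &cluster.MapStorage{}
    // nil scheduler plus a single node; both values are illustrative only.
    c, err := cluster.New(nil, storage,
        cluster.Node{Address: "http://127.0.0.1:2375", Metadata: map[string]string{"pool": "example"}},
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = c // the *cluster.Cluster is then used to manage nodes, containers, and images
}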
Example 1: dockerCluster
func dockerCluster() *cluster.Cluster {
    cmutex.Lock()
    defer cmutex.Unlock()
    if dCluster == nil {
        debug, _ := config.GetBool("debug")
        clusterLog.SetDebug(debug)
        clusterLog.SetLogger(log.GetStdLogger())
        clusterStorage, err := buildClusterStorage()
        if err != nil {
            panic(err.Error())
        }
        var nodes []cluster.Node
        if isSegregateScheduler() {
            dCluster, _ = cluster.New(&segregatedScheduler{}, clusterStorage)
        } else {
            nodes = getDockerServers()
            dCluster, _ = cluster.New(nil, clusterStorage, nodes...)
        }
        autoHealing, _ := config.GetBool("docker:auto-healing")
        if autoHealing {
            healer := Healer{}
            dCluster.SetHealer(&healer)
        }
        activeMonitoring, _ := config.GetBool("docker:active-monitoring")
        if activeMonitoring {
            dCluster.StartActiveMonitoring(1 * time.Minute)
        }
    }
    return dCluster
}
Example 2: SetUpTest
func (s *S) SetUpTest(c *check.C) {
    iaas.ResetAll()
    repositorytest.Reset()
    queue.ResetQueue()
    s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
    err := s.p.Initialize()
    c.Assert(err, check.IsNil)
    queue.ResetQueue()
    app.Provisioner = s.p
    s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
    c.Assert(err, check.IsNil)
    s.p.cluster, err = cluster.New(nil, s.p.storage,
        cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-fallback"}},
    )
    c.Assert(err, check.IsNil)
    mainDockerProvisioner = s.p
    coll := s.p.collection()
    defer coll.Close()
    err = dbtest.ClearAllCollectionsExcept(coll.Database, []string{"users", "tokens", "teams"})
    c.Assert(err, check.IsNil)
    err = clearClusterStorage(s.clusterSess)
    c.Assert(err, check.IsNil)
    routertest.FakeRouter.Reset()
    opts := provision.AddPoolOptions{Name: "test-fallback"}
    err = provision.AddPool(opts)
    c.Assert(err, check.IsNil)
}
Example 3: TestPushImage
func (s *S) TestPushImage(c *gocheck.C) {
    var request *http.Request
    server, err := dtesting.NewServer("127.0.0.1:0", nil, func(r *http.Request) {
        request = r
    })
    c.Assert(err, gocheck.IsNil)
    defer server.Stop()
    config.Set("docker:registry", "localhost:3030")
    defer config.Unset("docker:registry")
    var storage cluster.MapStorage
    storage.StoreImage("localhost:3030/base", server.URL())
    cmutex.Lock()
    oldDockerCluster := dCluster
    dCluster, _ = cluster.New(nil, &storage,
        cluster.Node{Address: server.URL()})
    cmutex.Unlock()
    defer func() {
        cmutex.Lock()
        defer cmutex.Unlock()
        dCluster = oldDockerCluster
    }()
    err = newImage("localhost:3030/base", "http://index.docker.io")
    c.Assert(err, gocheck.IsNil)
    err = pushImage("localhost:3030/base")
    c.Assert(err, gocheck.IsNil)
    c.Assert(request.URL.Path, gocheck.Matches, ".*/images/localhost:3030/base/push$")
}
Example 4: TestContainerNetworkInfoNotFound
func (s *S) TestContainerNetworkInfoNotFound(c *gocheck.C) {
    inspectOut := `{
"NetworkSettings": {
"IpAddress": "10.10.10.10",
"IpPrefixLen": 8,
"Gateway": "10.65.41.1",
"Ports": {}
}
}`
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if strings.Contains(r.URL.Path, "/containers/") {
            w.Write([]byte(inspectOut))
        }
    }))
    defer server.Close()
    var storage cluster.MapStorage
    storage.StoreContainer("c-01", server.URL)
    oldCluster := dockerCluster()
    var err error
    dCluster, err = cluster.New(nil, &storage,
        cluster.Node{Address: server.URL},
    )
    c.Assert(err, gocheck.IsNil)
    defer func() {
        dCluster = oldCluster
    }()
    container := container{ID: "c-01"}
    info, err := container.networkInfo()
    c.Assert(info.IP, gocheck.Equals, "10.10.10.10")
    c.Assert(info.SSHHostPort, gocheck.Equals, "")
    c.Assert(info.HTTPHostPort, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "Container port 8888 is not mapped to any host port")
}
Example 5: TestContainerNetworkInfo
func (s *S) TestContainerNetworkInfo(c *check.C) {
    inspectOut := `{
"NetworkSettings": {
"IpAddress": "10.10.10.10",
"IpPrefixLen": 8,
"Gateway": "10.65.41.1",
"Ports": {}
}
}`
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if strings.Contains(r.URL.Path, "/containers/") {
            w.Write([]byte(inspectOut))
        }
    }))
    defer server.Close()
    var storage cluster.MapStorage
    storage.StoreContainer("c-01", server.URL)
    p, err := newFakeDockerProvisioner(server.URL)
    c.Assert(err, check.IsNil)
    p.cluster, err = cluster.New(nil, &storage,
        cluster.Node{Address: server.URL},
    )
    c.Assert(err, check.IsNil)
    container := Container{ID: "c-01"}
    info, err := container.NetworkInfo(p)
    c.Assert(err, check.IsNil)
    c.Assert(info.IP, check.Equals, "10.10.10.10")
    c.Assert(info.HTTPHostPort, check.Equals, "")
}
Example 6: TestGetHostAddr
func (s *S) TestGetHostAddr(c *gocheck.C) {
    cmutex.Lock()
    old := dCluster
    var err error
    dCluster, err = cluster.New(nil, &mapStorage{},
        cluster.Node{ID: "server0", Address: "http://localhost:8081"},
        cluster.Node{ID: "server20", Address: "http://localhost:3234"},
        cluster.Node{ID: "server21", Address: "http://10.10.10.10:4243"},
    )
    c.Assert(err, gocheck.IsNil)
    cmutex.Unlock()
    defer func() {
        cmutex.Lock()
        dCluster = old
        cmutex.Unlock()
    }()
    var tests = []struct {
        input    string
        expected string
    }{
        {"server0", "localhost"},
        {"server20", "localhost"},
        {"server21", "10.10.10.10"},
        {"server33", ""},
    }
    for _, t := range tests {
        c.Check(getHostAddr(t.input), gocheck.Equals, t.expected)
    }
}
Example 7: TestAutoScaleRunHandler
func (s *HandlersSuite) TestAutoScaleRunHandler(c *check.C) {
    mainDockerProvisioner.cluster, _ = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{},
        cluster.Node{Address: "localhost:1999", Metadata: map[string]string{
            "pool": "pool1",
        }},
    )
    config.Set("docker:auto-scale:group-by-metadata", "pool")
    config.Set("docker:auto-scale:max-container-count", 2)
    defer config.Unset("docker:auto-scale:max-container-count")
    defer config.Unset("docker:auto-scale:group-by-metadata")
    recorder := httptest.NewRecorder()
    request, err := http.NewRequest("POST", "/docker/autoscale/run", nil)
    c.Assert(err, check.IsNil)
    request.Header.Set("Authorization", "bearer "+s.token.GetValue())
    server := api.RunServer(true)
    server.ServeHTTP(recorder, request)
    c.Assert(recorder.Code, check.Equals, http.StatusOK)
    body := recorder.Body.String()
    parts := strings.Split(body, "\n")
    c.Assert(parts, check.DeepEquals, []string{
        `{"Message":"[node autoscale] running scaler *docker.countScaler for \"pool\": \"pool1\"\n"}`,
        `{"Message":"[node autoscale] nothing to do for \"pool\": \"pool1\"\n"}`,
        ``,
    })
}
Example 8: TestListContainersByHostHandler
func (s *HandlersSuite) TestListContainersByHostHandler(c *check.C) {
    var result []container
    var err error
    mainDockerProvisioner.cluster, err = cluster.New(&segregatedScheduler{}, &cluster.MapStorage{})
    c.Assert(err, check.IsNil)
    coll := mainDockerProvisioner.collection()
    defer coll.Close()
    err = coll.Insert(container{ID: "blabla", Type: "python", HostAddr: "http://cittavld1182.globoi.com"})
    c.Assert(err, check.IsNil)
    defer coll.Remove(bson.M{"id": "blabla"})
    err = coll.Insert(container{ID: "bleble", Type: "java", HostAddr: "http://cittavld1182.globoi.com"})
    c.Assert(err, check.IsNil)
    defer coll.Remove(bson.M{"id": "bleble"})
    req, err := http.NewRequest("GET", "/node/cittavld1182.globoi.com/containers?:address=http://cittavld1182.globoi.com", nil)
    rec := httptest.NewRecorder()
    err = listContainersHandler(rec, req, nil)
    c.Assert(err, check.IsNil)
    body, err := ioutil.ReadAll(rec.Body)
    c.Assert(err, check.IsNil)
    err = json.Unmarshal(body, &result)
    c.Assert(err, check.IsNil)
    c.Assert(result[0].ID, check.DeepEquals, "blabla")
    c.Assert(result[0].Type, check.DeepEquals, "python")
    c.Assert(result[0].HostAddr, check.DeepEquals, "http://cittavld1182.globoi.com")
    c.Assert(result[1].ID, check.DeepEquals, "bleble")
    c.Assert(result[1].Type, check.DeepEquals, "java")
    c.Assert(result[1].HostAddr, check.DeepEquals, "http://cittavld1182.globoi.com")
}
Example 9: TestCollectStatusFixContainer
func (s *S) TestCollectStatusFixContainer(c *gocheck.C) {
    coll := collection()
    defer coll.Close()
    err := coll.Insert(
        container{
            ID:       "9930c24f1c4x",
            AppName:  "makea",
            Type:     "python",
            Status:   provision.StatusStarted.String(),
            IP:       "127.0.0.4",
            HostPort: "9025",
            HostAddr: "127.0.0.1",
        },
    )
    c.Assert(err, gocheck.IsNil)
    defer coll.RemoveAll(bson.M{"appname": "makea"})
    cleanup, server := startDocker()
    defer cleanup()
    var storage mapStorage
    storage.StoreContainer("9930c24f1c4x", "server0")
    cmutex.Lock()
    dCluster, err = cluster.New(nil, &storage,
        cluster.Node{ID: "server0", Address: server.URL},
    )
    cmutex.Unlock()
    c.Assert(err, gocheck.IsNil)
    var p dockerProvisioner
    err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    cont, err := getContainer("9930c24f1c4x")
    c.Assert(err, gocheck.IsNil)
    c.Assert(cont.IP, gocheck.Equals, "127.0.0.9")
    c.Assert(cont.HostPort, gocheck.Equals, "9999")
}
Example 10: TestHealerHealNodeWithoutIaaS
func (s *S) TestHealerHealNodeWithoutIaaS(c *check.C) {
    node1, err := testing.NewServer("127.0.0.1:0", nil, nil)
    c.Assert(err, check.IsNil)
    cluster, err := cluster.New(nil, &cluster.MapStorage{},
        cluster.Node{Address: node1.URL()},
    )
    c.Assert(err, check.IsNil)
    var p dockerProvisioner
    err = p.Initialize()
    c.Assert(err, check.IsNil)
    p.cluster = cluster
    healer := nodeHealer{
        locks:                 make(map[string]*sync.Mutex),
        provisioner:           &p,
        disabledTime:          0,
        failuresBeforeHealing: 1,
        waitTimeNewMachine:    1 * time.Second,
    }
    nodes, err := p.getCluster().UnfilteredNodes()
    c.Assert(err, check.IsNil)
    c.Assert(nodes, check.HasLen, 1)
    created, err := healer.healNode(&nodes[0])
    c.Assert(err, check.ErrorMatches, ".*error creating new machine.*")
    c.Assert(created.Address, check.Equals, "")
    nodes, err = p.getCluster().UnfilteredNodes()
    c.Assert(err, check.IsNil)
    c.Assert(nodes, check.HasLen, 1)
    c.Assert(urlPort(nodes[0].Address), check.Equals, urlPort(node1.URL()))
    c.Assert(urlToHost(nodes[0].Address), check.Equals, "127.0.0.1")
}
Example 11: startMultipleServersClusterSeggregated
func (s *S) startMultipleServersClusterSeggregated() (*dockerProvisioner, error) {
    var err error
    s.extraServer, err = dtesting.NewServer("localhost:0", nil, nil)
    if err != nil {
        return nil, err
    }
    otherURL := strings.Replace(s.extraServer.URL(), "127.0.0.1", "localhost", 1)
    var p dockerProvisioner
    err = p.Initialize()
    if err != nil {
        return nil, err
    }
    opts := provision.AddPoolOptions{Name: "pool1", Public: true}
    err = provision.AddPool(opts)
    if err != nil {
        return nil, err
    }
    opts = provision.AddPoolOptions{Name: "pool2", Public: true}
    err = provision.AddPool(opts)
    if err != nil {
        return nil, err
    }
    p.storage = &cluster.MapStorage{}
    sched := segregatedScheduler{provisioner: &p}
    p.cluster, err = cluster.New(&sched, p.storage,
        cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "pool1"}},
        cluster.Node{Address: otherURL, Metadata: map[string]string{"pool": "pool2"}},
    )
    if err != nil {
        return nil, err
    }
    return &p, nil
}
Example 12: SetUpTest
func (s *S) SetUpTest(c *check.C) {
    config.Set("docker:api-timeout", 2)
    iaas.ResetAll()
    repositorytest.Reset()
    queue.ResetQueue()
    repository.Manager().CreateUser(s.user.Email)
    s.p = &dockerProvisioner{storage: &cluster.MapStorage{}}
    err := s.p.Initialize()
    c.Assert(err, check.IsNil)
    queue.ResetQueue()
    s.server, err = dtesting.NewServer("127.0.0.1:0", nil, nil)
    c.Assert(err, check.IsNil)
    s.p.cluster, err = cluster.New(nil, s.p.storage,
        cluster.Node{Address: s.server.URL(), Metadata: map[string]string{"pool": "test-default"}},
    )
    c.Assert(err, check.IsNil)
    mainDockerProvisioner = s.p
    err = dbtest.ClearAllCollectionsExcept(s.storage.Apps().Database, []string{"users", "tokens", "teams"})
    c.Assert(err, check.IsNil)
    err = clearClusterStorage(s.clusterSess)
    c.Assert(err, check.IsNil)
    routertest.FakeRouter.Reset()
    opts := provision.AddPoolOptions{Name: "test-default", Default: true}
    err = provision.AddPool(opts)
    c.Assert(err, check.IsNil)
    s.storage.Tokens().Remove(bson.M{"appname": bson.M{"$ne": ""}})
    s.logBuf = safe.NewBuffer(nil)
    log.SetLogger(log.NewWriterLogger(s.logBuf, true))
    s.token = createTokenForUser(s.user, "*", string(permission.CtxGlobal), "", c)
}
Example 13: TestGetHostAddrWithSegregatedScheduler
func (s *S) TestGetHostAddrWithSegregatedScheduler(c *gocheck.C) {
    conn, err := db.Conn()
    c.Assert(err, gocheck.IsNil)
    defer conn.Close()
    coll := conn.Collection(schedulerCollection)
    err = coll.Insert(
        node{ID: "server0", Address: "http://remotehost:8080", Teams: []string{"tsuru"}},
        node{ID: "server20", Address: "http://remotehost:8081", Teams: []string{"tsuru"}},
        node{ID: "server21", Address: "http://10.10.10.1:8082", Teams: []string{"tsuru"}},
    )
    defer coll.RemoveAll(bson.M{"_id": bson.M{"$in": []string{"server0", "server1", "server2"}}})
    cmutex.Lock()
    old := dCluster
    dCluster, err = cluster.New(segScheduler, &mapStorage{})
    c.Assert(err, gocheck.IsNil)
    cmutex.Unlock()
    defer func() {
        cmutex.Lock()
        dCluster = old
        cmutex.Unlock()
    }()
    var tests = []struct {
        input    string
        expected string
    }{
        {"server0", "remotehost"},
        {"server20", "remotehost"},
        {"server21", "10.10.10.1"},
        {"server33", ""},
    }
    for _, t := range tests {
        c.Check(getHostAddr(t.input), gocheck.Equals, t.expected)
    }
}
Example 14: TestFixContainersEmptyPortDoesNothing
func (s *S) TestFixContainersEmptyPortDoesNothing(c *check.C) {
    cleanup, server, p := startDocker("")
    defer cleanup()
    coll := p.collection()
    defer coll.Close()
    err := coll.Insert(
        container{
            ID:       "9930c24f1c4x",
            AppName:  "makea",
            Type:     "python",
            Status:   provision.StatusStarted.String(),
            IP:       "",
            HostPort: "",
            HostAddr: "127.0.0.1",
        },
    )
    c.Assert(err, check.IsNil)
    defer coll.RemoveAll(bson.M{"appname": "makea"})
    var storage cluster.MapStorage
    storage.StoreContainer("9930c24f1c4x", server.URL)
    p.cluster, err = cluster.New(nil, &storage,
        cluster.Node{Address: server.URL},
    )
    c.Assert(err, check.IsNil)
    err = p.fixContainers()
    c.Assert(err, check.IsNil)
    cont, err := p.getContainer("9930c24f1c4x")
    c.Assert(err, check.IsNil)
    c.Assert(cont.IP, check.Equals, "")
    c.Assert(cont.HostPort, check.Equals, "")
}
Example 15: TestCluster
func (s *S) TestCluster(c *check.C) {
    var p FakeDockerProvisioner
    cluster, err := cluster.New(nil, &cluster.MapStorage{})
    c.Assert(err, check.IsNil)
    p.cluster = cluster
    c.Assert(p.Cluster(), check.Equals, cluster)
}