This article collects typical usage examples of the Golang CreateNode function from github.com/docker/swarmkit/manager/state/store. If you are wondering what exactly CreateNode does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following sections show 15 code examples of the CreateNode function, ordered by popularity.
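Before the numbered examples, here is a minimal sketch of the pattern they all share: CreateNode is only valid inside a write transaction, so callers wrap it in MemoryStore.Update and usually mark the node READY so the scheduler will consider it. This is an illustrative sketch, not code taken from swarmkit; the node ID and name are placeholders, and it assumes the api types come from github.com/docker/swarmkit/api.
import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// createReadyNode registers a single READY node in one store transaction.
// store.CreateNode reports an error (for example, if a node with the same
// ID already exists), and Update propagates that error to the caller.
func createReadyNode(s *store.MemoryStore) error {
	return s.Update(func(tx store.Tx) error {
		node := &api.Node{
			ID: "example-node", // placeholder ID
			Spec: api.NodeSpec{
				Annotations: api.Annotations{
					Name: "example-node", // placeholder name
				},
			},
			Status: api.NodeStatus{
				State: api.NodeStatus_READY,
			},
		}
		return store.CreateNode(tx, node)
	})
}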
Example 1: TestSchedulerNoReadyNodes
func TestSchedulerNoReadyNodes(t *testing.T) {
ctx := context.Background()
initialTask := &api.Task{
ID: "id1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial task
assert.NoError(t, store.CreateTask(tx, initialTask))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
failure := watchAssignmentFailure(t, watch)
assert.Equal(t, "no suitable node", failure.Status.Message)
err = s.Update(func(tx store.Tx) error {
// Create a ready node. The task should get assigned to this
// node.
node := &api.Node{
ID: "newnode",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "newnode",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
}
assert.NoError(t, store.CreateNode(tx, node))
return nil
})
assert.NoError(t, err)
assignment := watchAssignment(t, watch)
assert.Equal(t, "newnode", assignment.NodeID)
}
Example 2: createNode
func createNode(t *testing.T, ts *testServer, id string, role api.NodeRole, membership api.NodeSpec_Membership) *api.Node {
node := &api.Node{
ID: id,
Spec: api.NodeSpec{
Role: role,
Membership: membership,
},
}
err := ts.Store.Update(func(tx store.Tx) error {
return store.CreateNode(tx, node)
})
assert.NoError(t, err)
return node
}
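For illustration only, a test could use this helper roughly as follows, assuming t and ts come from the surrounding test setup as in the other examples; the ID string is a placeholder:
node := createNode(t, ts, "worker-1", api.NodeRoleWorker, api.NodeMembershipAccepted) // returns the stored *api.Node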
Example 3: TestSchedulerPluginConstraint
//......... portion of code omitted here .........
},
},
},
},
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Mounts: []api.Mount{
{
Source: "testVol1",
Target: "/foo",
Type: api.MountTypeVolume,
VolumeOptions: volumeOptionsDriver("plugin1"),
},
},
},
},
},
ServiceAnnotations: api.Annotations{
Name: "task2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
// Add initial node and task
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, t1))
assert.NoError(t, store.CreateNode(tx, n1))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
// t1 should get assigned
assignment := watchAssignment(t, watch)
assert.Equal(t, assignment.NodeID, "node1_ID")
// Create t2; it should stay in the pending state because there is
// no node with the volume plugin `plugin2`
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateTask(tx, t2))
return nil
})
assert.NoError(t, err)
time.Sleep(100 * time.Millisecond)
s.View(func(tx store.ReadTx) {
task := store.GetTask(tx, "task2_ID")
if task.Status.State >= api.TaskStateAssigned {
t.Fatalf("task 'task2_ID' should not have been assigned to node %v", task.NodeID)
}
Example 4: TestPreassignedTasks
func TestPreassignedTasks(t *testing.T) {
ctx := context.Background()
initialNodeSet := []*api.Node{
{
ID: "node1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "node2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
}
initialTaskSet := []*api.Task{
{
ID: "task1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
},
{
ID: "task2",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
NodeID: initialNodeSet[0].ID,
},
{
ID: "task3",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
NodeID: initialNodeSet[0].ID,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Prepopulate nodes
for _, n := range initialNodeSet {
assert.NoError(t, store.CreateNode(tx, n))
}
// Prepopulate tasks
for _, task := range initialTaskSet {
assert.NoError(t, store.CreateTask(tx, task))
}
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
// Preassigned tasks should be processed first
assignment1 := watchAssignment(t, watch)
// task2 and task3 are preassigned to node1
assert.Equal(t, assignment1.NodeID, "node1")
assert.Regexp(t, assignment1.ID, "(task2|task3)")
assignment2 := watchAssignment(t, watch)
if assignment1.ID == "task2" {
assert.Equal(t, "task3", assignment2.ID)
} else {
//......... portion of code omitted here .........
Example 5: TestSchedulerResourceConstraint
func TestSchedulerResourceConstraint(t *testing.T) {
ctx := context.Background()
// Create a ready node without enough memory to run the task.
underprovisionedNode := &api.Node{
ID: "underprovisioned",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "underprovisioned",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 1e9,
MemoryBytes: 1e9,
},
},
}
initialTask := &api.Task{
ID: "id1",
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
},
Resources: &api.ResourceRequirements{
Reservations: &api.Resources{
MemoryBytes: 2e9,
},
},
},
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStateAllocated,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
err := s.Update(func(tx store.Tx) error {
// Add initial node and task
assert.NoError(t, store.CreateTask(tx, initialTask))
assert.NoError(t, store.CreateNode(tx, underprovisionedNode))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
err = s.Update(func(tx store.Tx) error {
// Create a node with enough memory. The task should get
// assigned to this node.
node := &api.Node{
ID: "bignode",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "bignode",
},
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 4e9,
MemoryBytes: 8e9,
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
}
assert.NoError(t, store.CreateNode(tx, node))
return nil
})
assert.NoError(t, err)
assignment := watchAssignment(t, watch)
assert.Equal(t, "bignode", assignment.NodeID)
}
Example 6: becomeLeader
// becomeLeader starts the subsystems that are run on the leader.
func (m *Manager) becomeLeader(ctx context.Context) {
s := m.RaftNode.MemoryStore()
rootCA := m.config.SecurityConfig.RootCA()
nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
raftCfg := raft.DefaultRaftConfig()
raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
initialCAConfig := ca.DefaultCAConfig()
initialCAConfig.ExternalCAs = m.config.ExternalCAs
s.Update(func(tx store.Tx) error {
// Add a default cluster object to the
// store. Don't check the error because
// we expect this to fail unless this
// is a brand new cluster.
store.CreateCluster(tx, defaultClusterObject(clusterID, initialCAConfig, raftCfg, rootCA))
// Add Node entry for ourself, if one
// doesn't exist already.
store.CreateNode(tx, managerNode(nodeID))
return nil
})
// Attempt to rotate the key-encrypting-key of the root CA key-material
err := m.rotateRootCAKEK(ctx, clusterID)
if err != nil {
log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
}
m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
m.taskReaper = orchestrator.NewTaskReaper(s)
m.scheduler = scheduler.New(s)
m.keyManager = keymanager.New(s, keymanager.DefaultConfig())
// TODO(stevvooe): Allocate a context that can be used to
// shutdown underlying manager processes when leadership is
// lost.
m.allocator, err = allocator.New(s)
if err != nil {
log.G(ctx).WithError(err).Error("failed to create allocator")
// TODO(stevvooe): It doesn't seem correct here to fail
// creating the allocator but then use it anyway.
}
if m.keyManager != nil {
go func(keyManager *keymanager.KeyManager) {
if err := keyManager.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("keymanager failed with an error")
}
}(m.keyManager)
}
go func(d *dispatcher.Dispatcher) {
if err := d.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
}
}(m.Dispatcher)
go func(server *ca.Server) {
if err := server.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("CA signer exited with an error")
}
}(m.caserver)
// Start all sub-components in separate goroutines.
// TODO(aluzzardi): This should have some kind of error handling so that
// any component that goes down would bring the entire manager down.
if m.allocator != nil {
go func(allocator *allocator.Allocator) {
if err := allocator.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("allocator exited with an error")
}
}(m.allocator)
}
go func(scheduler *scheduler.Scheduler) {
if err := scheduler.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("scheduler exited with an error")
}
}(m.scheduler)
go func(taskReaper *orchestrator.TaskReaper) {
taskReaper.Run()
}(m.taskReaper)
go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
if err := orchestrator.Run(ctx); err != nil {
log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
}
}(m.replicatedOrchestrator)
go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
if err := globalOrchestrator.Run(ctx); err != nil {
//......... portion of code omitted here .........
Example 7: TestUpdateNodeDemote
func TestUpdateNodeDemote(t *testing.T) {
tc := cautils.NewTestCA(nil, cautils.AcceptancePolicy(true, true, ""))
ts := newTestServer(t)
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Assign one of the raft nodes to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
return nil
}))
// Stop Node 3 (1 node out of 3)
nodes[3].Server.Stop()
nodes[3].Shutdown()
// Node 3 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 3 to be unreachable")
}
return nil
}))
// Try to demote Node 2, this should fail because of the quorum safeguard
r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec := r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version := &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, grpc.Code(err))
// Restart Node 3
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Node 3 should be listed as Reachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE {
return fmt.Errorf("expected node 3 to be reachable")
}
return nil
}))
// Try to demote Node 3, this should succeed
r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec = r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version = &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.NoError(t, err)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
2: nodes[2],
}
//......... portion of code omitted here .........
Example 8: TestHA
func TestHA(t *testing.T) {
ctx := context.Background()
initialNodeSet := []*api.Node{
{
ID: "id1",
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id2",
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id3",
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id4",
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id5",
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
}
taskTemplate1 := &api.Task{
DesiredState: api.TaskStateRunning,
ServiceID: "service1",
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:1",
},
},
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
taskTemplate2 := &api.Task{
DesiredState: api.TaskStateRunning,
ServiceID: "service2",
Spec: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "v:2",
},
},
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
t1Instances := 18
err := s.Update(func(tx store.Tx) error {
// Prepopulate nodes
for _, n := range initialNodeSet {
assert.NoError(t, store.CreateNode(tx, n))
}
// Prepopulate tasks from template 1
for i := 0; i != t1Instances; i++ {
taskTemplate1.ID = fmt.Sprintf("t1id%d", i)
assert.NoError(t, store.CreateTask(tx, taskTemplate1))
}
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
t1Assignments := make(map[string]int)
for i := 0; i != t1Instances; i++ {
assignment := watchAssignment(t, watch)
if !strings.HasPrefix(assignment.ID, "t1") {
//......... portion of code omitted here .........
Example 9: IssueNodeCertificate
// IssueNodeCertificate is responsible for gatekeeping both certificate requests from new nodes in the swarm,
// and authorizing certificate renewals.
// If a node presented a valid certificate, the corresponding certificate is set in a RENEW state.
// If a node failed to present a valid certificate, we check for a valid join token and set the
// role accordingly. A new random node ID is generated, and the corresponding node entry is created.
// IssueNodeCertificate is the only place where new node entries to raft should be created.
func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) {
// First, let's see if the remote node is presenting a non-empty CSR
if len(request.CSR) == 0 {
return nil, grpc.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
}
if _, err := s.isRunningLocked(); err != nil {
return nil, err
}
var (
blacklistedCerts map[string]*api.BlacklistedCertificate
clusters []*api.Cluster
err error
)
s.store.View(func(readTx store.ReadTx) {
clusters, err = store.FindClusters(readTx, store.ByName("default"))
})
// Not having a cluster object yet means we can't check
// the blacklist.
if err == nil && len(clusters) == 1 {
blacklistedCerts = clusters[0].BlacklistedCertificates
}
// Renewing the cert with a local (unix socket) is always valid.
localNodeInfo := ctx.Value(LocalRequestKey)
if localNodeInfo != nil {
nodeInfo, ok := localNodeInfo.(RemoteNodeInfo)
if ok && nodeInfo.NodeID != "" {
return s.issueRenewCertificate(ctx, nodeInfo.NodeID, request.CSR)
}
}
// If the remote node is a worker (either forwarded by a manager, or calling directly),
// issue a renew worker certificate entry with the correct ID
nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
if err == nil {
return s.issueRenewCertificate(ctx, nodeID, request.CSR)
}
// If the remote node is a manager (either forwarded by another manager, or calling directly),
// issue a renew certificate entry with the correct ID
nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
if err == nil {
return s.issueRenewCertificate(ctx, nodeID, request.CSR)
}
// The remote node didn't successfully present a valid MTLS certificate, let's issue a
// certificate with a new random ID
role := api.NodeRole(-1)
s.mu.Lock()
if subtle.ConstantTimeCompare([]byte(s.joinTokens.Manager), []byte(request.Token)) == 1 {
role = api.NodeRoleManager
} else if subtle.ConstantTimeCompare([]byte(s.joinTokens.Worker), []byte(request.Token)) == 1 {
role = api.NodeRoleWorker
}
s.mu.Unlock()
if role < 0 {
return nil, grpc.Errorf(codes.InvalidArgument, "A valid join token is necessary to join this cluster")
}
// Max number of collisions of ID or CN to tolerate before giving up
maxRetries := 3
// Generate a random ID for this new node
for i := 0; ; i++ {
nodeID = identity.NewID()
// Create a new node
err := s.store.Update(func(tx store.Tx) error {
node := &api.Node{
Role: role,
ID: nodeID,
Certificate: api.Certificate{
CSR: request.CSR,
CN: nodeID,
Role: role,
Status: api.IssuanceStatus{
State: api.IssuanceStatePending,
},
},
Spec: api.NodeSpec{
DesiredRole: role,
Membership: api.NodeMembershipAccepted,
Availability: request.Availability,
},
}
return store.CreateNode(tx, node)
})
//......... portion of code omitted here .........
Example 10: testUpdateNodeDemote
func testUpdateNodeDemote(leader bool, t *testing.T) {
tc := cautils.NewTestCA(nil)
defer tc.Stop()
ts := newTestServer(t)
defer ts.Stop()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Assign one of the raft nodes to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
return nil
}))
// Stop Node 3 (1 node out of 3)
nodes[3].Server.Stop()
nodes[3].ShutdownRaft()
// Node 3 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 3 to be unreachable")
}
return nil
}))
// Try to demote Node 2, this should fail because of the quorum safeguard
r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec := r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version := &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.Error(t, err)
assert.Equal(t, codes.FailedPrecondition, grpc.Code(err))
// Restart Node 3
nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
raftutils.WaitForCluster(t, clockSource, nodes)
// Node 3 should be listed as Reachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
members := nodes[1].GetMemberlist()
if len(members) != 3 {
return fmt.Errorf("expected 3 nodes, got %d", len(members))
}
if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE {
return fmt.Errorf("expected node 3 to be reachable")
}
return nil
}))
// Try to demote Node 3, this should succeed
r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()})
assert.NoError(t, err)
spec = r.Node.Spec.Copy()
spec.Role = api.NodeRoleWorker
version = &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: spec,
NodeVersion: version,
})
assert.NoError(t, err)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
//......... portion of code omitted here .........
Example 11: TestUpdateNode
func TestUpdateNode(t *testing.T) {
tc := cautils.NewTestCA(nil)
defer tc.Stop()
ts := newTestServer(t)
defer ts.Stop()
nodes := make(map[uint64]*raftutils.TestNode)
nodes[1], _ = raftutils.NewInitNode(t, tc, nil)
defer raftutils.TeardownCluster(t, nodes)
nodeID := nodes[1].SecurityConfig.ClientTLSCreds.NodeID()
// Assign one of the raft nodes to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
_, err := ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodeID,
Spec: &api.NodeSpec{
Availability: api.NodeAvailabilityDrain,
},
NodeVersion: &api.Version{},
})
assert.Error(t, err)
assert.Equal(t, codes.NotFound, grpc.Code(err))
// Create a node object for the manager
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{
ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(),
Spec: api.NodeSpec{
Role: api.NodeRoleManager,
Membership: api.NodeMembershipAccepted,
},
}))
return nil
}))
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{})
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: "invalid", Spec: &api.NodeSpec{}, NodeVersion: &api.Version{}})
assert.Error(t, err)
assert.Equal(t, codes.NotFound, grpc.Code(err))
r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodeID})
assert.NoError(t, err)
if !assert.NotNil(t, r) {
assert.FailNow(t, "got unexpected nil response from GetNode")
}
assert.NotNil(t, r.Node)
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID})
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
spec := r.Node.Spec.Copy()
spec.Availability = api.NodeAvailabilityDrain
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodeID,
Spec: spec,
})
assert.Error(t, err)
assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{
NodeID: nodeID,
Spec: spec,
NodeVersion: &r.Node.Meta.Version,
})
assert.NoError(t, err)
r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodeID})
assert.NoError(t, err)
if !assert.NotNil(t, r) {
assert.FailNow(t, "got unexpected nil response from GetNode")
}
assert.NotNil(t, r.Node)
assert.NotNil(t, r.Node.Spec)
assert.Equal(t, api.NodeAvailabilityDrain, r.Node.Spec.Availability)
version := &r.Node.Meta.Version
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID, Spec: &r.Node.Spec, NodeVersion: version})
assert.NoError(t, err)
// Perform an update with the "old" version.
_, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID, Spec: &r.Node.Spec, NodeVersion: version})
assert.Error(t, err)
}
Example 12: TestListManagerNodes
func TestListManagerNodes(t *testing.T) {
t.Parallel()
tc := cautils.NewTestCA(nil)
defer tc.Stop()
ts := newTestServer(t)
defer ts.Stop()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Create a node object for each of the managers
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()}))
return nil
}))
// Assign one of the raft nodes to the test server
ts.Server.raft = nodes[1].Node
ts.Server.store = nodes[1].MemoryStore()
// There should be 3 reachable managers listed
r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.NotNil(t, r)
managers := getMap(t, r.Nodes)
assert.Len(t, ts.Server.raft.GetMemberlist(), 3)
assert.Len(t, r.Nodes, 3)
// Node 1 should be the leader
for i := 1; i <= 3; i++ {
if i == 1 {
assert.True(t, managers[nodes[uint64(i)].Config.ID].Leader)
continue
}
assert.False(t, managers[nodes[uint64(i)].Config.ID].Leader)
}
// All nodes should be reachable
for i := 1; i <= 3; i++ {
assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
}
// Add two more nodes to the cluster
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.AddRaftNode(t, clockSource, nodes, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
// Add node entries for these
assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[4].SecurityConfig.ClientTLSCreds.NodeID()}))
assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[5].SecurityConfig.ClientTLSCreds.NodeID()}))
return nil
}))
// There should be 5 reachable managers listed
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.NoError(t, err)
assert.NotNil(t, r)
managers = getMap(t, r.Nodes)
assert.Len(t, ts.Server.raft.GetMemberlist(), 5)
assert.Len(t, r.Nodes, 5)
for i := 1; i <= 5; i++ {
assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability)
}
// Stop 2 nodes
nodes[4].Server.Stop()
nodes[4].ShutdownRaft()
nodes[5].Server.Stop()
nodes[5].ShutdownRaft()
// Node 4 and Node 5 should be listed as Unreachable
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{})
if err != nil {
return err
}
managers = getMap(t, r.Nodes)
if len(r.Nodes) != 5 {
return fmt.Errorf("expected 5 nodes, got %d", len(r.Nodes))
}
if managers[nodes[4].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 4 to be unreachable")
}
if managers[nodes[5].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE {
return fmt.Errorf("expected node 5 to be unreachable")
}
return nil
}))
// Restart the 2 nodes
nodes[4] = raftutils.RestartNode(t, clockSource, nodes[4], false)
//......... portion of code omitted here .........
Example 13: TestManager
//......... portion of code omitted here .........
// Try to have a client in a different org access this manager
opts = []grpc.DialOption{
grpc.WithTimeout(10 * time.Second),
grpc.WithTransportCredentials(agentDiffOrgSecurityConfig.ClientTLSCreds),
}
conn2, err := grpc.Dial(tcpAddr, opts...)
assert.NoError(t, err)
defer func() {
assert.NoError(t, conn2.Close())
}()
client = api.NewDispatcherClient(conn2)
_, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{})
assert.Contains(t, grpc.ErrorDesc(err), "Permission denied: unauthorized peer role: rpc error: code = 7 desc = Permission denied: remote certificate not part of organization")
// Verify that requests to the various GRPC services running on TCP
// are rejected if they don't have certs.
opts = []grpc.DialOption{
grpc.WithTimeout(10 * time.Second),
grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})),
}
noCertConn, err := grpc.Dial(tcpAddr, opts...)
assert.NoError(t, err)
defer func() {
assert.NoError(t, noCertConn.Close())
}()
client = api.NewDispatcherClient(noCertConn)
_, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{})
assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request")
controlClient := api.NewControlClient(noCertConn)
_, err = controlClient.ListNodes(context.Background(), &api.ListNodesRequest{})
assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request")
raftClient := api.NewRaftMembershipClient(noCertConn)
_, err = raftClient.Join(context.Background(), &api.JoinRequest{})
assert.EqualError(t, err, "rpc error: code = 7 desc = Permission denied: unauthorized peer role: rpc error: code = 7 desc = no client certificates in request")
opts = []grpc.DialOption{
grpc.WithTimeout(10 * time.Second),
grpc.WithTransportCredentials(managerSecurityConfig.ClientTLSCreds),
}
controlConn, err := grpc.Dial(tcpAddr, opts...)
assert.NoError(t, err)
defer func() {
assert.NoError(t, controlConn.Close())
}()
// check that the kek is added to the config
var cluster api.Cluster
m.raftNode.MemoryStore().View(func(tx store.ReadTx) {
clusters, err := store.FindClusters(tx, store.All)
require.NoError(t, err)
require.Len(t, clusters, 1)
cluster = *clusters[0]
})
require.NotNil(t, cluster)
require.Len(t, cluster.UnlockKeys, 1)
require.Equal(t, &api.EncryptionKey{
Subsystem: ca.ManagerRole,
Key: []byte("kek"),
}, cluster.UnlockKeys[0])
// Test removal of the agent node
agentID := agentSecurityConfig.ClientTLSCreds.NodeID()
assert.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error {
return store.CreateNode(tx,
&api.Node{
ID: agentID,
Certificate: api.Certificate{
Role: api.NodeRoleWorker,
CN: agentID,
},
},
)
}))
controlClient = api.NewControlClient(controlConn)
_, err = controlClient.RemoveNode(context.Background(),
&api.RemoveNodeRequest{
NodeID: agentID,
Force: true,
},
)
assert.NoError(t, err)
client = api.NewDispatcherClient(conn)
_, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{})
assert.Contains(t, grpc.ErrorDesc(err), "removed from swarm")
m.Stop(ctx)
// After stopping we may receive an error from ListenAndServe if
// all this happened before WaitForLeader completed, so don't check the
// error.
<-done
}
Example 14: benchScheduler
func benchScheduler(b *testing.B, nodes, tasks int, networkConstraints bool) {
ctx := context.Background()
for iters := 0; iters < b.N; iters++ {
b.StopTimer()
s := store.NewMemoryStore(nil)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
go func() {
_ = scheduler.Run(ctx)
}()
// Let the scheduler get started
runtime.Gosched()
_ = s.Update(func(tx store.Tx) error {
// Create initial nodes and tasks
for i := 0; i < nodes; i++ {
n := &api.Node{
ID: identity.NewID(),
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name" + strconv.Itoa(i),
Labels: make(map[string]string),
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Engine: &api.EngineDescription{},
},
}
// Give every third node a special network
if i%3 == 0 {
n.Description.Engine.Plugins = []api.PluginDescription{
{
Name: "network",
Type: "Network",
},
}
}
err := store.CreateNode(tx, n)
if err != nil {
panic(err)
}
}
for i := 0; i < tasks; i++ {
id := "task" + strconv.Itoa(i)
t := &api.Task{
ID: id,
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: id,
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
if networkConstraints {
t.Networks = []*api.NetworkAttachment{
{
Network: &api.Network{
DriverState: &api.Driver{
Name: "network",
},
},
},
}
}
err := store.CreateTask(tx, t)
if err != nil {
panic(err)
}
}
b.StartTimer()
return nil
})
for i := 0; i != tasks; i++ {
<-watch
}
scheduler.Stop()
cancel()
s.Close()
}
}
Example 15: TestScheduler
func TestScheduler(t *testing.T) {
ctx := context.Background()
initialNodeSet := []*api.Node{
{
ID: "id1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name1",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id2",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
{
ID: "id3",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "name2",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
},
}
initialTaskSet := []*api.Task{
{
ID: "id1",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name1",
},
Status: api.TaskStatus{
State: api.TaskStateAssigned,
},
NodeID: initialNodeSet[0].ID,
},
{
ID: "id2",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
},
{
ID: "id3",
DesiredState: api.TaskStateRunning,
ServiceAnnotations: api.Annotations{
Name: "name2",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
},
}
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Prepopulate nodes
for _, n := range initialNodeSet {
assert.NoError(t, store.CreateNode(tx, n))
}
// Prepopulate tasks
for _, task := range initialTaskSet {
assert.NoError(t, store.CreateTask(tx, task))
}
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
//......... portion of code omitted here .........