This article collects typical usage examples of the Golang function store.NewMemoryStore from github.com/docker/swarmkit/manager/state/store. If you are wondering what NewMemoryStore does, how to call it, or want concrete examples, the curated code samples below should help.
15 code examples of NewMemoryStore are shown below, ordered by popularity by default.
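Before the examples, here is a minimal, self-contained sketch of the pattern they all share: create an in-memory store (passing nil so no raft proposer backs it, as most of the tests below do), write objects inside an Update transaction, and read them back inside a View transaction. The task ID and the main package here are placeholders for illustration; this sketch is assembled from the calls that appear in the examples below, not copied from swarmkit itself.
package main

import (
    "fmt"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/manager/state/store"
)

func main() {
    // nil means no cluster-wide proposer backs the store; the in-memory
    // state is local to this process, which is how the tests below use it.
    s := store.NewMemoryStore(nil)
    defer s.Close()

    // All writes go through Update, which wraps the callback in a transaction.
    err := s.Update(func(tx store.Tx) error {
        return store.CreateTask(tx, &api.Task{
            ID:           "demo-task", // placeholder ID
            DesiredState: api.TaskStateRunning,
        })
    })
    if err != nil {
        panic(err)
    }

    // Reads go through View with a read-only transaction.
    s.View(func(readTx store.ReadTx) {
        task := store.GetTask(readTx, "demo-task")
        fmt.Println(task.ID, task.DesiredState)
    })
}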
Example 1: TestKeyManagerDefaultSubsystem
// Verify the key generation and rotation for default subsystems
func TestKeyManagerDefaultSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    defer st.Close()
    createCluster(t, st, "default", "default")
    k := New(st, DefaultConfig())
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)
    // verify the number of keys allocated matches the keyring size.
    var (
        clusters []*api.Cluster
        err error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), len(k.config.Subsystems)*keyringSize)
    key1 := clusters[0].NetworkBootstrapKeys[0].Key
    k.rotateKey(ctx)
    // verify that after a rotation oldest key has been removed from the keyring
    assert.Equal(t, len(k.keyRing.keys), len(k.config.Subsystems)*keyringSize)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)
    }
}
Example 2: TestSchedulerNoReadyNodes
func TestSchedulerNoReadyNodes(t *testing.T) {
    ctx := context.Background()
    initialTask := &api.Task{
        ID: "id1",
        DesiredState: api.TaskStateRunning,
        ServiceAnnotations: api.Annotations{
            Name: "name1",
        },
        Status: api.TaskStatus{
            State: api.TaskStatePending,
        },
    }
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    defer s.Close()
    err := s.Update(func(tx store.Tx) error {
        // Add initial task
        assert.NoError(t, store.CreateTask(tx, initialTask))
        return nil
    })
    assert.NoError(t, err)
    scheduler := New(s)
    watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
    defer cancel()
    go func() {
        assert.NoError(t, scheduler.Run(ctx))
    }()
    defer scheduler.Stop()
    failure := watchAssignmentFailure(t, watch)
    assert.Equal(t, "no suitable node", failure.Status.Message)
    err = s.Update(func(tx store.Tx) error {
        // Create a ready node. The task should get assigned to this
        // node.
        node := &api.Node{
            ID: "newnode",
            Spec: api.NodeSpec{
                Annotations: api.Annotations{
                    Name: "newnode",
                },
            },
            Status: api.NodeStatus{
                State: api.NodeStatus_READY,
            },
        }
        assert.NoError(t, store.CreateNode(tx, node))
        return nil
    })
    assert.NoError(t, err)
    assignment := watchAssignment(t, watch)
    assert.Equal(t, "newnode", assignment.NodeID)
}
Example 3: NewNode
// NewNode generates a new Raft node
func NewNode(opts NodeOptions) *Node {
    cfg := opts.Config
    if cfg == nil {
        cfg = DefaultNodeConfig()
    }
    if opts.TickInterval == 0 {
        opts.TickInterval = time.Second
    }
    if opts.SendTimeout == 0 {
        opts.SendTimeout = 2 * time.Second
    }
    raftStore := raft.NewMemoryStorage()
    n := &Node{
        cluster: membership.NewCluster(2 * cfg.ElectionTick),
        raftStore: raftStore,
        opts: opts,
        Config: &raft.Config{
            ElectionTick: cfg.ElectionTick,
            HeartbeatTick: cfg.HeartbeatTick,
            Storage: raftStore,
            MaxSizePerMsg: cfg.MaxSizePerMsg,
            MaxInflightMsgs: cfg.MaxInflightMsgs,
            Logger: cfg.Logger,
        },
        doneCh: make(chan struct{}),
        removeRaftCh: make(chan struct{}),
        stopped: make(chan struct{}),
        leadershipBroadcast: watch.NewQueue(),
        lastSendToMember: make(map[uint64]chan struct{}),
        keyRotator: opts.KeyRotator,
    }
    n.memoryStore = store.NewMemoryStore(n)
    if opts.ClockSource == nil {
        n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
    } else {
        n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
    }
    n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
    n.wait = newWait()
    n.removeRaftFunc = func(n *Node) func() {
        var removeRaftOnce sync.Once
        return func() {
            removeRaftOnce.Do(func() {
                close(n.removeRaftCh)
            })
        }
    }(n)
    return n
}
Example 4: NewNode
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
    cfg := opts.Config
    if cfg == nil {
        cfg = DefaultNodeConfig()
    }
    if opts.TickInterval == 0 {
        opts.TickInterval = time.Second
    }
    raftStore := raft.NewMemoryStorage()
    ctx, cancel := context.WithCancel(ctx)
    n := &Node{
        Ctx: ctx,
        cancel: cancel,
        cluster: membership.NewCluster(),
        tlsCredentials: opts.TLSCredentials,
        raftStore: raftStore,
        Address: opts.Addr,
        opts: opts,
        Config: &raft.Config{
            ElectionTick: cfg.ElectionTick,
            HeartbeatTick: cfg.HeartbeatTick,
            Storage: raftStore,
            MaxSizePerMsg: cfg.MaxSizePerMsg,
            MaxInflightMsgs: cfg.MaxInflightMsgs,
            Logger: cfg.Logger,
        },
        forceNewCluster: opts.ForceNewCluster,
        stopCh: make(chan struct{}),
        doneCh: make(chan struct{}),
        removeRaftCh: make(chan struct{}),
        StateDir: opts.StateDir,
        joinAddr: opts.JoinAddr,
        sendTimeout: 2 * time.Second,
        leadershipBroadcast: events.NewBroadcaster(),
    }
    n.memoryStore = store.NewMemoryStore(n)
    if opts.ClockSource == nil {
        n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
    } else {
        n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
    }
    if opts.SendTimeout != 0 {
        n.sendTimeout = opts.SendTimeout
    }
    n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
    n.wait = newWait()
    return n
}
Example 5: TestKeyManagerInvalidSubsystem
// Verify that instantiating keymanager fails if an invalid subsystem is
// passed
func TestKeyManagerInvalidSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    createCluster(t, st, "default", "default")
    config := &Config{
        ClusterName: store.DefaultClusterName,
        Keylen: DefaultKeyLen,
        RotationInterval: DefaultKeyRotationInterval,
        Subsystems: []string{"serf"},
    }
    k := New(st, config)
    assert.Nil(t, k)
}
Example 6: TestSetup
func TestSetup(t *testing.T) {
    store := store.NewMemoryStore(nil)
    assert.NotNil(t, store)
    defer store.Close()
    watch, cancel := state.Watch(store.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
    defer cancel()
    observedTask1 := SetupCluster(t, store, watch)
    assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
    assert.Equal(t, observedTask1.NodeID, "id1")
}
Example 7: TestAddNode
func TestAddNode(t *testing.T) {
    store := store.NewMemoryStore(nil)
    assert.NotNil(t, store)
    defer store.Close()
    watch, cancel := state.Watch(store.WatchQueue())
    defer cancel()
    SetupCluster(t, store, watch)
    addNode(t, store, node2)
    observedTask2 := testutils.WatchTaskCreate(t, watch)
    assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
    assert.Equal(t, observedTask2.NodeID, "id2")
}
Example 8: TestDeleteService
func TestDeleteService(t *testing.T) {
    store := store.NewMemoryStore(nil)
    assert.NotNil(t, store)
    defer store.Close()
    watch, cancel := state.Watch(store.WatchQueue())
    defer cancel()
    SetupCluster(t, store, watch)
    deleteService(t, store, service1)
    // task should be deleted
    observedTask := testutils.WatchTaskDelete(t, watch)
    assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
    assert.Equal(t, observedTask.NodeID, "id1")
}
Example 9: TestAddService
func TestAddService(t *testing.T) {
    store := store.NewMemoryStore(nil)
    assert.NotNil(t, store)
    SetupCluster(t, store)
    watch, cancel := state.Watch(store.WatchQueue())
    defer cancel()
    skipEvents(t, watch)
    addService(t, store, service2)
    observedTask := watchTaskCreate(t, watch)
    assert.Equal(t, observedTask.Status.State, api.TaskStateNew)
    assert.Equal(t, observedTask.ServiceAnnotations.Name, "name2")
    assert.True(t, observedTask.NodeID == "id1")
}
Example 10: TestDeleteNode
func TestDeleteNode(t *testing.T) {
    store := store.NewMemoryStore(nil)
    assert.NotNil(t, store)
    SetupCluster(t, store)
    watch, cancel := state.Watch(store.WatchQueue())
    defer cancel()
    skipEvents(t, watch)
    deleteNode(t, store, node1)
    // task should be set to dead
    observedTask := watchShutdownTask(t, watch)
    assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
    assert.Equal(t, observedTask.NodeID, "id1")
}
Example 11: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    defer st.Close()
    createCluster(t, st, "default", "default")
    config := &Config{
        ClusterName: store.DefaultClusterName,
        Keylen: DefaultKeyLen,
        RotationInterval: DefaultKeyRotationInterval,
        Subsystems: []string{SubsystemIPSec},
    }
    k := New(st, config)
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)
    // verify the number of keys allocated matches the keyring size.
    var (
        clusters []*api.Cluster
        err error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), keyringSize)
    key1 := clusters[0].NetworkBootstrapKeys[0].Key
    k.rotateKey(ctx)
    // verify that after a rotation oldest key has been removed from the keyring
    // also verify that all keys are for the right subsystem
    assert.Equal(t, len(k.keyRing.keys), keyringSize)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)
        match = key.Subsystem == SubsystemIPSec
        assert.True(t, match)
    }
}
Example 12: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    createCluster(t, st, "default", "default")
    config := &Config{
        ClusterName: store.DefaultClusterName,
        Keylen: DefaultKeyLen,
        RotationInterval: DefaultKeyRotationInterval,
        Subsystems: []string{SubsystemIPSec},
    }
    k := New(st, config)
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)
    // verify the first key has been allocated and updated in the
    // store
    var (
        clusters []*api.Cluster
        err error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), 1)
    key1 := clusters[0].NetworkBootstrapKeys[0].Key
    k.rotateKey(ctx)
    k.rotateKey(ctx)
    // verify that after two rotations keyring has two keys and the very
    // first key allocated has been removed
    assert.Equal(t, len(k.keyRing.keys), 2)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)
    }
}
Example 13: newTestServer
func newTestServer(t *testing.T) *testServer {
    ts := &testServer{}
    // Create a testCA just to get a usable RootCA object
    tc := cautils.NewTestCA(nil)
    tc.Stop()
    ts.Store = store.NewMemoryStore(&mockProposer{})
    assert.NotNil(t, ts.Store)
    ts.Server = NewServer(ts.Store, nil, &tc.RootCA)
    assert.NotNil(t, ts.Server)
    temp, err := ioutil.TempFile("", "test-socket")
    assert.NoError(t, err)
    assert.NoError(t, temp.Close())
    assert.NoError(t, os.Remove(temp.Name()))
    ts.tempUnixSocket = temp.Name()
    lis, err := net.Listen("unix", temp.Name())
    assert.NoError(t, err)
    ts.grpcServer = grpc.NewServer()
    api.RegisterControlServer(ts.grpcServer, ts.Server)
    go func() {
        // Serve will always return an error (even when properly stopped).
        // Explicitly ignore it.
        _ = ts.grpcServer.Serve(lis)
    }()
    conn, err := grpc.Dial(temp.Name(), grpc.WithInsecure(), grpc.WithTimeout(10*time.Second),
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout)
        }))
    assert.NoError(t, err)
    ts.clientConn = conn
    ts.Client = api.NewControlClient(conn)
    return ts
}
Example 14: TestUpdaterStopGracePeriod
func TestUpdaterStopGracePeriod(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    // Move tasks to their desired state.
    watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
    defer cancel()
    go func() {
        for {
            select {
            case e := <-watch:
                task := e.(state.EventUpdateTask).Task
                err := s.Update(func(tx store.Tx) error {
                    task = store.GetTask(tx, task.ID)
                    // Explicitly do not set task state to
                    // DEAD to trigger StopGracePeriod
                    if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateRunning {
                        task.Status.State = api.TaskStateRunning
                        return store.UpdateTask(tx, task)
                    }
                    return nil
                })
                assert.NoError(t, err)
            }
        }
    }()
    var instances uint64 = 3
    service := &api.Service{
        ID: "id1",
        Spec: api.ServiceSpec{
            Annotations: api.Annotations{
                Name: "name1",
            },
            Task: api.TaskSpec{
                Runtime: &api.TaskSpec_Container{
                    Container: &api.ContainerSpec{
                        Image: "v:1",
                        StopGracePeriod: ptypes.DurationProto(100 * time.Millisecond),
                    },
                },
            },
            Mode: &api.ServiceSpec_Replicated{
                Replicated: &api.ReplicatedService{
                    Replicas: instances,
                },
            },
        },
    }
    err := s.Update(func(tx store.Tx) error {
        assert.NoError(t, store.CreateService(tx, service))
        for i := uint64(0); i < instances; i++ {
            task := newTask(nil, service, uint64(i))
            task.Status.State = api.TaskStateRunning
            assert.NoError(t, store.CreateTask(tx, task))
        }
        return nil
    })
    assert.NoError(t, err)
    originalTasks := getRunnableServiceTasks(t, s, service)
    for _, task := range originalTasks {
        assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
    }
    before := time.Now()
    service.Spec.Task.GetContainer().Image = "v:2"
    updater := NewUpdater(s, NewRestartSupervisor(s))
    // Override the default (1 minute) to speed up the test.
    updater.restarts.taskTimeout = 100 * time.Millisecond
    updater.Run(ctx, nil, service, getRunnableServiceTasks(t, s, service))
    updatedTasks := getRunnableServiceTasks(t, s, service)
    for _, task := range updatedTasks {
        assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
    }
    after := time.Now()
    // At least 100 ms should have elapsed. Only check the lower bound,
    // because the system may be slow and it could have taken longer.
    if after.Sub(before) < 100*time.Millisecond {
        t.Fatal("stop timeout should have elapsed")
    }
}
Example 15: TestUpdater
func TestUpdater(t *testing.T) {
    ctx := context.Background()
    s := store.NewMemoryStore(nil)
    assert.NotNil(t, s)
    // Move tasks to their desired state.
    watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
    defer cancel()
    go func() {
        for {
            select {
            case e := <-watch:
                task := e.(state.EventUpdateTask).Task
                if task.Status.State == task.DesiredState {
                    continue
                }
                err := s.Update(func(tx store.Tx) error {
                    task = store.GetTask(tx, task.ID)
                    task.Status.State = task.DesiredState
                    return store.UpdateTask(tx, task)
                })
                assert.NoError(t, err)
            }
        }
    }()
    instances := 3
    cluster := &api.Cluster{
        // test cluster configuration propagation to task creation.
        Spec: api.ClusterSpec{
            Annotations: api.Annotations{
                Name: "default",
            },
        },
    }
    service := &api.Service{
        ID: "id1",
        Spec: api.ServiceSpec{
            Annotations: api.Annotations{
                Name: "name1",
            },
            Mode: &api.ServiceSpec_Replicated{
                Replicated: &api.ReplicatedService{
                    Replicas: uint64(instances),
                },
            },
            Task: api.TaskSpec{
                Runtime: &api.TaskSpec_Container{
                    Container: &api.ContainerSpec{
                        Image: "v:1",
                        // This won't apply in this test because we set the old tasks to DEAD.
                        StopGracePeriod: ptypes.DurationProto(time.Hour),
                    },
                },
            },
        },
    }
    err := s.Update(func(tx store.Tx) error {
        assert.NoError(t, store.CreateCluster(tx, cluster))
        assert.NoError(t, store.CreateService(tx, service))
        for i := 0; i < instances; i++ {
            assert.NoError(t, store.CreateTask(tx, newTask(cluster, service, uint64(i))))
        }
        return nil
    })
    assert.NoError(t, err)
    originalTasks := getRunnableServiceTasks(t, s, service)
    for _, task := range originalTasks {
        assert.Equal(t, "v:1", task.Spec.GetContainer().Image)
        assert.Nil(t, task.LogDriver) // should be left alone
    }
    service.Spec.Task.GetContainer().Image = "v:2"
    service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
    updater := NewUpdater(s, NewRestartSupervisor(s))
    updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
    updatedTasks := getRunnableServiceTasks(t, s, service)
    for _, task := range updatedTasks {
        assert.Equal(t, "v:2", task.Spec.GetContainer().Image)
        assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task
    }
    service.Spec.Task.GetContainer().Image = "v:3"
    cluster.Spec.DefaultLogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
    service.Spec.Update = &api.UpdateConfig{
        Parallelism: 1,
    }
    updater = NewUpdater(s, NewRestartSupervisor(s))
    updater.Run(ctx, cluster, service, getRunnableServiceTasks(t, s, service))
    updatedTasks = getRunnableServiceTasks(t, s, service)
    for _, task := range updatedTasks {
        assert.Equal(t, "v:3", task.Spec.GetContainer().Image)
        assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // still pick up from task
    }
    service.Spec.Task.GetContainer().Image = "v:4"
    service.Spec.Task.LogDriver = nil // use cluster default now.
//......... remainder of this example omitted .........