This page collects typical usage examples of the Golang function FindClusters from github.com/docker/swarmkit/manager/state/store. If you are wondering what FindClusters does, or how to use it, the curated examples below may help.
The following shows 15 code examples of FindClusters, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
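As the examples below illustrate, FindClusters is always called inside a read transaction opened with View on a MemoryStore, together with a selector such as store.All, store.ByName, or store.ByIDPrefix; the results and the error are captured in variables declared outside the closure. Here is a minimal sketch of that pattern. The helper name getDefaultCluster is hypothetical and not part of swarmkit; it assumes the same fmt, api, and store packages imported throughout the examples on this page.

// getDefaultCluster is a hypothetical helper showing the common FindClusters pattern.
func getDefaultCluster(s *store.MemoryStore) (*api.Cluster, error) {
    var (
        clusters []*api.Cluster
        err      error
    )
    // View opens a read transaction; FindClusters runs inside it with a selector.
    s.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
    })
    if err != nil {
        return nil, err
    }
    if len(clusters) != 1 {
        return nil, fmt.Errorf("expected exactly one cluster, found %d", len(clusters))
    }
    return clusters[0], nil
}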
Example 1: TestClusterStorePasshphraseRotationForRootCA
func TestClusterStorePasshphraseRotationForRootCA(t *testing.T) {
    if !*integrationTests {
        t.Skip("integration test")
    }

    os.Setenv(ca.PassphraseENVVar, "password1")
    defer os.Setenv(ca.PassphraseENVVar, "")
    defer os.Setenv(ca.PassphraseENVVarPrev, "")

    mCount, aCount := 5, 15
    c := createManagersCluster(t, mCount, aCount)
    require.NoError(t, testutils.PollFunc(nil, c.pollRegister))

    // Get the leader
    leader, err := c.leader()
    assert.NoError(t, err)

    // check key material in store
    var clusters []*api.Cluster
    leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
        clusters, err = store.FindClusters(tx, store.All)
    })
    assert.NoError(t, err)
    assert.Len(t, clusters, 1, "there should be one cluster")
    assert.NotNil(t, clusters[0].RootCA.CACert)
    assert.NotNil(t, clusters[0].RootCA.CAKey)
    assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")

    firstEncryptedKey := clusters[0].RootCA.CAKey

    // Set an ENV passphrase and kill the current leader
    os.Setenv(ca.PassphraseENVVarPrev, "password1")
    os.Setenv(ca.PassphraseENVVar, "password2")
    require.NoError(t, c.destroyLeader())

    // Ensure that the cluster converges to the expected number of agents;
    // a large timeout is needed because of heartbeat times.
    require.NoError(t, testutils.PollFuncWithTimeout(nil, c.pollRegister, 30*time.Second))

    // Get the new leader
    leader, err = c.leader()
    assert.NoError(t, err)

    // check key material in store
    leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
        clusters, err = store.FindClusters(tx, store.All)
    })
    assert.NoError(t, err)
    assert.Len(t, clusters, 1, "there should be one cluster")
    assert.NotNil(t, clusters[0].RootCA.CACert)
    assert.NotNil(t, clusters[0].RootCA.CAKey)
    assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")
    assert.NotEqual(t, firstEncryptedKey, clusters[0].RootCA.CAKey)
}
Example 2: TestKeyManagerDefaultSubsystem
// Verify the key generation and rotation for default subsystems
func TestKeyManagerDefaultSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    defer st.Close()
    createCluster(t, st, "default", "default")

    k := New(st, DefaultConfig())
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)

    // verify the number of keys allocated matches the keyring size.
    var (
        clusters []*api.Cluster
        err      error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), len(k.config.Subsystems)*keyringSize)

    key1 := clusters[0].NetworkBootstrapKeys[0].Key

    k.rotateKey(ctx)

    // verify that after a rotation oldest key has been removed from the keyring
    assert.Equal(t, len(k.keyRing.keys), len(k.config.Subsystems)*keyringSize)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)
    }
}
Example 3: WaitForCluster
// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
    watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
    defer cancel()

    var clusters []*api.Cluster
    n.MemoryStore().View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
    })
    if err != nil {
        return nil, err
    }

    if len(clusters) == 1 {
        cluster = clusters[0]
    } else {
        select {
        case e := <-watch:
            cluster = e.(state.EventCreateCluster).Cluster
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }
    return cluster, nil
}
Example 4: TestClusterStoreWithPasshphraseForRootCA
func TestClusterStoreWithPasshphraseForRootCA(t *testing.T) {
    if !*integrationTests {
        t.Skip("integration test")
    }

    // Start with a passphrase from moment 0
    os.Setenv(ca.PassphraseENVVar, "password1")
    defer os.Setenv(ca.PassphraseENVVar, "")
    defer os.Setenv(ca.PassphraseENVVarPrev, "")

    mCount, aCount := 5, 15
    c := createManagersCluster(t, mCount, aCount)
    require.NoError(t, testutils.PollFunc(nil, c.pollRegister))

    // Get the leader
    leader, err := c.leader()
    assert.NoError(t, err)

    // check key material in store
    var clusters []*api.Cluster
    leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
        clusters, err = store.FindClusters(tx, store.All)
    })
    assert.NoError(t, err)
    assert.Len(t, clusters, 1, "there should be one cluster")
    assert.NotNil(t, clusters[0].RootCA.CACert)
    assert.NotNil(t, clusters[0].RootCA.CAKey)
    assert.Contains(t, string(clusters[0].RootCA.CAKey), "Proc-Type: 4,ENCRYPTED")
}
Example 5: getCurrentRaftConfig
func (n *Node) getCurrentRaftConfig() api.RaftConfig {
    raftConfig := DefaultRaftConfig()
    n.memoryStore.View(func(readTx store.ReadTx) {
        clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
        if err == nil && len(clusters) == 1 {
            raftConfig = clusters[0].Spec.Raft
        }
    })
    return raftConfig
}
Example 6: ListClusters
// ListClusters returns a list of all clusters.
func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) {
    var (
        clusters []*api.Cluster
        err      error
    )
    s.store.View(func(tx store.ReadTx) {
        switch {
        case request.Filters != nil && len(request.Filters.Names) > 0:
            clusters, err = store.FindClusters(tx, buildFilters(store.ByName, request.Filters.Names))
        case request.Filters != nil && len(request.Filters.NamePrefixes) > 0:
            clusters, err = store.FindClusters(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
        case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
            clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
        default:
            clusters, err = store.FindClusters(tx, store.All)
        }
    })
    if err != nil {
        return nil, err
    }

    if request.Filters != nil {
        clusters = filterClusters(clusters,
            func(e *api.Cluster) bool {
                return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
            },
            func(e *api.Cluster) bool {
                return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes)
            },
            func(e *api.Cluster) bool {
                return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
            },
            func(e *api.Cluster) bool {
                return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
            },
        )
    }

    // WARN: we should never return cluster here. We need to redact the private fields first.
    return &api.ListClustersResponse{
        Clusters: redactClusters(clusters),
    }, nil
}
Example 7: initCluster
func (r *ReplicatedOrchestrator) initCluster(readTx store.ReadTx) error {
    clusters, err := store.FindClusters(readTx, store.ByName("default"))
    if err != nil {
        return err
    }
    if len(clusters) != 1 {
        // we'll just pick it when it is created.
        return nil
    }
    r.cluster = clusters[0]
    return nil
}
Example 8: Run
// Run starts the keymanager; it doesn't return.
func (k *KeyManager) Run(ctx context.Context) error {
    k.mu.Lock()
    log := log.G(ctx).WithField("module", "keymanager")

    var (
        clusters []*api.Cluster
        err      error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    if err != nil {
        log.Errorf("reading cluster config failed, %v", err)
        k.mu.Unlock()
        return err
    }

    cluster := clusters[0]
    if len(cluster.NetworkBootstrapKeys) == 0 {
        for _, subsys := range k.config.Subsystems {
            for i := 0; i < keyringSize; i++ {
                k.keyRing.keys = append(k.keyRing.keys, k.allocateKey(ctx, subsys))
            }
        }
        if err := k.updateKey(cluster); err != nil {
            log.Errorf("store update failed %v", err)
        }
    } else {
        k.keyRing.lClock = cluster.EncryptionKeyLamportClock
        k.keyRing.keys = cluster.NetworkBootstrapKeys
        k.rotateKey(ctx)
    }

    ticker := time.NewTicker(k.config.RotationInterval)
    defer ticker.Stop()

    k.ctx, k.cancel = context.WithCancel(ctx)
    k.mu.Unlock()

    for {
        select {
        case <-ticker.C:
            k.rotateKey(ctx)
        case <-k.ctx.Done():
            return nil
        }
    }
}
Example 9: rotateKey
func (k *KeyManager) rotateKey(ctx context.Context) error {
    log := log.G(ctx).WithField("module", "keymanager")
    var (
        clusters []*api.Cluster
        err      error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    if err != nil {
        log.Errorf("reading cluster config failed, %v", err)
        return err
    }

    cluster := clusters[0]
    if len(cluster.NetworkBootstrapKeys) == 0 {
        panic(fmt.Errorf("no key in the cluster config"))
    }

    subsysKeys := map[string][]*api.EncryptionKey{}
    for _, key := range k.keyRing.keys {
        subsysKeys[key.Subsystem] = append(subsysKeys[key.Subsystem], key)
    }
    k.keyRing.keys = []*api.EncryptionKey{}

    // We maintain the latest key and the one before in the key ring to allow
    // agents to communicate without disruption on key change.
    for subsys, keys := range subsysKeys {
        if len(keys) == keyringSize {
            min := 0
            for i, key := range keys[1:] {
                if key.LamportTime < keys[min].LamportTime {
                    min = i
                }
            }
            keys = append(keys[0:min], keys[min+1:]...)
        }
        keys = append(keys, k.allocateKey(ctx, subsys))
        subsysKeys[subsys] = keys
    }

    for _, keys := range subsysKeys {
        k.keyRing.keys = append(k.keyRing.keys, keys...)
    }

    return k.updateKey(cluster)
}
Example 10: TestGetUnlockKey
func TestGetUnlockKey(t *testing.T) {
    t.Parallel()

    tc := testutils.NewTestCA(t)
    defer tc.Stop()

    var cluster *api.Cluster
    tc.MemoryStore.View(func(tx store.ReadTx) {
        clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName))
        require.NoError(t, err)
        cluster = clusters[0]
    })

    resp, err := tc.CAClients[0].GetUnlockKey(context.Background(), &api.GetUnlockKeyRequest{})
    require.NoError(t, err)
    require.Nil(t, resp.UnlockKey)
    require.Equal(t, cluster.Meta.Version, resp.Version)

    // Update the unlock key
    require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error {
        cluster = store.GetCluster(tx, cluster.ID)
        cluster.Spec.EncryptionConfig.AutoLockManagers = true
        cluster.UnlockKeys = []*api.EncryptionKey{{
            Subsystem: ca.ManagerRole,
            Key:       []byte("secret"),
        }}
        return store.UpdateCluster(tx, cluster)
    }))

    tc.MemoryStore.View(func(tx store.ReadTx) {
        cluster = store.GetCluster(tx, cluster.ID)
    })

    require.NoError(t, raftutils.PollFuncWithTimeout(nil, func() error {
        resp, err = tc.CAClients[0].GetUnlockKey(context.Background(), &api.GetUnlockKeyRequest{})
        if err != nil {
            return fmt.Errorf("get unlock key: %v", err)
        }
        if !bytes.Equal(resp.UnlockKey, []byte("secret")) {
            return fmt.Errorf("secret hasn't rotated yet")
        }
        if cluster.Meta.Version.Index > resp.Version.Index {
            return fmt.Errorf("hasn't updated to the right version yet")
        }
        return nil
    }, 250*time.Millisecond))
}
Example 11: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    defer st.Close()
    createCluster(t, st, "default", "default")

    config := &Config{
        ClusterName:      store.DefaultClusterName,
        Keylen:           DefaultKeyLen,
        RotationInterval: DefaultKeyRotationInterval,
        Subsystems:       []string{SubsystemIPSec},
    }
    k := New(st, config)
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)

    // verify the number of keys allocated matches the keyring size.
    var (
        clusters []*api.Cluster
        err      error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), keyringSize)

    key1 := clusters[0].NetworkBootstrapKeys[0].Key

    k.rotateKey(ctx)

    // verify that after a rotation oldest key has been removed from the keyring
    // also verify that all keys are for the right subsystem
    assert.Equal(t, len(k.keyRing.keys), keyringSize)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)

        match = key.Subsystem == SubsystemIPSec
        assert.True(t, match)
    }
}
Example 12: TestKeyManagerCustomSubsystem
// Verify the key generation and rotation for IPsec subsystem
func TestKeyManagerCustomSubsystem(t *testing.T) {
    st := store.NewMemoryStore(nil)
    createCluster(t, st, "default", "default")

    config := &Config{
        ClusterName:      store.DefaultClusterName,
        Keylen:           DefaultKeyLen,
        RotationInterval: DefaultKeyRotationInterval,
        Subsystems:       []string{SubsystemIPSec},
    }
    k := New(st, config)
    ctx := context.Background()
    go k.Run(ctx)
    time.Sleep(250 * time.Millisecond)

    // verify the first key has been allocated and updated in the
    // store
    var (
        clusters []*api.Cluster
        err      error
    )
    k.store.View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName))
    })
    assert.NoError(t, err)
    assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), 1)

    key1 := clusters[0].NetworkBootstrapKeys[0].Key

    k.rotateKey(ctx)
    k.rotateKey(ctx)

    // verify that after two rotations keyring has two keys and the very
    // first key allocated has been removed
    assert.Equal(t, len(k.keyRing.keys), 2)
    for _, key := range k.keyRing.keys {
        match := bytes.Equal(key.Key, key1)
        assert.False(t, match)
    }
}
Example 13: Run
// Run is the TaskReaper's main loop.
func (tr *TaskReaper) Run() {
    defer close(tr.doneChan)

    tr.store.View(func(readTx store.ReadTx) {
        clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
        if err == nil && len(clusters) == 1 {
            tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit
        }
    })

    timer := time.NewTimer(reaperBatchingInterval)

    for {
        select {
        case event := <-tr.watcher:
            switch v := event.(type) {
            case state.EventCreateTask:
                t := v.Task
                tr.dirty[instanceTuple{
                    instance:  t.Slot,
                    serviceID: t.ServiceID,
                    nodeID:    t.NodeID,
                }] = struct{}{}
                if len(tr.dirty) > maxDirty {
                    timer.Stop()
                    tr.tick()
                } else {
                    timer.Reset(reaperBatchingInterval)
                }
            case state.EventUpdateCluster:
                tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
            }
        case <-timer.C:
            timer.Stop()
            tr.tick()
        case <-tr.stopChan:
            timer.Stop()
            return
        }
    }
}
Example 14: Run
// Run runs the CA signer main loop.
// The CA signer can be stopped by cancelling ctx or calling Stop().
func (s *Server) Run(ctx context.Context) error {
    s.mu.Lock()
    if s.isRunning() {
        s.mu.Unlock()
        return fmt.Errorf("CA signer is already running")
    }
    s.wg.Add(1)
    s.mu.Unlock()

    defer s.wg.Done()
    logger := log.G(ctx).WithField("module", "ca")
    ctx = log.WithLogger(ctx, logger)

    // Run() should never be called twice, but just in case, we're
    // attempting to close the started channel in a safe way
    select {
    case <-s.started:
        return fmt.Errorf("CA server cannot be started more than once")
    default:
        close(s.started)
    }

    // Retrieve the channels to keep track of changes in the cluster
    // Retrieve all the currently registered nodes
    var nodes []*api.Node
    updates, cancel, err := store.ViewAndWatch(
        s.store,
        func(readTx store.ReadTx) error {
            clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
            if err != nil {
                return err
            }
            if len(clusters) != 1 {
                return fmt.Errorf("could not find cluster object")
            }
            s.updateCluster(ctx, clusters[0])

            nodes, err = store.FindNodes(readTx, store.All)
            return err
        },
        state.EventCreateNode{},
        state.EventUpdateNode{},
        state.EventUpdateCluster{},
    )

    // Do this after updateCluster has been called, so isRunning never
    // returns true without joinTokens being set correctly.
    s.mu.Lock()
    s.ctx, s.cancel = context.WithCancel(ctx)
    s.mu.Unlock()

    if err != nil {
        log.G(ctx).WithFields(logrus.Fields{
            "method": "(*Server).Run",
        }).WithError(err).Errorf("snapshot store view failed")
        return err
    }
    defer cancel()

    // We might have missed some updates if there was a leader election,
    // so let's pick up the slack.
    if err := s.reconcileNodeCertificates(ctx, nodes); err != nil {
        // We don't return here because that means the Run loop would
        // never run. Log an error instead.
        log.G(ctx).WithFields(logrus.Fields{
            "method": "(*Server).Run",
        }).WithError(err).Errorf("error attempting to reconcile certificates")
    }

    // Watch for new nodes being created, new nodes being updated, and changes
    // to the cluster
    for {
        select {
        case event := <-updates:
            switch v := event.(type) {
            case state.EventCreateNode:
                s.evaluateAndSignNodeCert(ctx, v.Node)
            case state.EventUpdateNode:
                // If this certificate is already at a final state
                // no need to evaluate and sign it.
                if !isFinalState(v.Node.Certificate.Status) {
                    s.evaluateAndSignNodeCert(ctx, v.Node)
                }
            case state.EventUpdateCluster:
                s.updateCluster(ctx, v.Cluster)
            }
        case <-ctx.Done():
            return ctx.Err()
        case <-s.ctx.Done():
            return nil
        }
    }
}
Example 15: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
func (m *Manager) Run(parent context.Context) error {
    ctx, ctxCancel := context.WithCancel(parent)
    defer ctxCancel()

    m.cancelFunc = ctxCancel

    leadershipCh, cancel := m.raftNode.SubscribeLeadership()
    defer cancel()

    go m.handleLeadershipEvents(ctx, leadershipCh)

    authorize := func(ctx context.Context, roles []string) error {
        var (
            blacklistedCerts map[string]*api.BlacklistedCertificate
            clusters         []*api.Cluster
            err              error
        )

        m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
            clusters, err = store.FindClusters(readTx, store.ByName("default"))
        })

        // Not having a cluster object yet means we can't check
        // the blacklist.
        if err == nil && len(clusters) == 1 {
            blacklistedCerts = clusters[0].BlacklistedCertificates
        }

        // Authorize the remote roles, ensure they can only be forwarded by managers
        _, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
        return err
    }

    baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig.RootCA(), m.config.PluginGetter)
    baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore())
    healthServer := health.NewHealthServer()
    localHealthServer := health.NewHealthServer()
    authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
    authenticatedResourceAPI := api.NewAuthenticatedWrapperResourceAllocatorServer(baseResourceAPI, authorize)
    authenticatedLogsServerAPI := api.NewAuthenticatedWrapperLogsServer(m.logbroker, authorize)
    authenticatedLogBrokerAPI := api.NewAuthenticatedWrapperLogBrokerServer(m.logbroker, authorize)
    authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.dispatcher, authorize)
    authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
    authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
    authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.raftNode, authorize)
    authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
    authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.raftNode, authorize)

    proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
    proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
    proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
    proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
    proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)
    proxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(authenticatedLogBrokerAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo)

    // The following local proxies are only wired up to receive requests
    // from a trusted local socket, and these requests don't use TLS,
    // therefore the requests they handle locally should bypass
    // authorization. When requests are proxied from these servers, they
    // are sent as requests from this manager rather than forwarded
    // requests (it has no TLS information to put in the metadata map).
    forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
    handleRequestLocally := func(ctx context.Context) (context.Context, error) {
        remoteAddr := "127.0.0.1:0"

        m.addrMu.Lock()
        if m.config.RemoteAPI != nil {
            if m.config.RemoteAPI.AdvertiseAddr != "" {
                remoteAddr = m.config.RemoteAPI.AdvertiseAddr
            } else {
                remoteAddr = m.config.RemoteAPI.ListenAddr
            }
        }
        m.addrMu.Unlock()

        creds := m.config.SecurityConfig.ClientTLSCreds
        nodeInfo := ca.RemoteNodeInfo{
            Roles:        []string{creds.Role()},
            Organization: creds.Organization(),
            NodeID:       creds.NodeID(),
            RemoteAddr:   remoteAddr,
        }

        return context.WithValue(ctx, ca.LocalRequestKey, nodeInfo), nil
    }
    localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyLogsAPI := api.NewRaftProxyLogsServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyDispatcherAPI := api.NewRaftProxyDispatcherServer(m.dispatcher, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyCAAPI := api.NewRaftProxyCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyNodeCAAPI := api.NewRaftProxyNodeCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(baseResourceAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest)
    localProxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest)

    // Everything registered on m.server should be an authenticated
    // ... (the rest of this example is omitted) ...