This article collects typical usage examples of the NewStoreManager function from the Golang package github.com/sorintlab/stolon/pkg/store. If you are wondering what exactly NewStoreManager does and how to use it, the curated code examples below should help.
The following 15 code examples of NewStoreManager are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
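Every example below repeats the same three-step pattern: join the cluster name onto common.StoreBasePath, create the backing KV store, and wrap it with NewStoreManager. Here is a minimal, self-contained sketch of that pattern, assuming the positional store.NewStore(backend, endpoints) signature used in most of the examples (the cluster name and endpoint are placeholders):

package main

import (
    "log"
    "path/filepath"

    "github.com/sorintlab/stolon/common"
    "github.com/sorintlab/stolon/pkg/store"
)

func main() {
    // Each cluster's keys live under a well-known base path in the KV store.
    storePath := filepath.Join(common.StoreBasePath, "mycluster")
    // Create the backing KV store (e.g. etcd); the endpoint is a placeholder.
    kvstore, err := store.NewStore(store.Backend("etcd"), "127.0.0.1:2379")
    if err != nil {
        log.Fatalf("cannot create store: %v", err)
    }
    // Wrap it in a StoreManager scoped to this cluster's path.
    e := store.NewStoreManager(kvstore, storePath)
    _ = e // then call e.GetClusterData(), e.GetClusterView(), ...
}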
Example 1: configReplace
func configReplace(cmd *cobra.Command, args []string) {
if crOpts.file == "" {
die("no config file provided (--file/-f option)")
}
config := []byte{}
var err error
if crOpts.file == "-" {
config, err = ioutil.ReadAll(os.Stdin)
if err != nil {
die("cannot read config file from stdin: %v", err)
}
} else {
config, err = ioutil.ReadFile(crOpts.file)
if err != nil {
die("cannot read provided config file: %v", err)
}
}
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
die("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
if err = replaceConfig(e, config); err != nil {
die("error: %v", err)
}
}
Example 2: spec
func spec(cmd *cobra.Command, args []string) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
die("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
cd, _, err := getClusterData(e)
if err != nil {
die("%v", err)
}
if cd.Cluster == nil || cd.Cluster.Spec == nil {
die("no cluster spec available")
}
specj, err := json.MarshalIndent(cd.Cluster.Spec, "", "\t")
if err != nil {
die("failed to marshall spec: %v", err)
}
stdout("%s", specj)
}
Example 3: configGet
func configGet(cmd *cobra.Command, args []string) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
die("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
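// Note: this cfg (the cluster config read from the store) shadows the package-level cfg used for clusterName/storeBackend above.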
cfg, err := getConfig(e)
if err != nil {
die("error: %v", err)
}
if cfg == nil {
stdout("config is not defined")
os.Exit(0)
}
cfgj, err := json.MarshalIndent(cfg, "", "\t")
if err != nil {
die("failed to marshall config: %v", err)
}
stdout("%s", cfgj)
}
Example 4: NewClusterChecker
func NewClusterChecker(uid string, cfg config) (*ClusterChecker, error) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Config{
Backend: store.Backend(cfg.storeBackend),
Endpoints: cfg.storeEndpoints,
CertFile: cfg.storeCertFile,
KeyFile: cfg.storeKeyFile,
CAFile: cfg.storeCAFile,
SkipTLSVerify: cfg.storeSkipTlsVerify,
})
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
return &ClusterChecker{
uid: uid,
listenAddress: cfg.listenAddress,
port: cfg.port,
stopListening: cfg.stopListening,
e: e,
endPollonProxyCh: make(chan error),
}, nil
}
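Note the two store.NewStore signatures in this collection: Example 4 above (like Examples 10 and 11) comes from a newer stolon revision where NewStore takes a store.Config struct, which adds TLS-related options (CertFile, KeyFile, CAFile, SkipTLSVerify); the remaining examples use the older positional (backend, endpoints) form.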
Example 5: testFailoverFailed
// Tests the case where a standby is elected as the new master but fails to
// actually become master; the old master then comes back and is re-elected.
func testFailoverFailed(t *testing.T, syncRepl bool) {
dir, err := ioutil.TempDir("", "stolon")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
clusterName := uuid.NewV4().String()
tks, tss, tstore := setupServers(t, clusterName, dir, 2, 1, syncRepl, false)
defer shutdown(tks, tss, tstore)
storePath := filepath.Join(common.StoreBasePath, clusterName)
sm := store.NewStoreManager(tstore.store, storePath)
master, standbys := waitMasterStandbysReady(t, sm, tks)
standby := standbys[0]
if syncRepl {
if err := WaitClusterDataSynchronousStandbys([]string{standby.uid}, sm, 30*time.Second); err != nil {
t.Fatalf("expected synchronous standby on keeper %q in cluster data", standby.uid)
}
}
if err := populate(t, master); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := write(t, master, 1, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Stop the keeper process on master, should also stop the database
t.Logf("Stopping current master keeper: %s", master.uid)
master.Stop()
// Wait for cluster data containing standby as master
if err := WaitClusterDataMaster(standby.uid, sm, 30*time.Second); err != nil {
t.Fatalf("expected master %q in cluster view", standby.uid)
}
// Stop the standby before it reads the new cluster data and promotes itself
// TODO(sgotti) this is flaky: the standby can read the data and
// publish its new state before it's stopped
t.Logf("Stopping current standby keeper: %s", standby.uid)
standby.Stop()
t.Logf("Starting previous master keeper: %s", master.uid)
master.Start()
// Wait for cluster data containing previous master as master
err = WaitClusterDataMaster(master.uid, sm, 30*time.Second)
if !syncRepl && err != nil {
t.Fatalf("expected master %q in cluster view", master.uid)
}
if syncRepl {
if err == nil {
t.Fatalf("expected timeout since with synchronous replication the old master shouldn't be elected as master")
}
}
}
Example 6: TestInitialClusterConfig
func TestInitialClusterConfig(t *testing.T) {
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
tstore, err := NewTestStore(dir)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := tstore.Start(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := tstore.WaitUp(10 * time.Second); err != nil {
t.Fatalf("error waiting on store up: %v", err)
}
defer tstore.Stop()
clusterName := uuid.NewV4().String()
storeEndpoints := fmt.Sprintf("%s:%s", tstore.listenAddress, tstore.port)
storePath := filepath.Join(common.StoreBasePath, clusterName)
kvstore, err := store.NewStore(tstore.storeBackend, storeEndpoints)
if err != nil {
t.Fatalf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
tmpFile, err := ioutil.TempFile(dir, "initial-cluster-config.json")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer tmpFile.Close()
if _, err := tmpFile.WriteString(`{ "synchronous_replication": true }`); err != nil {
t.Fatalf("unexpected err: %v", err)
}
ts, err := NewTestSentinel(dir, clusterName, tstore.storeBackend, storeEndpoints, fmt.Sprintf("--initial-cluster-config=%s", tmpFile.Name()))
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := ts.Start(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer ts.Stop()
if err := WaitClusterInitialized(e, 30*time.Second); err != nil {
t.Fatal("expected cluster initialized")
}
cv, _, err := e.GetClusterView()
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if !*cv.Config.SynchronousReplication {
t.Fatal("expected cluster config with InitWithMultipleKeepers enabled")
}
}
Example 7: TestInitialClusterSpec
func TestInitialClusterSpec(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
tstore := setupStore(t, dir)
defer tstore.Stop()
clusterName := uuid.NewV4().String()
storeEndpoints := fmt.Sprintf("%s:%s", tstore.listenAddress, tstore.port)
storePath := filepath.Join(common.StoreBasePath, clusterName)
kvstore, err := store.NewStore(tstore.storeBackend, storeEndpoints)
if err != nil {
t.Fatalf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
initialClusterSpec := &cluster.ClusterSpec{
InitMode: cluster.ClusterInitModeNew,
SleepInterval: cluster.Duration{Duration: 2 * time.Second},
FailInterval: cluster.Duration{Duration: 5 * time.Second},
ConvergenceTimeout: cluster.Duration{Duration: 30 * time.Second},
SynchronousReplication: true,
}
initialClusterSpecFile, err := writeClusterSpec(dir, initialClusterSpec)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
ts, err := NewTestSentinel(t, dir, clusterName, tstore.storeBackend, storeEndpoints, fmt.Sprintf("--initial-cluster-spec=%s", initialClusterSpecFile))
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := ts.Start(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer ts.Stop()
if err := WaitClusterPhase(e, cluster.ClusterPhaseInitializing, 60*time.Second); err != nil {
t.Fatal("expected cluster in initializing phase")
}
cd, _, err := e.GetClusterData()
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if !cd.Cluster.Spec.SynchronousReplication {
t.Fatal("expected cluster spec with SynchronousReplication enabled")
}
}
Example 8: TestMasterChangedAddress
// Tests that a master restarted with changed addresses for both keeper and
// postgres (without triggering a failover, since it restarts before being
// marked as failed) makes the standby continue to sync using the new address.
func TestMasterChangedAddress(t *testing.T) {
t.Parallel()
dir, err := ioutil.TempDir("", "stolon")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
clusterName := uuid.NewV4().String()
tks, tss, tstore := setupServers(t, clusterName, dir, 2, 1, false, false)
defer shutdown(tks, tss, tstore)
storePath := filepath.Join(common.StoreBasePath, clusterName)
sm := store.NewStoreManager(tstore.store, storePath)
master, standbys := waitMasterStandbysReady(t, sm, tks)
if err := populate(t, master); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := write(t, master, 1, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Wait standby synced with master
if err := waitLines(t, master, 1, 60*time.Second); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Restart the keeper process on master with new keeper and postgres
// addresses (in this case only the port is changed)
t.Logf("Restarting current master keeper %q with different addresses", master.uid)
master.Stop()
storeEndpoints := fmt.Sprintf("%s:%s", tstore.listenAddress, tstore.port)
master, err = NewTestKeeperWithID(t, dir, master.uid, clusterName, pgSUUsername, pgSUPassword, pgReplUsername, pgReplPassword, tstore.storeBackend, storeEndpoints)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
tks[master.uid] = master
if err := master.Start(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := master.WaitRole(common.RoleMaster, 30*time.Second); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := write(t, master, 2, 2); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Wait standby synced to master with changed address
if err := waitLines(t, standbys[0], 2, 60*time.Second); err != nil {
t.Fatalf("unexpected err: %v", err)
}
}
Example 9: testFailoverFailed
// Tests the case where a standby is elected as the new master but fails to
// actually become master; the old master then comes back and is re-elected.
func testFailoverFailed(t *testing.T, syncRepl bool) {
dir, err := ioutil.TempDir("", "stolon")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
clusterName := uuid.NewV4().String()
tks, tss, tstore := setupServers(t, clusterName, dir, 2, 1, syncRepl, false)
defer shutdown(tks, tss, tstore)
storeEndpoints := fmt.Sprintf("%s:%s", tstore.listenAddress, tstore.port)
storePath := filepath.Join(common.StoreBasePath, clusterName)
kvstore, err := store.NewStore(tstore.storeBackend, storeEndpoints)
if err != nil {
t.Fatalf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
master, standbys, err := getRoles(t, tks)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
standby := standbys[0]
if err := populate(t, master); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := write(t, master, 1, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Stop the keeper process on master, should also stop the database
t.Logf("Stopping current master keeper: %s", master.id)
master.Stop()
// Wait for cluster data containing standby as master
if err := WaitClusterDataMaster(standby.id, e, 30*time.Second); err != nil {
t.Fatalf("expected master %q in cluster view", standby.id)
}
// Stopping standby before reading the new cluster data and promoting
t.Logf("Stopping current stanby keeper: %s", master.id)
standby.Stop()
t.Logf("Starting previous master keeper: %s", master.id)
master.Start()
// Wait for cluster data containing previous master as master
if err := WaitClusterDataMaster(master.id, e, 30*time.Second); err != nil {
t.Fatalf("expected master %q in cluster view", master.id)
}
}
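Example 9 is an earlier revision of the same testFailoverFailed test shown in Example 5: keepers are identified by id rather than uid, roles come from a getRoles helper instead of waitMasterStandbysReady, and the store is built with the older positional store.NewStore form.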
Example 10: NewSentinel
func NewSentinel(uid string, cfg *config, stop chan bool, end chan bool) (*Sentinel, error) {
var initialClusterSpec *cluster.ClusterSpec
if cfg.initialClusterSpecFile != "" {
configData, err := ioutil.ReadFile(cfg.initialClusterSpecFile)
if err != nil {
return nil, fmt.Errorf("cannot read provided initial cluster config file: %v", err)
}
if err := json.Unmarshal(configData, &initialClusterSpec); err != nil {
return nil, fmt.Errorf("cannot parse provided initial cluster config: %v", err)
}
log.Debug("initialClusterSpec dump", zap.String("initialClusterSpec", spew.Sdump(initialClusterSpec)))
if err := initialClusterSpec.Validate(); err != nil {
return nil, fmt.Errorf("invalid initial cluster: %v", err)
}
}
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Config{
Backend: store.Backend(cfg.storeBackend),
Endpoints: cfg.storeEndpoints,
CertFile: cfg.storeCertFile,
KeyFile: cfg.storeKeyFile,
CAFile: cfg.storeCAFile,
SkipTLSVerify: cfg.storeSkipTlsVerify,
})
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
candidate := leadership.NewCandidate(kvstore, filepath.Join(storePath, common.SentinelLeaderKey), uid, store.MinTTL)
return &Sentinel{
uid: uid,
cfg: cfg,
e: e,
candidate: candidate,
leader: false,
initialClusterSpec: initialClusterSpec,
stop: stop,
end: end,
UIDFn: common.UID,
// This is just to choose a pseudo-random keeper, so use
// math/rand (no need for crypto/rand) without an
// initial seed.
RandFn: rand.Intn,
sleepInterval: cluster.DefaultSleepInterval,
requestTimeout: cluster.DefaultRequestTimeout,
}, nil
}
Example 11: NewStore
func NewStore() (*store.StoreManager, error) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Config{
Backend: store.Backend(cfg.storeBackend),
Endpoints: cfg.storeEndpoints,
CertFile: cfg.storeCertFile,
KeyFile: cfg.storeKeyFile,
CAFile: cfg.storeCAFile,
SkipTLSVerify: cfg.storeSkipTlsVerify,
})
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
return store.NewStoreManager(kvstore, storePath), nil
}
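The helper above just packages the recurring construction steps. A hedged sketch of how a stolonctl-style command might call it (die, stdout, and getClusterData are the same helpers assumed in Examples 1 and 2):

e, err := NewStore()
if err != nil {
    die("cannot create store: %v", err)
}
cd, _, err := getClusterData(e)
if err != nil {
    die("%v", err)
}
stdout("cluster initialized: %t", cd.Cluster != nil)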
Example 12: testFailover
func testFailover(t *testing.T, syncRepl bool) {
dir, err := ioutil.TempDir("", "stolon")
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
defer os.RemoveAll(dir)
clusterName := uuid.NewV4().String()
tks, tss, tstore := setupServers(t, clusterName, dir, 2, 1, syncRepl, false)
defer shutdown(tks, tss, tstore)
storePath := filepath.Join(common.StoreBasePath, clusterName)
sm := store.NewStoreManager(tstore.store, storePath)
master, standbys := waitMasterStandbysReady(t, sm, tks)
standby := standbys[0]
if syncRepl {
if err := WaitClusterDataSynchronousStandbys([]string{standby.uid}, sm, 30*time.Second); err != nil {
t.Fatalf("expected synchronous standby on keeper %q in cluster data", standby.uid)
}
}
if err := populate(t, master); err != nil {
t.Fatalf("unexpected err: %v", err)
}
if err := write(t, master, 1, 1); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Stop the keeper process on master, should also stop the database
t.Logf("Stopping current master keeper: %s", master.uid)
master.Stop()
if err := standby.WaitRole(common.RoleMaster, 30*time.Second); err != nil {
t.Fatalf("unexpected err: %v", err)
}
c, err := getLines(t, standby)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if c != 1 {
t.Fatalf("wrong number of lines, want: %d, got: %d", 1, c)
}
}
Example 13: NewClusterChecker
func NewClusterChecker(id string, cfg config) (*ClusterChecker, error) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
return &ClusterChecker{
id: id,
listenAddress: cfg.listenAddress,
port: cfg.port,
stopListening: cfg.stopListening,
e: e,
endPollonProxyCh: make(chan error),
}, nil
}
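Example 13 is the older-revision counterpart of Example 4: the checker is keyed by id instead of uid and the store is created with the positional store.NewStore form, without the TLS options of store.Config.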
Example 14: configPatch
func configPatch(cmd *cobra.Command, args []string) {
if len(args) > 1 {
die("too many arguments")
}
if cpOpts.file == "" && len(args) < 1 {
die("no patch provided as argument and no patch file provided (--file/-f option)")
}
if cpOpts.file != "" && len(args) == 1 {
die("only one of patch provided as argument or patch file must provided (--file/-f option)")
}
config := []byte{}
if len(args) == 1 {
config = []byte(args[0])
} else {
var err error
if cpOpts.file == "-" {
config, err = ioutil.ReadAll(os.Stdin)
if err != nil {
die("cannot read config file from stdin: %v", err)
}
} else {
config, err = ioutil.ReadFile(cpOpts.file)
if err != nil {
die("cannot read provided config file: %v", err)
}
}
}
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
die("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
if err = patchConfig(e, config); err != nil {
die("failed to patch config: %v", err)
}
}
Example 15: NewPostgresKeeper
func NewPostgresKeeper(id string, cfg config, stop chan bool, end chan error) (*PostgresKeeper, error) {
storePath := filepath.Join(common.StoreBasePath, cfg.clusterName)
kvstore, err := store.NewStore(store.Backend(cfg.storeBackend), cfg.storeEndpoints)
if err != nil {
return nil, fmt.Errorf("cannot create store: %v", err)
}
e := store.NewStoreManager(kvstore, storePath)
p := &PostgresKeeper{id: id,
dataDir: cfg.dataDir,
e: e,
listenAddress: cfg.listenAddress,
port: cfg.port,
pgListenAddress: cfg.pgListenAddress,
pgPort: cfg.pgPort,
stop: stop,
end: end,
}
return p, nil
}