

Golang logutil.NewMemoryLogger Function Code Examples

This article collects typical usage examples of the Golang function github.com/youtube/vitess/go/vt/logutil.NewMemoryLogger. If you are wondering what NewMemoryLogger is for, how it is used in practice, or what real-world calls look like, the curated examples below should help.


A total of 13 code examples of the NewMemoryLogger function are shown below, sorted by popularity by default.
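Before the collected examples, here is a minimal sketch (written for this article, not taken from Vitess) of the pattern they all rely on: NewMemoryLogger buffers log events in memory, and String() renders the buffer later, for example on a status page or in an RPC response.

package main

import (
	"fmt"

	"github.com/youtube/vitess/go/vt/logutil"
)

func main() {
	// Create an in-memory logger; nothing is printed yet.
	ml := logutil.NewMemoryLogger()

	// Log through the usual Logger methods.
	ml.Infof("step %d done", 1)
	ml.Warningf("something to look at: %v", "disk usage")

	// Render the buffered events as text, e.g. to embed in a web page.
	fmt.Print(ml.String())
}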

Example 1: ExecuteVtworkerCommand

// ExecuteVtworkerCommand is part of the pb.VtworkerServer interface
func (s *VtworkerServer) ExecuteVtworkerCommand(args *pb.ExecuteVtworkerCommandRequest, stream pbs.Vtworker_ExecuteVtworkerCommandServer) (err error) {
	// Please note that this panic handler catches only panics occurring in the code below.
	// The actual execution of the vtworker command takes place in a new goroutine
	// (started in Instance.setAndStartWorker()) which has its own panic handler.
	defer servenv.HandlePanic("vtworker", &err)

	// create a logger, send the result back to the caller
	logstream := logutil.NewChannelLogger(10)
	logger := logutil.NewTeeLogger(logstream, logutil.NewMemoryLogger())

	// send logs to the caller
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		for e := range logstream {
			// Note we don't interrupt the loop here, as
			// we still need to flush and finish the
			// command, even if the channel to the client
			// has been broken. We'll just keep trying.
			stream.Send(&pb.ExecuteVtworkerCommandResponse{
				Event: &pbl.Event{
					Time: &pbl.Time{
						Seconds:     e.Time.Unix(),
						Nanoseconds: int32(e.Time.Nanosecond()),
					},
					Level: pbl.Level(e.Level),
					File:  e.File,
					Line:  int64(e.Line),
					Value: e.Value,
				},
			})
		}
		wg.Done()
	}()

	// create the wrangler
	wr := s.wi.CreateWrangler(logger)

	// execute the command
	if len(args.Args) >= 1 && args.Args[0] == "Reset" {
		err = s.wi.Reset()
	} else {
		// Make sure we use the global "err" variable and do not redeclare it in this scope.
		var worker worker.Worker
		var done chan struct{}
		worker, done, err = s.wi.RunCommand(args.Args, wr, false /*runFromCli*/)
		if err == nil {
			err = s.wi.WaitForCommand(worker, done)
		}
	}

	// close the log channel, and wait for them all to be sent
	close(logstream)
	wg.Wait()

	return err
}
Developer: Drooids | Project: vitess | Lines: 58 | Source: server.go
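Examples 1 and 5 tee the output: a MemoryLogger keeps the full history while a ChannelLogger streams each event back to the gRPC caller, and NewTeeLogger duplicates every event to both. A reduced sketch of just the tee part, assuming the same logutil import as the example above:

// teeDemo duplicates every event: one copy goes to the console immediately,
// one copy is buffered in memory for later display.
func teeDemo() string {
	memory := logutil.NewMemoryLogger()
	tee := logutil.NewTeeLogger(memory, logutil.NewConsoleLogger())

	tee.Infof("printed to the console now, and kept in memory")

	// The buffered copy can be rendered later, e.g. on a status page.
	return memory.String()
}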

Example 2: Instantiate

// Instantiate is part of the workflow.Factory interface.
func (f *SleepWorkflowFactory) Instantiate(w *workflowpb.Workflow) (Workflow, error) {
	data := &SleepWorkflowData{}
	if err := json.Unmarshal(w.Data, data); err != nil {
		return nil, err
	}
	return &SleepWorkflow{
		data:   data,
		logger: logutil.NewMemoryLogger(),
	}, nil
}
Developer: dumbunny | Project: vitess | Lines: 11 | Source: sleep_workflow.go

Example 3: setAndStartWorker

// setAndStartWorker will set the current worker.
// We always log to both memory logger (for display on the web) and
// console logger (for records / display of command line worker).
func (wi *Instance) setAndStartWorker(wrk Worker, wr *wrangler.Wrangler) (chan struct{}, error) {
	wi.currentWorkerMutex.Lock()
	defer wi.currentWorkerMutex.Unlock()
	if wi.currentWorker != nil {
		return nil, fmt.Errorf("A worker is already in progress: %v", wi.currentWorker)
	}

	wi.currentWorker = wrk
	wi.currentMemoryLogger = logutil.NewMemoryLogger()
	wi.currentContext, wi.currentCancelFunc = context.WithCancel(wi.backgroundContext)
	wi.lastRunError = nil
	done := make(chan struct{})
	wranglerLogger := wr.Logger()
	if wr == wi.wr {
		// If it's the default wrangler, do not reuse its logger because it may have been set before.
		// Reusing it would result in endless recursion.
		wranglerLogger = logutil.NewConsoleLogger()
	}
	wr.SetLogger(logutil.NewTeeLogger(wi.currentMemoryLogger, wranglerLogger))

	// one go function runs the worker, changes state when done
	go func() {
		log.Infof("Starting worker...")
		var err error

		// Catch all panics and always save the execution state at the end.
		defer func() {
			// The recovery code is a copy of servenv.HandlePanic().
			if x := recover(); x != nil {
				err = fmt.Errorf("uncaught %v panic: %v", "vtworker", x)
			}

			wi.currentWorkerMutex.Lock()
			wi.currentContext = nil
			wi.currentCancelFunc = nil
			wi.lastRunError = err
			wi.currentWorkerMutex.Unlock()
			close(done)
		}()

		// run will take a long time
		err = wrk.Run(wi.currentContext)
	}()

	return done, nil
}
Developer: littleyang | Project: vitess | Lines: 49 | Source: instance.go

Example 4: TestShard

func TestShard(t *testing.T) {
	cell := "cell1"
	keyspace := "ks1"
	shard := "sh1"
	ctx := context.Background()
	ts := zktestserver.New(t, []string{cell})

	// Create a Keyspace / Shard
	if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil {
		t.Fatalf("CreateKeyspace failed: %v", err)
	}
	if err := ts.CreateShard(ctx, keyspace, shard); err != nil {
		t.Fatalf("CreateShard failed: %v", err)
	}

	// Hack the zookeeper backend to create an error for GetShard.
	zconn := ts.Impl.(*zktestserver.TestServer).Impl.(*zktopo.Server).GetZConn()
	if _, err := zconn.Set(path.Join(zktopo.GlobalKeyspacesPath, keyspace, "shards", shard), []byte{}, -1); err != nil {
		t.Fatalf("failed to hack the shard: %v", err)
	}

	// Create the workflow, run the validator.
	w := &Workflow{
		logger: logutil.NewMemoryLogger(),
	}
	sv := &ShardValidator{}
	if err := sv.Audit(ctx, ts, w); err != nil {
		t.Fatalf("Audit failed: %v", err)
	}
	if len(w.fixers) != 1 {
		t.Fatalf("fixer not added: %v", w.fixers)
	}
	if !strings.Contains(w.fixers[0].message, "bad shard data") {
		t.Errorf("bad message: %v ", w.fixers[0].message)
	}

	// Run Delete, make sure the entry is removed.
	if err := w.fixers[0].fixer.Action(ctx, "Delete"); err != nil {
		t.Fatalf("Action failed: %v", err)
	}
	shards, err := ts.GetShardNames(ctx, keyspace)
	if err != nil || len(shards) != 0 {
		t.Errorf("bad GetShardNames output: %v %v ", shards, err)
	}
}
Developer: dumbunny | Project: vitess | Lines: 45 | Source: shard_test.go

Example 5: ExecuteVtworkerCommand

// ExecuteVtworkerCommand is part of the pb.VtworkerServer interface
func (s *VtworkerServer) ExecuteVtworkerCommand(args *pb.ExecuteVtworkerCommandRequest, stream pbs.Vtworker_ExecuteVtworkerCommandServer) (err error) {
	// Please note that this panic handler catches only panics occurring in the code below.
	// The actual execution of the vtworker command takes place in a new goroutine
	// (started in Instance.setAndStartWorker()) which has its own panic handler.
	defer servenv.HandlePanic("vtworker", &err)

	// create a logger, send the result back to the caller
	logstream := logutil.NewChannelLogger(10)
	logger := logutil.NewTeeLogger(logstream, logutil.NewMemoryLogger())

	// send logs to the caller
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		for e := range logstream {
			// Note we don't interrupt the loop here, as
			// we still need to flush and finish the
			// command, even if the channel to the client
			// has been broken. We'll just keep trying.
			stream.Send(&pb.ExecuteVtworkerCommandResponse{
				Event: e,
			})
		}
		wg.Done()
	}()

	// create the wrangler
	wr := s.wi.CreateWrangler(logger)

	// execute the command
	worker, done, err := s.wi.RunCommand(args.Args, wr, false /*runFromCli*/)
	if err == nil && worker != nil && done != nil {
		err = s.wi.WaitForCommand(worker, done)
	}

	// close the log channel, and wait for them all to be sent
	close(logstream)
	wg.Wait()

	return err
}
Developer: zhangzzl | Project: vitess | Lines: 42 | Source: server.go

Example 6: setAndStartWorker

// setAndStartWorker will set the current worker.
// We always log to both memory logger (for display on the web) and
// console logger (for records / display of command line worker).
func setAndStartWorker(wrk worker.Worker) (chan struct{}, error) {
	currentWorkerMutex.Lock()
	defer currentWorkerMutex.Unlock()
	if currentWorker != nil {
		return nil, fmt.Errorf("A worker is already in progress: %v", currentWorker)
	}

	currentWorker = wrk
	currentMemoryLogger = logutil.NewMemoryLogger()
	currentDone = make(chan struct{})
	wr.SetLogger(logutil.NewTeeLogger(currentMemoryLogger, logutil.NewConsoleLogger()))

	// one go function runs the worker, closes 'done' when done
	go func() {
		log.Infof("Starting worker...")
		wrk.Run()
		close(currentDone)
	}()

	return currentDone, nil
}
Developer: nangong92t | Project: go_src | Lines: 24 | Source: vtworker.go

Example 7: TestRebuildShard

func TestRebuildShard(t *testing.T) {
	ctx := context.Background()
	cells := []string{"test_cell"}
	logger := logutil.NewMemoryLogger()

	// Set up topology.
	ts := zktopo.NewTestServer(t, cells)
	si, err := GetOrCreateShard(ctx, ts, testKeyspace, testShard)
	if err != nil {
		t.Fatalf("GetOrCreateShard: %v", err)
	}
	si.Cells = append(si.Cells, cells[0])
	if err := topo.UpdateShard(ctx, ts, si); err != nil {
		t.Fatalf("UpdateShard: %v", err)
	}

	masterInfo := addTablet(ctx, t, ts, 1, cells[0], topo.TYPE_MASTER)
	replicaInfo := addTablet(ctx, t, ts, 2, cells[0], topo.TYPE_REPLICA)

	// Do an initial rebuild.
	if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Check initial state.
	ep, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}
	ep, _, err = ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}

	// Make a change.
	masterInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ctx, ts, masterInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Make another change.
	replicaInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ctx, ts, replicaInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Check that the rebuild picked up both changes.
	if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("first change wasn't picked up by second rebuild")
	}
	if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("second change was overwritten by first rebuild finishing late")
	}
}
Developer: haoqoo | Project: vitess | Lines: 66 | Source: rebuild_test.go

Example 8: TestFixShardReplication

func TestFixShardReplication(t *testing.T) {
	cell := "cell1"
	keyspace := "ks1"
	shard := "shard1"
	ctx := context.Background()
	ts := zktestserver.New(t, []string{cell})

	// Create a tablet.
	alias := &topodatapb.TabletAlias{
		Cell: cell,
		Uid:  1,
	}
	tablet := &topodatapb.Tablet{
		Keyspace: keyspace,
		Shard:    shard,
		Alias:    alias,
	}
	if err := ts.CreateTablet(ctx, tablet); err != nil {
		t.Fatalf("CreateTablet failed: %v", err)
	}

	// Make sure it's in the ShardReplication.
	sri, err := ts.GetShardReplication(ctx, cell, keyspace, shard)
	if err != nil {
		t.Fatalf("GetShardReplication failed: %v", err)
	}
	if len(sri.Nodes) != 1 || !proto.Equal(sri.Nodes[0].TabletAlias, alias) {
		t.Errorf("Missing or wrong alias in ShardReplication: %v", sri)
	}

	// Run FixShardReplication, should do nothing.
	logger := logutil.NewMemoryLogger()
	if err := topo.FixShardReplication(ctx, ts, logger, cell, keyspace, shard); err != nil {
		t.Errorf("FixShardReplication failed: %v", err)
	}
	sri, err = ts.GetShardReplication(ctx, cell, keyspace, shard)
	if err != nil {
		t.Fatalf("GetShardReplication failed: %v", err)
	}
	if len(sri.Nodes) != 1 || !proto.Equal(sri.Nodes[0].TabletAlias, alias) {
		t.Errorf("Missing or wrong alias in ShardReplication: %v", sri)
	}
	if !strings.Contains(logger.String(), "All entries in replication graph are valid") {
		t.Errorf("Wrong log: %v", logger.String())
	}

	// Add a bogus entry: a non-existing tablet.
	if err := ts.UpdateShardReplicationFields(ctx, cell, keyspace, shard, func(sr *topodatapb.ShardReplication) error {
		sr.Nodes = append(sr.Nodes, &topodatapb.ShardReplication_Node{
			TabletAlias: &topodatapb.TabletAlias{
				Cell: cell,
				Uid:  2,
			},
		})
		return nil
	}); err != nil {
		t.Fatalf("UpdateShardReplicationFields failed: %v", err)
	}
	logger.Clear()
	if err := topo.FixShardReplication(ctx, ts, logger, cell, keyspace, shard); err != nil {
		t.Errorf("FixShardReplication failed: %v", err)
	}
	sri, err = ts.GetShardReplication(ctx, cell, keyspace, shard)
	if err != nil {
		t.Fatalf("GetShardReplication failed: %v", err)
	}
	if len(sri.Nodes) != 1 || !proto.Equal(sri.Nodes[0].TabletAlias, alias) {
		t.Errorf("Missing or wrong alias in ShardReplication: %v", sri)
	}
	if !strings.Contains(logger.String(), "but does not exist, removing it") {
		t.Errorf("Wrong log: %v", logger.String())
	}

	// Add a bogus entry: a tablet with the wrong keyspace.
	if err := ts.CreateTablet(ctx, &topodatapb.Tablet{
		Keyspace: "other" + keyspace,
		Shard:    shard,
		Alias: &topodatapb.TabletAlias{
			Cell: cell,
			Uid:  3,
		},
	}); err != nil {
		t.Fatalf("CreateTablet failed: %v", err)
	}
	if err := ts.UpdateShardReplicationFields(ctx, cell, keyspace, shard, func(sr *topodatapb.ShardReplication) error {
		sr.Nodes = append(sr.Nodes, &topodatapb.ShardReplication_Node{
			TabletAlias: &topodatapb.TabletAlias{
				Cell: cell,
				Uid:  3,
			},
		})
		return nil
	}); err != nil {
		t.Fatalf("UpdateShardReplicationFields failed: %v", err)
	}
	logger.Clear()
	if err := topo.FixShardReplication(ctx, ts, logger, cell, keyspace, shard); err != nil {
		t.Errorf("FixShardReplication failed: %v", err)
	}
	sri, err = ts.GetShardReplication(ctx, cell, keyspace, shard)
//......... remainder of this example omitted .........
Developer: dumbunny | Project: vitess | Lines: 101 | Source: replication_test.go
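Example 8 also shows the usual way a MemoryLogger is inspected in tests: run an operation, assert on logger.String(), then logger.Clear() before the next phase. A hedged helper capturing that loop (checkPhase is hypothetical, not part of Vitess; it assumes the testing, strings, and logutil imports already used above):

// checkPhase runs one phase of a test, asserts that the expected message was
// logged, and clears the buffer so the next phase starts from empty output.
func checkPhase(t *testing.T, logger *logutil.MemoryLogger, run func(), want string) {
	run()
	if got := logger.String(); !strings.Contains(got, want) {
		t.Errorf("wrong log: got %q, want substring %q", got, want)
	}
	logger.Clear()
}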

Example 9: setAndStartWorker

// setAndStartWorker will set the current worker.
// We always log to both memory logger (for display on the web) and
// console logger (for records / display of command line worker).
func (wi *Instance) setAndStartWorker(wrk Worker, wr *wrangler.Wrangler) (chan struct{}, error) {
	wi.currentWorkerMutex.Lock()
	defer wi.currentWorkerMutex.Unlock()

	if wi.currentContext != nil {
		return nil, vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR,
			fmt.Errorf("A worker job is already in progress: %v", wi.currentWorker))
	}

	if wi.currentWorker != nil {
		// During the grace period, we answer with a retryable error.
		const gracePeriod = 1 * time.Minute
		gracePeriodEnd := time.Now().Add(gracePeriod)
		if wi.lastRunStopTime.Before(gracePeriodEnd) {
			return nil, vterrors.FromError(vtrpcpb.ErrorCode_TRANSIENT_ERROR,
				fmt.Errorf("A worker job was recently stopped (%f seconds ago): %v",
					time.Now().Sub(wi.lastRunStopTime).Seconds(),
					wi.currentWorker))
		}

		// QUERY_NOT_SERVED = FailedPrecondition => manual resolution required.
		return nil, vterrors.FromError(vtrpcpb.ErrorCode_QUERY_NOT_SERVED,
			fmt.Errorf("The worker job was stopped %.1f minutes ago, but not reset. You have to reset it manually. Job: %v",
				time.Now().Sub(wi.lastRunStopTime).Minutes(),
				wi.currentWorker))
	}

	wi.currentWorker = wrk
	wi.currentMemoryLogger = logutil.NewMemoryLogger()
	wi.currentContext, wi.currentCancelFunc = context.WithCancel(wi.backgroundContext)
	wi.lastRunError = nil
	wi.lastRunStopTime = time.Unix(0, 0)
	done := make(chan struct{})
	wranglerLogger := wr.Logger()
	if wr == wi.wr {
		// If it's the default wrangler, do not reuse its logger because it may have been set before.
		// Reusing it would result in endless recursion.
		wranglerLogger = logutil.NewConsoleLogger()
	}
	wr.SetLogger(logutil.NewTeeLogger(wi.currentMemoryLogger, wranglerLogger))

	// one go function runs the worker, changes state when done
	go func() {
		log.Infof("Starting worker...")
		var err error

		// Catch all panics and always save the execution state at the end.
		defer func() {
			// The recovery code is a copy of servenv.HandlePanic().
			if x := recover(); x != nil {
				log.Errorf("uncaught vtworker panic: %v\n%s", x, tb.Stack(4))
				err = fmt.Errorf("uncaught vtworker panic: %v", x)
			}

			wi.currentWorkerMutex.Lock()
			wi.currentContext = nil
			wi.currentCancelFunc = nil
			wi.lastRunError = err
			wi.lastRunStopTime = time.Now()
			wi.currentWorkerMutex.Unlock()
			close(done)
		}()

		// run will take a long time
		err = wrk.Run(wi.currentContext)
	}()

	return done, nil
}
Developer: CowLeo | Project: vitess | Lines: 72 | Source: instance.go

Example 10: initAPI


//......... beginning of this example omitted .........
		}
		tabletStat, err := realtimeStats.tabletStats(&tabletAlias)
		if err != nil {
			return nil, fmt.Errorf("could not get tabletStats: %v", err)
		}
		return tabletStat, nil
	})

	handleCollection("topology_info", func(r *http.Request) (interface{}, error) {
		targetPath := getItemPath(r.URL.Path)

		// Retrieving topology information (keyspaces, cells, and types) based on query params.
		if targetPath == "" {
			if err := r.ParseForm(); err != nil {
				return nil, err
			}
			keyspace := r.FormValue("keyspace")
			cell := r.FormValue("cell")

			// Setting default values if none was specified in the query params.
			if keyspace == "" {
				keyspace = "all"
			}
			if cell == "" {
				cell = "all"
			}

			if realtimeStats == nil {
				return nil, fmt.Errorf("realtimeStats not initialized")
			}

			return realtimeStats.topologyInfo(keyspace, cell), nil
		}
		return nil, fmt.Errorf("invalid target path: %q  expected path: ?keyspace=<keyspace>&cell=<cell>", targetPath)
	})

	// Vtctl Command
	http.HandleFunc(apiPrefix+"vtctl/", func(w http.ResponseWriter, r *http.Request) {
		if err := acl.CheckAccessHTTP(r, acl.ADMIN); err != nil {
			httpErrorf(w, r, "Access denied")
			return
		}
		var args []string
		resp := struct {
			Error  string
			Output string
		}{}
		if err := unmarshalRequest(r, &args); err != nil {
			httpErrorf(w, r, "can't unmarshal request: %v", err)
			return
		}

		logstream := logutil.NewMemoryLogger()

		wr := wrangler.New(logstream, ts, tmClient)
		// TODO(enisoc): Context for run command should be request-scoped.
		err := vtctl.RunCommand(ctx, wr, args)
		if err != nil {
			resp.Error = err.Error()
		}
		resp.Output = logstream.String()
		data, err := json.MarshalIndent(resp, "", "  ")
		if err != nil {
			httpErrorf(w, r, "json error: %v", err)
			return
		}
		w.Header().Set("Content-Type", jsonContentType)
		w.Write(data)
	})

	// Schema Change
	http.HandleFunc(apiPrefix+"schema/apply", func(w http.ResponseWriter, r *http.Request) {
		if err := acl.CheckAccessHTTP(r, acl.ADMIN); err != nil {
			httpErrorf(w, r, "Access denied")
			return
		}
		req := struct {
			Keyspace, SQL       string
			SlaveTimeoutSeconds int
		}{}
		if err := unmarshalRequest(r, &req); err != nil {
			httpErrorf(w, r, "can't unmarshal request: %v", err)
			return
		}
		if req.SlaveTimeoutSeconds <= 0 {
			req.SlaveTimeoutSeconds = 10
		}

		logger := logutil.NewCallbackLogger(func(ev *logutilpb.Event) {
			w.Write([]byte(logutil.EventString(ev)))
		})
		wr := wrangler.New(logger, ts, tmClient)

		executor := schemamanager.NewTabletExecutor(
			wr, time.Duration(req.SlaveTimeoutSeconds)*time.Second)

		schemamanager.Run(ctx,
			schemamanager.NewUIController(req.SQL, req.Keyspace, w), executor)
	})
}
Developer: erzel | Project: vitess | Lines: 101 | Source: api.go
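Example 10 follows a "run a command, return its log as the response" pattern: a fresh MemoryLogger is passed to wrangler.New, vtctl.RunCommand writes into it, and logstream.String() becomes the response body. A stripped-down sketch of the same idea, with runOperation standing in (hypothetically) for the wrangler/vtctl plumbing:

// runAndCapture executes an operation that logs through a logutil.Logger and
// returns the accumulated log text together with the operation's error.
func runAndCapture(runOperation func(logutil.Logger) error) (string, error) {
	ml := logutil.NewMemoryLogger()
	err := runOperation(ml)
	return ml.String(), err
}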

Example 11: TestRebuildShardRace

func TestRebuildShardRace(t *testing.T) {
	cells := []string{"test_cell"}
	logger := logutil.NewMemoryLogger()
	timeout := 10 * time.Second
	interrupted := make(chan struct{})

	// Set up topology.
	ts := zktopo.NewTestServer(t, cells)
	f := faketopo.New(t, logger, ts, cells)
	defer f.TearDown()

	keyspace := faketopo.TestKeyspace
	shard := faketopo.TestShard
	master := f.AddTablet(1, "test_cell", topo.TYPE_MASTER, nil)
	f.AddTablet(2, "test_cell", topo.TYPE_REPLICA, master)

	// Do an initial rebuild.
	if _, err := RebuildShard(logger, f.Topo, keyspace, shard, cells, timeout, interrupted); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Check initial state.
	ep, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_MASTER)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}
	ep, err = ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_REPLICA)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}

	// Install a hook that hands out locks out of order to simulate a race.
	trigger := make(chan struct{})
	stalled := make(chan struct{})
	done := make(chan struct{})
	wait := make(chan bool, 2)
	wait <- true  // first guy waits for trigger
	wait <- false // second guy doesn't wait
	ts.HookLockSrvShardForAction = func() {
		if <-wait {
			close(stalled)
			<-trigger
		}
	}

	// Make a change and start a rebuild that will stall when it tries to get
	// the SrvShard lock.
	masterInfo := f.GetTablet(1)
	masterInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ts, masterInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	go func() {
		if _, err := RebuildShard(logger, f.Topo, keyspace, shard, cells, timeout, interrupted); err != nil {
			t.Fatalf("RebuildShard: %v", err)
		}
		close(done)
	}()

	// Wait for first rebuild to stall.
	<-stalled

	// While the first rebuild is stalled, make another change and start a rebuild
	// that doesn't stall.
	replicaInfo := f.GetTablet(2)
	replicaInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ts, replicaInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	if _, err := RebuildShard(logger, f.Topo, keyspace, shard, cells, timeout, interrupted); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Now that the second rebuild is done, un-stall the first rebuild and wait
	// for it to finish.
	close(trigger)
	<-done

	// Check that the rebuild picked up both changes.
	if _, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("first change wasn't picked up by second rebuild")
	}
	if _, err := ts.GetEndPoints(cells[0], keyspace, shard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("second change was overwritten by first rebuild finishing late")
	}
}
Developer: plobsing | Project: vitess | Lines: 92 | Source: rebuild_test.go

Example 12: TestRebuildShardRace

func TestRebuildShardRace(t *testing.T) {
	ctx := context.Background()
	cells := []string{"test_cell"}
	logger := logutil.NewMemoryLogger()

	// Set up topology.
	ts := zktopo.NewTestServer(t, cells)
	si, err := GetOrCreateShard(ctx, ts, testKeyspace, testShard)
	if err != nil {
		t.Fatalf("GetOrCreateShard: %v", err)
	}
	si.Cells = append(si.Cells, cells[0])
	if err := topo.UpdateShard(ctx, ts, si); err != nil {
		t.Fatalf("UpdateShard: %v", err)
	}

	masterInfo := addTablet(ctx, t, ts, 1, cells[0], topo.TYPE_MASTER)
	replicaInfo := addTablet(ctx, t, ts, 2, cells[0], topo.TYPE_REPLICA)

	// Do an initial rebuild.
	if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Check initial state.
	ep, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}
	ep, _, err = ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA)
	if err != nil {
		t.Fatalf("GetEndPoints: %v", err)
	}
	if got, want := len(ep.Entries), 1; got != want {
		t.Fatalf("len(Entries) = %v, want %v", got, want)
	}

	// Install a hook that hands out locks out of order to simulate a race.
	trigger := make(chan struct{})
	stalled := make(chan struct{})
	done := make(chan struct{})
	wait := make(chan bool, 2)
	wait <- true  // first guy waits for trigger
	wait <- false // second guy doesn't wait
	ts.HookLockSrvShardForAction = func() {
		if <-wait {
			close(stalled)
			<-trigger
		}
	}

	// Make a change and start a rebuild that will stall when it
	// tries to get the SrvShard lock.
	masterInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ctx, ts, masterInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	go func() {
		if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
			t.Fatalf("RebuildShard: %v", err)
		}
		close(done)
	}()

	// Wait for first rebuild to stall.
	<-stalled

	// While the first rebuild is stalled, make another change and start a rebuild
	// that doesn't stall.
	replicaInfo.Type = topo.TYPE_SPARE
	if err := topo.UpdateTablet(ctx, ts, replicaInfo); err != nil {
		t.Fatalf("UpdateTablet: %v", err)
	}
	if _, err := RebuildShard(ctx, logger, ts, testKeyspace, testShard, cells, time.Minute); err != nil {
		t.Fatalf("RebuildShard: %v", err)
	}

	// Now that the second rebuild is done, un-stall the first rebuild and wait
	// for it to finish.
	close(trigger)
	<-done

	// Check that the rebuild picked up both changes.
	if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_MASTER); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("first change wasn't picked up by second rebuild")
	}
	if _, _, err := ts.GetEndPoints(ctx, cells[0], testKeyspace, testShard, topo.TYPE_REPLICA); err == nil || !strings.Contains(err.Error(), "node doesn't exist") {
		t.Errorf("second change was overwritten by first rebuild finishing late")
	}
}
Developer: pranjal5215 | Project: vitess | Lines: 93 | Source: rebuild_test.go

Example 13: Instantiate

// Instantiate is part of the workflow.Factory interface.
func (f *WorkflowFactory) Instantiate(w *workflowpb.Workflow) (workflow.Workflow, error) {
	return &Workflow{
		logger: logutil.NewMemoryLogger(),
	}, nil
}
Developer: dumbunny | Project: vitess | Lines: 6 | Source: validator.go


Note: The github.com/youtube/vitess/go/vt/logutil.NewMemoryLogger function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; refer to each project's license before distributing or reusing the code. Do not repost without permission.