This article collects typical usage examples of the runtime.GOMAXPROCS function in Golang. If you have been wondering how exactly GOMAXPROCS is used in Go, how to call it, or what it looks like in real code, the hand-picked examples below may help.
A total of 15 code examples of the GOMAXPROCS function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang code samples.
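Most of the examples below rely on the same idiom: runtime.GOMAXPROCS(n) sets the maximum number of OS threads that may execute Go code simultaneously and returns the previous setting, so tests and benchmarks can change it temporarily and restore it afterwards. Here is a minimal, self-contained sketch of that pattern (the value 4 and the function names are only illustrative):

package main

import (
    "fmt"
    "runtime"
)

// withFourProcs raises GOMAXPROCS for the duration of the call only.
func withFourProcs() {
    // GOMAXPROCS returns the previous value, so this one-liner both applies
    // the new setting and schedules restoration of the old one.
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
    // An argument below 1 leaves the setting unchanged and only reports it.
    fmt.Println("inside:", runtime.GOMAXPROCS(0)) // 4
}

func main() {
    withFourProcs()
    fmt.Println("after:", runtime.GOMAXPROCS(0)) // previous value restored
}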
Example 1: BenchmarkWriterAwaitMany
func BenchmarkWriterAwaitMany(b *testing.B) {
    defer time.Sleep(DisruptorCleanup)
    runtime.GOMAXPROCS(2)
    defer runtime.GOMAXPROCS(1)
    controller := disruptor.
        Configure(RingBufferSize).
        WithConsumerGroup(SampleConsumer{}).
        Build()
    controller.Start()
    defer controller.Stop()
    writer := controller.Writer()
    iterations := int64(b.N)
    sequence := disruptor.InitialSequenceValue
    b.ReportAllocs()
    b.ResetTimer()
    for sequence < iterations {
        sequence += ReserveMany
        writer.Await(sequence)
        for i := sequence - ReserveManyDelta; i <= sequence; i++ {
            ringBuffer[i&RingBufferMask] = i
        }
        writer.Commit(sequence, sequence)
    }
    b.StopTimer()
}
Example 2: init
func init() {
    if cpu := runtime.NumCPU(); cpu == 1 {
        runtime.GOMAXPROCS(2)
    } else {
        runtime.GOMAXPROCS(cpu)
    }
}
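Since Go 1.5 the runtime already defaults GOMAXPROCS to runtime.NumCPU(), so an init function like the one above mostly matters on older toolchains, or when you want to guarantee at least two threads on a single-core machine as this example does. A quick standalone check (a hypothetical program, not part of the example above):

package main

import (
    "fmt"
    "runtime"
)

func main() {
    // On Go 1.5+ these are normally equal unless the GOMAXPROCS environment
    // variable or an explicit call has changed the setting.
    fmt.Println("NumCPU:    ", runtime.NumCPU())
    fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}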
Example 3: BenchmarkFirst16
func BenchmarkFirst16(b *testing.B) {
    const n = 5000
    g := runtime.GOMAXPROCS(0)
    defer runtime.GOMAXPROCS(g)
    o := &Options{noClone: true}
    db, err := CreateTemp("_testdata", "temp", ".db", o)
    if err != nil {
        b.Fatal(err)
    }
    dbname := db.Name()
    defer func(n string) {
        db.Close()
        os.Remove(n)
        os.Remove(o._WAL)
    }(dbname)
    rng := fc()
    for i := 0; i < n; i++ {
        if err := db.Set(n2b(rng.Next()), n2b(rng.Next())); err != nil {
            b.Fatal(err)
        }
    }
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        db.First()
    }
    b.StopTimer()
}
Example 4: doConcurrentTest
func doConcurrentTest(c *test.C, ct func()) {
    maxProcs, numReqs := 1, 150
    if testing.Short() {
        maxProcs, numReqs = 4, 50
    }
    // GOMAXPROCS returns the previous value, so this sets maxProcs for the
    // test and restores the old setting when the function returns.
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
    var wg sync.WaitGroup
    wg.Add(numReqs)
    reqs := make(chan bool)
    defer close(reqs)
    for i := 0; i < maxProcs*2; i++ {
        go func() {
            for range reqs {
                ct()
                if c.Failed() {
                    wg.Done()
                    continue
                }
                wg.Done()
            }
        }()
    }
    for i := 0; i < numReqs; i++ {
        reqs <- true
    }
    wg.Wait()
}
Example 5: TestHammer32
func TestHammer32(t *testing.T) {
    const p = 4
    n := 100000
    if testing.Short() {
        n = 1000
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
    for name, testf := range hammer32 {
        c := make(chan int)
        var val uint32
        for i := 0; i < p; i++ {
            go func() {
                defer func() {
                    if err := recover(); err != nil {
                        t.Error(err.(string))
                    }
                    c <- 1
                }()
                testf(&val, n)
            }()
        }
        for i := 0; i < p; i++ {
            <-c
        }
        if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
            t.Fatalf("%s: val=%d want %d", name, val, n*p)
        }
    }
}
Example 6: runHiveTest
func runHiveTest(t *testing.T, opts ...HiveOption) {
    runtime.GOMAXPROCS(4)
    defer runtime.GOMAXPROCS(1)
    testHiveCh = make(chan interface{})
    defer func() {
        close(testHiveCh)
        testHiveCh = nil
    }()
    hive := newHiveForTest(opts...)
    app := hive.NewApp("TestHiveApp")
    app.Handle(MyMsg(0), &testHiveHandler{})
    go hive.Start()
    for i := 1; i <= msgs; i++ {
        hive.Emit(MyMsg(i))
    }
    for i := 0; i < handlers; i++ {
        <-testHiveCh
    }
    if err := hive.Stop(); err != nil {
        t.Errorf("cannot stop the hive %v", err)
    }
}
Example 7: main
func main() {
    var port = flag.Int("p", 0, "port to listen on")
    flag.Parse()
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        for range c {
            os.Exit(0)
        }
    }()
    numThreads = runtime.NumCPU()
    runtime.GOMAXPROCS(numThreads)
    ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
    if err != nil {
        log.Printf("failed to listen on port %d: %v", *port, err)
        return
    }
    for {
        conn, err := ln.Accept()
        if err != nil {
            log.Println("failed to accept connection:", err)
            continue
        }
        nc := atomic.AddInt32(&numConns, 1)
        if int(nc) >= numThreads {
            // Double the thread budget as the connection count grows.
            numThreads *= 2
            runtime.GOMAXPROCS(numThreads)
        }
        go handleConnection(conn)
    }
}
Example 8: TestRunOnNodesStress
func TestRunOnNodesStress(t *testing.T) {
    n := 1000
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16))
    body := `{"Id":"e90302","Path":"date","Args":[]}`
    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/json")
        w.Write([]byte(body))
    }))
    defer server.Close()
    id := "e90302"
    cluster, err := New(nil, &MapStorage{}, Node{Address: server.URL})
    if err != nil {
        t.Fatal(err)
    }
    for i := 0; i < rand.Intn(10)+n; i++ {
        result, err := cluster.runOnNodes(func(n node) (interface{}, error) {
            return n.InspectContainer(id)
        }, &docker.NoSuchContainer{ID: id}, false)
        if err != nil {
            t.Fatal(err)
        }
        container := result.(*docker.Container)
        if container.ID != id {
            t.Errorf("InspectContainer(%q): Wrong ID. Want %q. Got %q.", id, id, container.ID)
        }
        if container.Path != "date" {
            t.Errorf("InspectContainer(%q): Wrong Path. Want %q. Got %q.", id, "date", container.Path)
        }
    }
}
Example 9: TestClusterHandleNodeSuccessStressShouldntBlockNodes
func TestClusterHandleNodeSuccessStressShouldntBlockNodes(t *testing.T) {
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
    c, err := New(&roundRobin{}, &MapStorage{})
    if err != nil {
        t.Fatal(err)
    }
    _, err = c.Register("addr-1", nil)
    if err != nil {
        t.Fatal(err)
    }
    for i := 0; i < 100; i++ {
        go func() {
            err := c.handleNodeSuccess("addr-1")
            if err != nil && err != errHealerInProgress {
                t.Fatal(err)
            }
        }()
        go func() {
            nodes, err := c.Nodes()
            if err != nil {
                t.Fatal(err)
            }
            if len(nodes) != 1 {
                t.Fatalf("Expected nodes len to be 1, got %d", len(nodes))
            }
        }()
    }
}
Example 10: TestAddRouteAndRemoteRouteAreSafe
func (s *S) TestAddRouteAndRemoteRouteAreSafe(c *check.C) {
    var wg sync.WaitGroup
    fake := fakeRouter{backends: make(map[string][]string)}
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
    for i := 1; i < 256; i++ {
        wg.Add(5)
        name := fmt.Sprintf("route-%d", i)
        addr, _ := url.Parse(fmt.Sprintf("http://10.10.10.%d", i))
        go func() {
            fake.AddBackend(name)
            wg.Done()
        }()
        go func() {
            fake.AddRoute(name, addr)
            wg.Done()
        }()
        go func() {
            fake.RemoveRoute(name, addr)
            wg.Done()
        }()
        go func() {
            fake.HasRoute(name, addr.String())
            wg.Done()
        }()
        go func() {
            fake.RemoveBackend(name)
            wg.Done()
        }()
    }
    wg.Wait()
}
Example 11: TestSetCPU
func TestSetCPU(t *testing.T) {
    currentCPU := runtime.GOMAXPROCS(-1)
    maxCPU := runtime.NumCPU()
    for i, test := range []struct {
        input     string
        output    int
        shouldErr bool
    }{
        {"1", 1, false},
        {"-1", currentCPU, true},
        {"0", currentCPU, true},
        {"100%", maxCPU, false},
        {"50%", int(0.5 * float32(maxCPU)), false},
        {"110%", currentCPU, true},
        {"-10%", currentCPU, true},
        {"invalid input", currentCPU, true},
        {"invalid input%", currentCPU, true},
        {"9999", maxCPU, false}, // over available CPU
    } {
        err := setCPU(test.input)
        if test.shouldErr && err == nil {
            t.Errorf("Test %d: Expected error, but there wasn't any", i)
        }
        if !test.shouldErr && err != nil {
            t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
        }
        if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected {
            t.Errorf("Test %d: GOMAXPROCS was %d but expected %d", i, actual, expected)
        }
        // teardown
        runtime.GOMAXPROCS(currentCPU)
    }
}
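The setCPU helper exercised by this test is not shown on this page. A minimal sketch that would satisfy the table above parses either an absolute count ("2") or a percentage of the available CPUs ("50%"), rejects non-positive or out-of-range values, and caps the result at runtime.NumCPU(); this is an assumed implementation for illustration, not necessarily the one the project ships:

import (
    "errors"
    "runtime"
    "strconv"
    "strings"
)

// setCPU applies a CPU count or percentage via runtime.GOMAXPROCS (sketch).
func setCPU(s string) error {
    avail := runtime.NumCPU()
    var n int
    if strings.HasSuffix(s, "%") {
        percent, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 32)
        if err != nil || percent < 1 || percent > 100 {
            return errors.New("invalid CPU value: percentage must be between 1% and 100%")
        }
        n = int(float32(avail) * float32(percent) / 100)
    } else {
        v, err := strconv.Atoi(s)
        if err != nil || v < 1 {
            return errors.New("invalid CPU value: must be a positive number or a percentage")
        }
        n = v
    }
    if n > avail {
        n = avail // never ask for more threads than there are CPUs
    }
    runtime.GOMAXPROCS(n)
    return nil
}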
Example 12: TestGoroutineParallelism
func TestGoroutineParallelism(t *testing.T) {
    P := 4
    N := 10
    if testing.Short() {
        P = 3
        N = 3
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
    // If runtime triggers a forced GC during this test then it will deadlock,
    // since the goroutines can't be stopped/preempted.
    // Disable GC for this test (see issue #10958).
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    for try := 0; try < N; try++ {
        done := make(chan bool)
        x := uint32(0)
        for p := 0; p < P; p++ {
            // Test that all P goroutines are scheduled at the same time
            go func(p int) {
                for i := 0; i < 3; i++ {
                    expected := uint32(P*i + p)
                    for atomic.LoadUint32(&x) != expected {
                    }
                    atomic.StoreUint32(&x, expected+1)
                }
                done <- true
            }(p)
        }
        for p := 0; p < P; p++ {
            <-done
        }
    }
}
Example 13: testStorageLockNodeHealingAfterTimeout
func testStorageLockNodeHealingAfterTimeout(storage cluster.Storage, t *testing.T) {
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(100))
    node := cluster.Node{Address: "addr-xyz"}
    defer storage.RemoveNode("addr-xyz")
    err := storage.StoreNode(node)
    assertIsNil(err, t)
    locked, err := storage.LockNodeForHealing("addr-xyz", true, 200*time.Millisecond)
    assertIsNil(err, t)
    locked, err = storage.LockNodeForHealing("addr-xyz", true, 200*time.Millisecond)
    assertIsNil(err, t)
    if locked {
        t.Fatal("Expected LockNodeForHealing to return false before timeout")
    }
    time.Sleep(300 * time.Millisecond)
    successCount := int32(0)
    wg := sync.WaitGroup{}
    wg.Add(50)
    for i := 0; i < 50; i++ {
        go func() {
            defer wg.Done()
            locked, err := storage.LockNodeForHealing("addr-xyz", true, 5*time.Second)
            assertIsNil(err, t)
            if locked {
                atomic.AddInt32(&successCount, 1)
            }
        }()
    }
    wg.Wait()
    if successCount != 1 {
        t.Fatalf("Expected LockNodeForHealing after timeout to lock only once, got: %d", successCount)
    }
}
Example 14: Primes
func Primes(limit uint) *bs.BitSlice {
    length := int(limit / 8)
    if limit%8 > 0 {
        length++
    }
    list := make([]byte, uint(length))
    // Initialize for values 2, 3, and 5 already run.
    // Avoids the nasty small loops
    // 0123456789 10 11 12 13 14 15 16 17...
    // 0011010100  0  1  0  1  0  0  0  1
    list[0] = 0x35 // Special case, 2 is prime
    for i := 1; i < length; i += 3 {
        list[i] = 0x14
    }
    for i := 2; i < length; i += 3 {
        list[i] = 0x51
    }
    for i := 3; i < length; i += 3 {
        list[i] = 0x45
    }
    primes := bs.New(limit)
    primes.Arr = list
    runtime.GOMAXPROCS(MAX_CONCURRENT)
    generate(primes, limit)
    runtime.GOMAXPROCS(1)
    return primes
}
Example 15: BenchmarkSharedWriterReserveOne
func BenchmarkSharedWriterReserveOne(b *testing.B) {
    defer time.Sleep(DisruptorCleanup)
    runtime.GOMAXPROCS(2)
    defer runtime.GOMAXPROCS(1)
    controller := disruptor.
        Configure(RingBufferSize).
        WithConsumerGroup(SampleConsumer{}).
        BuildShared()
    controller.Start()
    defer controller.Stop()
    writer := controller.Writer()
    iterations := int64(b.N)
    sequence := disruptor.InitialSequenceValue
    b.ReportAllocs()
    b.ResetTimer()
    for sequence < iterations {
        sequence = writer.Reserve(ReserveOne)
        ringBuffer[sequence&RingBufferMask] = sequence
        writer.Commit(sequence, sequence)
    }
    b.StopTimer()
}