Source file src/time/sleep_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time_test

import (
	"errors"
	"fmt"
	"internal/testenv"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
)

// The Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) being actually
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3 (windows/386).
// windowsInaccuracy is used to ignore such errors.
const windowsInaccuracy = 17 * Millisecond

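// TestSleep checks that Sleep sleeps for at least the requested duration,
// even when Interrupt is called from another goroutine partway through
// (Interrupt is a test hook that may deliver a signal to the process).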
func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
	go func() {
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	duration := Now().Sub(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}

func TestAfterStress(t *testing.T) {
	var stop atomic.Bool
	go func() {
		for !stop.Load() {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	stop.Store(true)
}

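// benchmark runs bench under b.RunParallel after first creating a large
// number of inert garbage timers on every P, so that the timer code is
// exercised while its internal data structures are already well populated.
// The garbage timers are stopped once the measurement is done.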
func benchmark(b *testing.B, bench func(n int)) {

	// Create an equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}

		AfterFunc(0, f)
		<-c
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * Second).Stop()
		}
	})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]*Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(Hour, nil)
		}

		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(Hour)
		for i := 0; i < n; i++ {
			t.Reset(Hour)
		}
		t.Stop()
	})
}

func BenchmarkSleep(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func() {
				Sleep(Nanosecond)
				wg.Done()
			}()
		}
		wg.Wait()
	})
}

func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	if duration := Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := NewTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}

		return
	}

	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

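// slots is a deliberately shuffled list of multipliers. testAfterQueuing
// starts one After call per slot and expects the results to arrive back in
// ascending slot order.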
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}

type afterResult struct {
	slot int
	t    Time
}

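// await receives a Time from ac and sends it, tagged with slot, on result.
// It runs in its own goroutine, one per entry in slots.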
func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

func testAfterQueuing(delta Duration) error {
	// make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, After(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}

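// TestTimerStopStress starts many timers whose callbacks would report a test
// error if they ever ran, then stops each timer well before it is due to fire.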
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang; the sequence of events was as follows.
	// Sleep(0) sets the goroutine's status to Gwaiting but then returns
	// immediately, leaving that status in place. The goroutine then calls
	// e.g. new and falls into the scheduler because of a pending GC.
	// After the GC, nobody wakes the goroutine up from the Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}

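// testReset exercises Reset on a single timer using sleeps proportional to d:
// it resets an unfired timer, checks that the timer fires at the new deadline
// rather than the original one, and then checks Reset's return value on an
// already-expired timer. It returns an error describing any misbehavior.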
func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	//
	// (maxDuration is several orders of magnitude longer than we
	// expect this test to actually take on a fast, unloaded machine.)
	d := 1 * Millisecond
	const maxDuration = 10 * Second
	for {
		err := testReset(d)
		if err == nil {
			break
		}
		d *= 2
		if d > maxDuration {
			t.Fatal(err)
		}
		t.Logf("%v; trying duration %v", err, d)
	}
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}

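// checkZeroPanicString is deferred by the zero-Timer tests below. It recovers
// the expected panic and verifies that the panic message mentions an
// uninitialized Timer.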
func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := NewTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := NewTimer(Hour)
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762.
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			timers[i] = NewTimer(0)
		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that, other than the AfterFunc calls this benchmark is measuring,
	// it avoids using any other timers. In particular, the main goroutine uses
	// doWork to spin for some durations because, up through Go 1.15, if all
	// threads are idle sysmon could leave deep sleep when we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}
	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

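// doWork busy-loops for approximately dur, keeping the calling goroutine's
// thread running without relying on any timers.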
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}
