Black Lives Matter. Support the Equal Justice Initiative.

Source file src/testing/benchmark.go

Documentation: testing

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package testing
     6  
     7  import (
     8  	"flag"
     9  	"fmt"
    10  	"internal/race"
    11  	"io"
    12  	"math"
    13  	"os"
    14  	"runtime"
    15  	"sort"
    16  	"strconv"
    17  	"strings"
    18  	"sync"
    19  	"sync/atomic"
    20  	"time"
    21  	"unicode"
    22  )
    23  
// initBenchmarkFlags registers the benchmark-related command line flags
// (-test.bench, -test.benchmem, -test.benchtime) with the flag package.
// It is called from Init before flag.Parse runs.
func initBenchmarkFlags() {
	matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
	benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
	// benchTime implements flag.Value so it can accept either a duration
	// ("2s") or an iteration count ("100x").
	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
}
    29  
var (
	// Set by initBenchmarkFlags; nil until the flags are registered.
	matchBenchmarks *string // -test.bench
	benchmarkMemory *bool   // -test.benchmem

	benchTime = benchTimeFlag{d: 1 * time.Second} // changed during test of testing package
)

// benchTimeFlag is the value of the -test.benchtime flag: either a target
// duration per benchmark (d) or a fixed iteration count (n).
// A count takes precedence when n > 0 (see launch and String).
type benchTimeFlag struct {
	d time.Duration
	n int
}
    41  
    42  func (f *benchTimeFlag) String() string {
    43  	if f.n > 0 {
    44  		return fmt.Sprintf("%dx", f.n)
    45  	}
    46  	return time.Duration(f.d).String()
    47  }
    48  
    49  func (f *benchTimeFlag) Set(s string) error {
    50  	if strings.HasSuffix(s, "x") {
    51  		n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
    52  		if err != nil || n <= 0 {
    53  			return fmt.Errorf("invalid count")
    54  		}
    55  		*f = benchTimeFlag{n: int(n)}
    56  		return nil
    57  	}
    58  	d, err := time.ParseDuration(s)
    59  	if err != nil || d <= 0 {
    60  		return fmt.Errorf("invalid duration")
    61  	}
    62  	*f = benchTimeFlag{d: d}
    63  	return nil
    64  }
    65  
// Global lock to ensure only one benchmark runs at a time.
// It is temporarily released while a benchmark's subbenchmarks run (see B.Run).
var benchmarkLock sync.Mutex

// Used for every benchmark for measuring memory.
// Safe to share because benchmarkLock serializes benchmark execution.
var memStats runtime.MemStats

// InternalBenchmark is an internal type but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
	Name string     // full benchmark name, e.g. "BenchmarkFoo"
	F    func(b *B) // the benchmark function itself
}
    78  
// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
// only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of Log and Error,
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
	common
	importPath       string // import path of the package containing the benchmark
	context          *benchContext
	N                int           // number of iterations the benchmark function must perform
	previousN        int           // number of iterations in the previous run
	previousDuration time.Duration // total duration of the previous run
	benchFunc        func(b *B)    // the function being benchmarked
	benchTime        benchTimeFlag // target duration or iteration count (from -test.benchtime)
	bytes            int64         // bytes processed per iteration, set via SetBytes
	missingBytes     bool          // one of the subbenchmarks does not have bytes set.
	timerOn          bool          // whether the benchmark timer is currently running
	showAllocResult  bool          // report allocations even without -test.benchmem (set by ReportAllocs)
	result           BenchmarkResult
	parallelism      int // RunParallel creates parallelism*GOMAXPROCS goroutines
	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
	startAllocs uint64
	startBytes  uint64
	// The net total of this test after being run.
	netAllocs uint64
	netBytes  uint64
	// Extra metrics collected by ReportMetric.
	extra map[string]float64
}
   116  
   117  // StartTimer starts timing a test. This function is called automatically
   118  // before a benchmark starts, but it can also be used to resume timing after
   119  // a call to StopTimer.
   120  func (b *B) StartTimer() {
   121  	if !b.timerOn {
   122  		runtime.ReadMemStats(&memStats)
   123  		b.startAllocs = memStats.Mallocs
   124  		b.startBytes = memStats.TotalAlloc
   125  		b.start = time.Now()
   126  		b.timerOn = true
   127  	}
   128  }
   129  
   130  // StopTimer stops timing a test. This can be used to pause the timer
   131  // while performing complex initialization that you don't
   132  // want to measure.
   133  func (b *B) StopTimer() {
   134  	if b.timerOn {
   135  		b.duration += time.Since(b.start)
   136  		runtime.ReadMemStats(&memStats)
   137  		b.netAllocs += memStats.Mallocs - b.startAllocs
   138  		b.netBytes += memStats.TotalAlloc - b.startBytes
   139  		b.timerOn = false
   140  	}
   141  }
   142  
// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
// and deletes user-reported metrics.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
	if b.extra == nil {
		// Allocate the extra map before reading memory stats.
		// Pre-size it to make more allocation unlikely.
		b.extra = make(map[string]float64, 16)
	} else {
		// Reuse the existing map so no allocation happens after the
		// ReadMemStats baseline is taken below.
		for k := range b.extra {
			delete(b.extra, k)
		}
	}
	if b.timerOn {
		// Timer keeps running: re-baseline as if it had just been started.
		runtime.ReadMemStats(&memStats)
		b.startAllocs = memStats.Mallocs
		b.startBytes = memStats.TotalAlloc
		b.start = time.Now()
	}
	b.duration = 0
	b.netAllocs = 0
	b.netBytes = 0
}
   166  
   167  // SetBytes records the number of bytes processed in a single operation.
   168  // If this is called, the benchmark will report ns/op and MB/s.
   169  func (b *B) SetBytes(n int64) { b.bytes = n }
   170  
   171  // ReportAllocs enables malloc statistics for this benchmark.
   172  // It is equivalent to setting -test.benchmem, but it only affects the
   173  // benchmark function that calls ReportAllocs.
   174  func (b *B) ReportAllocs() {
   175  	b.showAllocResult = true
   176  }
   177  
// runN runs a single benchmark for the specified number of iterations.
// It holds benchmarkLock for the whole run so only one benchmark
// executes at a time.
func (b *B) runN(n int) {
	benchmarkLock.Lock()
	defer benchmarkLock.Unlock()
	defer b.runCleanup(normalPanic)
	// Try to get a comparable environment for each run
	// by clearing garbage from previous runs.
	runtime.GC()
	// Start from the negated race count so that after adding the post-run
	// count below, b.raceErrors holds only races detected during this run.
	b.raceErrors = -race.Errors()
	b.N = n
	b.parallelism = 1
	b.ResetTimer()
	b.StartTimer()
	b.benchFunc(b)
	b.StopTimer()
	// Record this run so launch can predict the next iteration count and
	// RunParallel can size its work grain.
	b.previousN = n
	b.previousDuration = b.duration
	b.raceErrors += race.Errors()
	if b.raceErrors > 0 {
		b.Errorf("race detected during execution of benchmark")
	}
}
   200  
   201  func min(x, y int64) int64 {
   202  	if x > y {
   203  		return y
   204  	}
   205  	return x
   206  }
   207  
   208  func max(x, y int64) int64 {
   209  	if x < y {
   210  		return y
   211  	}
   212  	return x
   213  }
   214  
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmarks should be run.
func (b *B) run1() bool {
	if ctx := b.context; ctx != nil {
		// Extend maxLen, if needed.
		if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen {
			ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
		}
	}
	// Run the single iteration on a fresh goroutine so that FailNow's
	// runtime.Goexit only terminates the benchmark, not the caller.
	go func() {
		// Signal that we're done whether we return normally
		// or by FailNow's runtime.Goexit.
		defer func() {
			b.signal <- true
		}()

		b.runN(1)
	}()
	<-b.signal
	if b.failed {
		fmt.Fprintf(b.w, "--- FAIL: %s\n%s", b.name, b.output)
		return false
	}
	// Only print the output if we know we are not going to proceed.
	// Otherwise it is printed in processBench.
	// A benchmark that has subbenchmarks (hasSub) or that already finished
	// (e.g. skipped itself) is not measured further.
	if atomic.LoadInt32(&b.hasSub) != 0 || b.finished {
		tag := "BENCH"
		if b.skipped {
			tag = "SKIP"
		}
		if b.chatty && (len(b.output) > 0 || b.finished) {
			b.trimOutput()
			fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
		}
		return false
	}
	return true
}
   253  
// labelsOnce ensures the goos/goarch/pkg header is printed at most once
// per process, whichever benchmark runs first.
var labelsOnce sync.Once

// run executes the benchmark in a separate goroutine, including all of its
// subbenchmarks. b must not have subbenchmarks.
func (b *B) run() {
	labelsOnce.Do(func() {
		fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS)
		fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH)
		if b.importPath != "" {
			fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
		}
	})
	if b.context != nil {
		// Running go test --test.bench
		b.context.processBench(b) // Must call doBench.
	} else {
		// Running func Benchmark.
		b.doBench()
	}
}
   274  
// doBench runs launch on a new goroutine, blocks until it signals
// completion on b.signal, and returns the collected result.
func (b *B) doBench() BenchmarkResult {
	go b.launch()
	<-b.signal
	return b.result
}
   280  
// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// launch is run by the doBench function as a separate goroutine.
// run1 must have been called on b.
func (b *B) launch() {
	// Signal that we're done whether we return normally
	// or by FailNow's runtime.Goexit.
	defer func() {
		b.signal <- true
	}()

	// Run the benchmark for at least the specified amount of time.
	if b.benchTime.n > 0 {
		// -test.benchtime=Nx: run exactly N iterations, no prediction loop.
		b.runN(b.benchTime.n)
	} else {
		d := b.benchTime.d
		for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
			last := n
			// Predict required iterations.
			goalns := d.Nanoseconds()
			prevIters := int64(b.N)
			prevns := b.duration.Nanoseconds()
			if prevns <= 0 {
				// Round up, to avoid div by zero.
				prevns = 1
			}
			// Order of operations matters.
			// For very fast benchmarks, prevIters ~= prevns.
			// If you divide first, you get 0 or 1,
			// which can hide an order of magnitude in execution time.
			// So multiply first, then divide.
			n = goalns * prevIters / prevns
			// Run more iterations than we think we'll need (1.2x).
			n += n / 5
			// Don't grow too fast in case we had timing errors previously.
			n = min(n, 100*last)
			// Be sure to run at least one more than last time.
			n = max(n, last+1)
			// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
			n = min(n, 1e9)
			b.runN(int(n))
		}
	}
	// Publish the final measurements; doBench reads b.result after <-b.signal.
	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
}
   326  
   327  // ReportMetric adds "n unit" to the reported benchmark results.
   328  // If the metric is per-iteration, the caller should divide by b.N,
   329  // and by convention units should end in "/op".
   330  // ReportMetric overrides any previously reported value for the same unit.
   331  // ReportMetric panics if unit is the empty string or if unit contains
   332  // any whitespace.
   333  // If unit is a unit normally reported by the benchmark framework itself
   334  // (such as "allocs/op"), ReportMetric will override that metric.
   335  // Setting "ns/op" to 0 will suppress that built-in metric.
   336  func (b *B) ReportMetric(n float64, unit string) {
   337  	if unit == "" {
   338  		panic("metric unit must not be empty")
   339  	}
   340  	if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
   341  		panic("metric unit must not contain whitespace")
   342  	}
   343  	b.extra[unit] = n
   344  }
   345  
// BenchmarkResult contains the results of a benchmark run.
type BenchmarkResult struct {
	N         int           // The number of iterations.
	T         time.Duration // The total time taken.
	Bytes     int64         // Bytes processed in one iteration.
	MemAllocs uint64        // The total number of memory allocations.
	MemBytes  uint64        // The total number of bytes allocated.

	// Extra records additional metrics reported by ReportMetric.
	// Entries here override the built-in metrics of the same name
	// in the accessor methods below.
	Extra map[string]float64
}
   357  
   358  // NsPerOp returns the "ns/op" metric.
   359  func (r BenchmarkResult) NsPerOp() int64 {
   360  	if v, ok := r.Extra["ns/op"]; ok {
   361  		return int64(v)
   362  	}
   363  	if r.N <= 0 {
   364  		return 0
   365  	}
   366  	return r.T.Nanoseconds() / int64(r.N)
   367  }
   368  
   369  // mbPerSec returns the "MB/s" metric.
   370  func (r BenchmarkResult) mbPerSec() float64 {
   371  	if v, ok := r.Extra["MB/s"]; ok {
   372  		return v
   373  	}
   374  	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
   375  		return 0
   376  	}
   377  	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
   378  }
   379  
   380  // AllocsPerOp returns the "allocs/op" metric,
   381  // which is calculated as r.MemAllocs / r.N.
   382  func (r BenchmarkResult) AllocsPerOp() int64 {
   383  	if v, ok := r.Extra["allocs/op"]; ok {
   384  		return int64(v)
   385  	}
   386  	if r.N <= 0 {
   387  		return 0
   388  	}
   389  	return int64(r.MemAllocs) / int64(r.N)
   390  }
   391  
   392  // AllocedBytesPerOp returns the "B/op" metric,
   393  // which is calculated as r.MemBytes / r.N.
   394  func (r BenchmarkResult) AllocedBytesPerOp() int64 {
   395  	if v, ok := r.Extra["B/op"]; ok {
   396  		return int64(v)
   397  	}
   398  	if r.N <= 0 {
   399  		return 0
   400  	}
   401  	return int64(r.MemBytes) / int64(r.N)
   402  }
   403  
// String returns a summary of the benchmark results.
// It follows the benchmark result line format from
// https://golang.org/design/14313-benchmark-format, not including the
// benchmark name.
// Extra metrics override built-in metrics of the same name.
// String does not include allocs/op or B/op, since those are reported
// by MemString.
func (r BenchmarkResult) String() string {
	buf := new(strings.Builder)
	fmt.Fprintf(buf, "%8d", r.N)

	// Get ns/op as a float.
	ns, ok := r.Extra["ns/op"]
	if !ok {
		ns = float64(r.T.Nanoseconds()) / float64(r.N)
	}
	if ns != 0 {
		buf.WriteByte('\t')
		prettyPrint(buf, ns, "ns/op")
	}

	if mbs := r.mbPerSec(); mbs != 0 {
		fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
	}

	// Print extra metrics that aren't represented in the standard
	// metrics.
	var extraKeys []string
	for k := range r.Extra {
		switch k {
		case "ns/op", "MB/s", "B/op", "allocs/op":
			// Built-in metrics reported elsewhere.
			continue
		}
		extraKeys = append(extraKeys, k)
	}
	// Sort for deterministic output; map iteration order is random.
	sort.Strings(extraKeys)
	for _, k := range extraKeys {
		buf.WriteByte('\t')
		prettyPrint(buf, r.Extra[k], k)
	}
	return buf.String()
}
   447  
   448  func prettyPrint(w io.Writer, x float64, unit string) {
   449  	// Print all numbers with 10 places before the decimal point
   450  	// and small numbers with three sig figs.
   451  	var format string
   452  	switch y := math.Abs(x); {
   453  	case y == 0 || y >= 99.95:
   454  		format = "%10.0f %s"
   455  	case y >= 9.995:
   456  		format = "%12.1f %s"
   457  	case y >= 0.9995:
   458  		format = "%13.2f %s"
   459  	case y >= 0.09995:
   460  		format = "%14.3f %s"
   461  	case y >= 0.009995:
   462  		format = "%15.4f %s"
   463  	case y >= 0.0009995:
   464  		format = "%16.5f %s"
   465  	default:
   466  		format = "%17.6f %s"
   467  	}
   468  	fmt.Fprintf(w, format, x, unit)
   469  }
   470  
   471  // MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
   472  func (r BenchmarkResult) MemString() string {
   473  	return fmt.Sprintf("%8d B/op\t%8d allocs/op",
   474  		r.AllocedBytesPerOp(), r.AllocsPerOp())
   475  }
   476  
   477  // benchmarkName returns full name of benchmark including procs suffix.
   478  func benchmarkName(name string, n int) string {
   479  	if n != 1 {
   480  		return fmt.Sprintf("%s-%d", name, n)
   481  	}
   482  	return name
   483  }
   484  
// benchContext holds state shared by all benchmarks in a "go test" run:
// the -test.bench matcher and the column width used to align output.
type benchContext struct {
	match *matcher

	maxLen int // The largest recorded benchmark name.
	extLen int // Maximum extension length.
}
   491  
// RunBenchmarks is an internal function but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
	// The pass/fail result of runBenchmarks is discarded here; failures
	// have already been reported through the benchmarks' output writers.
	runBenchmarks("", matchString, benchmarks)
}
   497  
// runBenchmarks filters benchmarks against -test.bench, wraps the matches
// in a synthetic "Main" benchmark that runs each as a subbenchmark, and
// reports whether the run succeeded.
func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
	// If no flag was specified, don't run benchmarks.
	if len(*matchBenchmarks) == 0 {
		return true
	}
	// Collect matching benchmarks and determine longest name.
	// The widest name uses the largest -cpu value's "-N" suffix.
	maxprocs := 1
	for _, procs := range cpuList {
		if procs > maxprocs {
			maxprocs = procs
		}
	}
	ctx := &benchContext{
		match:  newMatcher(matchString, *matchBenchmarks, "-test.bench"),
		extLen: len(benchmarkName("", maxprocs)),
	}
	var bs []InternalBenchmark
	for _, Benchmark := range benchmarks {
		if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched {
			bs = append(bs, Benchmark)
			benchName := benchmarkName(Benchmark.Name, maxprocs)
			if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen {
				ctx.maxLen = l
			}
		}
	}
	// "Main" exists only to host the real benchmarks as subbenchmarks;
	// it is never measured itself (Run marks it as having subs).
	main := &B{
		common: common{
			name:   "Main",
			w:      os.Stdout,
			chatty: *chatty,
		},
		importPath: importPath,
		benchFunc: func(b *B) {
			for _, Benchmark := range bs {
				b.Run(Benchmark.Name, Benchmark.F)
			}
		},
		benchTime: benchTime,
		context:   ctx,
	}
	main.runN(1)
	return !main.failed
}
   542  
// processBench runs bench b for the configured CPU counts and prints the results.
func (ctx *benchContext) processBench(b *B) {
	for i, procs := range cpuList {
		for j := uint(0); j < *count; j++ {
			runtime.GOMAXPROCS(procs)
			benchName := benchmarkName(b.name, procs)

			// If it's chatty, we've already printed this information.
			if !b.chatty {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
			}
			// Recompute the running time for all but the first iteration.
			// A fresh B is needed because the first b was already warmed up
			// by run1; later repetitions must start from a clean state.
			if i > 0 || j > 0 {
				b = &B{
					common: common{
						signal: make(chan bool),
						name:   b.name,
						w:      b.w,
						chatty: b.chatty,
					},
					benchFunc: b.benchFunc,
					benchTime: b.benchTime,
				}
				b.run1()
			}
			r := b.doBench()
			if b.failed {
				// The output could be very long here, but probably isn't.
				// We print it all, regardless, because we don't want to trim the reason
				// the benchmark failed.
				fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output)
				continue
			}
			results := r.String()
			if b.chatty {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
			}
			if *benchmarkMemory || b.showAllocResult {
				results += "\t" + r.MemString()
			}
			fmt.Fprintln(b.w, results)
			// Unlike with tests, we ignore the -chatty flag and always print output for
			// benchmarks since the output generation time will skew the results.
			if len(b.output) > 0 {
				b.trimOutput()
				fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output)
			}
			// Warn if the benchmark changed GOMAXPROCS behind our back.
			if p := runtime.GOMAXPROCS(-1); p != procs {
				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
			}
		}
	}
}
   596  
// Run benchmarks f as a subbenchmark with the given name. It reports
// whether there were any failures.
//
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
func (b *B) Run(name string, f func(b *B)) bool {
	// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
	// Release the lock and acquire it on exit to ensure locks stay paired.
	// (runN holds benchmarkLock while b runs; the sub's own runN retakes it.)
	atomic.StoreInt32(&b.hasSub, 1)
	benchmarkLock.Unlock()
	defer benchmarkLock.Lock()

	benchName, ok, partial := b.name, true, false
	if b.context != nil {
		benchName, ok, partial = b.context.match.fullName(&b.common, name)
	}
	if !ok {
		// Name filtered out by -test.bench; nothing to run, not a failure.
		return true
	}
	// Record the caller's stack so failures can point at the Run site.
	var pc [maxStackLen]uintptr
	n := runtime.Callers(2, pc[:])
	sub := &B{
		common: common{
			signal:  make(chan bool),
			name:    benchName,
			parent:  &b.common,
			level:   b.level + 1,
			creator: pc[:n],
			w:       b.w,
			chatty:  b.chatty,
		},
		importPath: b.importPath,
		benchFunc:  f,
		benchTime:  b.benchTime,
		context:    b.context,
	}
	if partial {
		// Partial name match, like -bench=X/Y matching BenchmarkX.
		// Only process sub-benchmarks, if any.
		atomic.StoreInt32(&sub.hasSub, 1)
	}

	if b.chatty {
		labelsOnce.Do(func() {
			fmt.Printf("goos: %s\n", runtime.GOOS)
			fmt.Printf("goarch: %s\n", runtime.GOARCH)
			if b.importPath != "" {
				fmt.Printf("pkg: %s\n", b.importPath)
			}
		})

		fmt.Println(benchName)
	}

	// Probe with one iteration; measure fully only if the sub is a leaf
	// benchmark that neither failed, skipped, nor spawned its own subs.
	if sub.run1() {
		sub.run()
	}
	b.add(sub.result)
	return !sub.failed
}
   657  
// add simulates running benchmarks in sequence in a single iteration. It is
// used to give some meaningful results in case func Benchmark is used in
// combination with Run.
func (b *B) add(other BenchmarkResult) {
	r := &b.result
	// The aggregated BenchmarkResults resemble running all subbenchmarks as
	// in sequence in a single benchmark.
	r.N = 1
	// Per-op values are summed because N is pinned to 1 above.
	r.T += time.Duration(other.NsPerOp())
	if other.Bytes == 0 {
		// Summing Bytes is meaningless in aggregate if not all subbenchmarks
		// set it.
		b.missingBytes = true
		r.Bytes = 0
	}
	if !b.missingBytes {
		r.Bytes += other.Bytes
	}
	r.MemAllocs += uint64(other.AllocsPerOp())
	r.MemBytes += uint64(other.AllocedBytesPerOp())
}
   679  
   680  // trimOutput shortens the output from a benchmark, which can be very long.
   681  func (b *B) trimOutput() {
   682  	// The output is likely to appear multiple times because the benchmark
   683  	// is run multiple times, but at least it will be seen. This is not a big deal
   684  	// because benchmarks rarely print, but just in case, we trim it if it's too long.
   685  	const maxNewlines = 10
   686  	for nlCount, j := 0, 0; j < len(b.output); j++ {
   687  		if b.output[j] == '\n' {
   688  			nlCount++
   689  			if nlCount >= maxNewlines {
   690  				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
   691  				break
   692  			}
   693  		}
   694  	}
   695  }
   696  
// A PB is used by RunParallel for running parallel benchmarks.
// Each worker goroutine gets its own PB; they share only globalN.
type PB struct {
	globalN *uint64 // shared between all worker goroutines iteration counter
	grain   uint64  // acquire that many iterations from globalN at once
	cache   uint64  // local cache of acquired iterations
	bN      uint64  // total number of iterations to execute (b.N)
}
   704  
// Next reports whether there are more iterations to execute.
func (pb *PB) Next() bool {
	if pb.cache == 0 {
		// Local batch exhausted: claim another grain-sized batch from the
		// shared counter.
		n := atomic.AddUint64(pb.globalN, pb.grain)
		if n <= pb.bN {
			pb.cache = pb.grain
		} else if n < pb.bN+pb.grain {
			// Final partial batch: only the iterations up to bN remain.
			pb.cache = pb.bN + pb.grain - n
		} else {
			// All b.N iterations have already been handed out.
			return false
		}
	}
	pb.cache--
	return true
}
   720  
// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
// non-CPU-bound benchmarks, call SetParallelism before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
// It should not use the StartTimer, StopTimer, or ResetTimer functions,
// because they have global effect. It should also not call Run.
func (b *B) RunParallel(body func(*PB)) {
	if b.N == 0 {
		return // Nothing to do when probing.
	}
	// Calculate grain size as number of iterations that take ~100µs.
	// 100µs is enough to amortize the overhead and provide sufficient
	// dynamic load balancing.
	grain := uint64(0)
	if b.previousN > 0 && b.previousDuration > 0 {
		grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
	}
	if grain < 1 {
		grain = 1
	}
	// We expect the inner loop and function call to take at least 10ns,
	// so do not do more than 100µs/10ns=1e4 iterations.
	if grain > 1e4 {
		grain = 1e4
	}

	// n is the shared iteration counter; each worker's PB draws batches
	// of grain iterations from it via atomic adds.
	n := uint64(0)
	numProcs := b.parallelism * runtime.GOMAXPROCS(0)
	var wg sync.WaitGroup
	wg.Add(numProcs)
	for p := 0; p < numProcs; p++ {
		go func() {
			defer wg.Done()
			pb := &PB{
				globalN: &n,
				grain:   grain,
				bN:      uint64(b.N),
			}
			body(pb)
		}()
	}
	wg.Wait()
	// If the counter never passed b.N, some body returned before pb.Next
	// reported false, which would make the timing meaningless.
	if n <= uint64(b.N) && !b.Failed() {
		b.Fatal("RunParallel: body exited without pb.Next() == false")
	}
}
   771  
   772  // SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS.
   773  // There is usually no need to call SetParallelism for CPU-bound benchmarks.
   774  // If p is less than 1, this call will have no effect.
   775  func (b *B) SetParallelism(p int) {
   776  	if p >= 1 {
   777  		b.parallelism = p
   778  	}
   779  }
   780  
// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
// If f depends on testing flags, then Init must be used to register
// those flags before calling Benchmark and before calling flag.Parse.
//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
func Benchmark(f func(b *B)) BenchmarkResult {
	b := &B{
		common: common{
			signal: make(chan bool),
			// Output is discarded: there is no test runner to report to.
			w: discard{},
		},
		benchFunc: f,
		benchTime: benchTime,
	}
	// Probe with one iteration, then measure fully only if f is a leaf
	// benchmark that did not fail or skip (same protocol as B.Run).
	if b.run1() {
		b.run()
	}
	return b.result
}
   803  
   804  type discard struct{}
   805  
   806  func (discard) Write(b []byte) (n int, err error) { return len(b), nil }
   807  

View as plain text