Source file src/runtime/time.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Time-related runtime and pieces of package time.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/runtime/atomic"
    12  	"runtime/internal/sys"
    13  	"unsafe"
    14  )
    15  
    16  // A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
    17  // Timers are allocated by client code, often as part of other data structures.
    18  // Each P has a heap of pointers to timers that it manages.
    19  //
    20  // A timer is expected to be used by only one client goroutine at a time,
    21  // but there will be concurrent access by the P managing that timer.
    22  // Timer accesses are protected by the lock t.mu, with a snapshot of
    23  // t's state bits published in t.astate to enable certain fast paths to make
    24  // decisions about a timer without acquiring the lock.
    25  type timer struct {
    26  	// mu protects reads and writes to all fields, with exceptions noted below.
    27  	mu mutex
    28  
    29  	astate atomic.Uint8 // atomic copy of state bits at last unlock
    30  	state  uint8        // state bits
    31  	isChan bool         // timer has a channel; immutable; can be read without lock
    32  
    33  	// isSending is used to handle races between running a
    34  	// channel timer and stopping or resetting the timer.
    35  	// It is used only for channel timers (t.isChan == true).
    36  	// The lowest zero bit is set when about to send a value on the channel,
    37  	// and cleared after sending the value.
    38  	// The stop/reset code uses this to detect whether it
    39  	// stopped the channel send.
    40  	//
    41  	// An isSending bit is set only when t.mu is held.
    42  	// An isSending bit is cleared only when t.sendLock is held.
    43  	// isSending is read only when both t.mu and t.sendLock are held.
    44  	//
    45  	// Setting and clearing Uint8 bits handles the case of
    46  	// a timer that is reset concurrently with unlockAndRun.
    47  	// If the reset timer runs immediately, we can wind up with
    48  	// concurrent calls to unlockAndRun for the same timer.
    49  	// Using matched bit set and clear in unlockAndRun
    50  	// ensures that the value doesn't get temporarily out of sync.
    51  	//
    52  	// We use a uint8 to keep the timer struct small.
    53  	// This means that we can only support up to 8 concurrent
    54  	// runs of a timer, where a concurrent run can only occur if
    55  	// we start a run, unlock the timer, the timer is reset to a new
    56  	// value (or the ticker fires again), it is ready to run,
    57  	// and it is actually run, all before the first run completes.
    58  	// Since completing a run is fast, even 2 concurrent timer runs are
    59  	// nearly impossible, so this should be safe in practice.
    60  	isSending atomic.Uint8
    61  
    62  	blocked uint32 // number of goroutines blocked on timer's channel
    63  
    64  	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
    65  	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
    66  	// a well-behaved function and not block.
    67  	//
    68  	// The arg and seq are client-specified opaque arguments passed back to f.
    69  	// When used from netpoll, arg and seq have meanings defined by netpoll
    70  	// and are completely opaque to this code; in that context, seq is a sequence
     71  	// number to recognize and squelch stale function invocations.
    72  	// When used from package time, arg is a channel (for After, NewTicker)
    73  	// or the function to call (for AfterFunc) and seq is unused (0).
    74  	//
    75  	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
    76  	// this file uses t.seq as a sequence number to recognize and squelch
    77  	// sends that correspond to an earlier (stale) timer configuration,
    78  	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
    79  	// writes to seq are protected by both t.mu and t.sendLock,
    80  	// so reads are allowed when holding either of the two mutexes.
    81  	//
    82  	// The delay argument is nanotime() - t.when, meaning the delay in ns between
    83  	// when the timer should have gone off and now. Normally that amount is
    84  	// small enough not to matter, but for channel timers that are fed lazily,
    85  	// the delay can be arbitrarily long; package time subtracts it out to make
    86  	// it look like the send happened earlier than it actually did.
     87  	// (No one looked at the channel since then, or the send would not
     88  	// have happened so late, so no one can tell the difference.)
    89  	when   int64
    90  	period int64
    91  	f      func(arg any, seq uintptr, delay int64)
    92  	arg    any
    93  	seq    uintptr
    94  
    95  	// If non-nil, the timers containing t.
    96  	ts *timers
    97  
    98  	// sendLock protects sends on the timer's channel.
    99  	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
   100  	sendLock mutex
   101  }
   102  
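         // For orientation, the lowest-zero-bit bookkeeping described above is the
         // pattern that unlockAndRun applies further below; a condensed,
         // illustrative extract (no additional behavior):
         //
         //	v := t.isSending.Load()
         //	i := sys.TrailingZeros8(^v) // index of the lowest zero bit
         //	if i == 8 {
         //		throw("too many concurrent timer firings")
         //	}
         //	isSendingClear := uint8(1) << i
         //	t.isSending.Or(isSendingClear)   // set: about to send on the channel
         //	// ... later, while holding t.sendLock ...
         //	t.isSending.And(^isSendingClear) // clear: the send is committed or skipped
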
   103  // init initializes a newly allocated timer t.
   104  // Any code that allocates a timer must call t.init before using it.
   105  // The arg and f can be set during init, or they can be nil in init
   106  // and set by a future call to t.modify.
   107  func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
   108  	lockInit(&t.mu, lockRankTimer)
   109  	t.f = f
   110  	t.arg = arg
   111  }
   112  
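         // A hedged sketch of the expected lifecycle for a runtime-internal client
         // such as the netpoll deadline timers; the timeout value and the callback
         // body are illustrative, only the timer methods themselves are real:
         //
         //	var t timer
         //	t.init(func(arg any, seq uintptr, delay int64) {
         //		// runs once t.when has been reached; must not block
         //	}, nil)
         //	t.modify(nanotime()+timeout, 0, nil, nil, 0) // arm for a one-shot firing
         //	// ...
         //	stopped := t.stop() // true if the timer had not yet fired
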
   113  // A timers is a per-P set of timers.
   114  type timers struct {
   115  	// mu protects timers; timers are per-P, but the scheduler can
   116  	// access the timers of another P, so we have to lock.
   117  	mu mutex
   118  
   119  	// heap is the set of timers, ordered by heap[i].when.
   120  	// Must hold lock to access.
   121  	heap []timerWhen
   122  
   123  	// len is an atomic copy of len(heap).
   124  	len atomic.Uint32
   125  
   126  	// zombies is the number of timers in the heap
   127  	// that are marked for removal.
   128  	zombies atomic.Int32
   129  
   130  	// raceCtx is the race context used while executing timer functions.
   131  	raceCtx uintptr
   132  
   133  	// minWhenHeap is the minimum heap[i].when value (= heap[0].when).
   134  	// The wakeTime method uses minWhenHeap and minWhenModified
   135  	// to determine the next wake time.
   136  	// If minWhenHeap = 0, it means there are no timers in the heap.
   137  	minWhenHeap atomic.Int64
   138  
   139  	// minWhenModified is a lower bound on the minimum
   140  	// heap[i].when over timers with the timerModified bit set.
   141  	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
   142  	minWhenModified atomic.Int64
   143  }
   144  
   145  type timerWhen struct {
   146  	timer *timer
   147  	when  int64
   148  }
   149  
   150  func (ts *timers) lock() {
   151  	lock(&ts.mu)
   152  }
   153  
   154  func (ts *timers) unlock() {
   155  	// Update atomic copy of len(ts.heap).
   156  	// We only update at unlock so that the len is always
   157  	// the most recent unlocked length, not an ephemeral length.
   158  	// This matters if we lock ts, delete the only timer from the heap,
   159  	// add it back, and unlock. We want ts.len.Load to return 1 the
   160  	// entire time, never 0. This is important for pidleput deciding
   161  	// whether ts is empty.
   162  	ts.len.Store(uint32(len(ts.heap)))
   163  
   164  	unlock(&ts.mu)
   165  }
   166  
   167  // Timer state field.
   168  const (
   169  	// timerHeaped is set when the timer is stored in some P's heap.
   170  	timerHeaped uint8 = 1 << iota
   171  
   172  	// timerModified is set when t.when has been modified
   173  	// but the heap's heap[i].when entry still needs to be updated.
   174  	// That change waits until the heap in which
   175  	// the timer appears can be locked and rearranged.
   176  	// timerModified is only set when timerHeaped is also set.
   177  	timerModified
   178  
   179  	// timerZombie is set when the timer has been stopped
   180  	// but is still present in some P's heap.
   181  	// Only set when timerHeaped is also set.
   182  	// It is possible for timerModified and timerZombie to both
   183  	// be set, meaning that the timer was modified and then stopped.
   184  	// A timer sending to a channel may be placed in timerZombie
   185  	// to take it out of the heap even though the timer is not stopped,
   186  	// as long as nothing is reading from the channel.
   187  	timerZombie
   188  )
   189  
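         // As an illustrative summary, these are the bit combinations the code
         // below relies on (the expressions appear in stop, modify, cleanHead,
         // and take):
         //
         //	t.state&timerHeaped != 0                              // in some P's heap
         //	t.state&timerZombie != 0                              // heaped but marked for removal
         //	t.astate.Load()&(timerModified|timerZombie) == 0      // heap entry needs no adjustment
         //	t.state &^= timerHeaped | timerZombie | timerModified // drop all heap bookkeeping
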
   190  // timerDebug enables printing a textual debug trace of all timer operations to stderr.
   191  const timerDebug = false
   192  
   193  func (t *timer) trace(op string) {
   194  	if timerDebug {
   195  		t.trace1(op)
   196  	}
   197  }
   198  
   199  func (t *timer) trace1(op string) {
   200  	if !timerDebug {
   201  		return
   202  	}
   203  	bits := [4]string{"h", "m", "z", "c"}
   204  	for i := range 3 {
   205  		if t.state&(1<<i) == 0 {
   206  			bits[i] = "-"
   207  		}
   208  	}
   209  	if !t.isChan {
   210  		bits[3] = "-"
   211  	}
   212  	print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n")
   213  }
   214  
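         // With timerDebug set to true, a line printed by trace1 looks roughly
         // like this (the pointer value is illustrative):
         //
         //	T 0xc000102000 h-z- b=0 stop
         //
         // meaning: heaped, not modified, zombie, not a channel timer, no
         // goroutines blocked on its channel, traced during a stop operation.
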
   215  func (ts *timers) trace(op string) {
   216  	if timerDebug {
   217  		println("TS", ts, op)
   218  	}
   219  }
   220  
   221  // lock locks the timer, allowing reading or writing any of the timer fields.
   222  func (t *timer) lock() {
   223  	lock(&t.mu)
   224  	t.trace("lock")
   225  }
   226  
   227  // unlock updates t.astate and unlocks the timer.
   228  func (t *timer) unlock() {
   229  	t.trace("unlock")
   230  	// Let heap fast paths know whether heap[i].when is accurate.
   231  	// Also let maybeRunChan know whether channel is in heap.
   232  	t.astate.Store(t.state)
   233  	unlock(&t.mu)
   234  }
   235  
   236  // hchan returns the channel in t.arg.
   237  // t must be a timer with a channel.
   238  func (t *timer) hchan() *hchan {
   239  	if !t.isChan {
   240  		badTimer()
   241  	}
   242  	// Note: t.arg is a chan time.Time,
   243  	// and runtime cannot refer to that type,
   244  	// so we cannot use a type assertion.
   245  	return (*hchan)(efaceOf(&t.arg).data)
   246  }
   247  
   248  // updateHeap updates t as directed by t.state, updating t.state
   249  // and returning a bool indicating whether the state (and ts.heap[0].when) changed.
   250  // The caller must hold t's lock, or the world can be stopped instead.
   251  // The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
   252  // takes care of moving t within the timers heap to preserve the heap invariants.
    253  // If t.ts is nil or t is not ts.heap[0], the timer data structures are
    254  // considered corrupted and updateHeap calls badTimer rather than proceeding.
   255  func (t *timer) updateHeap() (updated bool) {
   256  	assertWorldStoppedOrLockHeld(&t.mu)
   257  	t.trace("updateHeap")
   258  	ts := t.ts
   259  	if ts == nil || t != ts.heap[0].timer {
   260  		badTimer()
   261  	}
   262  	assertLockHeld(&ts.mu)
   263  	if t.state&timerZombie != 0 {
   264  		// Take timer out of heap.
   265  		t.state &^= timerHeaped | timerZombie | timerModified
   266  		ts.zombies.Add(-1)
   267  		ts.deleteMin()
   268  		return true
   269  	}
   270  
   271  	if t.state&timerModified != 0 {
   272  		// Update ts.heap[0].when and move within heap.
   273  		t.state &^= timerModified
   274  		ts.heap[0].when = t.when
   275  		ts.siftDown(0)
   276  		ts.updateMinWhenHeap()
   277  		return true
   278  	}
   279  
   280  	return false
   281  }
   282  
   283  // maxWhen is the maximum value for timer's when field.
   284  const maxWhen = 1<<63 - 1
   285  
   286  // verifyTimers can be set to true to add debugging checks that the
   287  // timer heaps are valid.
   288  const verifyTimers = false
   289  
   290  // Package time APIs.
   291  // Godoc uses the comments in package time, not these.
   292  
   293  // time.now is implemented in assembly.
   294  
   295  // timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
   296  //
   297  //go:linkname timeSleep time.Sleep
   298  func timeSleep(ns int64) {
   299  	if ns <= 0 {
   300  		return
   301  	}
   302  
   303  	gp := getg()
   304  	t := gp.timer
   305  	if t == nil {
   306  		t = new(timer)
   307  		t.init(goroutineReady, gp)
   308  		gp.timer = t
   309  	}
   310  	when := nanotime() + ns
   311  	if when < 0 { // check for overflow.
   312  		when = maxWhen
   313  	}
   314  	gp.sleepWhen = when
   315  	gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
   316  }
   317  
   318  // resetForSleep is called after the goroutine is parked for timeSleep.
   319  // We can't call timer.reset in timeSleep itself because if this is a short
   320  // sleep and there are many goroutines then the P can wind up running the
   321  // timer function, goroutineReady, before the goroutine has been parked.
   322  func resetForSleep(gp *g, _ unsafe.Pointer) bool {
   323  	gp.timer.reset(gp.sleepWhen, 0)
   324  	return true
   325  }
   326  
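         // A condensed sketch of the sleep path formed by timeSleep, resetForSleep,
         // and goroutineReady (restating the code, not adding behavior):
         //
         //	time.Sleep (linknamed to timeSleep above)
         //	  -> timeSleep: gp.sleepWhen = nanotime() + ns; gopark(resetForSleep, ...)
         //	  -> resetForSleep: gp.timer.reset(gp.sleepWhen, 0) // after gp is parked
         //	  -> timer fires: goroutineReady(gp, ...) -> goready(gp) wakes the sleeper
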
   327  // A timeTimer is a runtime-allocated time.Timer or time.Ticker
   328  // with the additional runtime state following it.
   329  // The runtime state is inaccessible to package time.
   330  type timeTimer struct {
   331  	c    unsafe.Pointer // <-chan time.Time
   332  	init bool
   333  	timer
   334  }
   335  
   336  // newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
   337  // with the given parameters.
   338  //
   339  //go:linkname newTimer time.newTimer
   340  func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
   341  	t := new(timeTimer)
   342  	t.timer.init(nil, nil)
   343  	t.trace("new")
   344  	if raceenabled {
   345  		racerelease(unsafe.Pointer(&t.timer))
   346  	}
   347  	if c != nil {
   348  		lockInit(&t.sendLock, lockRankTimerSend)
   349  		t.isChan = true
   350  		c.timer = &t.timer
   351  		if c.dataqsiz == 0 {
   352  			throw("invalid timer channel: no capacity")
   353  		}
   354  	}
   355  	t.modify(when, period, f, arg, 0)
   356  	t.init = true
   357  	return t
   358  }
   359  
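         // On the package time side (a hedged, simplified sketch, not the actual
         // time source), NewTimer passes a buffered channel as both arg and c,
         // which is why the capacity check above rejects unbuffered channels:
         //
         //	c := make(chan Time, 1)
         //	t := (*Timer)(newTimer(when(d), 0, sendTime, c, c))
         //	t.C = c
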
   360  // stopTimer stops a timer.
   361  // It reports whether t was stopped before being run.
   362  //
   363  //go:linkname stopTimer time.stopTimer
   364  func stopTimer(t *timeTimer) bool {
   365  	return t.stop()
   366  }
   367  
   368  // resetTimer resets an inactive timer, adding it to the timer heap.
   369  //
   370  // Reports whether the timer was modified before it was run.
   371  //
   372  //go:linkname resetTimer time.resetTimer
   373  func resetTimer(t *timeTimer, when, period int64) bool {
   374  	if raceenabled {
   375  		racerelease(unsafe.Pointer(&t.timer))
   376  	}
   377  	return t.reset(when, period)
   378  }
   379  
   380  // Go runtime.
   381  
   382  // Ready the goroutine arg.
   383  func goroutineReady(arg any, _ uintptr, _ int64) {
   384  	goready(arg.(*g), 0)
   385  }
   386  
   387  // addHeap adds t to the timers heap.
   388  // The caller must hold ts.lock or the world must be stopped.
   389  // The caller must also have checked that t belongs in the heap.
   390  // Callers that are not sure can call t.maybeAdd instead,
   391  // but note that maybeAdd has different locking requirements.
   392  func (ts *timers) addHeap(t *timer) {
   393  	assertWorldStoppedOrLockHeld(&ts.mu)
   394  	// Timers rely on the network poller, so make sure the poller
   395  	// has started.
   396  	if netpollInited.Load() == 0 {
   397  		netpollGenericInit()
   398  	}
   399  
   400  	if t.ts != nil {
   401  		throw("ts set in timer")
   402  	}
   403  	t.ts = ts
   404  	ts.heap = append(ts.heap, timerWhen{t, t.when})
   405  	ts.siftUp(len(ts.heap) - 1)
   406  	if t == ts.heap[0].timer {
   407  		ts.updateMinWhenHeap()
   408  	}
   409  }
   410  
   411  // maybeRunAsync checks whether t needs to be triggered and runs it if so.
   412  // The caller is responsible for locking the timer and for checking that we
   413  // are running timers in async mode. If the timer needs to be run,
   414  // maybeRunAsync will unlock and re-lock it.
   415  // The timer is always locked on return.
   416  func (t *timer) maybeRunAsync() {
   417  	assertLockHeld(&t.mu)
   418  	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
   419  		// If timer should have triggered already (but nothing looked at it yet),
   420  		// trigger now, so that a receive after the stop sees the "old" value
   421  		// that should be there.
   422  		// (It is possible to have t.blocked > 0 if there is a racing receive
   423  		// in blockTimerChan, but timerHeaped not being set means
   424  		// it hasn't run t.maybeAdd yet; in that case, running the
   425  		// timer ourselves now is fine.)
   426  		if now := nanotime(); t.when <= now {
   427  			systemstack(func() {
   428  				t.unlockAndRun(now) // resets t.when
   429  			})
   430  			t.lock()
   431  		}
   432  	}
   433  }
   434  
   435  // stop stops the timer t. It may be on some other P, so we can't
   436  // actually remove it from the timers heap. We can only mark it as stopped.
   437  // It will be removed in due course by the P whose heap it is on.
   438  // Reports whether the timer was stopped before it was run.
   439  func (t *timer) stop() bool {
   440  	async := debug.asynctimerchan.Load() != 0
   441  	if !async && t.isChan {
   442  		lock(&t.sendLock)
   443  	}
   444  
   445  	t.lock()
   446  	t.trace("stop")
   447  	if async {
   448  		t.maybeRunAsync()
   449  	}
   450  	if t.state&timerHeaped != 0 {
   451  		t.state |= timerModified
   452  		if t.state&timerZombie == 0 {
   453  			t.state |= timerZombie
   454  			t.ts.zombies.Add(1)
   455  		}
   456  	}
   457  	pending := t.when > 0
   458  	t.when = 0
   459  
   460  	if !async && t.isChan {
   461  		// Stop any future sends with stale values.
   462  		// See timer.unlockAndRun.
   463  		t.seq++
   464  
   465  		// If there is currently a send in progress,
   466  		// incrementing seq is going to prevent that
   467  		// send from actually happening. That means
   468  		// that we should return true: the timer was
   469  		// stopped, even though t.when may be zero.
   470  		if t.isSending.Load() > 0 {
   471  			pending = true
   472  		}
   473  	}
   474  	t.unlock()
   475  	if !async && t.isChan {
   476  		unlock(&t.sendLock)
   477  		if timerchandrain(t.hchan()) {
   478  			pending = true
   479  		}
   480  	}
   481  
   482  	return pending
   483  }
   484  
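         // stop backs time.(*Timer).Stop via stopTimer above. Under the async
         // (pre-Go 1.23) channel semantics, a false return is what made callers
         // use the drain idiom, shown here as an illustrative caller-side sketch:
         //
         //	if !t.Stop() {
         //		<-t.C
         //	}
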
   485  // deleteMin removes timer 0 from ts.
   486  // ts must be locked.
   487  func (ts *timers) deleteMin() {
   488  	assertLockHeld(&ts.mu)
   489  	t := ts.heap[0].timer
   490  	if t.ts != ts {
   491  		throw("wrong timers")
   492  	}
   493  	t.ts = nil
   494  	last := len(ts.heap) - 1
   495  	if last > 0 {
   496  		ts.heap[0] = ts.heap[last]
   497  	}
   498  	ts.heap[last] = timerWhen{}
   499  	ts.heap = ts.heap[:last]
   500  	if last > 0 {
   501  		ts.siftDown(0)
   502  	}
   503  	ts.updateMinWhenHeap()
   504  	if last == 0 {
   505  		// If there are no timers, then clearly there are no timerModified timers.
   506  		ts.minWhenModified.Store(0)
   507  	}
   508  }
   509  
   510  // modify modifies an existing timer.
   511  // This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
   512  // Reports whether the timer was modified before it was run.
   513  // If f == nil, then t.f, t.arg, and t.seq are not modified.
   514  func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
   515  	if when <= 0 {
   516  		throw("timer when must be positive")
   517  	}
   518  	if period < 0 {
   519  		throw("timer period must be non-negative")
   520  	}
   521  	async := debug.asynctimerchan.Load() != 0
   522  
   523  	if !async && t.isChan {
   524  		lock(&t.sendLock)
   525  	}
   526  
   527  	t.lock()
   528  	if async {
   529  		t.maybeRunAsync()
   530  	}
   531  	t.trace("modify")
   532  	t.period = period
   533  	if f != nil {
   534  		t.f = f
   535  		t.arg = arg
   536  		t.seq = seq
   537  	}
   538  
   539  	wake := false
   540  	pending := t.when > 0
   541  	t.when = when
   542  	if t.state&timerHeaped != 0 {
   543  		t.state |= timerModified
   544  		if t.state&timerZombie != 0 {
   545  			// In the heap but marked for removal (by a Stop).
   546  			// Unmark it, since it has been Reset and will be running again.
   547  			t.ts.zombies.Add(-1)
   548  			t.state &^= timerZombie
   549  		}
   550  		// The corresponding heap[i].when is updated later.
   551  		// See comment in type timer above and in timers.adjust below.
   552  		if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
   553  			wake = true
   554  			// Force timerModified bit out to t.astate before updating t.minWhenModified,
   555  			// to synchronize with t.ts.adjust. See comment in adjust.
   556  			t.astate.Store(t.state)
   557  			t.ts.updateMinWhenModified(when)
   558  		}
   559  	}
   560  
   561  	add := t.needsAdd()
   562  
   563  	if !async && t.isChan {
   564  		// Stop any future sends with stale values.
   565  		// See timer.unlockAndRun.
   566  		t.seq++
   567  
   568  		// If there is currently a send in progress,
   569  		// incrementing seq is going to prevent that
   570  		// send from actually happening. That means
   571  		// that we should return true: the timer was
   572  		// stopped, even though t.when may be zero.
   573  		if t.isSending.Load() > 0 {
   574  			pending = true
   575  		}
   576  	}
   577  	t.unlock()
   578  	if !async && t.isChan {
   579  		if timerchandrain(t.hchan()) {
   580  			pending = true
   581  		}
   582  		unlock(&t.sendLock)
   583  	}
   584  
   585  	if add {
   586  		t.maybeAdd()
   587  	}
   588  	if wake {
   589  		wakeNetPoller(when)
   590  	}
   591  
   592  	return pending
   593  }
   594  
   595  // needsAdd reports whether t needs to be added to a timers heap.
   596  // t must be locked.
   597  func (t *timer) needsAdd() bool {
   598  	assertLockHeld(&t.mu)
   599  	need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0)
   600  	if need {
   601  		t.trace("needsAdd+")
   602  	} else {
   603  		t.trace("needsAdd-")
   604  	}
   605  	return need
   606  }
   607  
   608  // maybeAdd adds t to the local timers heap if it needs to be in a heap.
   609  // The caller must not hold t's lock nor any timers heap lock.
   610  // The caller probably just unlocked t, but that lock must be dropped
   611  // in order to acquire a ts.lock, to avoid lock inversions.
   612  // (timers.adjust holds ts.lock while acquiring each t's lock,
   613  // so we cannot hold any t's lock while acquiring ts.lock).
   614  //
   615  // Strictly speaking it *might* be okay to hold t.lock and
   616  // acquire ts.lock at the same time, because we know that
   617  // t is not in any ts.heap, so nothing holding a ts.lock would
   618  // be acquiring the t.lock at the same time, meaning there
   619  // isn't a possible deadlock. But it is easier and safer not to be
   620  // too clever and respect the static ordering.
   621  // (If we don't, we have to change the static lock checking of t and ts.)
   622  //
   623  // Concurrent calls to time.Timer.Reset or blockTimerChan
   624  // may result in concurrent calls to t.maybeAdd,
   625  // so we cannot assume that t is not in a heap on entry to t.maybeAdd.
   626  func (t *timer) maybeAdd() {
   627  	// Note: Not holding any locks on entry to t.maybeAdd,
   628  	// so the current g can be rescheduled to a different M and P
   629  	// at any time, including between the ts := assignment and the
   630  	// call to ts.lock. If a reschedule happened then, we would be
   631  	// adding t to some other P's timers, perhaps even a P that the scheduler
   632  	// has marked as idle with no timers, in which case the timer could
   633  	// go unnoticed until long after t.when.
   634  	// Calling acquirem instead of using getg().m makes sure that
   635  	// we end up locking and inserting into the current P's timers.
   636  	mp := acquirem()
   637  	ts := &mp.p.ptr().timers
   638  	ts.lock()
   639  	ts.cleanHead()
   640  	t.lock()
   641  	t.trace("maybeAdd")
   642  	when := int64(0)
   643  	wake := false
   644  	if t.needsAdd() {
   645  		t.state |= timerHeaped
   646  		when = t.when
   647  		wakeTime := ts.wakeTime()
   648  		wake = wakeTime == 0 || when < wakeTime
   649  		ts.addHeap(t)
   650  	}
   651  	t.unlock()
   652  	ts.unlock()
   653  	releasem(mp)
   654  	if wake {
   655  		wakeNetPoller(when)
   656  	}
   657  }
   658  
   659  // reset resets the time when a timer should fire.
   660  // If used for an inactive timer, the timer will become active.
   661  // Reports whether the timer was active and was stopped.
   662  func (t *timer) reset(when, period int64) bool {
   663  	return t.modify(when, period, nil, nil, 0)
   664  }
   665  
   666  // cleanHead cleans up the head of the timer queue. This speeds up
   667  // programs that create and delete timers; leaving them in the heap
   668  // slows down heap operations.
   669  // The caller must have locked ts.
   670  func (ts *timers) cleanHead() {
   671  	ts.trace("cleanHead")
   672  	assertLockHeld(&ts.mu)
   673  	gp := getg()
   674  	for {
   675  		if len(ts.heap) == 0 {
   676  			return
   677  		}
   678  
   679  		// This loop can theoretically run for a while, and because
    680  		// it is holding the timers lock it cannot be preempted.
   681  		// If someone is trying to preempt us, just return.
   682  		// We can clean the timers later.
   683  		if gp.preemptStop {
   684  			return
   685  		}
   686  
   687  		// Delete zombies from tail of heap. It requires no heap adjustments at all,
   688  		// and doing so increases the chances that when we swap out a zombie
   689  		// in heap[0] for the tail of the heap, we'll get a non-zombie timer,
   690  		// shortening this loop.
   691  		n := len(ts.heap)
   692  		if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 {
   693  			t.lock()
   694  			if t.state&timerZombie != 0 {
   695  				t.state &^= timerHeaped | timerZombie | timerModified
   696  				t.ts = nil
   697  				ts.zombies.Add(-1)
   698  				ts.heap[n-1] = timerWhen{}
   699  				ts.heap = ts.heap[:n-1]
   700  			}
   701  			t.unlock()
   702  			continue
   703  		}
   704  
   705  		t := ts.heap[0].timer
   706  		if t.ts != ts {
   707  			throw("bad ts")
   708  		}
   709  
   710  		if t.astate.Load()&(timerModified|timerZombie) == 0 {
   711  			// Fast path: head of timers does not need adjustment.
   712  			return
   713  		}
   714  
   715  		t.lock()
   716  		updated := t.updateHeap()
   717  		t.unlock()
   718  		if !updated {
   719  			// Head of timers does not need adjustment.
   720  			return
   721  		}
   722  	}
   723  }
   724  
   725  // take moves any timers from src into ts
   726  // and then clears the timer state from src,
   727  // because src is being destroyed.
   728  // The caller must not have locked either timers.
   729  // For now this is only called when the world is stopped.
   730  func (ts *timers) take(src *timers) {
   731  	ts.trace("take")
   732  	assertWorldStopped()
   733  	if len(src.heap) > 0 {
   734  		// The world is stopped, so we ignore the locking of ts and src here.
   735  		// That would introduce a sched < timers lock ordering,
   736  		// which we'd rather avoid in the static ranking.
   737  		for _, tw := range src.heap {
   738  			t := tw.timer
   739  			t.ts = nil
   740  			if t.state&timerZombie != 0 {
   741  				t.state &^= timerHeaped | timerZombie | timerModified
   742  			} else {
   743  				t.state &^= timerModified
   744  				ts.addHeap(t)
   745  			}
   746  		}
   747  		src.heap = nil
   748  		src.zombies.Store(0)
   749  		src.minWhenHeap.Store(0)
   750  		src.minWhenModified.Store(0)
   751  		src.len.Store(0)
   752  		ts.len.Store(uint32(len(ts.heap)))
   753  	}
   754  }
   755  
   756  // adjust looks through the timers in ts.heap for
   757  // any timers that have been modified to run earlier, and puts them in
   758  // the correct place in the heap. While looking for those timers,
   759  // it also moves timers that have been modified to run later,
   760  // and removes deleted timers. The caller must have locked ts.
   761  func (ts *timers) adjust(now int64, force bool) {
   762  	ts.trace("adjust")
   763  	assertLockHeld(&ts.mu)
   764  	// If we haven't yet reached the time of the earliest modified
   765  	// timer, don't do anything. This speeds up programs that adjust
   766  	// a lot of timers back and forth if the timers rarely expire.
   767  	// We'll postpone looking through all the adjusted timers until
   768  	// one would actually expire.
   769  	if !force {
   770  		first := ts.minWhenModified.Load()
   771  		if first == 0 || first > now {
   772  			if verifyTimers {
   773  				ts.verify()
   774  			}
   775  			return
   776  		}
   777  	}
   778  
   779  	// minWhenModified is a lower bound on the earliest t.when
   780  	// among the timerModified timers. We want to make it more precise:
   781  	// we are going to scan the heap and clean out all the timerModified bits,
   782  	// at which point minWhenModified can be set to 0 (indicating none at all).
   783  	//
   784  	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
   785  	// keep ts.wakeTime returning an accurate value throughout this entire process.
   786  	//
   787  	// Setting minWhenModified = 0 *before* the scan could make wakeTime
   788  	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
   789  	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
   790  	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
   791  	//
   792  	// Setting minWhenModified = 0 *after* the scan could result in missing
   793  	// concurrent timer modifications in other goroutines; those will lock
   794  	// the specific timer, set the timerModified bit, and set t.when.
   795  	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
   796  	//
   797  	// The way out of this dilemma is to preserve wakeTime a different way.
   798  	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
   799  	// is protected by ts.lock, which we hold, so we can modify it however we like
   800  	// in service of keeping wakeTime accurate.
   801  	//
   802  	// So we can:
   803  	//
   804  	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
   805  	//	2. Set minWhenModified = 0
   806  	//	   (Other goroutines may modify timers and update minWhenModified now.)
   807  	//	3. Scan timers
   808  	//	4. Set minWhenHeap = heap[0].when
   809  	//
   810  	// That order preserves a correct value of wakeTime throughout the entire
   811  	// operation:
   812  	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
   813  	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
   814  	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
   815  	// Step 4 corrects minWhenHeap to a precise value.
   816  	//
   817  	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
   818  	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
   819  	// follows will include the information that was zeroed out of it.
   820  	//
   821  	// Originally Step 3 locked every timer, which made sure any timer update that was
   822  	// already in progress during Steps 1+2 completed and was observed by Step 3.
   823  	// All that locking was too expensive, so now we do an atomic load of t.astate to
   824  	// decide whether we need to do a full lock. To make sure that we still observe any
   825  	// timer update already in progress during Steps 1+2, t.modify sets timerModified
   826  	// in t.astate *before* calling t.updateMinWhenModified. That ensures that the
   827  	// overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3
   828  	// will see the timerModified and do a full lock.
   829  	ts.minWhenHeap.Store(ts.wakeTime())
   830  	ts.minWhenModified.Store(0)
   831  
   832  	changed := false
   833  	for i := 0; i < len(ts.heap); i++ {
   834  		tw := &ts.heap[i]
   835  		t := tw.timer
   836  		if t.ts != ts {
   837  			throw("bad ts")
   838  		}
   839  
   840  		if t.astate.Load()&(timerModified|timerZombie) == 0 {
   841  			// Does not need adjustment.
   842  			continue
   843  		}
   844  
   845  		t.lock()
   846  		switch {
   847  		case t.state&timerHeaped == 0:
   848  			badTimer()
   849  
   850  		case t.state&timerZombie != 0:
   851  			ts.zombies.Add(-1)
   852  			t.state &^= timerHeaped | timerZombie | timerModified
   853  			n := len(ts.heap)
   854  			ts.heap[i] = ts.heap[n-1]
   855  			ts.heap[n-1] = timerWhen{}
   856  			ts.heap = ts.heap[:n-1]
   857  			t.ts = nil
   858  			i--
   859  			changed = true
   860  
   861  		case t.state&timerModified != 0:
   862  			tw.when = t.when
   863  			t.state &^= timerModified
   864  			changed = true
   865  		}
   866  		t.unlock()
   867  	}
   868  
   869  	if changed {
   870  		ts.initHeap()
   871  	}
   872  	ts.updateMinWhenHeap()
   873  
   874  	if verifyTimers {
   875  		ts.verify()
   876  	}
   877  }
   878  
   879  // wakeTime looks at ts's timers and returns the time when we
   880  // should wake up the netpoller. It returns 0 if there are no timers.
   881  // This function is invoked when dropping a P, so it must run without
   882  // any write barriers.
   883  //
   884  //go:nowritebarrierrec
   885  func (ts *timers) wakeTime() int64 {
   886  	// Note that the order of these two loads matters:
   887  	// adjust updates minWhen to make it safe to clear minNextWhen.
   888  	// We read minWhen after reading minNextWhen so that
   889  	// if we see a cleared minNextWhen, we are guaranteed to see
   890  	// the updated minWhen.
   891  	nextWhen := ts.minWhenModified.Load()
   892  	when := ts.minWhenHeap.Load()
   893  	if when == 0 || (nextWhen != 0 && nextWhen < when) {
   894  		when = nextWhen
   895  	}
   896  	return when
   897  }
   898  
   899  // check runs any timers in ts that are ready.
   900  // If now is not 0 it is the current time.
    901  // It returns the passed time or the current time if now was passed as 0,
   902  // and the time when the next timer should run or 0 if there is no next timer,
   903  // and reports whether it ran any timers.
   904  // If the time when the next timer should run is not 0,
   905  // it is always larger than the returned time.
   906  // We pass now in and out to avoid extra calls of nanotime.
   907  //
   908  //go:yeswritebarrierrec
   909  func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
   910  	ts.trace("check")
   911  	// If it's not yet time for the first timer, or the first adjusted
   912  	// timer, then there is nothing to do.
   913  	next := ts.wakeTime()
   914  	if next == 0 {
   915  		// No timers to run or adjust.
   916  		return now, 0, false
   917  	}
   918  
   919  	if now == 0 {
   920  		now = nanotime()
   921  	}
   922  
   923  	// If this is the local P, and there are a lot of deleted timers,
   924  	// clear them out. We only do this for the local P to reduce
    925  	// lock contention on the timers lock.
   926  	zombies := ts.zombies.Load()
   927  	if zombies < 0 {
   928  		badTimer()
   929  	}
   930  	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4
   931  
   932  	if now < next && !force {
   933  		// Next timer is not ready to run, and we don't need to clear deleted timers.
   934  		return now, next, false
   935  	}
   936  
   937  	ts.lock()
   938  	if len(ts.heap) > 0 {
   939  		ts.adjust(now, false)
   940  		for len(ts.heap) > 0 {
    941  			// Note that ts.run may temporarily unlock ts.
   942  			if tw := ts.run(now); tw != 0 {
   943  				if tw > 0 {
   944  					pollUntil = tw
   945  				}
   946  				break
   947  			}
   948  			ran = true
   949  		}
   950  
   951  		// Note: Delaying the forced adjustment until after the ts.run
   952  		// (as opposed to calling ts.adjust(now, force) above)
   953  		// is significantly faster under contention, such as in
   954  		// package time's BenchmarkTimerAdjust10000,
   955  		// though we do not fully understand why.
   956  		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
   957  		if force {
   958  			ts.adjust(now, true)
   959  		}
   960  	}
   961  	ts.unlock()
   962  
   963  	return now, pollUntil, ran
   964  }
   965  
   966  // run examines the first timer in ts. If it is ready based on now,
   967  // it runs the timer and removes or updates it.
   968  // Returns 0 if it ran a timer, -1 if there are no more timers, or the time
   969  // when the first timer should run.
   970  // The caller must have locked ts.
   971  // If a timer is run, this will temporarily unlock ts.
   972  //
   973  //go:systemstack
   974  func (ts *timers) run(now int64) int64 {
   975  	ts.trace("run")
   976  	assertLockHeld(&ts.mu)
   977  Redo:
   978  	if len(ts.heap) == 0 {
   979  		return -1
   980  	}
   981  	tw := ts.heap[0]
   982  	t := tw.timer
   983  	if t.ts != ts {
   984  		throw("bad ts")
   985  	}
   986  
   987  	if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now {
   988  		// Fast path: not ready to run.
   989  		return tw.when
   990  	}
   991  
   992  	t.lock()
   993  	if t.updateHeap() {
   994  		t.unlock()
   995  		goto Redo
   996  	}
   997  
   998  	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
   999  		badTimer()
  1000  	}
  1001  
  1002  	if t.when > now {
  1003  		// Not ready to run.
  1004  		t.unlock()
  1005  		return t.when
  1006  	}
  1007  
  1008  	t.unlockAndRun(now)
  1009  	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
  1010  	return 0
  1011  }
  1012  
  1013  // unlockAndRun unlocks and runs the timer t (which must be locked).
  1014  // If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
  1015  // and this call will temporarily unlock the timer set while running the timer function.
  1016  // unlockAndRun returns with t unlocked and t.ts (re-)locked.
  1017  //
  1018  //go:systemstack
  1019  func (t *timer) unlockAndRun(now int64) {
  1020  	t.trace("unlockAndRun")
  1021  	assertLockHeld(&t.mu)
  1022  	if t.ts != nil {
  1023  		assertLockHeld(&t.ts.mu)
  1024  	}
  1025  	if raceenabled {
  1026  		// Note that we are running on a system stack,
  1027  		// so there is no chance of getg().m being reassigned
  1028  		// out from under us while this function executes.
  1029  		tsLocal := &getg().m.p.ptr().timers
  1030  		if tsLocal.raceCtx == 0 {
  1031  			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
  1032  		}
  1033  		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
  1034  	}
  1035  
  1036  	if t.state&(timerModified|timerZombie) != 0 {
  1037  		badTimer()
  1038  	}
  1039  
  1040  	f := t.f
  1041  	arg := t.arg
  1042  	seq := t.seq
  1043  	var next int64
  1044  	delay := now - t.when
  1045  	if t.period > 0 {
  1046  		// Leave in heap but adjust next time to fire.
  1047  		next = t.when + t.period*(1+delay/t.period)
  1048  		if next < 0 { // check for overflow.
  1049  			next = maxWhen
  1050  		}
  1051  	} else {
  1052  		next = 0
  1053  	}
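         	// Worked example (illustrative): with t.when = 100, t.period = 50, and
         	// now = 230, delay is 130 and delay/t.period is 2, so next = 100 + 50*3 = 250:
         	// missed ticks are skipped and the next firing lands strictly after now.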
  1054  	ts := t.ts
  1055  	t.when = next
  1056  	if t.state&timerHeaped != 0 {
  1057  		t.state |= timerModified
  1058  		if next == 0 {
  1059  			t.state |= timerZombie
  1060  			t.ts.zombies.Add(1)
  1061  		}
  1062  		t.updateHeap()
  1063  	}
  1064  
  1065  	async := debug.asynctimerchan.Load() != 0
  1066  	var isSendingClear uint8
  1067  	if !async && t.isChan {
  1068  		// Tell Stop/Reset that we are sending a value.
  1069  		// Set the lowest zero bit.
  1070  		// We do this awkward step because atomic.Uint8
  1071  		// doesn't support Add or CompareAndSwap.
  1072  		// We only set bits with t locked.
  1073  		v := t.isSending.Load()
  1074  		i := sys.TrailingZeros8(^v)
  1075  		if i == 8 {
  1076  			throw("too many concurrent timer firings")
  1077  		}
  1078  		isSendingClear = 1 << i
  1079  		t.isSending.Or(isSendingClear)
  1080  	}
  1081  
  1082  	t.unlock()
  1083  
  1084  	if raceenabled {
  1085  		// Temporarily use the current P's racectx for g0.
  1086  		gp := getg()
  1087  		if gp.racectx != 0 {
  1088  			throw("unexpected racectx")
  1089  		}
  1090  		gp.racectx = gp.m.p.ptr().timers.raceCtx
  1091  	}
  1092  
  1093  	if ts != nil {
  1094  		ts.unlock()
  1095  	}
  1096  
  1097  	if !async && t.isChan {
  1098  		// For a timer channel, we want to make sure that no stale sends
  1099  		// happen after a t.stop or t.modify, but we cannot hold t.mu
  1100  		// during the actual send (which f does) due to lock ordering.
  1101  		// It can happen that we are holding t's lock above, we decide
  1102  		// it's time to send a time value (by calling f), grab the parameters,
  1103  		// unlock above, and then a t.stop or t.modify changes the timer
   1104  		// and returns. At that point, the send should not happen after all.
  1105  		// The way we arrange for it not to happen is that t.stop and t.modify
  1106  		// both increment t.seq while holding both t.mu and t.sendLock.
  1107  		// We copied the seq value above while holding t.mu.
  1108  		// Now we can acquire t.sendLock (which will be held across the send)
  1109  		// and double-check that t.seq is still the seq value we saw above.
  1110  		// If not, the timer has been updated and we should skip the send.
  1111  		// We skip the send by reassigning f to a no-op function.
  1112  		//
  1113  		// The isSending field tells t.stop or t.modify that we have
  1114  		// started to send the value. That lets them correctly return
  1115  		// true meaning that no value was sent.
  1116  		lock(&t.sendLock)
  1117  
  1118  		// We are committed to possibly sending a value based on seq,
  1119  		// so no need to keep telling stop/modify that we are sending.
  1120  		t.isSending.And(^isSendingClear)
  1121  
  1122  		if t.seq != seq {
  1123  			f = func(any, uintptr, int64) {}
  1124  		}
  1125  	}
  1126  
  1127  	f(arg, seq, delay)
  1128  
  1129  	if !async && t.isChan {
  1130  		unlock(&t.sendLock)
  1131  	}
  1132  
  1133  	if ts != nil {
  1134  		ts.lock()
  1135  	}
  1136  
  1137  	if raceenabled {
  1138  		gp := getg()
  1139  		gp.racectx = 0
  1140  	}
  1141  }
  1142  
   1143  // verify checks that the timer heap is in a valid state.
  1144  // This is only for debugging, and is only called if verifyTimers is true.
  1145  // The caller must have locked ts.
  1146  func (ts *timers) verify() {
  1147  	assertLockHeld(&ts.mu)
  1148  	for i, tw := range ts.heap {
  1149  		if i == 0 {
  1150  			// First timer has no parent.
  1151  			continue
  1152  		}
  1153  
   1154  		// The heap is timerHeapN-ary. See siftUp and siftDown.
  1155  		p := int(uint(i-1) / timerHeapN)
  1156  		if tw.when < ts.heap[p].when {
  1157  			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
  1158  			throw("bad timer heap")
  1159  		}
  1160  	}
  1161  	if n := int(ts.len.Load()); len(ts.heap) != n {
  1162  		println("timer heap len", len(ts.heap), "!= atomic len", n)
  1163  		throw("bad timer heap len")
  1164  	}
  1165  }
  1166  
  1167  // updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when.
  1168  // The caller must have locked ts or the world must be stopped.
  1169  func (ts *timers) updateMinWhenHeap() {
  1170  	assertWorldStoppedOrLockHeld(&ts.mu)
  1171  	if len(ts.heap) == 0 {
  1172  		ts.minWhenHeap.Store(0)
  1173  	} else {
  1174  		ts.minWhenHeap.Store(ts.heap[0].when)
  1175  	}
  1176  }
  1177  
  1178  // updateMinWhenModified updates ts.minWhenModified to be <= when.
  1179  // ts need not be (and usually is not) locked.
  1180  func (ts *timers) updateMinWhenModified(when int64) {
  1181  	for {
  1182  		old := ts.minWhenModified.Load()
  1183  		if old != 0 && old < when {
  1184  			return
  1185  		}
  1186  		if ts.minWhenModified.CompareAndSwap(old, when) {
  1187  			return
  1188  		}
  1189  	}
  1190  }
  1191  
  1192  // timeSleepUntil returns the time when the next timer should fire. Returns
  1193  // maxWhen if there are no timers.
  1194  // This is only called by sysmon and checkdead.
  1195  func timeSleepUntil() int64 {
  1196  	next := int64(maxWhen)
  1197  
  1198  	// Prevent allp slice changes. This is like retake.
  1199  	lock(&allpLock)
  1200  	for _, pp := range allp {
  1201  		if pp == nil {
  1202  			// This can happen if procresize has grown
  1203  			// allp but not yet created new Ps.
  1204  			continue
  1205  		}
  1206  
  1207  		if w := pp.timers.wakeTime(); w != 0 {
  1208  			next = min(next, w)
  1209  		}
  1210  	}
  1211  	unlock(&allpLock)
  1212  
  1213  	return next
  1214  }
  1215  
  1216  const timerHeapN = 4
  1217  
  1218  // Heap maintenance algorithms.
  1219  // These algorithms check for slice index errors manually.
   1220  // Slice index errors can happen if the program is using racy
  1221  // access to timers. We don't want to panic here, because
  1222  // it will cause the program to crash with a mysterious
  1223  // "panic holding locks" message. Instead, we panic while not
  1224  // holding a lock.
  1225  
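         // With timerHeapN = 4, the index arithmetic used by siftUp and siftDown
         // below works out as follows (illustrative summary):
         //
         //	parent(i)   = (i-1) / 4           // children of 0 are 1..4, of 1 are 5..8
         //	children(i) = 4*i + 1 .. 4*i + 4  // clamped to len(heap)
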
  1226  // siftUp puts the timer at position i in the right place
  1227  // in the heap by moving it up toward the top of the heap.
  1228  func (ts *timers) siftUp(i int) {
  1229  	heap := ts.heap
  1230  	if i >= len(heap) {
  1231  		badTimer()
  1232  	}
  1233  	tw := heap[i]
  1234  	when := tw.when
  1235  	if when <= 0 {
  1236  		badTimer()
  1237  	}
  1238  	for i > 0 {
  1239  		p := int(uint(i-1) / timerHeapN) // parent
  1240  		if when >= heap[p].when {
  1241  			break
  1242  		}
  1243  		heap[i] = heap[p]
  1244  		i = p
  1245  	}
  1246  	if heap[i].timer != tw.timer {
  1247  		heap[i] = tw
  1248  	}
  1249  }
  1250  
  1251  // siftDown puts the timer at position i in the right place
  1252  // in the heap by moving it down toward the bottom of the heap.
  1253  func (ts *timers) siftDown(i int) {
  1254  	heap := ts.heap
  1255  	n := len(heap)
  1256  	if i >= n {
  1257  		badTimer()
  1258  	}
  1259  	if i*timerHeapN+1 >= n {
  1260  		return
  1261  	}
  1262  	tw := heap[i]
  1263  	when := tw.when
  1264  	if when <= 0 {
  1265  		badTimer()
  1266  	}
  1267  	for {
  1268  		leftChild := i*timerHeapN + 1
  1269  		if leftChild >= n {
  1270  			break
  1271  		}
  1272  		w := when
  1273  		c := -1
  1274  		for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
  1275  			if tw.when < w {
  1276  				w = tw.when
  1277  				c = leftChild + j
  1278  			}
  1279  		}
  1280  		if c < 0 {
  1281  			break
  1282  		}
  1283  		heap[i] = heap[c]
  1284  		i = c
  1285  	}
  1286  	if heap[i].timer != tw.timer {
  1287  		heap[i] = tw
  1288  	}
  1289  }
  1290  
  1291  // initHeap reestablishes the heap order in the slice ts.heap.
  1292  // It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
  1293  func (ts *timers) initHeap() {
  1294  	// Last possible element that needs sifting down is parent of last element;
   1295  	// last element is len(ts.heap)-1; parent of last element is (len(ts.heap)-1-1)/timerHeapN.
  1296  	if len(ts.heap) <= 1 {
  1297  		return
  1298  	}
  1299  	for i := int(uint(len(ts.heap)-1-1) / timerHeapN); i >= 0; i-- {
  1300  		ts.siftDown(i)
  1301  	}
  1302  }
  1303  
  1304  // badTimer is called if the timer data structures have been corrupted,
  1305  // presumably due to racy use by the program. We panic here rather than
  1306  // panicking due to invalid slice access while holding locks.
  1307  // See issue #25686.
  1308  func badTimer() {
  1309  	throw("timer data corruption")
  1310  }
  1311  
  1312  // Timer channels.
  1313  
  1314  // maybeRunChan checks whether the timer needs to run
  1315  // to send a value to its associated channel. If so, it does.
  1316  // The timer must not be locked.
  1317  func (t *timer) maybeRunChan() {
  1318  	if t.astate.Load()&timerHeaped != 0 {
  1319  		// If the timer is in the heap, the ordinary timer code
  1320  		// is in charge of sending when appropriate.
  1321  		return
  1322  	}
  1323  
  1324  	t.lock()
  1325  	now := nanotime()
  1326  	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
  1327  		t.trace("maybeRunChan-")
  1328  		// Timer in the heap, or not running at all, or not triggered.
  1329  		t.unlock()
  1330  		return
  1331  	}
  1332  	t.trace("maybeRunChan+")
  1333  	systemstack(func() {
  1334  		t.unlockAndRun(now)
  1335  	})
  1336  }
  1337  
  1338  // blockTimerChan is called when a channel op has decided to block on c.
  1339  // The caller holds the channel lock for c and possibly other channels.
  1340  // blockTimerChan makes sure that c is in a timer heap,
  1341  // adding it if needed.
  1342  func blockTimerChan(c *hchan) {
  1343  	t := c.timer
  1344  	t.lock()
  1345  	t.trace("blockTimerChan")
  1346  	if !t.isChan {
  1347  		badTimer()
  1348  	}
  1349  
  1350  	t.blocked++
  1351  
  1352  	// If this is the first enqueue after a recent dequeue,
  1353  	// the timer may still be in the heap but marked as a zombie.
  1354  	// Unmark it in this case, if the timer is still pending.
  1355  	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
  1356  		t.state &^= timerZombie
  1357  		t.ts.zombies.Add(-1)
  1358  	}
  1359  
  1360  	// t.maybeAdd must be called with t unlocked,
  1361  	// because it needs to lock t.ts before t.
   1362  	// Then it will do nothing if t.needsAdd() is false.
  1363  	// Check that now before the unlock,
  1364  	// avoiding the extra lock-lock-unlock-unlock
  1365  	// inside maybeAdd when t does not need to be added.
  1366  	add := t.needsAdd()
  1367  	t.unlock()
  1368  	if add {
  1369  		t.maybeAdd()
  1370  	}
  1371  }
  1372  
  1373  // unblockTimerChan is called when a channel op that was blocked on c
  1374  // is no longer blocked. Every call to blockTimerChan must be paired with
  1375  // a call to unblockTimerChan.
  1376  // The caller holds the channel lock for c and possibly other channels.
  1377  // unblockTimerChan removes c from the timer heap when nothing is
  1378  // blocked on it anymore.
  1379  func unblockTimerChan(c *hchan) {
  1380  	t := c.timer
  1381  	t.lock()
  1382  	t.trace("unblockTimerChan")
  1383  	if !t.isChan || t.blocked == 0 {
  1384  		badTimer()
  1385  	}
  1386  	t.blocked--
  1387  	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
  1388  		// Last goroutine that was blocked on this timer.
  1389  		// Mark for removal from heap but do not clear t.when,
  1390  		// so that we know what time it is still meant to trigger.
  1391  		t.state |= timerZombie
  1392  		t.ts.zombies.Add(1)
  1393  	}
  1394  	t.unlock()
  1395  }
  1396  
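         // A condensed summary of the lazy channel-timer protocol implemented by
         // maybeRunChan, blockTimerChan, and unblockTimerChan above (restating the
         // code, not adding to it):
         //
         //	channel op on t's channel    -> maybeRunChan: if t is not heaped and
         //	                                t.when has passed, fire t immediately
         //	channel op decides to block  -> blockTimerChan: t.blocked++, make sure
         //	                                t is in a heap
         //	blocked op stops blocking    -> unblockTimerChan: t.blocked--, mark t
         //	                                zombie when blocked reaches zero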
