// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

const (
	locked uintptr = 1
)

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
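			// (The racing notewakeup has posted, or is about to post, this m's
			// semaphore; consuming it here keeps the m's semaphore count
			// balanced for future notesleep/notetsleep calls.)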
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)

	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}