Source file src/runtime/pinner.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// A Pinner is a set of Go objects each pinned to a fixed location in memory. The
// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned
// objects. See their comments for more information.
type Pinner struct {
	*pinner
}

// Pin pins a Go object, preventing it from being moved or freed by the garbage
// collector until the [Pinner.Unpin] method has been called.
//
// A pointer to a pinned object can be directly stored in C memory or can be
// contained in Go memory passed to C functions. If the pinned object itself
// contains pointers to Go objects, these objects must be pinned separately if they
// are going to be accessed from C code.
//
// The argument must be a pointer of any type or an [unsafe.Pointer].
// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
func (p *Pinner) Pin(pointer any) {
	if p.pinner == nil {
		// Check the pinner cache first.
		mp := acquirem()
		if pp := mp.p.ptr(); pp != nil {
			p.pinner = pp.pinnerCache
			pp.pinnerCache = nil
		}
		releasem(mp)

		if p.pinner == nil {
			// Didn't get anything from the pinner cache.
			p.pinner = new(pinner)
			p.refs = p.refStore[:0]

			// We set this finalizer once and never clear it. Thus, if the
			// pinner gets cached, we'll reuse it, along with its finalizer.
			// This lets us avoid the relatively expensive SetFinalizer call
			// when reusing from the cache. The finalizer however has to be
			// resilient to an empty pinner being finalized, which is done
			// by checking p.refs' length.
			SetFinalizer(p.pinner, func(i *pinner) {
				if len(i.refs) != 0 {
					i.unpin() // only required to make the test idempotent
					pinnerLeakPanic()
				}
			})
		}
	}
	ptr := pinnerGetPtr(&pointer)
	if setPinned(ptr, true) {
		p.refs = append(p.refs, ptr)
	}
}

// Unpin unpins all pinned objects of the [Pinner].
func (p *Pinner) Unpin() {
	p.pinner.unpin()

	mp := acquirem()
	if pp := mp.p.ptr(); pp != nil && pp.pinnerCache == nil {
		// Put the pinner back in the cache, but only if the
		// cache is empty. If application code is reusing Pinners
		// on its own, we want to leave the backing store in place
		// so reuse is more efficient.
		pp.pinnerCache = p.pinner
		p.pinner = nil
	}
	releasem(mp)
}

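// As an illustrative sketch of how callers typically use a Pinner (passToC is
// a hypothetical cgo wrapper; Pinner, Pin, and Unpin are the real API):
//
//	var pin runtime.Pinner
//	defer pin.Unpin()             // releases every pointer pinned below
//	data := new(int32)
//	pin.Pin(data)                 // data may now be stored in C memory
//	passToC(unsafe.Pointer(data)) // e.g. a cgo call that retains the pointer
//
// If data itself contained pointers to other Go objects that C code will
// dereference, each of those would need its own Pin call, as the Pin doc
// comment above notes.
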
const (
	pinnerSize         = 64
	pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
)

type pinner struct {
	refs     []unsafe.Pointer
	refStore [pinnerRefStoreSize]unsafe.Pointer
}

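// As a worked example of the sizing above: on a 64-bit platform a slice
// header ([]unsafe.Pointer) occupies 24 bytes and a pointer 8 bytes, so
//
//	pinnerRefStoreSize = (64 - 24) / 8 = 5
//
// which means refs can record up to 5 pinned pointers in the inline refStore
// before append falls back to a heap-allocated backing array.
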
func (p *pinner) unpin() {
	if p == nil || p.refs == nil {
		return
	}
	for i := range p.refs {
		setPinned(p.refs[i], false)
	}
	// The following two lines make all pointers to references
	// in p.refs unreachable, either by deleting them or dropping
	// p.refs' backing store (if it was not backed by refStore).
	p.refStore = [pinnerRefStoreSize]unsafe.Pointer{}
	p.refs = p.refStore[:0]
}

func pinnerGetPtr(i *any) unsafe.Pointer {
	e := efaceOf(i)
	etyp := e._type
	if etyp == nil {
		panic(errorString("runtime.Pinner: argument is nil"))
	}
	if kind := etyp.Kind_ & kindMask; kind != kindPtr && kind != kindUnsafePointer {
		panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string()))
	}
	if inUserArenaChunk(uintptr(e.data)) {
		// Arena-allocated objects are not eligible for pinning.
		panic(errorString("runtime.Pinner: object was allocated into an arena"))
	}
	return e.data
}

// isPinned checks if a Go pointer is pinned.
// nosplit, because it's called from nosplit code in cgocheck.
//
//go:nosplit
func isPinned(ptr unsafe.Pointer) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		// this code is only called for Go pointers, so this must be a
		// linker-allocated global object.
		return true
	}
	pinnerBits := span.getPinnerBits()
	// these pinnerBits might get unlinked by a concurrently running sweep, but
	// that's OK because gcBits don't get cleared until the following GC cycle
	// (nextMarkBitArenaEpoch)
	if pinnerBits == nil {
		return false
	}
	objIndex := span.objIndex(uintptr(ptr))
	pinState := pinnerBits.ofObject(objIndex)
	KeepAlive(ptr) // make sure ptr is alive until we are done so the span can't be freed
	return pinState.isPinned()
}

// setPinned marks or unmarks a Go pointer as pinned and reports whether it
// did so. Trying to pin a non-Go pointer is ignored (it returns false), while
// trying to unpin a non-Go pointer panics, which should not happen in normal
// usage.
func setPinned(ptr unsafe.Pointer, pin bool) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		if !pin {
			panic(errorString("tried to unpin non-Go pointer"))
		}
		// This is a linker-allocated object, a zero-sized object, or some
		// other non-heap object; nothing to do, silently ignore it.
		return false
	}

	// Ensure that the span is swept, because sweeping accesses the specials
	// list without locks.
	mp := acquirem()
	span.ensureSwept()
	KeepAlive(ptr) // make sure ptr is still alive after span is swept

	objIndex := span.objIndex(uintptr(ptr))

	lock(&span.speciallock) // guard against concurrent calls of setPinned on same span

	pinnerBits := span.getPinnerBits()
	if pinnerBits == nil {
		pinnerBits = span.newPinnerBits()
		span.setPinnerBits(pinnerBits)
	}
	pinState := pinnerBits.ofObject(objIndex)
	if pin {
		if pinState.isPinned() {
			// multiple pins on same object, set multipin bit
			pinState.setMultiPinned(true)
			// and increase the pin counter
			// TODO(mknyszek): investigate if systemstack is necessary here
			systemstack(func() {
				offset := objIndex * span.elemsize
				span.incPinCounter(offset)
			})
		} else {
			// set pin bit
			pinState.setPinned(true)
		}
	} else {
		// unpin
		if pinState.isPinned() {
			if pinState.isMultiPinned() {
				var exists bool
				// TODO(mknyszek): investigate if systemstack is necessary here
				systemstack(func() {
					offset := objIndex * span.elemsize
					exists = span.decPinCounter(offset)
				})
				if !exists {
					// counter is 0, clear multipin bit
					pinState.setMultiPinned(false)
				}
			} else {
				// no multipins recorded. unpin object.
				pinState.setPinned(false)
			}
		} else {
			// unpinning unpinned object, bail out
			throw("runtime.Pinner: object already unpinned")
		}
	}
	unlock(&span.speciallock)
	releasem(mp)
	return true
}

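// To make the interplay of the pin bit, the multipin bit, and the pin counter
// concrete, here is a sketch of the state changes when one object is pinned
// twice and then unpinned (the counter lives in a specialPinCounter special
// on the object's span, see incPinCounter below):
//
//	Pin(p)   // 1st pin:  pin bit 0 -> 1
//	Pin(p)   // 2nd pin:  multipin bit 0 -> 1, pin counter 0 -> 1
//	Unpin()  // calls setPinned(p, false) once per recorded pin:
//	         //   1st call: counter 1 -> 0, multipin bit 1 -> 0
//	         //   2nd call: pin bit 1 -> 0
//
// A further attempt to unpin the same object would hit the "object already
// unpinned" throw above.
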
type pinState struct {
	bytep   *uint8
	byteVal uint8
	mask    uint8
}

// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (v *pinState) isPinned() bool {
	return (v.byteVal & v.mask) != 0
}

func (v *pinState) isMultiPinned() bool {
	return (v.byteVal & (v.mask << 1)) != 0
}

func (v *pinState) setPinned(val bool) {
	v.set(val, false)
}

func (v *pinState) setMultiPinned(val bool) {
	v.set(val, true)
}

// set sets the pin bit of the pinState to val. If multipin is true, it
// sets/unsets the multipin bit instead.
func (v *pinState) set(val bool, multipin bool) {
	mask := v.mask
	if multipin {
		mask <<= 1
	}
	if val {
		atomic.Or8(v.bytep, mask)
	} else {
		atomic.And8(v.bytep, ^mask)
	}
}

// pinnerBits is the same type as gcBits but has different methods.
type pinnerBits gcBits

// ofObject returns the pinState of the n'th object.
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (p *pinnerBits) ofObject(n uintptr) pinState {
	bytep, mask := (*gcBits)(p).bitp(n * 2)
	byteVal := atomic.Load8(bytep)
	return pinState{bytep, byteVal, mask}
}

func (s *mspan) pinnerBitSize() uintptr {
	return divRoundUp(uintptr(s.nelems)*2, 8)
}

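// Layout note: every object in a span gets two adjacent bits in pinnerBits,
// which is why ofObject passes n*2 to bitp and pinnerBitSize reserves
// nelems*2 bits. Within a pair, the low bit is the pin bit and the high bit
// is the multipin bit (pinState.set shifts the mask left by one to reach it),
// so the states of a single object are:
//
//	00  not pinned
//	01  pinned exactly once
//	11  pinned more than once (extra pins tracked by a specialPinCounter)
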
// newPinnerBits returns a pointer to 8-byte-aligned bytes to be used for this
// span's pinner bits. newPinnerBits is used to mark objects that are pinned.
// They are copied when the span is swept.
func (s *mspan) newPinnerBits() *pinnerBits {
	return (*pinnerBits)(newMarkBits(uintptr(s.nelems) * 2))
}

// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (s *mspan) getPinnerBits() *pinnerBits {
	return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&s.pinnerBits)))
}

func (s *mspan) setPinnerBits(p *pinnerBits) {
	atomicstorep(unsafe.Pointer(&s.pinnerBits), unsafe.Pointer(p))
}

// refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
// next GC cycle. If the span does not contain any pinned objects, its
// pinnerBits are set to nil.
func (s *mspan) refreshPinnerBits() {
	p := s.getPinnerBits()
	if p == nil {
		return
	}

	hasPins := false
	bytes := alignUp(s.pinnerBitSize(), 8)

	// Iterate over each 8-byte chunk and check for pins. Note that
	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
	// don't have to worry about edge cases; irrelevant bits will simply be
	// zero.
	for _, x := range unsafe.Slice((*uint64)(unsafe.Pointer(&p.x)), bytes/8) {
		if x != 0 {
			hasPins = true
			break
		}
	}

	if hasPins {
		newPinnerBits := s.newPinnerBits()
		memmove(unsafe.Pointer(&newPinnerBits.x), unsafe.Pointer(&p.x), bytes)
		s.setPinnerBits(newPinnerBits)
	} else {
		s.setPinnerBits(nil)
	}
}

// incPinCounter is only called for multiple pins of the same object and records
// the _additional_ pins.
func (span *mspan) incPinCounter(offset uintptr) {
	var rec *specialPinCounter
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		lock(&mheap_.speciallock)
		rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
		unlock(&mheap_.speciallock)
		// splice in record, fill in offset.
		rec.special.offset = uint16(offset)
		rec.special.kind = _KindSpecialPinCounter
		rec.special.next = *ref
		*ref = (*special)(unsafe.Pointer(rec))
		spanHasSpecials(span)
	} else {
		rec = (*specialPinCounter)(unsafe.Pointer(*ref))
	}
	rec.counter++
}

// decPinCounter decreases the counter. If the counter reaches 0, the counter
// special is deleted and false is returned. Otherwise true is returned.
func (span *mspan) decPinCounter(offset uintptr) bool {
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		throw("runtime.Pinner: decreased non-existing pin counter")
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*ref))
	counter.counter--
	if counter.counter == 0 {
		*ref = counter.special.next
		if span.specials == nil {
			spanHasNoSpecials(span)
		}
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
		unlock(&mheap_.speciallock)
		return false
	}
	return true
}

// only for tests
func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr {
	_, span, objIndex := findObject(uintptr(addr), 0, 0)
	offset := objIndex * span.elemsize
	t, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		return nil
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*t))
	return &counter.counter
}

// To be able to test that the GC panics when a pinned pointer is leaked, this
// panic function is a variable that can be overwritten by a test.
var pinnerLeakPanic = func() {
	panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))
}
