Source file src/runtime/mbitmap_allocheaders.go

     1  // Copyright 2023 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.allocheaders
     6  
     7  // Garbage collector: type and heap bitmaps.
     8  //
     9  // Stack, data, and bss bitmaps
    10  //
    11  // Stack frames and global variables in the data and bss sections are
    12  // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
    13  // means the word is a live pointer to be visited by the GC (referred to
    14  // as "pointer"). A "0" bit means the word should be ignored by GC
    15  // (referred to as "scalar", though it could be a dead pointer value).
    16  //
    17  // Heap bitmaps
    18  //
    19  // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
    20  // recording whether a pointer is stored in that word or not. This bitmap
    21  // is stored at the end of a span for small objects and is unrolled at
    22  // runtime from type metadata for all larger objects. Objects without
    23  // pointers have neither a bitmap nor associated type metadata.
    24  //
    25  // Bits in all cases correspond to words in little-endian order.
    26  //
    27  // For small objects, if s is the mspan for the span starting at "start",
    28  // then s.heapBits() returns a slice containing the bitmap for the whole span.
    29  // That is, s.heapBits()[0] holds the goarch.PtrSize*8 bits for the first
     30  // goarch.PtrSize*8 words from "start" through "start+(goarch.PtrSize*8-1)*ptrSize" in the span.
    31  // On a related note, small objects are always small enough that their bitmap
    32  // fits in goarch.PtrSize*8 bits, so writing out bitmap data takes two bitmap
    33  // writes at most (because object boundaries don't generally lie on
    34  // s.heapBits()[i] boundaries).
    35  //
    36  // For larger objects, if t is the type for the object starting at "start",
    37  // within some span whose mspan is s, then the bitmap at t.GCData is "tiled"
    38  // from "start" through "start+s.elemsize".
    39  // Specifically, the first bit of t.GCData corresponds to the word at "start",
    40  // the second to the word after "start", and so on up to t.PtrBytes. At t.PtrBytes,
    41  // we skip to "start+t.Size_" and begin again from there. This process is
    42  // repeated until we hit "start+s.elemsize".
    43  // This tiling algorithm supports array data, since the type always refers to
    44  // the element type of the array. Single objects are considered the same as
    45  // single-element arrays.
    46  // The tiling algorithm may scan data past the end of the compiler-recognized
    47  // object, but any unused data within the allocation slot (i.e. within s.elemsize)
    48  // is zeroed, so the GC just observes nil pointers.
    49  // Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly.
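         //
         // As an illustration (hypothetical values, not drawn from a real type): on a
         // 64-bit platform, consider an element type with Size_ = 32 bytes and
         // PtrBytes = 16, so its GCData bitmap is the two bits "11". Tiling that bitmap
         // across a 96-byte allocation slot produces, word by word:
         //
         //	word:  0 1 2 3   4 5 6 7   8 9 10 11
         //	bit:   1 1 0 0   1 1 0 0   1 1 0  0
         //
         // Only the first two words of each 32-byte element are treated as pointers;
         // the gap between PtrBytes and Size_ is skipped, and the pattern repeats for
         // the next element until "start+s.elemsize" is reached.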
    50  //
    51  // For objects without their own span, the type metadata is stored in the first
    52  // word before the object at the beginning of the allocation slot. For objects
    53  // with their own span, the type metadata is stored in the mspan.
    54  //
    55  // The bitmap for small unallocated objects in scannable spans is not maintained
    56  // (can be junk).
    57  
    58  package runtime
    59  
    60  import (
    61  	"internal/abi"
    62  	"internal/goarch"
    63  	"runtime/internal/sys"
    64  	"unsafe"
    65  )
    66  
    67  const (
    68  	// A malloc header is functionally a single type pointer, but
    69  	// we need to use 8 here to ensure 8-byte alignment of allocations
    70  	// on 32-bit platforms. It's wasteful, but a lot of code relies on
    71  	// 8-byte alignment for 8-byte atomics.
    72  	mallocHeaderSize = 8
    73  
    74  	// The minimum object size that has a malloc header, exclusive.
    75  	//
     76  	// The choice of this value controls the overhead from malloc headers.
    77  	// The minimum size is bound by writeHeapBitsSmall, which assumes that the
    78  	// pointer bitmap for objects of a size smaller than this doesn't cross
    79  	// more than one pointer-word boundary. This sets an upper-bound on this
    80  	// value at the number of bits in a uintptr, multiplied by the pointer
    81  	// size in bytes.
    82  	//
    83  	// We choose a value here that has a natural cutover point in terms of memory
    84  	// overheads. This value just happens to be the maximum possible value this
    85  	// can be.
    86  	//
    87  	// A span with heap bits in it will have 128 bytes of heap bits on 64-bit
    88  	// platforms, and 256 bytes of heap bits on 32-bit platforms. The first size
    89  	// class where malloc headers match this overhead for 64-bit platforms is
    90  	// 512 bytes (8 KiB / 512 bytes * 8 bytes-per-header = 128 bytes of overhead).
    91  	// On 32-bit platforms, this same point is the 256 byte size class
    92  	// (8 KiB / 256 bytes * 8 bytes-per-header = 256 bytes of overhead).
    93  	//
    94  	// Guaranteed to be exactly at a size class boundary. The reason this value is
    95  	// an exclusive minimum is subtle. Suppose we're allocating a 504-byte object
     96  	// and it's rounded up to 512 bytes for the size class. If minSizeForMallocHeader
     97  	// were 512 and an inclusive minimum, then comparing the two sizes against
     98  	// minSizeForMallocHeader would produce different results. In other words, the comparison
    99  	// would not be invariant to size-class rounding. Eschewing this property means a
   100  	// more complex check or possibly storing additional state to determine whether a
   101  	// span has malloc headers.
   102  	minSizeForMallocHeader = goarch.PtrSize * ptrBits
   103  )
   104  
   105  // heapBitsInSpan returns true if the size of an object implies its ptr/scalar
   106  // data is stored at the end of the span, and is accessible via span.heapBits.
   107  //
   108  // Note: this works for both rounded-up sizes (span.elemsize) and unrounded
   109  // type sizes because minSizeForMallocHeader is guaranteed to be at a size
   110  // class boundary.
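         //
         // For example, on a 64-bit platform minSizeForMallocHeader is 512: a 504-byte
         // allocation is rounded up to the 512-byte size class, and both
         // heapBitsInSpan(504) and heapBitsInSpan(512) report true because the bound is
         // an exclusive minimum (<=). With an inclusive minimum (<) the two calls would
         // disagree, and the answer would depend on whether the caller passed the
         // requested size or the rounded-up size.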
   111  //
   112  //go:nosplit
   113  func heapBitsInSpan(userSize uintptr) bool {
   114  	// N.B. minSizeForMallocHeader is an exclusive minimum so that this function is
   115  	// invariant under size-class rounding on its input.
   116  	return userSize <= minSizeForMallocHeader
   117  }
   118  
   119  // heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
   120  type heapArenaPtrScalar struct {
   121  	// N.B. This is no longer necessary with allocation headers.
   122  }
   123  
   124  // typePointers is an iterator over the pointers in a heap object.
   125  //
   126  // Iteration through this type implements the tiling algorithm described at the
   127  // top of this file.
   128  type typePointers struct {
   129  	// elem is the address of the current array element of type typ being iterated over.
   130  	// Objects that are not arrays are treated as single-element arrays, in which case
   131  	// this value does not change.
   132  	elem uintptr
   133  
   134  	// addr is the address the iterator is currently working from and describes
   135  	// the address of the first word referenced by mask.
   136  	addr uintptr
   137  
   138  	// mask is a bitmask where each bit corresponds to pointer-words after addr.
   139  	// Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
   140  	// If a bit is 1, then there is a pointer at that word.
   141  	// nextFast and next mask out bits in this mask as their pointers are processed.
   142  	mask uintptr
   143  
   144  	// typ is a pointer to the type information for the heap object's type.
   145  	// This may be nil if the object is in a span where heapBitsInSpan(span.elemsize) is true.
   146  	typ *_type
   147  }
   148  
   149  // typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
   150  //
   151  // addr and addr+size must be in the range [span.base(), span.limit).
   152  //
   153  // Note: addr+size must be passed as the limit argument to the iterator's next method on
   154  // each iteration. This slightly awkward API is to allow typePointers to be destructured
   155  // by the compiler.
   156  //
   157  // nosplit because it is used during write barriers and must not be preempted.
   158  //
   159  //go:nosplit
   160  func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
   161  	base := span.objBase(addr)
   162  	tp := span.typePointersOfUnchecked(base)
   163  	if base == addr && size == span.elemsize {
   164  		return tp
   165  	}
   166  	return tp.fastForward(addr-tp.addr, addr+size)
   167  }
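
         // The following is a sketch (not code used by the runtime) of how a caller
         // might walk every pointer in the range [addr, addr+size) within span s,
         // passing the same limit to next on each iteration as required above:
         //
         //	tp := s.typePointersOf(addr, size)
         //	for {
         //		var p uintptr
         //		if tp, p = tp.next(addr + size); p == 0 {
         //			break
         //		}
         //		// p is the address of a pointer slot in [addr, addr+size).
         //	}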
   168  
   169  // typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
   170  // of an allocation slot in a span (the start of the object if no header, the
   171  // header otherwise). It returns an iterator that generates all pointers
   172  // in the range [addr, addr+span.elemsize).
   173  //
   174  // nosplit because it is used during write barriers and must not be preempted.
   175  //
   176  //go:nosplit
   177  func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
   178  	const doubleCheck = false
   179  	if doubleCheck && span.objBase(addr) != addr {
   180  		print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
   181  		throw("typePointersOfUnchecked consisting of non-base-address for object")
   182  	}
   183  
   184  	spc := span.spanclass
   185  	if spc.noscan() {
   186  		return typePointers{}
   187  	}
   188  	if heapBitsInSpan(span.elemsize) {
   189  		// Handle header-less objects.
   190  		return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
   191  	}
   192  
   193  	// All of these objects have a header.
   194  	var typ *_type
   195  	if spc.sizeclass() != 0 {
   196  		// Pull the allocation header from the first word of the object.
   197  		typ = *(**_type)(unsafe.Pointer(addr))
   198  		addr += mallocHeaderSize
   199  	} else {
   200  		typ = span.largeType
   201  	}
   202  	gcdata := typ.GCData
   203  	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
   204  }
   205  
   206  // typePointersOfType is like typePointersOf, but assumes addr points to one or more
   207  // contiguous instances of the provided type. The provided type must not be nil and
   208  // it must not have its type metadata encoded as a gcprog.
   209  //
   210  // It returns an iterator that tiles typ.GCData starting from addr. It's the caller's
   211  // responsibility to limit iteration.
   212  //
   213  // nosplit because its callers are nosplit and require all their callees to be nosplit.
   214  //
   215  //go:nosplit
   216  func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
   217  	const doubleCheck = false
   218  	if doubleCheck && (typ == nil || typ.Kind_&kindGCProg != 0) {
   219  		throw("bad type passed to typePointersOfType")
   220  	}
   221  	if span.spanclass.noscan() {
   222  		return typePointers{}
   223  	}
   224  	// Since we have the type, pretend we have a header.
   225  	gcdata := typ.GCData
   226  	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
   227  }
   228  
   229  // nextFast is the fast path of next. nextFast is written to be inlineable and,
   230  // as the name implies, fast.
   231  //
   232  // Callers that are performance-critical should iterate using the following
   233  // pattern:
   234  //
   235  //	for {
   236  //		var addr uintptr
   237  //		if tp, addr = tp.nextFast(); addr == 0 {
   238  //			if tp, addr = tp.next(limit); addr == 0 {
   239  //				break
   240  //			}
   241  //		}
   242  //		// Use addr.
   243  //		...
   244  //	}
   245  //
   246  // nosplit because it is used during write barriers and must not be preempted.
   247  //
   248  //go:nosplit
   249  func (tp typePointers) nextFast() (typePointers, uintptr) {
   250  	// TESTQ/JEQ
   251  	if tp.mask == 0 {
   252  		return tp, 0
   253  	}
   254  	// BSFQ
   255  	var i int
   256  	if goarch.PtrSize == 8 {
   257  		i = sys.TrailingZeros64(uint64(tp.mask))
   258  	} else {
   259  		i = sys.TrailingZeros32(uint32(tp.mask))
   260  	}
   261  	// BTCQ
   262  	tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
   263  	// LEAQ (XX)(XX*8)
   264  	return tp, tp.addr + uintptr(i)*goarch.PtrSize
   265  }
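
         // For example (illustrative values): with tp.addr = 0xc000010000 and
         // tp.mask = 0b10010, nextFast finds the lowest set bit (i = 1), clears it
         // (leaving mask = 0b10000), and returns the pointer-slot address
         // 0xc000010000 + 1*goarch.PtrSize = 0xc000010008 on a 64-bit platform.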
   266  
   267  // next advances the pointers iterator, returning the updated iterator and
   268  // the address of the next pointer.
   269  //
   270  // limit must be the same each time it is passed to next.
   271  //
   272  // nosplit because it is used during write barriers and must not be preempted.
   273  //
   274  //go:nosplit
   275  func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
   276  	for {
   277  		if tp.mask != 0 {
   278  			return tp.nextFast()
   279  		}
   280  
   281  		// Stop if we don't actually have type information.
   282  		if tp.typ == nil {
   283  			return typePointers{}, 0
   284  		}
   285  
   286  		// Advance to the next element if necessary.
   287  		if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
   288  			tp.elem += tp.typ.Size_
   289  			tp.addr = tp.elem
   290  		} else {
   291  			tp.addr += ptrBits * goarch.PtrSize
   292  		}
   293  
   294  		// Check if we've exceeded the limit with the last update.
   295  		if tp.addr >= limit {
   296  			return typePointers{}, 0
   297  		}
   298  
   299  		// Grab more bits and try again.
   300  		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
   301  		if tp.addr+goarch.PtrSize*ptrBits > limit {
   302  			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
   303  			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
   304  		}
   305  	}
   306  }
   307  
   308  // fastForward moves the iterator forward by n bytes. n must be a multiple
   309  // of goarch.PtrSize. limit must be the same limit passed to next for this
   310  // iterator.
   311  //
   312  // nosplit because it is used during write barriers and must not be preempted.
   313  //
   314  //go:nosplit
   315  func (tp typePointers) fastForward(n, limit uintptr) typePointers {
   316  	// Basic bounds check.
   317  	target := tp.addr + n
   318  	if target >= limit {
   319  		return typePointers{}
   320  	}
   321  	if tp.typ == nil {
   322  		// Handle small objects.
   323  		// Clear any bits before the target address.
   324  		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
   325  		// Clear any bits past the limit.
   326  		if tp.addr+goarch.PtrSize*ptrBits > limit {
   327  			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
   328  			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
   329  		}
   330  		return tp
   331  	}
   332  
   333  	// Move up elem and addr.
   334  	// Offsets within an element are always at a ptrBits*goarch.PtrSize boundary.
   335  	if n >= tp.typ.Size_ {
   336  		// elem needs to be moved to the element containing
   337  		// tp.addr + n.
   338  		oldelem := tp.elem
   339  		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
   340  		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
   341  	} else {
   342  		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
   343  	}
   344  
   345  	if tp.addr-tp.elem >= tp.typ.PtrBytes {
   346  		// We're starting in the non-pointer area of an array.
   347  		// Move up to the next element.
   348  		tp.elem += tp.typ.Size_
   349  		tp.addr = tp.elem
   350  		tp.mask = readUintptr(tp.typ.GCData)
   351  
   352  		// We may have exceeded the limit after this. Bail just like next does.
   353  		if tp.addr >= limit {
   354  			return typePointers{}
   355  		}
   356  	} else {
   357  		// Grab the mask, but then clear any bits before the target address and any
   358  		// bits over the limit.
   359  		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
   360  		tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
   361  	}
   362  	if tp.addr+goarch.PtrSize*ptrBits > limit {
   363  		bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
   364  		tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
   365  	}
   366  	return tp
   367  }
   368  
   369  // objBase returns the base pointer for the object containing addr in span.
   370  //
   371  // Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
   372  //
   373  //go:nosplit
   374  func (span *mspan) objBase(addr uintptr) uintptr {
   375  	return span.base() + span.objIndex(addr)*span.elemsize
   376  }
   377  
   378  // bulkBarrierPreWrite executes a write barrier
   379  // for every pointer slot in the memory range [src, src+size),
   380  // using pointer/scalar information from [dst, dst+size).
   381  // This executes the write barriers necessary before a memmove.
   382  // src, dst, and size must be pointer-aligned.
   383  // The range [dst, dst+size) must lie within a single object.
   384  // It does not perform the actual writes.
   385  //
   386  // As a special case, src == 0 indicates that this is being used for a
   387  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   388  // barrier.
   389  //
   390  // Callers should call bulkBarrierPreWrite immediately before
   391  // calling memmove(dst, src, size). This function is marked nosplit
   392  // to avoid being preempted; the GC must not stop the goroutine
   393  // between the memmove and the execution of the barriers.
   394  // The caller is also responsible for cgo pointer checks if this
   395  // may be writing Go pointers into non-Go memory.
   396  //
   397  // Pointer data is not maintained for allocations containing
   398  // no pointers at all; any caller of bulkBarrierPreWrite must first
   399  // make sure the underlying allocation contains pointers, usually
   400  // by checking typ.PtrBytes.
   401  //
   402  // The typ argument is the type of the space at src and dst (and the
   403  // element type if src and dst refer to arrays) and it is optional.
   404  // If typ is nil, the barrier will still behave as expected and typ
   405  // is used purely as an optimization. However, it must be used with
   406  // care.
   407  //
   408  // If typ is not nil, then src and dst must point to one or more values
   409  // of type typ. The caller must ensure that the ranges [src, src+size)
    410  // and [dst, dst+size) refer to one or more whole values of type typ
    411  // (leaving off the pointerless tail of the space is OK). If this
   412  // precondition is not followed, this function will fail to scan the
   413  // right pointers.
   414  //
   415  // When in doubt, pass nil for typ. That is safe and will always work.
   416  //
   417  // Callers must perform cgo checks if goexperiment.CgoCheck2.
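         //
         // A typical call sequence for a typed copy looks roughly like the following
         // (a sketch, not a verbatim excerpt of runtime code):
         //
         //	if writeBarrier.enabled && typ.PtrBytes != 0 {
         //		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
         //	}
         //	memmove(dst, src, typ.Size_)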
   418  //
   419  //go:nosplit
   420  func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
   421  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   422  		throw("bulkBarrierPreWrite: unaligned arguments")
   423  	}
   424  	if !writeBarrier.enabled {
   425  		return
   426  	}
   427  	s := spanOf(dst)
   428  	if s == nil {
   429  		// If dst is a global, use the data or BSS bitmaps to
   430  		// execute write barriers.
   431  		for _, datap := range activeModules() {
   432  			if datap.data <= dst && dst < datap.edata {
   433  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   434  				return
   435  			}
   436  		}
   437  		for _, datap := range activeModules() {
   438  			if datap.bss <= dst && dst < datap.ebss {
   439  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   440  				return
   441  			}
   442  		}
   443  		return
   444  	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
   445  		// dst was heap memory at some point, but isn't now.
   446  		// It can't be a global. It must be either our stack,
   447  		// or in the case of direct channel sends, it could be
   448  		// another stack. Either way, no need for barriers.
   449  		// This will also catch if dst is in a freed span,
    450  		// though that should never happen.
   451  		return
   452  	}
   453  	buf := &getg().m.p.ptr().wbBuf
   454  
   455  	// Double-check that the bitmaps generated in the two possible paths match.
   456  	const doubleCheck = false
   457  	if doubleCheck {
   458  		doubleCheckTypePointersOfType(s, typ, dst, size)
   459  	}
   460  
   461  	var tp typePointers
   462  	if typ != nil && typ.Kind_&kindGCProg == 0 {
   463  		tp = s.typePointersOfType(typ, dst)
   464  	} else {
   465  		tp = s.typePointersOf(dst, size)
   466  	}
   467  	if src == 0 {
   468  		for {
   469  			var addr uintptr
   470  			if tp, addr = tp.next(dst + size); addr == 0 {
   471  				break
   472  			}
   473  			dstx := (*uintptr)(unsafe.Pointer(addr))
   474  			p := buf.get1()
   475  			p[0] = *dstx
   476  		}
   477  	} else {
   478  		for {
   479  			var addr uintptr
   480  			if tp, addr = tp.next(dst + size); addr == 0 {
   481  				break
   482  			}
   483  			dstx := (*uintptr)(unsafe.Pointer(addr))
   484  			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
   485  			p := buf.get2()
   486  			p[0] = *dstx
   487  			p[1] = *srcx
   488  		}
   489  	}
   490  }
   491  
   492  // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
   493  // does not execute write barriers for [dst, dst+size).
   494  //
    495  // In addition to the requirements of bulkBarrierPreWrite,
    496  // callers need to ensure [dst, dst+size) is zeroed.
   497  //
   498  // This is used for special cases where e.g. dst was just
   499  // created and zeroed with malloc.
   500  //
   501  // The type of the space can be provided purely as an optimization.
   502  // See bulkBarrierPreWrite's comment for more details -- use this
   503  // optimization with great care.
   504  //
   505  //go:nosplit
   506  func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
   507  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   508  		throw("bulkBarrierPreWrite: unaligned arguments")
   509  	}
   510  	if !writeBarrier.enabled {
   511  		return
   512  	}
   513  	buf := &getg().m.p.ptr().wbBuf
   514  	s := spanOf(dst)
   515  
   516  	// Double-check that the bitmaps generated in the two possible paths match.
   517  	const doubleCheck = false
   518  	if doubleCheck {
   519  		doubleCheckTypePointersOfType(s, typ, dst, size)
   520  	}
   521  
   522  	var tp typePointers
   523  	if typ != nil && typ.Kind_&kindGCProg == 0 {
   524  		tp = s.typePointersOfType(typ, dst)
   525  	} else {
   526  		tp = s.typePointersOf(dst, size)
   527  	}
   528  	for {
   529  		var addr uintptr
   530  		if tp, addr = tp.next(dst + size); addr == 0 {
   531  			break
   532  		}
   533  		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
   534  		p := buf.get1()
   535  		p[0] = *srcx
   536  	}
   537  }
   538  
   539  // initHeapBits initializes the heap bitmap for a span.
   540  //
   541  // TODO(mknyszek): This should set the heap bits for single pointer
   542  // allocations eagerly to avoid calling heapSetType at allocation time,
   543  // just to write one bit.
   544  func (s *mspan) initHeapBits(forceClear bool) {
   545  	if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
   546  		b := s.heapBits()
   547  		for i := range b {
   548  			b[i] = 0
   549  		}
   550  	}
   551  }
   552  
   553  // bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
   554  // and leaves it alone elsewhere.
   555  func bswapIfBigEndian(x uintptr) uintptr {
   556  	if goarch.BigEndian {
   557  		if goarch.PtrSize == 8 {
   558  			return uintptr(sys.Bswap64(uint64(x)))
   559  		}
   560  		return uintptr(sys.Bswap32(uint32(x)))
   561  	}
   562  	return x
   563  }
   564  
   565  type writeUserArenaHeapBits struct {
    566  	offset uintptr // offset in the span of the word whose pointer state the low bit of mask represents.
    567  	mask   uintptr // pointer bits accumulated so far, starting at offset.
    568  	valid  uintptr // number of bits in mask that are valid (including low)
   569  	low    uintptr // number of low-order bits to not overwrite
   570  }
   571  
   572  func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits) {
   573  	offset := addr - s.base()
   574  
    575  	// We may start writing bits in the middle of a heap bitmap word.
   576  	// Remember how many bits into the word we started, so we can be sure
   577  	// not to overwrite the previous bits.
   578  	h.low = offset / goarch.PtrSize % ptrBits
   579  
   580  	// round down to heap word that starts the bitmap word.
   581  	h.offset = offset - h.low*goarch.PtrSize
   582  
   583  	// We don't have any bits yet.
   584  	h.mask = 0
   585  	h.valid = h.low
   586  
   587  	return
   588  }
   589  
   590  // write appends the pointerness of the next valid pointer slots
   591  // using the low valid bits of bits. 1=pointer, 0=scalar.
   592  func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits {
   593  	if h.valid+valid <= ptrBits {
   594  		// Fast path - just accumulate the bits.
   595  		h.mask |= bits << h.valid
   596  		h.valid += valid
   597  		return h
   598  	}
   599  	// Too many bits to fit in this word. Write the current word
   600  	// out and move on to the next word.
   601  
   602  	data := h.mask | bits<<h.valid       // mask for this word
   603  	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
   604  	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
   605  
   606  	// Flush mask to the memory bitmap.
   607  	idx := h.offset / (ptrBits * goarch.PtrSize)
   608  	m := uintptr(1)<<h.low - 1
   609  	bitmap := s.heapBits()
   610  	bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | data)
   611  	// Note: no synchronization required for this write because
   612  	// the allocator has exclusive access to the page, and the bitmap
   613  	// entries are all for a single page. Also, visibility of these
   614  	// writes is guaranteed by the publication barrier in mallocgc.
   615  
   616  	// Move to next word of bitmap.
   617  	h.offset += ptrBits * goarch.PtrSize
   618  	h.low = 0
   619  	return h
   620  }
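
         // For example (hypothetical state): if h.valid = 60 and write is called with
         // valid = 8, the low 4 bits of the new bits complete the current bitmap word,
         // which is flushed to the span's bitmap (preserving the h.low bits below the
         // starting offset), while the remaining 4 bits carry over into h.mask for the
         // next word, leaving h.valid = 4.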
   621  
   622  // Add padding of size bytes.
   623  func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits {
   624  	if size == 0 {
   625  		return h
   626  	}
   627  	words := size / goarch.PtrSize
   628  	for words > ptrBits {
   629  		h = h.write(s, 0, ptrBits)
   630  		words -= ptrBits
   631  	}
   632  	return h.write(s, 0, words)
   633  }
   634  
   635  // Flush the bits that have been written, and add zeros as needed
   636  // to cover the full object [addr, addr+size).
   637  func (h writeUserArenaHeapBits) flush(s *mspan, addr, size uintptr) {
   638  	offset := addr - s.base()
   639  
   640  	// zeros counts the number of bits needed to represent the object minus the
   641  	// number of bits we've already written. This is the number of 0 bits
   642  	// that need to be added.
   643  	zeros := (offset+size-h.offset)/goarch.PtrSize - h.valid
   644  
   645  	// Add zero bits up to the bitmap word boundary
   646  	if zeros > 0 {
   647  		z := ptrBits - h.valid
   648  		if z > zeros {
   649  			z = zeros
   650  		}
   651  		h.valid += z
   652  		zeros -= z
   653  	}
   654  
   655  	// Find word in bitmap that we're going to write.
   656  	bitmap := s.heapBits()
   657  	idx := h.offset / (ptrBits * goarch.PtrSize)
   658  
   659  	// Write remaining bits.
   660  	if h.valid != h.low {
   661  		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
   662  		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
   663  		bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx])&m | h.mask)
   664  	}
   665  	if zeros == 0 {
   666  		return
   667  	}
   668  
   669  	// Advance to next bitmap word.
   670  	h.offset += ptrBits * goarch.PtrSize
   671  
   672  	// Continue on writing zeros for the rest of the object.
   673  	// For standard use of the ptr bits this is not required, as
   674  	// the bits are read from the beginning of the object. Some uses,
   675  	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
   676  	// start mid-object, so these writes are still required.
   677  	for {
   678  		// Write zero bits.
   679  		idx := h.offset / (ptrBits * goarch.PtrSize)
   680  		if zeros < ptrBits {
   681  			bitmap[idx] = bswapIfBigEndian(bswapIfBigEndian(bitmap[idx]) &^ (uintptr(1)<<zeros - 1))
   682  			break
   683  		} else if zeros == ptrBits {
   684  			bitmap[idx] = 0
   685  			break
   686  		} else {
   687  			bitmap[idx] = 0
   688  			zeros -= ptrBits
   689  		}
   690  		h.offset += ptrBits * goarch.PtrSize
   691  	}
   692  }
   693  
   694  // heapBits returns the heap ptr/scalar bits stored at the end of the span for
    695  // small object spans and user arena spans.
   696  //
   697  // Note that the uintptr of each element means something different for small object
    698  // spans and for user arena spans. Small object spans are easy: they're never interpreted
   699  // as anything but uintptr, so they're immune to differences in endianness. However, the
   700  // heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
   701  // ordering needs to match the same byte ordering the compiler would emit. The compiler always
   702  // emits the bitmap data in little endian byte ordering, so on big endian platforms these
   703  // uintptrs will have their byte orders swapped from what they normally would be.
   704  //
   705  // heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
   706  //
   707  //go:nosplit
   708  func (span *mspan) heapBits() []uintptr {
   709  	const doubleCheck = false
   710  
   711  	if doubleCheck && !span.isUserArenaChunk {
   712  		if span.spanclass.noscan() {
   713  			throw("heapBits called for noscan")
   714  		}
   715  		if span.elemsize > minSizeForMallocHeader {
   716  			throw("heapBits called for span class that should have a malloc header")
   717  		}
   718  	}
   719  	// Find the bitmap at the end of the span.
   720  	//
   721  	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception.
   722  	if span.npages == 1 {
   723  		// This will be inlined and constant-folded down.
   724  		return heapBitsSlice(span.base(), pageSize)
   725  	}
   726  	return heapBitsSlice(span.base(), span.npages*pageSize)
   727  }
   728  
   729  // Helper for constructing a slice for the span's heap bits.
   730  //
   731  //go:nosplit
   732  func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
   733  	bitmapSize := spanSize / goarch.PtrSize / 8
   734  	elems := int(bitmapSize / goarch.PtrSize)
   735  	var sl notInHeapSlice
   736  	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
   737  	return *(*[]uintptr)(unsafe.Pointer(&sl))
   738  }
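
         // To make the layout concrete (assuming a 64-bit platform and an 8 KiB span):
         // bitmapSize = 8192/8/8 = 128 bytes and elems = 128/8 = 16 uintptrs, so the
         // bitmap occupies the final 128 bytes of the span, matching the overhead
         // figure quoted in the comment on minSizeForMallocHeader.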
   739  
   740  // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
   741  //
   742  // addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
   743  // must be true.
   744  //
   745  //go:nosplit
   746  func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
   747  	spanSize := span.npages * pageSize
   748  	bitmapSize := spanSize / goarch.PtrSize / 8
   749  	hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
   750  
   751  	// These objects are always small enough that their bitmaps
   752  	// fit in a single word, so just load the word or two we need.
   753  	//
   754  	// Mirrors mspan.writeHeapBitsSmall.
   755  	//
   756  	// We should be using heapBits(), but unfortunately it introduces
    757  	// both bounds-check panics and throws, which cause us to exceed
   758  	// the nosplit limit in quite a few cases.
   759  	i := (addr - span.base()) / goarch.PtrSize / ptrBits
   760  	j := (addr - span.base()) / goarch.PtrSize % ptrBits
   761  	bits := span.elemsize / goarch.PtrSize
   762  	word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
   763  	word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
   764  
   765  	var read uintptr
   766  	if j+bits > ptrBits {
   767  		// Two reads.
   768  		bits0 := ptrBits - j
   769  		bits1 := bits - bits0
   770  		read = *word0 >> j
   771  		read |= (*word1 & ((1 << bits1) - 1)) << bits0
   772  	} else {
   773  		// One read.
   774  		read = (*word0 >> j) & ((1 << bits) - 1)
   775  	}
   776  	return read
   777  }
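
         // As a worked example (hypothetical offsets, 64-bit platform): in a span with
         // elemsize = 48, bits = 48/8 = 6. For the object at offset 480 from the span
         // base, (addr-span.base())/goarch.PtrSize = 60, so i = 0 and j = 60. Since
         // j+bits = 66 > 64, two reads are needed: the top 4 bits of bitmap word 0
         // (bits0 = 4) and the low 2 bits of word 1 (bits1 = 2) combine into the
         // 6-bit result.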
   778  
   779  // writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
   780  // stored as a bitmap at the end of the span.
   781  //
   782  // Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
   783  // heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
   784  //
   785  //go:nosplit
   786  func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
   787  	// The objects here are always really small, so a single load is sufficient.
   788  	src0 := readUintptr(typ.GCData)
   789  
   790  	// Create repetitions of the bitmap if we have a small array.
   791  	bits := span.elemsize / goarch.PtrSize
   792  	scanSize = typ.PtrBytes
   793  	src := src0
   794  	switch typ.Size_ {
   795  	case goarch.PtrSize:
   796  		src = (1 << (dataSize / goarch.PtrSize)) - 1
   797  	default:
   798  		for i := typ.Size_; i < dataSize; i += typ.Size_ {
   799  			src |= src0 << (i / goarch.PtrSize)
   800  			scanSize += typ.Size_
   801  		}
   802  	}
   803  
   804  	// Since we're never writing more than one uintptr's worth of bits, we're either going
   805  	// to do one or two writes.
   806  	dst := span.heapBits()
   807  	o := (x - span.base()) / goarch.PtrSize
   808  	i := o / ptrBits
   809  	j := o % ptrBits
   810  	if j+bits > ptrBits {
   811  		// Two writes.
   812  		bits0 := ptrBits - j
   813  		bits1 := bits - bits0
   814  		dst[i+0] = dst[i+0]&(^uintptr(0)>>bits0) | (src << j)
   815  		dst[i+1] = dst[i+1]&^((1<<bits1)-1) | (src >> bits0)
   816  	} else {
   817  		// One write.
   818  		dst[i] = (dst[i] &^ (((1 << bits) - 1) << j)) | (src << j)
   819  	}
   820  
   821  	const doubleCheck = false
   822  	if doubleCheck {
   823  		srcRead := span.heapBitsSmallForAddr(x)
   824  		if srcRead != src {
   825  			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
   826  			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
   827  			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
   828  			throw("bad pointer bits written for small object")
   829  		}
   830  	}
   831  	return
   832  }
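
         // For instance (a hypothetical small array, 64-bit platform): storing three
         // values of a type with Size_ = 16 and PtrBytes = 8 (so src0 = 0b1) into a
         // 48-byte slot gives dataSize = 48. The loop ORs src0 in at word offsets 2
         // and 4, yielding src = 0b10101; bits = 48/8 = 6, and those six bits land in
         // the span's bitmap with one or two writes depending on the object's
         // alignment within the bitmap word.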
   833  
   834  // For !goexperiment.AllocHeaders.
   835  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   836  }
   837  
   838  // heapSetType records that the new allocation [x, x+size)
   839  // holds in [x, x+dataSize) one or more values of type typ.
   840  // (The number of values is given by dataSize / typ.Size.)
   841  // If dataSize < size, the fragment [x+dataSize, x+size) is
   842  // recorded as non-pointer data.
   843  // It is known that the type has pointers somewhere;
   844  // malloc does not call heapSetType when there are no pointers.
   845  //
   846  // There can be read-write races between heapSetType and things
   847  // that read the heap metadata like scanobject. However, since
   848  // heapSetType is only used for objects that have not yet been
   849  // made reachable, readers will ignore bits being modified by this
   850  // function. This does mean this function cannot transiently modify
   851  // shared memory that belongs to neighboring objects. Also, on weakly-ordered
   852  // machines, callers must execute a store/store (publication) barrier
   853  // between calling this function and making the object reachable.
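         //
         // Concretely (restating the file-level comment above, not adding new rules):
         // for a small object with a malloc header, header points at the first word of
         // the allocation slot and receives gctyp; for an object with its own span,
         // header points at span.largeType. For header-less small objects, header is
         // nil and the bits are written directly into the span's tail bitmap via
         // writeHeapBitsSmall.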
   854  func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
   855  	const doubleCheck = false
   856  
   857  	gctyp := typ
   858  	if header == nil {
   859  		if doubleCheck && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
   860  			throw("tried to write heap bits, but no heap bits in span")
   861  		}
   862  		// Handle the case where we have no malloc header.
   863  		scanSize = span.writeHeapBitsSmall(x, dataSize, typ)
   864  	} else {
   865  		if typ.Kind_&kindGCProg != 0 {
   866  			// Allocate space to unroll the gcprog. This space will consist of
   867  			// a dummy _type value and the unrolled gcprog. The dummy _type will
   868  			// refer to the bitmap, and the mspan will refer to the dummy _type.
   869  			if span.spanclass.sizeclass() != 0 {
   870  				throw("GCProg for type that isn't large")
   871  			}
   872  			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
   873  			heapBitsOff := spaceNeeded
   874  			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
   875  			npages := alignUp(spaceNeeded, pageSize) / pageSize
   876  			var progSpan *mspan
   877  			systemstack(func() {
   878  				progSpan = mheap_.allocManual(npages, spanAllocPtrScalarBits)
   879  				memclrNoHeapPointers(unsafe.Pointer(progSpan.base()), progSpan.npages*pageSize)
   880  			})
   881  			// Write a dummy _type in the new space.
   882  			//
   883  			// We only need to write size, PtrBytes, and GCData, since that's all
   884  			// the GC cares about.
   885  			gctyp = (*_type)(unsafe.Pointer(progSpan.base()))
   886  			gctyp.Size_ = typ.Size_
   887  			gctyp.PtrBytes = typ.PtrBytes
   888  			gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff))
   889  			gctyp.TFlag = abi.TFlagUnrolledBitmap
   890  
   891  			// Expand the GC program into space reserved at the end of the new span.
   892  			runGCProg(addb(typ.GCData, 4), gctyp.GCData)
   893  		}
   894  
   895  		// Write out the header.
   896  		*header = gctyp
   897  		scanSize = span.elemsize
   898  	}
   899  
   900  	if doubleCheck {
   901  		doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
   902  
   903  		// To exercise the less common path more often, generate
   904  		// a random interior pointer and make sure iterating from
   905  		// that point works correctly too.
   906  		maxIterBytes := span.elemsize
   907  		if header == nil {
   908  			maxIterBytes = dataSize
   909  		}
   910  		off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
   911  		size := dataSize - off
   912  		if size == 0 {
   913  			off -= goarch.PtrSize
   914  			size += goarch.PtrSize
   915  		}
   916  		interior := x + off
   917  		size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
   918  		if size == 0 {
   919  			size = goarch.PtrSize
   920  		}
    921  		// Round size up to a multiple of the type's size.
   922  		size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
   923  		if interior+size > x+maxIterBytes {
   924  			size = x + maxIterBytes - interior
   925  		}
   926  		doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
   927  	}
   928  	return
   929  }
   930  
   931  func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
   932  	// Check that scanning the full object works.
   933  	tp := span.typePointersOfUnchecked(span.objBase(x))
   934  	maxIterBytes := span.elemsize
   935  	if header == nil {
   936  		maxIterBytes = dataSize
   937  	}
   938  	bad := false
   939  	for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
   940  		// Compute the pointer bit we want at offset i.
   941  		want := false
   942  		if i < span.elemsize {
   943  			off := i % typ.Size_
   944  			if off < typ.PtrBytes {
   945  				j := off / goarch.PtrSize
   946  				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
   947  			}
   948  		}
   949  		if want {
   950  			var addr uintptr
   951  			tp, addr = tp.next(x + span.elemsize)
   952  			if addr == 0 {
   953  				println("runtime: found bad iterator")
   954  			}
   955  			if addr != x+i {
   956  				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
   957  				bad = true
   958  			}
   959  		}
   960  	}
   961  	if !bad {
   962  		var addr uintptr
   963  		tp, addr = tp.next(x + span.elemsize)
   964  		if addr == 0 {
   965  			return
   966  		}
   967  		println("runtime: extra pointer:", hex(addr))
   968  	}
   969  	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " hasGCProg=", typ.Kind_&kindGCProg != 0, "\n")
   970  	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
   971  	print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
   972  	print("runtime: limit=", hex(x+span.elemsize), "\n")
   973  	tp = span.typePointersOfUnchecked(x)
   974  	dumpTypePointers(tp)
   975  	for {
   976  		var addr uintptr
   977  		if tp, addr = tp.next(x + span.elemsize); addr == 0 {
   978  			println("runtime: would've stopped here")
   979  			dumpTypePointers(tp)
   980  			break
   981  		}
   982  		print("runtime: addr=", hex(addr), "\n")
   983  		dumpTypePointers(tp)
   984  	}
   985  	throw("heapSetType: pointer entry not correct")
   986  }
   987  
   988  func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
   989  	bad := false
   990  	if interior < x {
   991  		print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
   992  		throw("found bad interior pointer")
   993  	}
   994  	off := interior - x
   995  	tp := span.typePointersOf(interior, size)
   996  	for i := off; i < off+size; i += goarch.PtrSize {
   997  		// Compute the pointer bit we want at offset i.
   998  		want := false
   999  		if i < span.elemsize {
  1000  			off := i % typ.Size_
  1001  			if off < typ.PtrBytes {
  1002  				j := off / goarch.PtrSize
  1003  				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
  1004  			}
  1005  		}
  1006  		if want {
  1007  			var addr uintptr
  1008  			tp, addr = tp.next(interior + size)
  1009  			if addr == 0 {
  1010  				println("runtime: found bad iterator")
  1011  				bad = true
  1012  			}
  1013  			if addr != x+i {
  1014  				print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
  1015  				bad = true
  1016  			}
  1017  		}
  1018  	}
  1019  	if !bad {
  1020  		var addr uintptr
  1021  		tp, addr = tp.next(interior + size)
  1022  		if addr == 0 {
  1023  			return
  1024  		}
  1025  		println("runtime: extra pointer:", hex(addr))
  1026  	}
  1027  	print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
  1028  	print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
  1029  	print("runtime: limit=", hex(interior+size), "\n")
  1030  	tp = span.typePointersOf(interior, size)
  1031  	dumpTypePointers(tp)
  1032  	for {
  1033  		var addr uintptr
  1034  		if tp, addr = tp.next(interior + size); addr == 0 {
  1035  			println("runtime: would've stopped here")
  1036  			dumpTypePointers(tp)
  1037  			break
  1038  		}
  1039  		print("runtime: addr=", hex(addr), "\n")
  1040  		dumpTypePointers(tp)
  1041  	}
  1042  
  1043  	print("runtime: want: ")
  1044  	for i := off; i < off+size; i += goarch.PtrSize {
  1045  		// Compute the pointer bit we want at offset i.
  1046  		want := false
  1047  		if i < dataSize {
  1048  			off := i % typ.Size_
  1049  			if off < typ.PtrBytes {
  1050  				j := off / goarch.PtrSize
  1051  				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
  1052  			}
  1053  		}
  1054  		if want {
  1055  			print("1")
  1056  		} else {
  1057  			print("0")
  1058  		}
  1059  	}
  1060  	println()
  1061  
  1062  	throw("heapSetType: pointer entry not correct")
  1063  }
  1064  
  1065  //go:nosplit
  1066  func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
  1067  	if typ == nil || typ.Kind_&kindGCProg != 0 {
  1068  		return
  1069  	}
  1070  	if typ.Kind_&kindMask == kindInterface {
  1071  		// Interfaces are unfortunately inconsistently handled
  1072  		// when it comes to the type pointer, so it's easy to
  1073  		// produce a lot of false positives here.
  1074  		return
  1075  	}
  1076  	tp0 := s.typePointersOfType(typ, addr)
  1077  	tp1 := s.typePointersOf(addr, size)
  1078  	failed := false
  1079  	for {
  1080  		var addr0, addr1 uintptr
  1081  		tp0, addr0 = tp0.next(addr + size)
  1082  		tp1, addr1 = tp1.next(addr + size)
  1083  		if addr0 != addr1 {
  1084  			failed = true
  1085  			break
  1086  		}
  1087  		if addr0 == 0 {
  1088  			break
  1089  		}
  1090  	}
  1091  	if failed {
  1092  		tp0 := s.typePointersOfType(typ, addr)
  1093  		tp1 := s.typePointersOf(addr, size)
  1094  		print("runtime: addr=", hex(addr), " size=", size, "\n")
  1095  		print("runtime: type=", toRType(typ).string(), "\n")
  1096  		dumpTypePointers(tp0)
  1097  		dumpTypePointers(tp1)
  1098  		for {
  1099  			var addr0, addr1 uintptr
  1100  			tp0, addr0 = tp0.next(addr + size)
  1101  			tp1, addr1 = tp1.next(addr + size)
  1102  			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
  1103  			if addr0 == 0 && addr1 == 0 {
  1104  				break
  1105  			}
  1106  		}
  1107  		throw("mismatch between typePointersOfType and typePointersOf")
  1108  	}
  1109  }
  1110  
  1111  func dumpTypePointers(tp typePointers) {
  1112  	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
  1113  	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
  1114  	for i := uintptr(0); i < ptrBits; i++ {
  1115  		if tp.mask&(uintptr(1)<<i) != 0 {
  1116  			print("1")
  1117  		} else {
  1118  			print("0")
  1119  		}
  1120  	}
  1121  	println()
  1122  }
  1123  
  1124  // Testing.
  1125  
  1126  // Returns GC type info for the pointer stored in ep for testing.
  1127  // If ep points to the stack, only static live information will be returned
  1128  // (i.e. not for objects which are only dynamically live stack objects).
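         //
         // For example (an illustrative call, not taken from the runtime's tests):
         // passing a pointer to a heap-allocated struct{ p *byte; n uintptr } yields a
         // mask with a 1 for each pointer word and a 0 for each scalar word; for heap
         // objects the trailing scalar entries are trimmed, so the result here would be
         // []byte{1}.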
  1129  func getgcmask(ep any) (mask []byte) {
  1130  	e := *efaceOf(&ep)
  1131  	p := e.data
  1132  	t := e._type
  1133  
  1134  	var et *_type
  1135  	if t.Kind_&kindMask != kindPtr {
  1136  		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
  1137  	}
  1138  	et = (*ptrtype)(unsafe.Pointer(t)).Elem
  1139  
  1140  	// data or bss
  1141  	for _, datap := range activeModules() {
  1142  		// data
  1143  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  1144  			bitmap := datap.gcdatamask.bytedata
  1145  			n := et.Size_
  1146  			mask = make([]byte, n/goarch.PtrSize)
  1147  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1148  				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
  1149  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1150  			}
  1151  			return
  1152  		}
  1153  
  1154  		// bss
  1155  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  1156  			bitmap := datap.gcbssmask.bytedata
  1157  			n := et.Size_
  1158  			mask = make([]byte, n/goarch.PtrSize)
  1159  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1160  				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
  1161  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1162  			}
  1163  			return
  1164  		}
  1165  	}
  1166  
  1167  	// heap
  1168  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  1169  		if s.spanclass.noscan() {
  1170  			return nil
  1171  		}
  1172  		limit := base + s.elemsize
  1173  
  1174  		// Move the base up to the iterator's start, because
  1175  		// we want to hide evidence of a malloc header from the
  1176  		// caller.
  1177  		tp := s.typePointersOfUnchecked(base)
  1178  		base = tp.addr
  1179  
  1180  		// Unroll the full bitmap the GC would actually observe.
  1181  		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
  1182  		for {
  1183  			var addr uintptr
  1184  			if tp, addr = tp.next(limit); addr == 0 {
  1185  				break
  1186  			}
  1187  			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
  1188  		}
  1189  
  1190  		// Double-check that every part of the ptr/scalar we're not
  1191  		// showing the caller is zeroed. This keeps us honest that
  1192  		// that information is actually irrelevant.
  1193  		for i := limit; i < s.elemsize; i++ {
  1194  			if *(*byte)(unsafe.Pointer(i)) != 0 {
  1195  				throw("found non-zeroed tail of allocation")
  1196  			}
  1197  		}
  1198  
   1199  		// Callers (and a check we're about to run) expect this mask
  1200  		// to end at the last pointer.
  1201  		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
  1202  			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
  1203  		}
  1204  
  1205  		if et.Kind_&kindGCProg == 0 {
  1206  			// Unroll again, but this time from the type information.
  1207  			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
  1208  			tp = s.typePointersOfType(et, base)
  1209  			for {
  1210  				var addr uintptr
  1211  				if tp, addr = tp.next(limit); addr == 0 {
  1212  					break
  1213  				}
  1214  				maskFromType[(addr-base)/goarch.PtrSize] = 1
  1215  			}
  1216  
  1217  			// Validate that the prefix of maskFromType is equal to
  1218  			// maskFromHeap. maskFromType may contain more pointers than
  1219  			// maskFromHeap produces because maskFromHeap may be able to
  1220  			// get exact type information for certain classes of objects.
  1221  			// With maskFromType, we're always just tiling the type bitmap
  1222  			// through to the elemsize.
  1223  			//
  1224  			// It's OK if maskFromType has pointers in elemsize that extend
  1225  			// past the actual populated space; we checked above that all
   1226  			// that space is zeroed, so the GC will just see nil pointers.
  1227  			differs := false
  1228  			for i := range maskFromHeap {
  1229  				if maskFromHeap[i] != maskFromType[i] {
  1230  					differs = true
  1231  					break
  1232  				}
  1233  			}
  1234  
  1235  			if differs {
  1236  				print("runtime: heap mask=")
  1237  				for _, b := range maskFromHeap {
  1238  					print(b)
  1239  				}
  1240  				println()
  1241  				print("runtime: type mask=")
  1242  				for _, b := range maskFromType {
  1243  					print(b)
  1244  				}
  1245  				println()
  1246  				print("runtime: type=", toRType(et).string(), "\n")
  1247  				throw("found two different masks from two different methods")
  1248  			}
  1249  		}
  1250  
  1251  		// Select the heap mask to return. We may not have a type mask.
  1252  		mask = maskFromHeap
  1253  
  1254  		// Make sure we keep ep alive. We may have stopped referencing
  1255  		// ep's data pointer sometime before this point and it's possible
  1256  		// for that memory to get freed.
  1257  		KeepAlive(ep)
  1258  		return
  1259  	}
  1260  
  1261  	// stack
  1262  	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
  1263  		found := false
  1264  		var u unwinder
  1265  		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
  1266  			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
  1267  				found = true
  1268  				break
  1269  			}
  1270  		}
  1271  		if found {
  1272  			locals, _, _ := u.frame.getStackMap(false)
  1273  			if locals.n == 0 {
  1274  				return
  1275  			}
  1276  			size := uintptr(locals.n) * goarch.PtrSize
  1277  			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
  1278  			mask = make([]byte, n/goarch.PtrSize)
  1279  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1280  				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
  1281  				mask[i/goarch.PtrSize] = locals.ptrbit(off)
  1282  			}
  1283  		}
  1284  		return
  1285  	}
  1286  
  1287  	// otherwise, not something the GC knows about.
  1288  	// possibly read-only data, like malloc(0).
  1289  	// must not have pointers
  1290  	return
  1291  }
  1292  
  1293  // userArenaHeapBitsSetType is the equivalent of heapSetType but for
  1294  // non-slice-backing-store Go values allocated in a user arena chunk. It
  1295  // sets up the type metadata for the value with type typ allocated at address ptr.
  1296  // base is the base address of the arena chunk.
  1297  func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
  1298  	base := s.base()
  1299  	h := s.writeUserArenaHeapBits(uintptr(ptr))
  1300  
  1301  	p := typ.GCData // start of 1-bit pointer mask (or GC program)
  1302  	var gcProgBits uintptr
  1303  	if typ.Kind_&kindGCProg != 0 {
  1304  		// Expand gc program, using the object itself for storage.
  1305  		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
  1306  		p = (*byte)(ptr)
  1307  	}
  1308  	nb := typ.PtrBytes / goarch.PtrSize
  1309  
  1310  	for i := uintptr(0); i < nb; i += ptrBits {
  1311  		k := nb - i
  1312  		if k > ptrBits {
  1313  			k = ptrBits
  1314  		}
  1315  		// N.B. On big endian platforms we byte swap the data that we
  1316  		// read from GCData, which is always stored in little-endian order
  1317  		// by the compiler. writeUserArenaHeapBits handles data in
  1318  		// a platform-ordered way for efficiency, but stores back the
  1319  		// data in little endian order, since we expose the bitmap through
  1320  		// a dummy type.
  1321  		h = h.write(s, readUintptr(addb(p, i/8)), k)
  1322  	}
  1323  	// Note: we call pad here to ensure we emit explicit 0 bits
  1324  	// for the pointerless tail of the object. This ensures that
  1325  	// there's only a single noMorePtrs mark for the next object
  1326  	// to clear. We don't need to do this to clear stale noMorePtrs
  1327  	// markers from previous uses because arena chunk pointer bitmaps
  1328  	// are always fully cleared when reused.
  1329  	h = h.pad(s, typ.Size_-typ.PtrBytes)
  1330  	h.flush(s, uintptr(ptr), typ.Size_)
  1331  
  1332  	if typ.Kind_&kindGCProg != 0 {
  1333  		// Zero out temporary ptrmask buffer inside object.
  1334  		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
  1335  	}
  1336  
  1337  	// Update the PtrBytes value in the type information. After this
  1338  	// point, the GC will observe the new bitmap.
  1339  	s.largeType.PtrBytes = uintptr(ptr) - base + typ.PtrBytes
  1340  
  1341  	// Double-check that the bitmap was written out correctly.
  1342  	const doubleCheck = false
  1343  	if doubleCheck {
  1344  		doubleCheckHeapPointersInterior(uintptr(ptr), uintptr(ptr), typ.Size_, typ.Size_, typ, &s.largeType, s)
  1345  	}
  1346  }
  1347  
  1348  // For !goexperiment.AllocHeaders, to pass TestIntendedInlining.
  1349  func writeHeapBitsForAddr() {
  1350  	panic("not implemented")
  1351  }
  1352  
  1353  // For !goexperiment.AllocHeaders.
  1354  type heapBits struct {
  1355  }
  1356  
  1357  // For !goexperiment.AllocHeaders.
  1358  //
  1359  //go:nosplit
  1360  func heapBitsForAddr(addr, size uintptr) heapBits {
  1361  	panic("not implemented")
  1362  }
  1363  
  1364  // For !goexperiment.AllocHeaders.
  1365  //
  1366  //go:nosplit
  1367  func (h heapBits) next() (heapBits, uintptr) {
  1368  	panic("not implemented")
  1369  }
  1370  
  1371  // For !goexperiment.AllocHeaders.
  1372  //
  1373  //go:nosplit
  1374  func (h heapBits) nextFast() (heapBits, uintptr) {
  1375  	panic("not implemented")
  1376  }
  1377  
