// Source file: src/runtime/runtime1.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/bytealg"
     9  	"internal/goarch"
    10  	"internal/runtime/atomic"
    11  	"unsafe"
    12  )
    13  
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // crash (e.g. core dump) after printing tracebacks
	tracebackAll               // print stacks for all goroutines, not just the current one
	tracebackShift = iota      // number of flag bits; level lives in the bits above them
)

// traceback_cache starts at level 2 so that tracebacks during early
// startup (before GOTRACEBACK is parsed) include runtime frames.
// setTraceback replaces it once the environment has been read.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the bits derived from the GOTRACEBACK
// environment variable at startup (see parsedebugvars); setTraceback
// ORs it back in so env-forced settings persist across calls to
// runtime/debug.SetTraceback.
var traceback_env uint32
    27  
    28  // gotraceback returns the current traceback settings.
    29  //
    30  // If level is 0, suppress all tracebacks.
    31  // If level is 1, show tracebacks, but exclude runtime frames.
    32  // If level is 2, show tracebacks including runtime frames.
    33  // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
    34  // If crash is set, crash (core dump, etc) after tracebacking.
    35  //
    36  //go:nosplit
    37  func gotraceback() (level int32, all, crash bool) {
    38  	gp := getg()
    39  	t := atomic.Load(&traceback_cache)
    40  	crash = t&tracebackCrash != 0
    41  	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
    42  	if gp.m.traceback != 0 {
    43  		level = int32(gp.m.traceback)
    44  	} else if gp.m.throwing >= throwTypeRuntime {
    45  		// Always include runtime frames in runtime throws unless
    46  		// otherwise overridden by m.traceback.
    47  		level = 2
    48  	} else {
    49  		level = int32(t >> tracebackShift)
    50  	}
    51  	return
    52  }
    53  
// Process argument vector, recorded by args at startup.
var (
	argc int32  // argument count
	argv **byte // C-style argument vector; env strings follow it (see goenvs_unix)
)
    58  
// argv_index returns the pointer stored in slot i of the C-style
// argument vector argv, i.e. argv[i].
//
// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
    65  
// args records the process argument vector and lets the OS-specific
// startup code (sysargs) derive additional values from it.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
    71  
    72  func goargs() {
    73  	if GOOS == "windows" {
    74  		return
    75  	}
    76  	argslice = make([]string, argc)
    77  	for i := int32(0); i < argc; i++ {
    78  		argslice[i] = gostringnocopy(argv_index(argv, i))
    79  	}
    80  }
    81  
    82  func goenvs_unix() {
    83  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    84  	// guarantee env[] will immediately follow argv. Might cause
    85  	// problems.
    86  	n := int32(0)
    87  	for argv_index(argv, argc+1+n) != nil {
    88  		n++
    89  	}
    90  
    91  	envs = make([]string, n)
    92  	for i := int32(0); i < n; i++ {
    93  		envs[i] = gostring(argv_index(argv, argc+1+i))
    94  	}
    95  }
    96  
// environ returns the runtime's copy of the environment strings.
func environ() []string {
	return envs
}
   100  
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

// testAtomic64 sanity-checks the 64-bit atomic operations
// (Cas64, Load64, Store64, Xadd64, Xchg64) during startup,
// throwing if any of them misbehaves.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Must fail: test_z64 (42) does not match the old value (0).
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Must succeed: test_z64 matches the old value (42); stores 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Use values wider than 32 bits to catch truncation bugs.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the previous value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
   141  
// check verifies at startup that the compiler and architecture behave
// as the runtime assumes: primitive type sizes, struct layout,
// timediv, 32-bit and byte-wide atomics, IEEE NaN comparison
// semantics, the 64-bit atomic suite, the fixed stack size, and the
// assembly-level invariants tested by checkASM. It throws on any
// violation.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	// Primitive type sizes must match what the runtime was compiled
	// to expect.
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	// A one-byte struct occupies one byte, and two such structs pack
	// with no padding.
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	// Spot-check the open-coded 64-bit division helper (e receives
	// the remainder).
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	// 32-bit compare-and-swap: success, failure, and an all-ones
	// bit pattern.
	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Byte-wide atomic or/and must not disturb neighboring bytes.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	// NaN (all-ones bit patterns below) must compare unequal to
	// everything, including itself, under both == and !=.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
   297  
// A dbgVar describes one GODEBUG setting the runtime recognizes:
// its name and where its parsed value is stored. Exactly one of
// value or atomic should be non-nil.
type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}
   304  
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// Plain int32 fields are written only at startup (parsedebugvars);
// atomic.Int32 fields may also be updated later when $GODEBUG
// changes (see parsegodebug and reparsedebugvars).
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // for Linux; issue 28466
	runtimeContentionStacks  atomic.Int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc    bool
	inittrace int32
	sbrk      int32
	// traceallocfree controls whether execution traces contain
	// detailed trace data about memory allocation. This value
	// affects debug.malloc only if it is != 0 and the execution
	// tracer is enabled, in which case debug.malloc will be
	// set to "true" if it isn't already while tracing is enabled.
	// It will be set while the world is stopped, so it's safe.
	// The value of traceallocfree can be changed any time in response
	// to os.Setenv("GODEBUG").
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	// asynctimerchan controls whether timer channels
	// behave asynchronously (as in Go 1.22 and earlier)
	// instead of their Go 1.23+ synchronous behavior.
	// The value can change at any time (in response to os.Setenv("GODEBUG"))
	// and affects all extant timer channels immediately.
	// Programs wouldn't normally change over an execution,
	// but allowing it is convenient for testing and for programs
	// that do an os.Setenv in main.init or main.main.
	asynctimerchan atomic.Int32
}
   364  
   365  var dbgvars = []*dbgVar{
   366  	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
   367  	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
   368  	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
   369  	{name: "cgocheck", value: &debug.cgocheck},
   370  	{name: "clobberfree", value: &debug.clobberfree},
   371  	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
   372  	{name: "disablethp", value: &debug.disablethp},
   373  	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
   374  	{name: "efence", value: &debug.efence},
   375  	{name: "gccheckmark", value: &debug.gccheckmark},
   376  	{name: "gcpacertrace", value: &debug.gcpacertrace},
   377  	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
   378  	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
   379  	{name: "gctrace", value: &debug.gctrace},
   380  	{name: "harddecommit", value: &debug.harddecommit},
   381  	{name: "inittrace", value: &debug.inittrace},
   382  	{name: "invalidptr", value: &debug.invalidptr},
   383  	{name: "madvdontneed", value: &debug.madvdontneed},
   384  	{name: "panicnil", atomic: &debug.panicnil},
   385  	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
   386  	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
   387  	{name: "sbrk", value: &debug.sbrk},
   388  	{name: "scavtrace", value: &debug.scavtrace},
   389  	{name: "scheddetail", value: &debug.scheddetail},
   390  	{name: "schedtrace", value: &debug.schedtrace},
   391  	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
   392  	{name: "traceallocfree", atomic: &debug.traceallocfree},
   393  	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
   394  	{name: "tracebackancestors", value: &debug.tracebackancestors},
   395  	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
   396  }
   397  
// parsedebugvars initializes the debug struct, applying settings in
// increasing priority: hard-coded defaults, per-variable defaults
// from dbgvars, compile-time //go:debug defaults (godebugDefault),
// and finally the GODEBUG environment variable. It also parses
// GOTRACEBACK.
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	// Publish the raw $GODEBUG string via godebugEnv.
	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings (highest priority)
	parsegodebug(godebug, nil)

	// Derive the combined malloc debug check (see the debug struct).
	debug.malloc = (debug.inittrace | debug.sbrk) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
   446  
   447  // reparsedebugvars reparses the runtime's debug variables
   448  // because the environment variable has been changed to env.
   449  func reparsedebugvars(env string) {
   450  	seen := make(map[string]bool)
   451  	// apply environment settings
   452  	parsegodebug(env, seen)
   453  	// apply compile-time GODEBUG settings for as-yet-unseen variables
   454  	parsegodebug(godebugDefault, seen)
   455  	// apply defaults for as-yet-unseen variables
   456  	for _, v := range dbgvars {
   457  		if v.atomic != nil && !seen[v.name] {
   458  			v.atomic.Store(0)
   459  		}
   460  	}
   461  }
   462  
// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also call
// cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting older settings with newer
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		// Fields without '=' are silently ignored.
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		// Indexing a nil map yields false, so at startup (seen == nil)
		// nothing is ever skipped here.
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						// Incremental updates (seen != nil) may only
						// touch atomic variables; startup may set either.
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
   534  
// setTraceback parses a GOTRACEBACK-style setting string and stores
// the encoded result in traceback_cache. Linked from
// runtime/debug.SetTraceback.
//
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		// "wer" is Windows-only; elsewhere fall through and treat it
		// like any unrecognized value.
		fallthrough
	default:
		t = tracebackAll
		// Accept a bare numeric level if it round-trips through uint32.
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Preserve bits forced by the GOTRACEBACK environment variable.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
   572  
   573  // Poor mans 64-bit division.
   574  // This is a very special function, do not use it if you are not sure what you are doing.
   575  // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
   576  // Handles overflow in a time-specific manner.
   577  // This keeps us within no-split stack limits on 32-bit processors.
   578  //
   579  //go:nosplit
   580  func timediv(v int64, div int32, rem *int32) int32 {
   581  	res := int32(0)
   582  	for bit := 30; bit >= 0; bit-- {
   583  		if v >= int64(div)<<uint(bit) {
   584  			v = v - (int64(div) << uint(bit))
   585  			// Before this for loop, res was 0, thus all these
   586  			// power of 2 increments are now just bitsets.
   587  			res |= 1 << uint(bit)
   588  		}
   589  	}
   590  	if v >= int64(div) {
   591  		if rem != nil {
   592  			*rem = 0
   593  		}
   594  		return 0x7fffffff
   595  	}
   596  	if rem != nil {
   597  		*rem = int32(v)
   598  	}
   599  	return res
   600  }
   601  
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem pins the calling goroutine to its current M by
// incrementing the M's lock count, and returns that M.
// Must be paired with a later releasem.
//
//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
   610  
// releasem undoes a previous acquirem of mp, decrementing its lock
// count. When the count reaches zero and a preemption is pending,
// it re-arms the stack-preempt sentinel so the request is honored at
// the next stack check.
//
//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
   620  
   621  // reflect_typelinks is meant for package reflect,
   622  // but widely used packages access it using linkname.
   623  // Notable members of the hall of shame include:
   624  //   - gitee.com/quant1x/gox
   625  //   - github.com/goccy/json
   626  //   - github.com/modern-go/reflect2
   627  //   - github.com/vmware/govmomi
   628  //   - github.com/pinpoint-apm/pinpoint-go-agent
   629  //   - github.com/timandy/routine
   630  //   - github.com/v2pro/plz
   631  //
   632  // Do not remove or change the type signature.
   633  // See go.dev/issue/67401.
   634  //
   635  //go:linkname reflect_typelinks reflect.typelinks
   636  func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
   637  	modules := activeModules()
   638  	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
   639  	ret := [][]int32{modules[0].typelinks}
   640  	for _, md := range modules[1:] {
   641  		sections = append(sections, unsafe.Pointer(md.types))
   642  		ret = append(ret, md.typelinks)
   643  	}
   644  	return sections, ret
   645  }
   646  
// reflect_resolveNameOff resolves a name offset from a base pointer.
//
// reflect_resolveNameOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
   661  
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
// reflect_resolveTypeOff is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//   - github.com/timandy/routine
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
   679  
// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
// reflect_resolveTextOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
   694  
// reflectlite_resolveNameOff resolves a name offset from a base pointer,
// for internal/reflectlite.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
   701  
// reflectlite_resolveTypeOff resolves an *rtype offset from a base type,
// for internal/reflectlite.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
   708  
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
// It returns the ID assigned to ptr, reusing an existing ID if ptr was
// added before. The whole operation runs under the reflectOffs lock.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	// Lazily initialize the two-way lookup maps on first use.
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
   729  
// fips_getIndicator returns the current goroutine's FIPS indicator,
// for crypto/internal/fips140.
//
//go:linkname fips_getIndicator crypto/internal/fips140.getIndicator
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
   734  
// fips_setIndicator sets the current goroutine's FIPS indicator,
// for crypto/internal/fips140.
//
//go:linkname fips_setIndicator crypto/internal/fips140.setIndicator
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
   739  
