Source file src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/strconv"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
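
// As a hedged illustration of the encoding described above (this example is
// not part of the original file): a GOTRACEBACK=crash setting is cached and
// later decoded by gotraceback roughly as
//
//	t := uint32(2)<<tracebackShift | tracebackAll | tracebackCrash // == 11
//	crash := t&tracebackCrash != 0      // true
//	all := t&tracebackAll != 0          // true
//	level := int32(t >> tracebackShift) // 2: include runtime frames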

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
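
// A sketch of the startup memory layout that goenvs_unix relies on (standard
// for the Unix-like targets this code serves; illustrative, not part of the
// original file):
//
//	argv[0] ... argv[argc-1]   the program arguments
//	argv[argc]                 nil terminator
//	argv[argc+1] ...           environment strings, ended by another nil
//
// which is why both loops above start indexing at argc+1.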

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	containermaxprocs        int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // for Linux; issue 28466
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	updatemaxprocs           int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc          bool
	inittrace       int32
	sbrk            int32
	checkfinalizers int32
	// traceallocfree controls whether execution traces contain
	// detailed trace data about memory allocation. This value
	// affects debug.malloc only if it is != 0 and the execution
	// tracer is enabled, in which case debug.malloc will be
	// set to true (if it isn't already) while tracing is enabled.
	// It will be set while the world is stopped, so it's safe.
	// The value of traceallocfree can be changed any time in response
	// to os.Setenv("GODEBUG").
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	// asynctimerchan controls whether timer channels
	// behave asynchronously (as in Go 1.22 and earlier)
	// instead of their Go 1.23+ synchronous behavior.
	// The value can change at any time (in response to os.Setenv("GODEBUG"))
	// and affects all extant timer channels immediately.
	// Programs wouldn't normally change this over an execution,
	// but allowing it is convenient for testing and for programs
	// that do an os.Setenv in main.init or main.main.
	asynctimerchan atomic.Int32
}

var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
	{name: "decoratemappings", value: &debug.decoratemappings, def: 1},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "checkfinalizers", value: &debug.checkfinalizers},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
}
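
// For illustration (hypothetical values; this note is not part of the original
// file), an environment setting such as
//
//	GODEBUG=gctrace=1,schedtrace=1000
//
// is parsed by parsegodebug below into debug.gctrace = 1 and
// debug.schedtrace = 1000 via the matching dbgvars entries, while entries
// declared with a non-zero def (for example profstackdepth, default 128) keep
// that default unless GODEBUG overrides it.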

func parseRuntimeDebugVars(godebug string) {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}
	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(godebug, nil)

	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// Disable async preemption in checkmark mode. The following situation is
	// problematic with checkmark mode:
	//
	// - The GC doesn't mark object A because it is truly dead.
	// - The GC stops the world, asynchronously preempting G1 which has a reference
	//   to A in its top stack frame
	// - During the stop the world, we run the second checkmark GC. It marks the roots
	//   and discovers A through G1.
	// - Checkmark mode reports a failure since there's a discrepancy in mark metadata.
	//
	// We could disable just conservative scanning during the checkmark scan, which is
	// safe but makes checkmark slightly less powerful, but that's a lot more invasive
	// than just disabling async preemption altogether.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}
}

func finishDebugVarsSetup() {
	p := new(string)
	*p = gogetenv("GODEBUG")
	godebugEnv.Store(p)

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed to env.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// apply environment settings
	parsegodebug(env, seen)
	// apply compile-time GODEBUG settings for as-yet-unseen variables
	parsegodebug(godebugDefault, seen)
	// apply defaults for as-yet-unseen variables
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}

// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also call
// cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting older settings with newer
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, ok := strconv.Atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := strconv.Atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
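
// A small worked example of the ordering rules above (hypothetical input, not
// part of the original file): with godebug = "gctrace=1,gctrace=0",
//
//   - at startup (seen == nil) the fields are processed left to right, so the
//     later "gctrace=0" overwrites the earlier "gctrace=1";
//   - on an incremental update (seen != nil) the fields are processed right to
//     left, so "gctrace=0" is applied first and marks "gctrace" as seen, and
//     the earlier "gctrace=1" is then skipped.
//
// Either way the last occurrence in the string wins; the seen map is there so
// that the later pass over godebugDefault does not clobber values already set
// from the environment.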

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		t = tracebackAll
		if n, ok := strconv.Atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
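
// For reference, the cache words produced by the cases above work out to the
// following values, before traceback_env is OR'd in (this summary is not part
// of the original file):
//
//	"none":                       0
//	"" or "single":               1<<tracebackShift                                  == 4
//	"all":                        1<<tracebackShift | tracebackAll                   == 6
//	"system":                     2<<tracebackShift | tracebackAll                   == 10
//	"crash" ("wer" on Windows):   2<<tracebackShift | tracebackAll | tracebackCrash  == 11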

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
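
// A worked example (not part of the original file): the self-check in check()
// above calls
//
//	timediv(12345*1000000000+54321, 1000000000, &rem)
//
// which returns 12345 with rem = 54321. Because the loop only builds quotient
// bits 0 through 30, any quotient that does not fit in 31 bits (for example
// timediv(1<<40, 1, &rem)) saturates to 0x7fffffff and sets rem to 0.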

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}

//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
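
// The usual pairing, shown here only as an illustrative sketch (not part of
// the original file):
//
//	mp := acquirem()
//	// ... code that must not be preempted or migrate to another M ...
//	releasem(mp)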

// reflect_typelinks is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/vmware/govmomi
//   - github.com/pinpoint-apm/pinpoint-go-agent
//   - github.com/timandy/routine
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//
// reflect_resolveNameOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
// reflect_resolveTypeOff is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//   - github.com/timandy/routine
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
// reflect_resolveTextOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}

//go:linkname fips_getIndicator crypto/internal/fips140.getIndicator
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}

//go:linkname fips_setIndicator crypto/internal/fips140.setIndicator
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
