Source file
src/runtime/runtime1.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// On Unix, the environment block is expected to immediately follow argv:
	// argv[0..argc-1] are the arguments, argv[argc] is a nil separator, and
	// the nil-terminated env strings start at argv[argc+1].
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // for Linux; issue 28466
	runtimeContentionStacks  atomic.Int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc    bool
	inittrace int32
	sbrk      int32
	// traceallocfree controls whether execution traces contain
	// detailed trace data about memory allocation and freeing.
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	// asynctimerchan controls whether timer channels behave asynchronously
	// (as in Go 1.22 and earlier) instead of the Go 1.23+ synchronous behavior.
	// It can change at any time in response to os.Setenv("GODEBUG").
	asynctimerchan atomic.Int32
}

var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually reclaimed.
		// This generally leads to poor user experience, like confusing stats
		// in top and other monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(godebug, nil)

	debug.malloc = (debug.inittrace | debug.sbrk) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed by os.Setenv.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// apply environment settings
	parsegodebug(env, seen)
	// apply compile-time GODEBUG settings for as-yet-unseen variables
	parsegodebug(godebugDefault, seen)
	// apply defaults for as-yet-unseen variables
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}

// parsegodebug parses the godebug string, updating variables listed in dbgvars.
//
// If seen == nil, this is startup: the string is parsed left to right, and a
// later setting of a given key overrides an earlier one.
//
// If seen != nil, this is an update triggered by os.Setenv: the string is parsed
// right to left, keys already recorded in seen are skipped, and each key handled
// here is added to seen. Only the atomic variables can change after startup.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting earlier settings with later ones.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, rejecting settings already seen.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
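
// For example, GODEBUG=gctrace=1,schedtrace=1000 is split on commas by the loop
// above; each key=value pair is matched against dbgvars, so this setting stores
// 1 in debug.gctrace and 1000 in debug.schedtrace. Keys that match no dbgvars
// entry (and are not "memprofilerate") are silently ignored.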

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
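
// For example, GOTRACEBACK=all stores 1<<tracebackShift|tracebackAll in
// traceback_cache, GOTRACEBACK=system stores 2<<tracebackShift|tracebackAll,
// GOTRACEBACK=crash additionally sets tracebackCrash, and a plain numeric value
// such as GOTRACEBACK=2 stores 2<<tracebackShift|tracebackAll. Any bits forced
// by traceback_env are OR'd in before the cache is updated.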

// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
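
// For example, timediv(3500000000, 1000000000, &r) returns 3 and sets r to
// 500000000, while a quotient too large for 31 bits makes the function return
// 0x7fffffff with r set to 0.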

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}

//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// reflect_typelinks is meant for package reflect,
// but widely used packages access it using linkname.
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
// It is meant for package reflect, but widely used packages access it
// using linkname. Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
// It is meant for package reflect, but widely used packages access it
// using linkname. Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
// It is meant for package reflect, but widely used packages access it
// using linkname. Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}

// fips_getIndicator returns the FIPS indicator for the current goroutine;
// it is provided to crypto/internal/fips140 via linkname.
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}

// fips_setIndicator sets the FIPS indicator for the current goroutine;
// it is provided to crypto/internal/fips140 via linkname.
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}