Source file
src/runtime/proc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/exithook"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// runtime_inittasks is the list of the runtime package's init tasks,
// prepared by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset
145
146
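// The main goroutine. It runs the runtime's init tasks, starts sysmon and
// the garbage collector, runs package init functions, and finally calls the
// user's main.main; when that returns, the process exits via exit(0).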
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
270
271
272
273
274
275
276
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284 if raceenabled {
285 runExitHooks(0)
286 racefini()
287 }
288
289
290
291
292
293 if runningPanicDefers.Load() != 0 {
294
295 for c := 0; c < 1000; c++ {
296 if runningPanicDefers.Load() == 0 {
297 break
298 }
299 Gosched()
300 }
301 }
302 if panicking.Load() != 0 {
303 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
304 }
305 runExitHooks(0)
306
307 exit(0)
308 for {
309 var x *int32
310 *x = 0
311 }
312 }
313
314
315
316
317 func os_beforeExit(exitCode int) {
318 runExitHooks(exitCode)
319 if exitCode == 0 && raceenabled {
320 racefini()
321 }
322 }
323
324 func init() {
325 exithook.Gosched = Gosched
326 exithook.Goid = func() uint64 { return getg().goid }
327 exithook.Throw = throw
328 }
329
330 func runExitHooks(code int) {
331 exithook.Run(code)
332 }
333
334
335 func init() {
336 go forcegchelper()
337 }
338
339 func forcegchelper() {
340 forcegc.g = getg()
341 lockInit(&forcegc.lock, lockRankForcegc)
342 for {
343 lock(&forcegc.lock)
344 if forcegc.idle.Load() {
345 throw("forcegc: phase error")
346 }
347 forcegc.idle.Store(true)
348 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
349
350 if debug.gctrace > 0 {
351 println("GC forced")
352 }
353
354 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
355 }
356 }
357
358
359
360
361
362 func Gosched() {
363 checkTimeouts()
364 mcall(gosched_m)
365 }
366
367
368
369
370
371 func goschedguarded() {
372 mcall(goschedguarded_m)
373 }
374
375
376
377
378
379
380 func goschedIfBusy() {
381 gp := getg()
382
383
384 if !gp.preempt && sched.npidle.Load() > 0 {
385 return
386 }
387 mcall(gosched_m)
388 }
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
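// gopark puts the current goroutine into a waiting state with the given wait
// reason and calls unlockf(gp, lock) on the system stack. If unlockf returns
// false the goroutine is resumed immediately. unlockf must not access this
// goroutine's stack, since the stack may move between gopark and the call to
// unlockf. The goroutine becomes runnable again only via a later goready(gp).
// A minimal usage sketch (waitReasonExample and traceBlockExample are
// placeholder names for illustration, not constants defined in this file):
//
//	gopark(parkunlock_c, unsafe.Pointer(&mu), waitReasonExample, traceBlockExample, 1)
//	// ... some other goroutine later observes the condition and calls:
//	goready(gp, 1)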
418 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
419 if reason != waitReasonSleep {
420 checkTimeouts()
421 }
422 mp := acquirem()
423 gp := mp.curg
424 status := readgstatus(gp)
425 if status != _Grunning && status != _Gscanrunning {
426 throw("gopark: bad g status")
427 }
428 mp.waitlock = lock
429 mp.waitunlockf = unlockf
430 gp.waitreason = reason
431 mp.waitTraceBlockReason = traceReason
432 mp.waitTraceSkip = traceskip
433 releasem(mp)
434
435 mcall(park_m)
436 }
437
438
439
440 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
441 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
442 }
443
444
445
446
447
448
449
450
451
452
453
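// goready marks gp as runnable: it switches to the system stack, puts gp on
// the current P's run queue (as the next goroutine to run), and wakes an
// idle P if one is available.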
454 func goready(gp *g, traceskip int) {
455 systemstack(func() {
456 ready(gp, traceskip, true)
457 })
458 }
459
460
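// acquireSudog returns a sudog for the caller to use, taking it from the
// per-P cache when possible. An empty cache is refilled to half capacity
// from the central list sched.sudogcache, and a new sudog is allocated only
// if both are empty. The m is pinned with acquirem for the duration so the
// P and its cache cannot change underneath us.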
461 func acquireSudog() *sudog {
462
463
464
465
466
467
468
469
470 mp := acquirem()
471 pp := mp.p.ptr()
472 if len(pp.sudogcache) == 0 {
473 lock(&sched.sudoglock)
474
475 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
476 s := sched.sudogcache
477 sched.sudogcache = s.next
478 s.next = nil
479 pp.sudogcache = append(pp.sudogcache, s)
480 }
481 unlock(&sched.sudoglock)
482
483 if len(pp.sudogcache) == 0 {
484 pp.sudogcache = append(pp.sudogcache, new(sudog))
485 }
486 }
487 n := len(pp.sudogcache)
488 s := pp.sudogcache[n-1]
489 pp.sudogcache[n-1] = nil
490 pp.sudogcache = pp.sudogcache[:n-1]
491 if s.elem != nil {
492 throw("acquireSudog: found s.elem != nil in cache")
493 }
494 releasem(mp)
495 return s
496 }
497
498
499 func releaseSudog(s *sudog) {
500 if s.elem != nil {
501 throw("runtime: sudog with non-nil elem")
502 }
503 if s.isSelect {
504 throw("runtime: sudog with non-false isSelect")
505 }
506 if s.next != nil {
507 throw("runtime: sudog with non-nil next")
508 }
509 if s.prev != nil {
510 throw("runtime: sudog with non-nil prev")
511 }
512 if s.waitlink != nil {
513 throw("runtime: sudog with non-nil waitlink")
514 }
515 if s.c != nil {
516 throw("runtime: sudog with non-nil c")
517 }
518 gp := getg()
519 if gp.param != nil {
520 throw("runtime: releaseSudog with non-nil gp.param")
521 }
522 mp := acquirem()
523 pp := mp.p.ptr()
524 if len(pp.sudogcache) == cap(pp.sudogcache) {
525
526 var first, last *sudog
527 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
528 n := len(pp.sudogcache)
529 p := pp.sudogcache[n-1]
530 pp.sudogcache[n-1] = nil
531 pp.sudogcache = pp.sudogcache[:n-1]
532 if first == nil {
533 first = p
534 } else {
535 last.next = p
536 }
537 last = p
538 }
539 lock(&sched.sudoglock)
540 last.next = sched.sudogcache
541 sched.sudogcache = first
542 unlock(&sched.sudoglock)
543 }
544 pp.sudogcache = append(pp.sudogcache, s)
545 releasem(mp)
546 }
547
548
549 func badmcall(fn func(*g)) {
550 throw("runtime: mcall called on m->g0 stack")
551 }
552
553 func badmcall2(fn func(*g)) {
554 throw("runtime: mcall function returned")
555 }
556
557 func badreflectcall() {
558 panic(plainError("arg size to reflect.call more than 1GB"))
559 }
560
561
562
563 func badmorestackg0() {
564 if !crashStackImplemented {
565 writeErrStr("fatal: morestack on g0\n")
566 return
567 }
568
569 g := getg()
570 switchToCrashStack(func() {
571 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
572 g.m.traceback = 2
573 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
574 print("\n")
575
576 throw("morestack on g0")
577 })
578 }
579
580
581
582 func badmorestackgsignal() {
583 writeErrStr("fatal: morestack on gsignal\n")
584 }
585
586
587 func badctxt() {
588 throw("ctxt != 0")
589 }
590
591
592
593 var gcrash g
594
595 var crashingG atomic.Pointer[g]
596
597
598
599
600
601
602
603
604
605 func switchToCrashStack(fn func()) {
606 me := getg()
607 if crashingG.CompareAndSwapNoWB(nil, me) {
608 switchToCrashStack0(fn)
609 abort()
610 }
611 if crashingG.Load() == me {
612
613 writeErrStr("fatal: recursive switchToCrashStack\n")
614 abort()
615 }
616
617 usleep_no_g(100)
618 writeErrStr("fatal: concurrent switchToCrashStack\n")
619 abort()
620 }
621
622
623
624
625 const crashStackImplemented = GOOS != "windows"
626
627
628 func switchToCrashStack0(fn func())
629
630 func lockedOSThread() bool {
631 gp := getg()
632 return gp.lockedm != 0 && gp.m.lockedg != 0
633 }
634
635 var (
636
637
638
639
640
641
642 allglock mutex
643 allgs []*g
644
645
646
647
648
649
650
651
652
653
654
655
656
657 allglen uintptr
658 allgptr **g
659 )
660
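// allgadd appends gp to allgs and republishes allgptr and allglen so that
// lock-free readers (atomicAllG / forEachGRace) always observe a consistent
// pointer/length pair.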
661 func allgadd(gp *g) {
662 if readgstatus(gp) == _Gidle {
663 throw("allgadd: bad status Gidle")
664 }
665
666 lock(&allglock)
667 allgs = append(allgs, gp)
668 if &allgs[0] != allgptr {
669 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
670 }
671 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
672 unlock(&allglock)
673 }
674
675
676
677
678 func allGsSnapshot() []*g {
679 assertWorldStoppedOrLockHeld(&allglock)
680
681
682
683
684
685
686 return allgs[:len(allgs):len(allgs)]
687 }
688
689
690 func atomicAllG() (**g, uintptr) {
691 length := atomic.Loaduintptr(&allglen)
692 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
693 return ptr, length
694 }
695
696
697 func atomicAllGIndex(ptr **g, i uintptr) *g {
698 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
699 }
700
701
702
703
704 func forEachG(fn func(gp *g)) {
705 lock(&allglock)
706 for _, gp := range allgs {
707 fn(gp)
708 }
709 unlock(&allglock)
710 }
711
712
713
714
715
716 func forEachGRace(fn func(gp *g)) {
717 ptr, length := atomicAllG()
718 for i := uintptr(0); i < length; i++ {
719 gp := atomicAllGIndex(ptr, i)
720 fn(gp)
721 }
722 return
723 }
724
725 const (
726
727
728 _GoidCacheBatch = 16
729 )
730
731
732
733 func cpuinit(env string) {
734 switch GOOS {
735 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
736 cpu.DebugOptions = true
737 }
738 cpu.Initialize(env)
739
740
741
742 switch GOARCH {
743 case "386", "amd64":
744 x86HasPOPCNT = cpu.X86.HasPOPCNT
745 x86HasSSE41 = cpu.X86.HasSSE41
746 x86HasFMA = cpu.X86.HasFMA
747
748 case "arm":
749 armHasVFPv4 = cpu.ARM.HasVFPv4
750
751 case "arm64":
752 arm64HasATOMICS = cpu.ARM64.HasATOMICS
753
754 case "loong64":
755 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
756 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
757 loong64HasLSX = cpu.Loong64.HasLSX
758 }
759 }
760
761
762
763
764 func getGodebugEarly() string {
765 const prefix = "GODEBUG="
766 var env string
767 switch GOOS {
768 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
769
770
771
772 n := int32(0)
773 for argv_index(argv, argc+1+n) != nil {
774 n++
775 }
776
777 for i := int32(0); i < n; i++ {
778 p := argv_index(argv, argc+1+i)
779 s := unsafe.String(p, findnull(p))
780
781 if stringslite.HasPrefix(s, prefix) {
782 env = gostring(p)[len(prefix):]
783 break
784 }
785 }
786 }
787 return env
788 }
789
790
791
792
793
794
795
796
797
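// schedinit performs one-time scheduler and runtime initialization on g0:
// lock-rank setup, stack and memory allocator initialization, early GODEBUG
// and CPU feature handling, argument/environment parsing, GC initialization,
// and creation of the initial Ps via procresize (honoring GOMAXPROCS).
// The bootstrap sequence is: call osinit, call schedinit, make & queue the
// new main G, call runtime·mstart; the new G then calls runtime.main.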
798 func schedinit() {
799 lockInit(&sched.lock, lockRankSched)
800 lockInit(&sched.sysmonlock, lockRankSysmon)
801 lockInit(&sched.deferlock, lockRankDefer)
802 lockInit(&sched.sudoglock, lockRankSudog)
803 lockInit(&deadlock, lockRankDeadlock)
804 lockInit(&paniclk, lockRankPanic)
805 lockInit(&allglock, lockRankAllg)
806 lockInit(&allpLock, lockRankAllp)
807 lockInit(&reflectOffs.lock, lockRankReflectOffs)
808 lockInit(&finlock, lockRankFin)
809 lockInit(&cpuprof.lock, lockRankCpuprof)
810 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
811 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
812 traceLockInit()
813
814
815
816 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
817
818 lockVerifyMSize()
819
820
821
822 gp := getg()
823 if raceenabled {
824 gp.racectx, raceprocctx0 = raceinit()
825 }
826
827 sched.maxmcount = 10000
828 crashFD.Store(^uintptr(0))
829
830
831 worldStopped()
832
833 ticks.init()
834 moduledataverify()
835 stackinit()
836 mallocinit()
837 godebug := getGodebugEarly()
838 cpuinit(godebug)
839 randinit()
840 alginit()
841 mcommoninit(gp.m, -1)
842 modulesinit()
843 typelinksinit()
844 itabsinit()
845 stkobjinit()
846
847 sigsave(&gp.m.sigmask)
848 initSigmask = gp.m.sigmask
849
850 goargs()
851 goenvs()
852 secure()
853 checkfds()
854 parsedebugvars()
855 gcinit()
856
857
858
859 gcrash.stack = stackalloc(16384)
860 gcrash.stackguard0 = gcrash.stack.lo + 1000
861 gcrash.stackguard1 = gcrash.stack.lo + 1000
862
863
864
865
866
867 if disableMemoryProfiling {
868 MemProfileRate = 0
869 }
870
871
872 mProfStackInit(gp.m)
873
874 lock(&sched.lock)
875 sched.lastpoll.Store(nanotime())
876 procs := ncpu
877 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
878 procs = n
879 }
880 if procresize(procs) != nil {
881 throw("unknown runnable goroutine during bootstrap")
882 }
883 unlock(&sched.lock)
884
885
886 worldStarted()
887
888 if buildVersion == "" {
889
890
891 buildVersion = "unknown"
892 }
893 if len(modinfo) == 1 {
894
895
896 modinfo = ""
897 }
898 }
899
900 func dumpgstatus(gp *g) {
901 thisg := getg()
902 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
903 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
904 }
905
906
907 func checkmcount() {
908 assertLockHeld(&sched.lock)
909
910
911
912
913
914
915
916
917
918 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
919 if count > sched.maxmcount {
920 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
921 throw("thread exhaustion")
922 }
923 }
924
925
926
927
928
929 func mReserveID() int64 {
930 assertLockHeld(&sched.lock)
931
932 if sched.mnext+1 < sched.mnext {
933 throw("runtime: thread ID overflow")
934 }
935 id := sched.mnext
936 sched.mnext++
937 checkmcount()
938 return id
939 }
940
941
942 func mcommoninit(mp *m, id int64) {
943 gp := getg()
944
945
946 if gp != gp.m.g0 {
947 callers(1, mp.createstack[:])
948 }
949
950 lock(&sched.lock)
951
952 if id >= 0 {
953 mp.id = id
954 } else {
955 mp.id = mReserveID()
956 }
957
958 mrandinit(mp)
959
960 mpreinit(mp)
961 if mp.gsignal != nil {
962 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
963 }
964
965
966
967 mp.alllink = allm
968
969
970
971 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
972 unlock(&sched.lock)
973
974
975 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
976 mp.cgoCallers = new(cgoCallers)
977 }
978 mProfStackInit(mp)
979 }
980
981
982
983
984
985 func mProfStackInit(mp *m) {
986 if debug.profstackdepth == 0 {
987
988
989 return
990 }
991 mp.profStack = makeProfStackFP()
992 mp.mLockProfile.stack = makeProfStackFP()
993 }
994
995
996
997
998 func makeProfStackFP() []uintptr {
999
1000
1001
1002
1003
1004
1005 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1006 }
1007
1008
1009
1010 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1011
1012
1013 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1014
1015 func (mp *m) becomeSpinning() {
1016 mp.spinning = true
1017 sched.nmspinning.Add(1)
1018 sched.needspinning.Store(0)
1019 }
1020
1021 func (mp *m) hasCgoOnStack() bool {
1022 return mp.ncgo > 0 || mp.isextra
1023 }
1024
1025 const (
1026
1027
1028 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1029
1030
1031
1032 osHasLowResClockInt = goos.IsWindows
1033
1034
1035
1036 osHasLowResClock = osHasLowResClockInt > 0
1037 )
1038
1039
1040 func ready(gp *g, traceskip int, next bool) {
1041 status := readgstatus(gp)
1042
1043
1044 mp := acquirem()
1045 if status&^_Gscan != _Gwaiting {
1046 dumpgstatus(gp)
1047 throw("bad g->status in ready")
1048 }
1049
1050
1051 trace := traceAcquire()
1052 casgstatus(gp, _Gwaiting, _Grunnable)
1053 if trace.ok() {
1054 trace.GoUnpark(gp, traceskip)
1055 traceRelease(trace)
1056 }
1057 runqput(mp.p.ptr(), gp, next)
1058 wakep()
1059 releasem(mp)
1060 }
1061
1062
1063
1064 const freezeStopWait = 0x7fffffff
1065
1066
1067
1068 var freezing atomic.Bool
1069
1070
1071
1072
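// freezetheworld is a best-effort attempt to stop all goroutines while the
// runtime is crashing. Unlike stopTheWorld it may be called repeatedly, must
// not take locks, and has no corresponding "start" operation: it marks the
// scheduler as stopping and preempts running goroutines a few times.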
1073 func freezetheworld() {
1074 freezing.Store(true)
1075 if debug.dontfreezetheworld > 0 {
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100 usleep(1000)
1101 return
1102 }
1103
1104
1105
1106
1107 for i := 0; i < 5; i++ {
1108
1109 sched.stopwait = freezeStopWait
1110 sched.gcwaiting.Store(true)
1111
1112 if !preemptall() {
1113 break
1114 }
1115 usleep(1000)
1116 }
1117
1118 usleep(1000)
1119 preemptall()
1120 usleep(1000)
1121 }
1122
1123
1124
1125
1126
1127 func readgstatus(gp *g) uint32 {
1128 return gp.atomicstatus.Load()
1129 }
1130
1131
1132
1133
1134
1135 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1136 success := false
1137
1138
1139 switch oldval {
1140 default:
1141 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1142 dumpgstatus(gp)
1143 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1144 case _Gscanrunnable,
1145 _Gscanwaiting,
1146 _Gscanrunning,
1147 _Gscansyscall,
1148 _Gscanpreempted:
1149 if newval == oldval&^_Gscan {
1150 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1151 }
1152 }
1153 if !success {
1154 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1155 dumpgstatus(gp)
1156 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1157 }
1158 releaseLockRankAndM(lockRankGscan)
1159 }
1160
1161
1162
1163 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1164 switch oldval {
1165 case _Grunnable,
1166 _Grunning,
1167 _Gwaiting,
1168 _Gsyscall:
1169 if newval == oldval|_Gscan {
1170 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1171 if r {
1172 acquireLockRankAndM(lockRankGscan)
1173 }
1174 return r
1175
1176 }
1177 }
1178 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1179 throw("castogscanstatus")
1180 panic("not reached")
1181 }
1182
1183
1184
1185 var casgstatusAlwaysTrack = false
1186
1187
1188
1189
1190
1191
1192
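// casgstatus atomically changes gp's status from oldval to newval, spinning
// until the swap succeeds. It must not be used to enter or leave a _Gscan
// status (use castogscanstatus / casfrom_Gscanstatus for that); if gp is
// currently in a _Gscan variant, the loop waits for the scanner to release
// it. It also maintains the scheduling-latency tracking fed into
// sched.timeToRun and the mutex wait-time accounting.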
1193 func casgstatus(gp *g, oldval, newval uint32) {
1194 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1195 systemstack(func() {
1196
1197
1198 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1199 throw("casgstatus: bad incoming values")
1200 })
1201 }
1202
1203 lockWithRankMayAcquire(nil, lockRankGscan)
1204
1205
1206 const yieldDelay = 5 * 1000
1207 var nextYield int64
1208
1209
1210
1211 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1212 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1213 systemstack(func() {
1214
1215
1216 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1217 })
1218 }
1219 if i == 0 {
1220 nextYield = nanotime() + yieldDelay
1221 }
1222 if nanotime() < nextYield {
1223 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1224 procyield(1)
1225 }
1226 } else {
1227 osyield()
1228 nextYield = nanotime() + yieldDelay/2
1229 }
1230 }
1231
1232 if gp.syncGroup != nil {
1233 systemstack(func() {
1234 gp.syncGroup.changegstatus(gp, oldval, newval)
1235 })
1236 }
1237
1238 if oldval == _Grunning {
1239
1240 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1241 gp.tracking = true
1242 }
1243 gp.trackingSeq++
1244 }
1245 if !gp.tracking {
1246 return
1247 }
1248
1249
1250
1251
1252
1253
1254 switch oldval {
1255 case _Grunnable:
1256
1257
1258
1259 now := nanotime()
1260 gp.runnableTime += now - gp.trackingStamp
1261 gp.trackingStamp = 0
1262 case _Gwaiting:
1263 if !gp.waitreason.isMutexWait() {
1264
1265 break
1266 }
1267
1268
1269
1270
1271
1272 now := nanotime()
1273 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1274 gp.trackingStamp = 0
1275 }
1276 switch newval {
1277 case _Gwaiting:
1278 if !gp.waitreason.isMutexWait() {
1279
1280 break
1281 }
1282
1283 now := nanotime()
1284 gp.trackingStamp = now
1285 case _Grunnable:
1286
1287
1288 now := nanotime()
1289 gp.trackingStamp = now
1290 case _Grunning:
1291
1292
1293
1294 gp.tracking = false
1295 sched.timeToRun.record(gp.runnableTime)
1296 gp.runnableTime = 0
1297 }
1298 }
1299
1300
1301
1302
1303 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1304
1305 gp.waitreason = reason
1306 casgstatus(gp, old, _Gwaiting)
1307 }
1308
1309
1310
1311
1312
1313 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1314 if !reason.isWaitingForSuspendG() {
1315 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1316 }
1317 casGToWaiting(gp, old, reason)
1318 }
1319
1320
1321
1322
1323
1324 func casGToPreemptScan(gp *g, old, new uint32) {
1325 if old != _Grunning || new != _Gscan|_Gpreempted {
1326 throw("bad g transition")
1327 }
1328 acquireLockRankAndM(lockRankGscan)
1329 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1330 }
1331
1332
1333
1334
1335
1336
1337 }
1338
1339
1340
1341
1342 func casGFromPreempted(gp *g, old, new uint32) bool {
1343 if old != _Gpreempted || new != _Gwaiting {
1344 throw("bad g transition")
1345 }
1346 gp.waitreason = waitReasonPreempted
1347 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1348 return false
1349 }
1350 if sg := gp.syncGroup; sg != nil {
1351 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1352 }
1353 return true
1354 }
1355
1356
1357 type stwReason uint8
1358
1359
1360
1361
1362 const (
1363 stwUnknown stwReason = iota
1364 stwGCMarkTerm
1365 stwGCSweepTerm
1366 stwWriteHeapDump
1367 stwGoroutineProfile
1368 stwGoroutineProfileCleanup
1369 stwAllGoroutinesStack
1370 stwReadMemStats
1371 stwAllThreadsSyscall
1372 stwGOMAXPROCS
1373 stwStartTrace
1374 stwStopTrace
1375 stwForTestCountPagesInUse
1376 stwForTestReadMetricsSlow
1377 stwForTestReadMemStatsSlow
1378 stwForTestPageCachePagesLeaked
1379 stwForTestResetDebugLog
1380 )
1381
1382 func (r stwReason) String() string {
1383 return stwReasonStrings[r]
1384 }
1385
1386 func (r stwReason) isGC() bool {
1387 return r == stwGCMarkTerm || r == stwGCSweepTerm
1388 }
1389
1390
1391
1392
1393 var stwReasonStrings = [...]string{
1394 stwUnknown: "unknown",
1395 stwGCMarkTerm: "GC mark termination",
1396 stwGCSweepTerm: "GC sweep termination",
1397 stwWriteHeapDump: "write heap dump",
1398 stwGoroutineProfile: "goroutine profile",
1399 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1400 stwAllGoroutinesStack: "all goroutines stack trace",
1401 stwReadMemStats: "read mem stats",
1402 stwAllThreadsSyscall: "AllThreadsSyscall",
1403 stwGOMAXPROCS: "GOMAXPROCS",
1404 stwStartTrace: "start trace",
1405 stwStopTrace: "stop trace",
1406 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1407 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1408 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1409 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1410 stwForTestResetDebugLog: "ResetDebugLog (test)",
1411 }
1412
1413
1414
1415 type worldStop struct {
1416 reason stwReason
1417 startedStopping int64
1418 finishedStopping int64
1419 stoppingCPUTime int64
1420 }
1421
1422
1423
1424
1425 var stopTheWorldContext worldStop
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
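// stopTheWorld stops all Ps from executing goroutines, interrupting them at
// GC safe points, and records reason as the reason for the stop. On return
// only the current goroutine's P is running. The caller must hold the world
// stopped no longer than necessary and then call startTheWorld with the
// returned worldStop; worldsema is held for the whole stop/start interval.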
1444 func stopTheWorld(reason stwReason) worldStop {
1445 semacquire(&worldsema)
1446 gp := getg()
1447 gp.m.preemptoff = reason.String()
1448 systemstack(func() {
1449 stopTheWorldContext = stopTheWorldWithSema(reason)
1450 })
1451 return stopTheWorldContext
1452 }
1453
1454
1455
1456
1457 func startTheWorld(w worldStop) {
1458 systemstack(func() { startTheWorldWithSema(0, w) })
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475 mp := acquirem()
1476 mp.preemptoff = ""
1477 semrelease1(&worldsema, true, 0)
1478 releasem(mp)
1479 }
1480
1481
1482
1483
1484 func stopTheWorldGC(reason stwReason) worldStop {
1485 semacquire(&gcsema)
1486 return stopTheWorld(reason)
1487 }
1488
1489
1490
1491
1492 func startTheWorldGC(w worldStop) {
1493 startTheWorld(w)
1494 semrelease(&gcsema)
1495 }
1496
1497
1498 var worldsema uint32 = 1
1499
1500
1501
1502
1503
1504
1505
1506 var gcsema uint32 = 1
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
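// stopTheWorldWithSema is the core implementation of stopTheWorld. The
// caller must hold worldsema and must not hold any other runtime locks. It
// preempts all running goroutines, claims Ps that are idle or in syscalls,
// and waits on sched.stopnote for the remaining Ps to stop voluntarily,
// returning timing details about the stop.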
1540 func stopTheWorldWithSema(reason stwReason) worldStop {
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1561
1562 trace := traceAcquire()
1563 if trace.ok() {
1564 trace.STWStart(reason)
1565 traceRelease(trace)
1566 }
1567 gp := getg()
1568
1569
1570
1571 if gp.m.locks > 0 {
1572 throw("stopTheWorld: holding locks")
1573 }
1574
1575 lock(&sched.lock)
1576 start := nanotime()
1577 sched.stopwait = gomaxprocs
1578 sched.gcwaiting.Store(true)
1579 preemptall()
1580
1581 gp.m.p.ptr().status = _Pgcstop
1582 gp.m.p.ptr().gcStopTime = start
1583 sched.stopwait--
1584
1585 trace = traceAcquire()
1586 for _, pp := range allp {
1587 s := pp.status
1588 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1589 if trace.ok() {
1590 trace.ProcSteal(pp, false)
1591 }
1592 pp.syscalltick++
1593 pp.gcStopTime = nanotime()
1594 sched.stopwait--
1595 }
1596 }
1597 if trace.ok() {
1598 traceRelease(trace)
1599 }
1600
1601
1602 now := nanotime()
1603 for {
1604 pp, _ := pidleget(now)
1605 if pp == nil {
1606 break
1607 }
1608 pp.status = _Pgcstop
1609 pp.gcStopTime = nanotime()
1610 sched.stopwait--
1611 }
1612 wait := sched.stopwait > 0
1613 unlock(&sched.lock)
1614
1615
1616 if wait {
1617 for {
1618
1619 if notetsleep(&sched.stopnote, 100*1000) {
1620 noteclear(&sched.stopnote)
1621 break
1622 }
1623 preemptall()
1624 }
1625 }
1626
1627 finish := nanotime()
1628 startTime := finish - start
1629 if reason.isGC() {
1630 sched.stwStoppingTimeGC.record(startTime)
1631 } else {
1632 sched.stwStoppingTimeOther.record(startTime)
1633 }
1634
1635
1636
1637
1638
1639 stoppingCPUTime := int64(0)
1640 bad := ""
1641 if sched.stopwait != 0 {
1642 bad = "stopTheWorld: not stopped (stopwait != 0)"
1643 } else {
1644 for _, pp := range allp {
1645 if pp.status != _Pgcstop {
1646 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1647 }
1648 if pp.gcStopTime == 0 && bad == "" {
1649 bad = "stopTheWorld: broken CPU time accounting"
1650 }
1651 stoppingCPUTime += finish - pp.gcStopTime
1652 pp.gcStopTime = 0
1653 }
1654 }
1655 if freezing.Load() {
1656
1657
1658
1659
1660 lock(&deadlock)
1661 lock(&deadlock)
1662 }
1663 if bad != "" {
1664 throw(bad)
1665 }
1666
1667 worldStopped()
1668
1669
1670 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1671
1672 return worldStop{
1673 reason: reason,
1674 startedStopping: start,
1675 finishedStopping: finish,
1676 stoppingCPUTime: stoppingCPUTime,
1677 }
1678 }
1679
1680
1681
1682
1683
1684
1685
1686 func startTheWorldWithSema(now int64, w worldStop) int64 {
1687 assertWorldStopped()
1688
1689 mp := acquirem()
1690 if netpollinited() {
1691 list, delta := netpoll(0)
1692 injectglist(&list)
1693 netpollAdjustWaiters(delta)
1694 }
1695 lock(&sched.lock)
1696
1697 procs := gomaxprocs
1698 if newprocs != 0 {
1699 procs = newprocs
1700 newprocs = 0
1701 }
1702 p1 := procresize(procs)
1703 sched.gcwaiting.Store(false)
1704 if sched.sysmonwait.Load() {
1705 sched.sysmonwait.Store(false)
1706 notewakeup(&sched.sysmonnote)
1707 }
1708 unlock(&sched.lock)
1709
1710 worldStarted()
1711
1712 for p1 != nil {
1713 p := p1
1714 p1 = p1.link.ptr()
1715 if p.m != 0 {
1716 mp := p.m.ptr()
1717 p.m = 0
1718 if mp.nextp != 0 {
1719 throw("startTheWorld: inconsistent mp->nextp")
1720 }
1721 mp.nextp.set(p)
1722 notewakeup(&mp.park)
1723 } else {
1724
1725 newm(nil, p, -1)
1726 }
1727 }
1728
1729
1730 if now == 0 {
1731 now = nanotime()
1732 }
1733 totalTime := now - w.startedStopping
1734 if w.reason.isGC() {
1735 sched.stwTotalTimeGC.record(totalTime)
1736 } else {
1737 sched.stwTotalTimeOther.record(totalTime)
1738 }
1739 trace := traceAcquire()
1740 if trace.ok() {
1741 trace.STWDone()
1742 traceRelease(trace)
1743 }
1744
1745
1746
1747
1748 wakep()
1749
1750 releasem(mp)
1751
1752 return now
1753 }
1754
1755
1756
1757 func usesLibcall() bool {
1758 switch GOOS {
1759 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1760 return true
1761 case "openbsd":
1762 return GOARCH != "mips64"
1763 }
1764 return false
1765 }
1766
1767
1768
1769 func mStackIsSystemAllocated() bool {
1770 switch GOOS {
1771 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1772 return true
1773 case "openbsd":
1774 return GOARCH != "mips64"
1775 }
1776 return false
1777 }
1778
1779
1780
1781 func mstart()
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792 func mstart0() {
1793 gp := getg()
1794
1795 osStack := gp.stack.lo == 0
1796 if osStack {
1797
1798
1799
1800
1801
1802
1803
1804
1805 size := gp.stack.hi
1806 if size == 0 {
1807 size = 16384 * sys.StackGuardMultiplier
1808 }
1809 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1810 gp.stack.lo = gp.stack.hi - size + 1024
1811 }
1812
1813
1814 gp.stackguard0 = gp.stack.lo + stackGuard
1815
1816
1817 gp.stackguard1 = gp.stackguard0
1818 mstart1()
1819
1820
1821 if mStackIsSystemAllocated() {
1822
1823
1824
1825 osStack = true
1826 }
1827 mexit(osStack)
1828 }
1829
1830
1831
1832
1833
1834 func mstart1() {
1835 gp := getg()
1836
1837 if gp != gp.m.g0 {
1838 throw("bad runtime·mstart")
1839 }
1840
1841
1842
1843
1844
1845
1846
1847 gp.sched.g = guintptr(unsafe.Pointer(gp))
1848 gp.sched.pc = sys.GetCallerPC()
1849 gp.sched.sp = sys.GetCallerSP()
1850
1851 asminit()
1852 minit()
1853
1854
1855
1856 if gp.m == &m0 {
1857 mstartm0()
1858 }
1859
1860 if debug.dataindependenttiming == 1 {
1861 sys.EnableDIT()
1862 }
1863
1864 if fn := gp.m.mstartfn; fn != nil {
1865 fn()
1866 }
1867
1868 if gp.m != &m0 {
1869 acquirep(gp.m.nextp.ptr())
1870 gp.m.nextp = 0
1871 }
1872 schedule()
1873 }
1874
1875
1876
1877
1878
1879
1880
1881 func mstartm0() {
1882
1883
1884
1885 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1886 cgoHasExtraM = true
1887 newextram()
1888 }
1889 initsig(false)
1890 }
1891
1892
1893
1894
1895 func mPark() {
1896 gp := getg()
1897 notesleep(&gp.m.park)
1898 noteclear(&gp.m.park)
1899 }
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
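// mexit tears down and exits the current OS thread. The main thread (m0) is
// never torn down; it hands off its P, is counted in nmfreed, and parks
// forever. Other ms are unlinked from allm and placed on sched.freem so
// their g0 stacks can be freed later, and the OS thread exits. osStack
// reports whether the g0 stack is owned by the OS rather than the Go heap.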
1911 func mexit(osStack bool) {
1912 mp := getg().m
1913
1914 if mp == &m0 {
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926 handoffp(releasep())
1927 lock(&sched.lock)
1928 sched.nmfreed++
1929 checkdead()
1930 unlock(&sched.lock)
1931 mPark()
1932 throw("locked m0 woke up")
1933 }
1934
1935 sigblock(true)
1936 unminit()
1937
1938
1939 if mp.gsignal != nil {
1940 stackfree(mp.gsignal.stack)
1941
1942
1943
1944
1945 mp.gsignal = nil
1946 }
1947
1948
1949 vgetrandomDestroy(mp)
1950
1951
1952 lock(&sched.lock)
1953 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1954 if *pprev == mp {
1955 *pprev = mp.alllink
1956 goto found
1957 }
1958 }
1959 throw("m not found in allm")
1960 found:
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 mp.freeWait.Store(freeMWait)
1976 mp.freelink = sched.freem
1977 sched.freem = mp
1978 unlock(&sched.lock)
1979
1980 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1981 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1982
1983
1984 handoffp(releasep())
1985
1986
1987
1988
1989
1990 lock(&sched.lock)
1991 sched.nmfreed++
1992 checkdead()
1993 unlock(&sched.lock)
1994
1995 if GOOS == "darwin" || GOOS == "ios" {
1996
1997
1998 if mp.signalPending.Load() != 0 {
1999 pendingPreemptSignals.Add(-1)
2000 }
2001 }
2002
2003
2004
2005 mdestroy(mp)
2006
2007 if osStack {
2008
2009 mp.freeWait.Store(freeMRef)
2010
2011
2012
2013 return
2014 }
2015
2016
2017
2018
2019
2020 exitThread(&mp.freeWait)
2021 }
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
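// forEachP calls fn(p) for every P at a GC safe point. fn is run directly
// for the current P and for idle Ps; Ps that are running or in a syscall are
// asked to reach a safe point and run fn themselves (see runSafePointFn).
// forEachP returns only after every P has executed fn.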
2033 func forEachP(reason waitReason, fn func(*p)) {
2034 systemstack(func() {
2035 gp := getg().m.curg
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052 casGToWaitingForSuspendG(gp, _Grunning, reason)
2053 forEachPInternal(fn)
2054 casgstatus(gp, _Gwaiting, _Grunning)
2055 })
2056 }
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067 func forEachPInternal(fn func(*p)) {
2068 mp := acquirem()
2069 pp := getg().m.p.ptr()
2070
2071 lock(&sched.lock)
2072 if sched.safePointWait != 0 {
2073 throw("forEachP: sched.safePointWait != 0")
2074 }
2075 sched.safePointWait = gomaxprocs - 1
2076 sched.safePointFn = fn
2077
2078
2079 for _, p2 := range allp {
2080 if p2 != pp {
2081 atomic.Store(&p2.runSafePointFn, 1)
2082 }
2083 }
2084 preemptall()
2085
2086
2087
2088
2089
2090
2091
2092 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2093 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2094 fn(p)
2095 sched.safePointWait--
2096 }
2097 }
2098
2099 wait := sched.safePointWait > 0
2100 unlock(&sched.lock)
2101
2102
2103 fn(pp)
2104
2105
2106
2107 for _, p2 := range allp {
2108 s := p2.status
2109
2110
2111
2112 trace := traceAcquire()
2113 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2114 if trace.ok() {
2115
2116 trace.ProcSteal(p2, false)
2117 traceRelease(trace)
2118 }
2119 p2.syscalltick++
2120 handoffp(p2)
2121 } else if trace.ok() {
2122 traceRelease(trace)
2123 }
2124 }
2125
2126
2127 if wait {
2128 for {
2129
2130
2131
2132
2133 if notetsleep(&sched.safePointNote, 100*1000) {
2134 noteclear(&sched.safePointNote)
2135 break
2136 }
2137 preemptall()
2138 }
2139 }
2140 if sched.safePointWait != 0 {
2141 throw("forEachP: not done")
2142 }
2143 for _, p2 := range allp {
2144 if p2.runSafePointFn != 0 {
2145 throw("forEachP: P did not run fn")
2146 }
2147 }
2148
2149 lock(&sched.lock)
2150 sched.safePointFn = nil
2151 unlock(&sched.lock)
2152 releasem(mp)
2153 }
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166 func runSafePointFn() {
2167 p := getg().m.p.ptr()
2168
2169
2170
2171 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2172 return
2173 }
2174 sched.safePointFn(p)
2175 lock(&sched.lock)
2176 sched.safePointWait--
2177 if sched.safePointWait == 0 {
2178 notewakeup(&sched.safePointNote)
2179 }
2180 unlock(&sched.lock)
2181 }
2182
2183
2184
2185
2186 var cgoThreadStart unsafe.Pointer
2187
2188 type cgothreadstart struct {
2189 g guintptr
2190 tls *uint64
2191 fn unsafe.Pointer
2192 }
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
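// allocm allocates a new m unassociated with any OS thread. If needed, pp is
// used as the P for allocation context. fn is recorded as the new m's
// mstartfn, and id is an optional pre-reserved m ID (pass -1 to have one
// assigned). As a side effect it frees the g0 stacks of ms that have
// finished exiting (sched.freem).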
2203 func allocm(pp *p, fn func(), id int64) *m {
2204 allocmLock.rlock()
2205
2206
2207
2208
2209 acquirem()
2210
2211 gp := getg()
2212 if gp.m.p == 0 {
2213 acquirep(pp)
2214 }
2215
2216
2217
2218 if sched.freem != nil {
2219 lock(&sched.lock)
2220 var newList *m
2221 for freem := sched.freem; freem != nil; {
2222
2223 wait := freem.freeWait.Load()
2224 if wait == freeMWait {
2225 next := freem.freelink
2226 freem.freelink = newList
2227 newList = freem
2228 freem = next
2229 continue
2230 }
2231
2232
2233
2234 if traceEnabled() || traceShuttingDown() {
2235 traceThreadDestroy(freem)
2236 }
2237
2238
2239
2240 if wait == freeMStack {
2241
2242
2243
2244 systemstack(func() {
2245 stackfree(freem.g0.stack)
2246 })
2247 }
2248 freem = freem.freelink
2249 }
2250 sched.freem = newList
2251 unlock(&sched.lock)
2252 }
2253
2254 mp := new(m)
2255 mp.mstartfn = fn
2256 mcommoninit(mp, id)
2257
2258
2259
2260 if iscgo || mStackIsSystemAllocated() {
2261 mp.g0 = malg(-1)
2262 } else {
2263 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2264 }
2265 mp.g0.m = mp
2266
2267 if pp == gp.m.p.ptr() {
2268 releasep()
2269 }
2270
2271 releasem(gp.m)
2272 allocmLock.runlock()
2273 return mp
2274 }
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
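// needm is called when a cgo callback arrives on a thread that was not
// created by Go and therefore has no m. It takes an m from the extra-m
// list, installs it on the current thread (TLS, g0 stack bounds, signal
// setup) and moves the m's curg from _Gdead to _Gsyscall so the callback can
// run Go code. signal reports that the caller is a signal handler, in which
// case tracing work that is unsafe in that context is skipped.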
2315 func needm(signal bool) {
2316 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2317
2318
2319
2320
2321
2322
2323 writeErrStr("fatal error: cgo callback before cgo call\n")
2324 exit(1)
2325 }
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335 var sigmask sigset
2336 sigsave(&sigmask)
2337 sigblock(false)
2338
2339
2340
2341
2342 mp, last := getExtraM()
2343
2344
2345
2346
2347
2348
2349
2350
2351 mp.needextram = last
2352
2353
2354 mp.sigmask = sigmask
2355
2356
2357
2358 osSetupTLS(mp)
2359
2360
2361
2362 setg(mp.g0)
2363 sp := sys.GetCallerSP()
2364 callbackUpdateSystemStack(mp, sp, signal)
2365
2366
2367
2368
2369 mp.isExtraInC = false
2370
2371
2372 asminit()
2373 minit()
2374
2375
2376
2377
2378
2379
2380 var trace traceLocker
2381 if !signal {
2382 trace = traceAcquire()
2383 }
2384
2385
2386 casgstatus(mp.curg, _Gdead, _Gsyscall)
2387 sched.ngsys.Add(-1)
2388
2389 if !signal {
2390 if trace.ok() {
2391 trace.GoCreateSyscall(mp.curg)
2392 traceRelease(trace)
2393 }
2394 }
2395 mp.isExtraInSig = signal
2396 }
2397
2398
2399
2400
2401 func needAndBindM() {
2402 needm(false)
2403
2404 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2405 cgoBindM()
2406 }
2407 }
2408
2409
2410
2411
2412 func newextram() {
2413 c := extraMWaiters.Swap(0)
2414 if c > 0 {
2415 for i := uint32(0); i < c; i++ {
2416 oneNewExtraM()
2417 }
2418 } else if extraMLength.Load() == 0 {
2419
2420 oneNewExtraM()
2421 }
2422 }
2423
2424
2425 func oneNewExtraM() {
2426
2427
2428
2429
2430
2431 mp := allocm(nil, nil, -1)
2432 gp := malg(4096)
2433 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2434 gp.sched.sp = gp.stack.hi
2435 gp.sched.sp -= 4 * goarch.PtrSize
2436 gp.sched.lr = 0
2437 gp.sched.g = guintptr(unsafe.Pointer(gp))
2438 gp.syscallpc = gp.sched.pc
2439 gp.syscallsp = gp.sched.sp
2440 gp.stktopsp = gp.sched.sp
2441
2442
2443
2444
2445 casgstatus(gp, _Gidle, _Gdead)
2446 gp.m = mp
2447 mp.curg = gp
2448 mp.isextra = true
2449
2450 mp.isExtraInC = true
2451 mp.lockedInt++
2452 mp.lockedg.set(gp)
2453 gp.lockedm.set(mp)
2454 gp.goid = sched.goidgen.Add(1)
2455 if raceenabled {
2456 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2457 }
2458
2459 allgadd(gp)
2460
2461
2462
2463
2464
2465 sched.ngsys.Add(1)
2466
2467
2468 addExtraM(mp)
2469 }
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
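// dropm undoes needm once a cgo callback has finished: it moves curg back to
// _Gdead, clears the recorded g0 stack bounds and the thread's g, puts the
// extra m back onto the extra-m list, and restores the thread's original
// signal mask so the thread can later be reused by another callback.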
2504 func dropm() {
2505
2506
2507
2508 mp := getg().m
2509
2510
2511
2512
2513
2514 var trace traceLocker
2515 if !mp.isExtraInSig {
2516 trace = traceAcquire()
2517 }
2518
2519
2520 casgstatus(mp.curg, _Gsyscall, _Gdead)
2521 mp.curg.preemptStop = false
2522 sched.ngsys.Add(1)
2523
2524 if !mp.isExtraInSig {
2525 if trace.ok() {
2526 trace.GoDestroySyscall()
2527 traceRelease(trace)
2528 }
2529 }
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544 mp.syscalltick--
2545
2546
2547
2548 mp.curg.trace.reset()
2549
2550
2551
2552
2553 if traceEnabled() || traceShuttingDown() {
2554
2555
2556
2557
2558
2559
2560
2561 lock(&sched.lock)
2562 traceThreadDestroy(mp)
2563 unlock(&sched.lock)
2564 }
2565 mp.isExtraInSig = false
2566
2567
2568
2569
2570
2571 sigmask := mp.sigmask
2572 sigblock(false)
2573 unminit()
2574
2575 setg(nil)
2576
2577
2578
2579 g0 := mp.g0
2580 g0.stack.hi = 0
2581 g0.stack.lo = 0
2582 g0.stackguard0 = 0
2583 g0.stackguard1 = 0
2584 mp.g0StackAccurate = false
2585
2586 putExtraM(mp)
2587
2588 msigrestore(sigmask)
2589 }
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611 func cgoBindM() {
2612 if GOOS == "windows" || GOOS == "plan9" {
2613 fatal("bindm in unexpected GOOS")
2614 }
2615 g := getg()
2616 if g.m.g0 != g {
2617 fatal("the current g is not g0")
2618 }
2619 if _cgo_bindm != nil {
2620 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2621 }
2622 }
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635 func getm() uintptr {
2636 return uintptr(unsafe.Pointer(getg().m))
2637 }
2638
2639 var (
2640
2641
2642
2643
2644
2645
2646 extraM atomic.Uintptr
2647
2648 extraMLength atomic.Uint32
2649
2650 extraMWaiters atomic.Uint32
2651
2652
2653 extraMInUse atomic.Uint32
2654 )
2655
2656
2657
2658
2659
2660
2661
2662
2663 func lockextra(nilokay bool) *m {
2664 const locked = 1
2665
2666 incr := false
2667 for {
2668 old := extraM.Load()
2669 if old == locked {
2670 osyield_no_g()
2671 continue
2672 }
2673 if old == 0 && !nilokay {
2674 if !incr {
2675
2676
2677
2678 extraMWaiters.Add(1)
2679 incr = true
2680 }
2681 usleep_no_g(1)
2682 continue
2683 }
2684 if extraM.CompareAndSwap(old, locked) {
2685 return (*m)(unsafe.Pointer(old))
2686 }
2687 osyield_no_g()
2688 continue
2689 }
2690 }
2691
2692
2693 func unlockextra(mp *m, delta int32) {
2694 extraMLength.Add(delta)
2695 extraM.Store(uintptr(unsafe.Pointer(mp)))
2696 }
2697
2698
2699
2700
2701
2702
2703
2704
2705 func getExtraM() (mp *m, last bool) {
2706 mp = lockextra(false)
2707 extraMInUse.Add(1)
2708 unlockextra(mp.schedlink.ptr(), -1)
2709 return mp, mp.schedlink.ptr() == nil
2710 }
2711
2712
2713
2714
2715
2716 func putExtraM(mp *m) {
2717 extraMInUse.Add(-1)
2718 addExtraM(mp)
2719 }
2720
2721
2722
2723
2724 func addExtraM(mp *m) {
2725 mnext := lockextra(true)
2726 mp.schedlink.set(mnext)
2727 unlockextra(mp, 1)
2728 }
2729
2730 var (
2731
2732
2733
2734 allocmLock rwmutex
2735
2736
2737
2738
2739 execLock rwmutex
2740 )
2741
2742
2743
2744 const (
2745 failthreadcreate = "runtime: failed to create new OS thread\n"
2746 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2747 )
2748
2749
2750
2751
2752 var newmHandoff struct {
2753 lock mutex
2754
2755
2756
2757 newm muintptr
2758
2759
2760
2761 waiting bool
2762 wake note
2763
2764
2765
2766
2767 haveTemplateThread uint32
2768 }
2769
2770
2771
2772
2773
2774
2775
2776
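// newm creates a new m that starts by running fn (or the scheduler if fn is
// nil) and takes ownership of pp. id is an optional pre-reserved m ID (-1 to
// allocate one). If the calling thread is locked to a goroutine or is a cgo
// thread, creation of the OS thread is delegated to the template thread,
// which is known to be in a clean state.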
2777 func newm(fn func(), pp *p, id int64) {
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788 acquirem()
2789
2790 mp := allocm(pp, fn, id)
2791 mp.nextp.set(pp)
2792 mp.sigmask = initSigmask
2793 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805 lock(&newmHandoff.lock)
2806 if newmHandoff.haveTemplateThread == 0 {
2807 throw("on a locked thread with no template thread")
2808 }
2809 mp.schedlink = newmHandoff.newm
2810 newmHandoff.newm.set(mp)
2811 if newmHandoff.waiting {
2812 newmHandoff.waiting = false
2813 notewakeup(&newmHandoff.wake)
2814 }
2815 unlock(&newmHandoff.lock)
2816
2817
2818
2819 releasem(getg().m)
2820 return
2821 }
2822 newm1(mp)
2823 releasem(getg().m)
2824 }
2825
2826 func newm1(mp *m) {
2827 if iscgo {
2828 var ts cgothreadstart
2829 if _cgo_thread_start == nil {
2830 throw("_cgo_thread_start missing")
2831 }
2832 ts.g.set(mp.g0)
2833 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2834 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2835 if msanenabled {
2836 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2837 }
2838 if asanenabled {
2839 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2840 }
2841 execLock.rlock()
2842 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2843 execLock.runlock()
2844 return
2845 }
2846 execLock.rlock()
2847 newosproc(mp)
2848 execLock.runlock()
2849 }
2850
2851
2852
2853
2854
2855 func startTemplateThread() {
2856 if GOARCH == "wasm" {
2857 return
2858 }
2859
2860
2861
2862 mp := acquirem()
2863 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2864 releasem(mp)
2865 return
2866 }
2867 newm(templateThread, nil, -1)
2868 releasem(mp)
2869 }
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883 func templateThread() {
2884 lock(&sched.lock)
2885 sched.nmsys++
2886 checkdead()
2887 unlock(&sched.lock)
2888
2889 for {
2890 lock(&newmHandoff.lock)
2891 for newmHandoff.newm != 0 {
2892 newm := newmHandoff.newm.ptr()
2893 newmHandoff.newm = 0
2894 unlock(&newmHandoff.lock)
2895 for newm != nil {
2896 next := newm.schedlink.ptr()
2897 newm.schedlink = 0
2898 newm1(newm)
2899 newm = next
2900 }
2901 lock(&newmHandoff.lock)
2902 }
2903 newmHandoff.waiting = true
2904 noteclear(&newmHandoff.wake)
2905 unlock(&newmHandoff.lock)
2906 notesleep(&newmHandoff.wake)
2907 }
2908 }
2909
2910
2911
2912 func stopm() {
2913 gp := getg()
2914
2915 if gp.m.locks != 0 {
2916 throw("stopm holding locks")
2917 }
2918 if gp.m.p != 0 {
2919 throw("stopm holding p")
2920 }
2921 if gp.m.spinning {
2922 throw("stopm spinning")
2923 }
2924
2925 lock(&sched.lock)
2926 mput(gp.m)
2927 unlock(&sched.lock)
2928 mPark()
2929 acquirep(gp.m.nextp.ptr())
2930 gp.m.nextp = 0
2931 }
2932
2933 func mspinning() {
2934
2935 getg().m.spinning = true
2936 }
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
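// startm schedules some m to run pp (creating a new m if none is idle). If
// pp is nil it tries to grab an idle P and does nothing if there is none.
// spinning means the caller has incremented nmspinning and the new m starts
// out spinning (a P must be supplied in that case); lockheld means
// sched.lock is already held. May run without a P, so write barriers are not
// allowed.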
2955 func startm(pp *p, spinning, lockheld bool) {
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972 mp := acquirem()
2973 if !lockheld {
2974 lock(&sched.lock)
2975 }
2976 if pp == nil {
2977 if spinning {
2978
2979
2980
2981 throw("startm: P required for spinning=true")
2982 }
2983 pp, _ = pidleget(0)
2984 if pp == nil {
2985 if !lockheld {
2986 unlock(&sched.lock)
2987 }
2988 releasem(mp)
2989 return
2990 }
2991 }
2992 nmp := mget()
2993 if nmp == nil {
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008 id := mReserveID()
3009 unlock(&sched.lock)
3010
3011 var fn func()
3012 if spinning {
3013
3014 fn = mspinning
3015 }
3016 newm(fn, pp, id)
3017
3018 if lockheld {
3019 lock(&sched.lock)
3020 }
3021
3022
3023 releasem(mp)
3024 return
3025 }
3026 if !lockheld {
3027 unlock(&sched.lock)
3028 }
3029 if nmp.spinning {
3030 throw("startm: m is spinning")
3031 }
3032 if nmp.nextp != 0 {
3033 throw("startm: m has p")
3034 }
3035 if spinning && !runqempty(pp) {
3036 throw("startm: p has runnable gs")
3037 }
3038
3039 nmp.spinning = spinning
3040 nmp.nextp.set(pp)
3041 notewakeup(&nmp.park)
3042
3043
3044 releasem(mp)
3045 }
3046
3047
3048
3049
3050
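// handoffp hands off the P from a syscall or from a locked m. It starts an m
// to run pp if there is local, global, trace, or GC work, or if starting one
// preserves the spinning-m invariant; otherwise pp is put on the idle list,
// and the netpoller is woken if pp has a pending timer. Always runs without
// a P, so write barriers are not allowed.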
3051 func handoffp(pp *p) {
3052
3053
3054
3055
3056 if !runqempty(pp) || sched.runqsize != 0 {
3057 startm(pp, false, false)
3058 return
3059 }
3060
3061 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3062 startm(pp, false, false)
3063 return
3064 }
3065
3066 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3067 startm(pp, false, false)
3068 return
3069 }
3070
3071
3072 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3073 sched.needspinning.Store(0)
3074 startm(pp, true, false)
3075 return
3076 }
3077 lock(&sched.lock)
3078 if sched.gcwaiting.Load() {
3079 pp.status = _Pgcstop
3080 pp.gcStopTime = nanotime()
3081 sched.stopwait--
3082 if sched.stopwait == 0 {
3083 notewakeup(&sched.stopnote)
3084 }
3085 unlock(&sched.lock)
3086 return
3087 }
3088 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3089 sched.safePointFn(pp)
3090 sched.safePointWait--
3091 if sched.safePointWait == 0 {
3092 notewakeup(&sched.safePointNote)
3093 }
3094 }
3095 if sched.runqsize != 0 {
3096 unlock(&sched.lock)
3097 startm(pp, false, false)
3098 return
3099 }
3100
3101
3102 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3103 unlock(&sched.lock)
3104 startm(pp, false, false)
3105 return
3106 }
3107
3108
3109
3110 when := pp.timers.wakeTime()
3111 pidleput(pp, 0)
3112 unlock(&sched.lock)
3113
3114 if when != 0 {
3115 wakeNetPoller(when)
3116 }
3117 }
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
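// wakep tries to add one more P to execute goroutines, spinning up an m for
// it. It is called when a goroutine is made runnable and does nothing if
// another m is already spinning or no idle P is available.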
3132 func wakep() {
3133
3134
3135 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3136 return
3137 }
3138
3139
3140
3141
3142
3143
3144 mp := acquirem()
3145
3146 var pp *p
3147 lock(&sched.lock)
3148 pp, _ = pidlegetSpinning(0)
3149 if pp == nil {
3150 if sched.nmspinning.Add(-1) < 0 {
3151 throw("wakep: negative nmspinning")
3152 }
3153 unlock(&sched.lock)
3154 releasem(mp)
3155 return
3156 }
3157
3158
3159
3160
3161 unlock(&sched.lock)
3162
3163 startm(pp, true, false)
3164
3165 releasem(mp)
3166 }
3167
3168
3169
3170 func stoplockedm() {
3171 gp := getg()
3172
3173 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3174 throw("stoplockedm: inconsistent locking")
3175 }
3176 if gp.m.p != 0 {
3177
3178 pp := releasep()
3179 handoffp(pp)
3180 }
3181 incidlelocked(1)
3182
3183 mPark()
3184 status := readgstatus(gp.m.lockedg.ptr())
3185 if status&^_Gscan != _Grunnable {
3186 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3187 dumpgstatus(gp.m.lockedg.ptr())
3188 throw("stoplockedm: not runnable")
3189 }
3190 acquirep(gp.m.nextp.ptr())
3191 gp.m.nextp = 0
3192 }
3193
3194
3195
3196
3197
3198 func startlockedm(gp *g) {
3199 mp := gp.lockedm.ptr()
3200 if mp == getg().m {
3201 throw("startlockedm: locked to me")
3202 }
3203 if mp.nextp != 0 {
3204 throw("startlockedm: m has p")
3205 }
3206
3207 incidlelocked(-1)
3208 pp := releasep()
3209 mp.nextp.set(pp)
3210 notewakeup(&mp.park)
3211 stopm()
3212 }
3213
3214
3215
3216 func gcstopm() {
3217 gp := getg()
3218
3219 if !sched.gcwaiting.Load() {
3220 throw("gcstopm: not waiting for gc")
3221 }
3222 if gp.m.spinning {
3223 gp.m.spinning = false
3224
3225
3226 if sched.nmspinning.Add(-1) < 0 {
3227 throw("gcstopm: negative nmspinning")
3228 }
3229 }
3230 pp := releasep()
3231 lock(&sched.lock)
3232 pp.status = _Pgcstop
3233 pp.gcStopTime = nanotime()
3234 sched.stopwait--
3235 if sched.stopwait == 0 {
3236 notewakeup(&sched.stopnote)
3237 }
3238 unlock(&sched.lock)
3239 stopm()
3240 }
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
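// execute schedules gp to run on the current m, switching to gp's stack via
// gogo and never returning. If inheritTime is true, gp inherits the
// remaining time in the current time slice; otherwise the P's schedtick is
// advanced to start a new slice.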
3251 func execute(gp *g, inheritTime bool) {
3252 mp := getg().m
3253
3254 if goroutineProfile.active {
3255
3256
3257
3258 tryRecordGoroutineProfile(gp, nil, osyield)
3259 }
3260
3261
3262
3263 mp.curg = gp
3264 gp.m = mp
3265 casgstatus(gp, _Grunnable, _Grunning)
3266 gp.waitsince = 0
3267 gp.preempt = false
3268 gp.stackguard0 = gp.stack.lo + stackGuard
3269 if !inheritTime {
3270 mp.p.ptr().schedtick++
3271 }
3272
3273
3274 hz := sched.profilehz
3275 if mp.profilehz != hz {
3276 setThreadCPUProfiler(hz)
3277 }
3278
3279 trace := traceAcquire()
3280 if trace.ok() {
3281 trace.GoStart()
3282 traceRelease(trace)
3283 }
3284
3285 gogo(&gp.sched)
3286 }
3287
3288
3289
3290
3291
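// findRunnable blocks until it finds a goroutine for the current m to run.
// It tries, in rough order: the trace reader, GC mark workers, the global
// run queue (occasionally, for fairness), the local run queue, the
// netpoller, work stealing from other Ps, and idle-priority GC work, parking
// the m (stopm) and retrying when nothing is found. tryWakeP reports that
// the returned goroutine is special (trace reader or GC worker), so the
// caller should consider waking an additional P.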
3292 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3293 mp := getg().m
3294
3295
3296
3297
3298
3299 top:
3300 pp := mp.p.ptr()
3301 if sched.gcwaiting.Load() {
3302 gcstopm()
3303 goto top
3304 }
3305 if pp.runSafePointFn != 0 {
3306 runSafePointFn()
3307 }
3308
3309
3310
3311
3312
3313 now, pollUntil, _ := pp.timers.check(0)
3314
3315
3316 if traceEnabled() || traceShuttingDown() {
3317 gp := traceReader()
3318 if gp != nil {
3319 trace := traceAcquire()
3320 casgstatus(gp, _Gwaiting, _Grunnable)
3321 if trace.ok() {
3322 trace.GoUnpark(gp, 0)
3323 traceRelease(trace)
3324 }
3325 return gp, false, true
3326 }
3327 }
3328
3329
3330 if gcBlackenEnabled != 0 {
3331 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3332 if gp != nil {
3333 return gp, false, true
3334 }
3335 now = tnow
3336 }
3337
3338
3339
3340
3341 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3342 lock(&sched.lock)
3343 gp := globrunqget(pp, 1)
3344 unlock(&sched.lock)
3345 if gp != nil {
3346 return gp, false, false
3347 }
3348 }
3349
3350
3351 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3352 if gp := wakefing(); gp != nil {
3353 ready(gp, 0, true)
3354 }
3355 }
3356 if *cgo_yield != nil {
3357 asmcgocall(*cgo_yield, nil)
3358 }
3359
3360
3361 if gp, inheritTime := runqget(pp); gp != nil {
3362 return gp, inheritTime, false
3363 }
3364
3365
3366 if sched.runqsize != 0 {
3367 lock(&sched.lock)
3368 gp := globrunqget(pp, 0)
3369 unlock(&sched.lock)
3370 if gp != nil {
3371 return gp, false, false
3372 }
3373 }
3374
3375
3376
3377
3378
3379
3380
3381
3382 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3383 if list, delta := netpoll(0); !list.empty() {
3384 gp := list.pop()
3385 injectglist(&list)
3386 netpollAdjustWaiters(delta)
3387 trace := traceAcquire()
3388 casgstatus(gp, _Gwaiting, _Grunnable)
3389 if trace.ok() {
3390 trace.GoUnpark(gp, 0)
3391 traceRelease(trace)
3392 }
3393 return gp, false, false
3394 }
3395 }
3396
3397
3398
3399
3400
3401
3402 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3403 if !mp.spinning {
3404 mp.becomeSpinning()
3405 }
3406
3407 gp, inheritTime, tnow, w, newWork := stealWork(now)
3408 if gp != nil {
3409
3410 return gp, inheritTime, false
3411 }
3412 if newWork {
3413
3414
3415 goto top
3416 }
3417
3418 now = tnow
3419 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3420
3421 pollUntil = w
3422 }
3423 }
3424
3425
3426
3427
3428
3429 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3430 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3431 if node != nil {
3432 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3433 gp := node.gp.ptr()
3434
3435 trace := traceAcquire()
3436 casgstatus(gp, _Gwaiting, _Grunnable)
3437 if trace.ok() {
3438 trace.GoUnpark(gp, 0)
3439 traceRelease(trace)
3440 }
3441 return gp, false, false
3442 }
3443 gcController.removeIdleMarkWorker()
3444 }
3445
3446
3447
3448
3449
3450 gp, otherReady := beforeIdle(now, pollUntil)
3451 if gp != nil {
3452 trace := traceAcquire()
3453 casgstatus(gp, _Gwaiting, _Grunnable)
3454 if trace.ok() {
3455 trace.GoUnpark(gp, 0)
3456 traceRelease(trace)
3457 }
3458 return gp, false, false
3459 }
3460 if otherReady {
3461 goto top
3462 }
3463
3464
3465
3466
3467
3468 allpSnapshot := allp
3469
3470
3471 idlepMaskSnapshot := idlepMask
3472 timerpMaskSnapshot := timerpMask
3473
3474
3475 lock(&sched.lock)
3476 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3477 unlock(&sched.lock)
3478 goto top
3479 }
3480 if sched.runqsize != 0 {
3481 gp := globrunqget(pp, 0)
3482 unlock(&sched.lock)
3483 return gp, false, false
3484 }
3485 if !mp.spinning && sched.needspinning.Load() == 1 {
3486
3487 mp.becomeSpinning()
3488 unlock(&sched.lock)
3489 goto top
3490 }
3491 if releasep() != pp {
3492 throw("findrunnable: wrong p")
3493 }
3494 now = pidleput(pp, now)
3495 unlock(&sched.lock)
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533 wasSpinning := mp.spinning
3534 if mp.spinning {
3535 mp.spinning = false
3536 if sched.nmspinning.Add(-1) < 0 {
3537 throw("findrunnable: negative nmspinning")
3538 }
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551 lock(&sched.lock)
3552 if sched.runqsize != 0 {
3553 pp, _ := pidlegetSpinning(0)
3554 if pp != nil {
3555 gp := globrunqget(pp, 0)
3556 if gp == nil {
3557 throw("global runq empty with non-zero runqsize")
3558 }
3559 unlock(&sched.lock)
3560 acquirep(pp)
3561 mp.becomeSpinning()
3562 return gp, false, false
3563 }
3564 }
3565 unlock(&sched.lock)
3566
3567 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3568 if pp != nil {
3569 acquirep(pp)
3570 mp.becomeSpinning()
3571 goto top
3572 }
3573
3574
3575 pp, gp := checkIdleGCNoP()
3576 if pp != nil {
3577 acquirep(pp)
3578 mp.becomeSpinning()
3579
3580
3581 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3582 trace := traceAcquire()
3583 casgstatus(gp, _Gwaiting, _Grunnable)
3584 if trace.ok() {
3585 trace.GoUnpark(gp, 0)
3586 traceRelease(trace)
3587 }
3588 return gp, false, false
3589 }
3590
3591
3592
3593
3594
3595
3596
3597 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3598 }
3599
3600
3601 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3602 sched.pollUntil.Store(pollUntil)
3603 if mp.p != 0 {
3604 throw("findrunnable: netpoll with p")
3605 }
3606 if mp.spinning {
3607 throw("findrunnable: netpoll with spinning")
3608 }
3609 delay := int64(-1)
3610 if pollUntil != 0 {
3611 if now == 0 {
3612 now = nanotime()
3613 }
3614 delay = pollUntil - now
3615 if delay < 0 {
3616 delay = 0
3617 }
3618 }
3619 if faketime != 0 {
3620
3621 delay = 0
3622 }
3623 list, delta := netpoll(delay)
3624
3625 now = nanotime()
3626 sched.pollUntil.Store(0)
3627 sched.lastpoll.Store(now)
3628 if faketime != 0 && list.empty() {
3629
3630
3631 stopm()
3632 goto top
3633 }
3634 lock(&sched.lock)
3635 pp, _ := pidleget(now)
3636 unlock(&sched.lock)
3637 if pp == nil {
3638 injectglist(&list)
3639 netpollAdjustWaiters(delta)
3640 } else {
3641 acquirep(pp)
3642 if !list.empty() {
3643 gp := list.pop()
3644 injectglist(&list)
3645 netpollAdjustWaiters(delta)
3646 trace := traceAcquire()
3647 casgstatus(gp, _Gwaiting, _Grunnable)
3648 if trace.ok() {
3649 trace.GoUnpark(gp, 0)
3650 traceRelease(trace)
3651 }
3652 return gp, false, false
3653 }
3654 if wasSpinning {
3655 mp.becomeSpinning()
3656 }
3657 goto top
3658 }
3659 } else if pollUntil != 0 && netpollinited() {
3660 pollerPollUntil := sched.pollUntil.Load()
3661 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3662 netpollBreak()
3663 }
3664 }
3665 stopm()
3666 goto top
3667 }
3668
3669
3670
3671
3672
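// pollWork reports whether there is non-background work this P could be
// doing right now: something on the global run queue, something on the
// local run queue, or ready network poller events (which it injects
// before returning true).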
3673 func pollWork() bool {
3674 if sched.runqsize != 0 {
3675 return true
3676 }
3677 p := getg().m.p.ptr()
3678 if !runqempty(p) {
3679 return true
3680 }
3681 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3682 if list, delta := netpoll(0); !list.empty() {
3683 injectglist(&list)
3684 netpollAdjustWaiters(delta)
3685 return true
3686 }
3687 }
3688 return false
3689 }
3690
3691
3692
3693
3694
3695
3696
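// stealWork attempts to take work from any other P. It makes stealTries
// passes over all Ps in random order; only on the final pass does it also
// run ready timers on the victim and consider stealing its runnext slot.
// It returns the stolen goroutine (with inheritTime), an updated now, the
// earliest timer wake time it observed, and newWork, which tells the
// caller to retry because running a timer (or a pending GC stop) may have
// readied more goroutines.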
3697 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3698 pp := getg().m.p.ptr()
3699
3700 ranTimer := false
3701
3702 const stealTries = 4
3703 for i := 0; i < stealTries; i++ {
3704 stealTimersOrRunNextG := i == stealTries-1
3705
3706 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3707 if sched.gcwaiting.Load() {
3708
3709 return nil, false, now, pollUntil, true
3710 }
3711 p2 := allp[enum.position()]
3712 if pp == p2 {
3713 continue
3714 }
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3730 tnow, w, ran := p2.timers.check(now)
3731 now = tnow
3732 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3733 pollUntil = w
3734 }
3735 if ran {
3736
3737
3738
3739
3740
3741
3742
3743
3744 if gp, inheritTime := runqget(pp); gp != nil {
3745 return gp, inheritTime, now, pollUntil, ranTimer
3746 }
3747 ranTimer = true
3748 }
3749 }
3750
3751
3752 if !idlepMask.read(enum.position()) {
3753 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3754 return gp, false, now, pollUntil, ranTimer
3755 }
3756 }
3757 }
3758 }
3759
3760
3761
3762
3763 return nil, false, now, pollUntil, ranTimer
3764 }
3765
3766
3767
3768
3769
3770
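// checkRunqsNoP is called by an M that has released its P but has not yet
// parked. Using the snapshots taken while it still held a P, it looks for
// any P whose run queue appears non-empty; if one is found it tries to
// grab an idle P so the caller can become spinning again and retry,
// returning that P or nil.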
3771 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3772 for id, p2 := range allpSnapshot {
3773 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3774 lock(&sched.lock)
3775 pp, _ := pidlegetSpinning(0)
3776 if pp == nil {
3777
3778 unlock(&sched.lock)
3779 return nil
3780 }
3781 unlock(&sched.lock)
3782 return pp
3783 }
3784 }
3785
3786
3787 return nil
3788 }
3789
3790
3791
3792
3793 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3794 for id, p2 := range allpSnapshot {
3795 if timerpMaskSnapshot.read(uint32(id)) {
3796 w := p2.timers.wakeTime()
3797 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3798 pollUntil = w
3799 }
3800 }
3801 }
3802
3803 return pollUntil
3804 }
3805
3806
3807
3808
3809
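// checkIdleGCNoP checks whether the GC wants another idle mark worker and,
// if so, tries to claim both an idle P and a worker G from the background
// worker pool. It returns the P and the worker's goroutine, or nil, nil if
// idle marking is not wanted or the resources are unavailable.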
3810 func checkIdleGCNoP() (*p, *g) {
3811
3812
3813
3814
3815
3816
3817 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3818 return nil, nil
3819 }
3820 if !gcMarkWorkAvailable(nil) {
3821 return nil, nil
3822 }
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841 lock(&sched.lock)
3842 pp, now := pidlegetSpinning(0)
3843 if pp == nil {
3844 unlock(&sched.lock)
3845 return nil, nil
3846 }
3847
3848
3849 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3850 pidleput(pp, now)
3851 unlock(&sched.lock)
3852 return nil, nil
3853 }
3854
3855 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3856 if node == nil {
3857 pidleput(pp, now)
3858 unlock(&sched.lock)
3859 gcController.removeIdleMarkWorker()
3860 return nil, nil
3861 }
3862
3863 unlock(&sched.lock)
3864
3865 return pp, node.gp.ptr()
3866 }
3867
3868
3869
3870
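// wakeNetPoller makes sure some thread will service the network poller
// (and hence timers) no later than when. If a thread is already blocked in
// netpoll, it is interrupted via netpollBreak only when its current
// deadline is later than when; otherwise an idle P/M is woken via wakep
// (except on plan9) so that findRunnable takes over the polling duty.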
3871 func wakeNetPoller(when int64) {
3872 if sched.lastpoll.Load() == 0 {
3873
3874
3875
3876
3877 pollerPollUntil := sched.pollUntil.Load()
3878 if pollerPollUntil == 0 || pollerPollUntil > when {
3879 netpollBreak()
3880 }
3881 } else {
3882
3883
3884 if GOOS != "plan9" {
3885 wakep()
3886 }
3887 }
3888 }
3889
3890 func resetspinning() {
3891 gp := getg()
3892 if !gp.m.spinning {
3893 throw("resetspinning: not a spinning m")
3894 }
3895 gp.m.spinning = false
3896 nmspinning := sched.nmspinning.Add(-1)
3897 if nmspinning < 0 {
3898 throw("findrunnable: negative nmspinning")
3899 }
3900
3901
3902
3903 wakep()
3904 }
3905
3906
3907
3908
3909
3910
3911
3912
3913
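// injectglist marks every goroutine on glist runnable and distributes them
// across run queues, then clears glist. Without a current P everything
// goes on the global queue and idle Ps are started to absorb it; with a P,
// one goroutine per idle P goes to the global queue (each with an M
// started for it) and the remainder lands on the local run queue. It may
// take sched.lock, so the caller must not hold it.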
3914 func injectglist(glist *gList) {
3915 if glist.empty() {
3916 return
3917 }
3918
3919
3920
3921 head := glist.head.ptr()
3922 var tail *g
3923 qsize := 0
3924 trace := traceAcquire()
3925 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3926 tail = gp
3927 qsize++
3928 casgstatus(gp, _Gwaiting, _Grunnable)
3929 if trace.ok() {
3930 trace.GoUnpark(gp, 0)
3931 }
3932 }
3933 if trace.ok() {
3934 traceRelease(trace)
3935 }
3936
3937
3938 var q gQueue
3939 q.head.set(head)
3940 q.tail.set(tail)
3941 *glist = gList{}
3942
3943 startIdle := func(n int) {
3944 for i := 0; i < n; i++ {
3945 mp := acquirem()
3946 lock(&sched.lock)
3947
3948 pp, _ := pidlegetSpinning(0)
3949 if pp == nil {
3950 unlock(&sched.lock)
3951 releasem(mp)
3952 break
3953 }
3954
3955 startm(pp, false, true)
3956 unlock(&sched.lock)
3957 releasem(mp)
3958 }
3959 }
3960
3961 pp := getg().m.p.ptr()
3962 if pp == nil {
3963 lock(&sched.lock)
3964 globrunqputbatch(&q, int32(qsize))
3965 unlock(&sched.lock)
3966 startIdle(qsize)
3967 return
3968 }
3969
3970 npidle := int(sched.npidle.Load())
3971 var (
3972 globq gQueue
3973 n int
3974 )
3975 for n = 0; n < npidle && !q.empty(); n++ {
3976 g := q.pop()
3977 globq.pushBack(g)
3978 }
3979 if n > 0 {
3980 lock(&sched.lock)
3981 globrunqputbatch(&globq, int32(n))
3982 unlock(&sched.lock)
3983 startIdle(n)
3984 qsize -= n
3985 }
3986
3987 if !q.empty() {
3988 runqputbatch(pp, &q, qsize)
3989 }
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004 wakep()
4005 }
4006
4007
4008
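// schedule performs one round of scheduling: find a runnable goroutine and
// execute it. It never returns; execute jumps into the chosen goroutine,
// and the M re-enters schedule only when that goroutine blocks, yields, is
// preempted, or exits.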
4009 func schedule() {
4010 mp := getg().m
4011
4012 if mp.locks != 0 {
4013 throw("schedule: holding locks")
4014 }
4015
4016 if mp.lockedg != 0 {
4017 stoplockedm()
4018 execute(mp.lockedg.ptr(), false)
4019 }
4020
4021
4022
4023 if mp.incgo {
4024 throw("schedule: in cgo")
4025 }
4026
4027 top:
4028 pp := mp.p.ptr()
4029 pp.preempt = false
4030
4031
4032
4033
4034 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4035 throw("schedule: spinning with local work")
4036 }
4037
4038 gp, inheritTime, tryWakeP := findRunnable()
4039
4040 if debug.dontfreezetheworld > 0 && freezing.Load() {
4041
4042
4043
4044
4045
4046
4047
4048 lock(&deadlock)
4049 lock(&deadlock)
4050 }
4051
4052
4053
4054
4055 if mp.spinning {
4056 resetspinning()
4057 }
4058
4059 if sched.disable.user && !schedEnabled(gp) {
4060
4061
4062
4063 lock(&sched.lock)
4064 if schedEnabled(gp) {
4065
4066
4067 unlock(&sched.lock)
4068 } else {
4069 sched.disable.runnable.pushBack(gp)
4070 sched.disable.n++
4071 unlock(&sched.lock)
4072 goto top
4073 }
4074 }
4075
4076
4077
4078 if tryWakeP {
4079 wakep()
4080 }
4081 if gp.lockedm != 0 {
4082
4083
4084 startlockedm(gp)
4085 goto top
4086 }
4087
4088 execute(gp, inheritTime)
4089 }
4090
4091
4092
4093
4094
4095
4096
4097
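// dropg severs the link between the current M and its current user
// goroutine (m.curg), clearing both directions without write barriers.
// Callers first move the goroutine out of _Grunning, so by the time it is
// visible elsewhere it is no longer attached to this M.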
4098 func dropg() {
4099 gp := getg()
4100
4101 setMNoWB(&gp.m.curg.m, nil)
4102 setGNoWB(&gp.m.curg, nil)
4103 }
4104
4105 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4106 unlock((*mutex)(lock))
4107 return true
4108 }
4109
4110
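// park_m completes gopark on the g0 stack: it emits the block event for
// the trace, moves gp from _Grunning to _Gwaiting, detaches it from the M,
// and then runs the waitunlockf callback (typically unlocking the lock the
// caller was holding). If the callback returns false the park is cancelled
// and gp is made runnable and resumed immediately; otherwise the M goes
// back into schedule.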
4111 func park_m(gp *g) {
4112 mp := getg().m
4113
4114 trace := traceAcquire()
4115
4116
4117
4118
4119
4120 sg := gp.syncGroup
4121 if sg != nil {
4122 sg.incActive()
4123 }
4124
4125 if trace.ok() {
4126
4127
4128
4129 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4130 }
4131
4132
4133 casgstatus(gp, _Grunning, _Gwaiting)
4134 if trace.ok() {
4135 traceRelease(trace)
4136 }
4137
4138 dropg()
4139
4140 if fn := mp.waitunlockf; fn != nil {
4141 ok := fn(gp, mp.waitlock)
4142 mp.waitunlockf = nil
4143 mp.waitlock = nil
4144 if !ok {
4145 trace := traceAcquire()
4146 casgstatus(gp, _Gwaiting, _Grunnable)
4147 if sg != nil {
4148 sg.decActive()
4149 }
4150 if trace.ok() {
4151 trace.GoUnpark(gp, 2)
4152 traceRelease(trace)
4153 }
4154 execute(gp, true)
4155 }
4156 }
4157
4158 if sg != nil {
4159 sg.decActive()
4160 }
4161
4162 schedule()
4163 }
4164
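// goschedImpl is the common path for Gosched and cooperative preemption:
// gp moves from _Grunning to _Grunnable, is detached from the M, and is
// placed on the global run queue before the M re-enters the scheduler.
// preempted only selects which trace event (GoPreempt vs GoSched) is
// emitted.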
4165 func goschedImpl(gp *g, preempted bool) {
4166 trace := traceAcquire()
4167 status := readgstatus(gp)
4168 if status&^_Gscan != _Grunning {
4169 dumpgstatus(gp)
4170 throw("bad g status")
4171 }
4172 if trace.ok() {
4173
4174
4175
4176 if preempted {
4177 trace.GoPreempt()
4178 } else {
4179 trace.GoSched()
4180 }
4181 }
4182 casgstatus(gp, _Grunning, _Grunnable)
4183 if trace.ok() {
4184 traceRelease(trace)
4185 }
4186
4187 dropg()
4188 lock(&sched.lock)
4189 globrunqput(gp)
4190 unlock(&sched.lock)
4191
4192 if mainStarted {
4193 wakep()
4194 }
4195
4196 schedule()
4197 }
4198
4199
4200 func gosched_m(gp *g) {
4201 goschedImpl(gp, false)
4202 }
4203
4204
4205 func goschedguarded_m(gp *g) {
4206 if !canPreemptM(gp.m) {
4207 gogo(&gp.sched)
4208 }
4209 goschedImpl(gp, false)
4210 }
4211
4212 func gopreempt_m(gp *g) {
4213 goschedImpl(gp, true)
4214 }
4215
4216
4217
4218
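// preemptPark parks gp in _Gpreempted at an asynchronous preemption point
// and calls schedule to run something else. The status change passes
// through _Gscan|_Gpreempted so that, until the trace event is emitted and
// the scan bit dropped, no other part of the runtime can observe or take
// ownership of the preempted goroutine.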
4219 func preemptPark(gp *g) {
4220 status := readgstatus(gp)
4221 if status&^_Gscan != _Grunning {
4222 dumpgstatus(gp)
4223 throw("bad g status")
4224 }
4225
4226 if gp.asyncSafePoint {
4227
4228
4229
4230 f := findfunc(gp.sched.pc)
4231 if !f.valid() {
4232 throw("preempt at unknown pc")
4233 }
4234 if f.flag&abi.FuncFlagSPWrite != 0 {
4235 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4236 throw("preempt SPWRITE")
4237 }
4238 }
4239
4240
4241
4242
4243
4244
4245
4246 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4247 dropg()
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264 trace := traceAcquire()
4265 if trace.ok() {
4266 trace.GoPark(traceBlockPreempted, 0)
4267 }
4268 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4269 if trace.ok() {
4270 traceRelease(trace)
4271 }
4272 schedule()
4273 }
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289 func goyield() {
4290 checkTimeouts()
4291 mcall(goyield_m)
4292 }
4293
4294 func goyield_m(gp *g) {
4295 trace := traceAcquire()
4296 pp := gp.m.p.ptr()
4297 if trace.ok() {
4298
4299
4300
4301 trace.GoPreempt()
4302 }
4303 casgstatus(gp, _Grunning, _Grunnable)
4304 if trace.ok() {
4305 traceRelease(trace)
4306 }
4307 dropg()
4308 runqput(pp, gp, false)
4309 schedule()
4310 }
4311
4312
4313 func goexit1() {
4314 if raceenabled {
4315 if gp := getg(); gp.syncGroup != nil {
4316 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4317 }
4318 racegoend()
4319 }
4320 trace := traceAcquire()
4321 if trace.ok() {
4322 trace.GoEnd()
4323 traceRelease(trace)
4324 }
4325 mcall(goexit0)
4326 }
4327
4328
4329 func goexit0(gp *g) {
4330 gdestroy(gp)
4331 schedule()
4332 }
4333
4334 func gdestroy(gp *g) {
4335 mp := getg().m
4336 pp := mp.p.ptr()
4337
4338 casgstatus(gp, _Grunning, _Gdead)
4339 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4340 if isSystemGoroutine(gp, false) {
4341 sched.ngsys.Add(-1)
4342 }
4343 gp.m = nil
4344 locked := gp.lockedm != 0
4345 gp.lockedm = 0
4346 mp.lockedg = 0
4347 gp.preemptStop = false
4348 gp.paniconfault = false
4349 gp._defer = nil
4350 gp._panic = nil
4351 gp.writebuf = nil
4352 gp.waitreason = waitReasonZero
4353 gp.param = nil
4354 gp.labels = nil
4355 gp.timer = nil
4356 gp.syncGroup = nil
4357
4358 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4359
4360
4361
4362 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4363 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4364 gcController.bgScanCredit.Add(scanCredit)
4365 gp.gcAssistBytes = 0
4366 }
4367
4368 dropg()
4369
4370 if GOARCH == "wasm" {
4371 gfput(pp, gp)
4372 return
4373 }
4374
4375 if locked && mp.lockedInt != 0 {
4376 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4377 if mp.isextra {
4378 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4379 }
4380 throw("exited a goroutine internally locked to the OS thread")
4381 }
4382 gfput(pp, gp)
4383 if locked {
4384
4385
4386
4387
4388
4389
4390 if GOOS != "plan9" {
4391 gogo(&mp.g0.sched)
4392 } else {
4393
4394
4395 mp.lockedExt = 0
4396 }
4397 }
4398 }
4399
4400
4401
4402
4403
4404
4405
4406
4407
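// save records pc, sp, and bp in gp.sched so a later gogo can resume the
// goroutine there. It is only valid on user goroutines (never g0 or
// gsignal, whose sched slots the runtime uses for other purposes) and it
// must not write gp.sched.ctxt: doing so would need a write barrier, so it
// instead asserts that ctxt is already nil.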
4408 func save(pc, sp, bp uintptr) {
4409 gp := getg()
4410
4411 if gp == gp.m.g0 || gp == gp.m.gsignal {
4412
4413
4414
4415
4416
4417 throw("save on system g not allowed")
4418 }
4419
4420 gp.sched.pc = pc
4421 gp.sched.sp = sp
4422 gp.sched.lr = 0
4423 gp.sched.ret = 0
4424 gp.sched.bp = bp
4425
4426
4427
4428 if gp.sched.ctxt != nil {
4429 badctxt()
4430 }
4431 }
4432
// The goroutine is about to enter a system call. Record in gp.sched and
// gp.syscall* where it is stopping, so that the garbage collector and
// traceback see a consistent stack during the call and so that exitsyscall
// can find its way back. The P is moved to _Psyscall, which lets sysmon
// retake it if the call blocks, and stack growth is forbidden for the
// duration (throwsplit) because the saved SP would become stale.
// reentersyscall is the shared implementation behind entersyscall; the pc,
// sp, and bp arguments are the caller's registers as captured by its
// wrapper. The repeated save calls keep gp.sched consistent after each
// excursion onto the system stack.
4457 func reentersyscall(pc, sp, bp uintptr) {
4458 trace := traceAcquire()
4459 gp := getg()
4460
4461
4462
4463 gp.m.locks++
4464
4465
4466
4467
4468
4469 gp.stackguard0 = stackPreempt
4470 gp.throwsplit = true
4471
4472
4473 save(pc, sp, bp)
4474 gp.syscallsp = sp
4475 gp.syscallpc = pc
4476 gp.syscallbp = bp
4477 casgstatus(gp, _Grunning, _Gsyscall)
4478 if staticLockRanking {
4479
4480
4481 save(pc, sp, bp)
4482 }
4483 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4484 systemstack(func() {
4485 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4486 throw("entersyscall")
4487 })
4488 }
4489 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4490 systemstack(func() {
4491 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4492 throw("entersyscall")
4493 })
4494 }
4495
4496 if trace.ok() {
4497 systemstack(func() {
4498 trace.GoSysCall()
4499 traceRelease(trace)
4500 })
4501
4502
4503
4504 save(pc, sp, bp)
4505 }
4506
4507 if sched.sysmonwait.Load() {
4508 systemstack(entersyscall_sysmon)
4509 save(pc, sp, bp)
4510 }
4511
4512 if gp.m.p.ptr().runSafePointFn != 0 {
4513
4514 systemstack(runSafePointFn)
4515 save(pc, sp, bp)
4516 }
4517
4518 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4519 pp := gp.m.p.ptr()
4520 pp.m = 0
4521 gp.m.oldp.set(pp)
4522 gp.m.p = 0
4523 atomic.Store(&pp.status, _Psyscall)
4524 if sched.gcwaiting.Load() {
4525 systemstack(entersyscall_gcwait)
4526 save(pc, sp, bp)
4527 }
4528
4529 gp.m.locks--
4530 }
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
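// entersyscall is the entry point used by the syscall package and cgo call
// stubs when a goroutine is about to enter a system call. It captures the
// caller's PC, SP, and frame pointer before anything can adjust the stack
// and hands them to reentersyscall, which does the real bookkeeping.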
4546 func entersyscall() {
4547
4548
4549
4550
4551 fp := getcallerfp()
4552 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4553 }
4554
4555 func entersyscall_sysmon() {
4556 lock(&sched.lock)
4557 if sched.sysmonwait.Load() {
4558 sched.sysmonwait.Store(false)
4559 notewakeup(&sched.sysmonnote)
4560 }
4561 unlock(&sched.lock)
4562 }
4563
4564 func entersyscall_gcwait() {
4565 gp := getg()
4566 pp := gp.m.oldp.ptr()
4567
4568 lock(&sched.lock)
4569 trace := traceAcquire()
4570 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4571 if trace.ok() {
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581 trace.ProcSteal(pp, true)
4582 traceRelease(trace)
4583 }
4584 pp.gcStopTime = nanotime()
4585 pp.syscalltick++
4586 if sched.stopwait--; sched.stopwait == 0 {
4587 notewakeup(&sched.stopnote)
4588 }
4589 } else if trace.ok() {
4590 traceRelease(trace)
4591 }
4592 unlock(&sched.lock)
4593 }
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607 func entersyscallblock() {
4608 gp := getg()
4609
4610 gp.m.locks++
4611 gp.throwsplit = true
4612 gp.stackguard0 = stackPreempt
4613 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4614 gp.m.p.ptr().syscalltick++
4615
4616
4617 pc := sys.GetCallerPC()
4618 sp := sys.GetCallerSP()
4619 bp := getcallerfp()
4620 save(pc, sp, bp)
4621 gp.syscallsp = gp.sched.sp
4622 gp.syscallpc = gp.sched.pc
4623 gp.syscallbp = gp.sched.bp
4624 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4625 sp1 := sp
4626 sp2 := gp.sched.sp
4627 sp3 := gp.syscallsp
4628 systemstack(func() {
4629 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4630 throw("entersyscallblock")
4631 })
4632 }
4633 casgstatus(gp, _Grunning, _Gsyscall)
4634 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4635 systemstack(func() {
4636 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4637 throw("entersyscallblock")
4638 })
4639 }
4640 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4641 systemstack(func() {
4642 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4643 throw("entersyscallblock")
4644 })
4645 }
4646
4647 systemstack(entersyscallblock_handoff)
4648
4649
4650 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4651
4652 gp.m.locks--
4653 }
4654
4655 func entersyscallblock_handoff() {
4656 trace := traceAcquire()
4657 if trace.ok() {
4658 trace.GoSysCall()
4659 traceRelease(trace)
4660 }
4661 handoffp(releasep())
4662 }
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
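// exitsyscall is called when a goroutine returns from its system call. The
// fast path (exitsyscallfast) tries to re-wire the P it left behind in
// _Psyscall, or to grab another idle P, so execution continues on this M
// without a full scheduler pass; if that fails, exitsyscall0 runs on g0,
// queues the goroutine, and blocks the M.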
4684 func exitsyscall() {
4685 gp := getg()
4686
4687 gp.m.locks++
4688 if sys.GetCallerSP() > gp.syscallsp {
4689 throw("exitsyscall: syscall frame is no longer valid")
4690 }
4691
4692 gp.waitsince = 0
4693 oldp := gp.m.oldp.ptr()
4694 gp.m.oldp = 0
4695 if exitsyscallfast(oldp) {
4696
4697
4698 if goroutineProfile.active {
4699
4700
4701
4702 systemstack(func() {
4703 tryRecordGoroutineProfileWB(gp)
4704 })
4705 }
4706 trace := traceAcquire()
4707 if trace.ok() {
4708 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4709 systemstack(func() {
4710
4711
4712
4713
4714 trace.GoSysExit(lostP)
4715 if lostP {
4716
4717
4718
4719
4720 trace.GoStart()
4721 }
4722 })
4723 }
4724
4725 gp.m.p.ptr().syscalltick++
4726
4727 casgstatus(gp, _Gsyscall, _Grunning)
4728 if trace.ok() {
4729 traceRelease(trace)
4730 }
4731
4732
4733
4734 gp.syscallsp = 0
4735 gp.m.locks--
4736 if gp.preempt {
4737
4738 gp.stackguard0 = stackPreempt
4739 } else {
4740
4741 gp.stackguard0 = gp.stack.lo + stackGuard
4742 }
4743 gp.throwsplit = false
4744
4745 if sched.disable.user && !schedEnabled(gp) {
4746
4747 Gosched()
4748 }
4749
4750 return
4751 }
4752
4753 gp.m.locks--
4754
4755
4756 mcall(exitsyscall0)
4757
4758
4759
4760
4761
4762
4763
4764 gp.syscallsp = 0
4765 gp.m.p.ptr().syscalltick++
4766 gp.throwsplit = false
4767 }
4768
4769
4770 func exitsyscallfast(oldp *p) bool {
4771
4772 if sched.stopwait == freezeStopWait {
4773 return false
4774 }
4775
4776
4777 trace := traceAcquire()
4778 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4779
4780 wirep(oldp)
4781 exitsyscallfast_reacquired(trace)
4782 if trace.ok() {
4783 traceRelease(trace)
4784 }
4785 return true
4786 }
4787 if trace.ok() {
4788 traceRelease(trace)
4789 }
4790
4791
4792 if sched.pidle != 0 {
4793 var ok bool
4794 systemstack(func() {
4795 ok = exitsyscallfast_pidle()
4796 })
4797 if ok {
4798 return true
4799 }
4800 }
4801 return false
4802 }
4803
4804
4805
4806
4807
4808
4809 func exitsyscallfast_reacquired(trace traceLocker) {
4810 gp := getg()
4811 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4812 if trace.ok() {
4813
4814
4815
4816 systemstack(func() {
4817
4818
4819 trace.ProcSteal(gp.m.p.ptr(), true)
4820 trace.ProcStart()
4821 })
4822 }
4823 gp.m.p.ptr().syscalltick++
4824 }
4825 }
4826
4827 func exitsyscallfast_pidle() bool {
4828 lock(&sched.lock)
4829 pp, _ := pidleget(0)
4830 if pp != nil && sched.sysmonwait.Load() {
4831 sched.sysmonwait.Store(false)
4832 notewakeup(&sched.sysmonnote)
4833 }
4834 unlock(&sched.lock)
4835 if pp != nil {
4836 acquirep(pp)
4837 return true
4838 }
4839 return false
4840 }
4841
4842
4843
4844
4845
4846
4847
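// exitsyscall0 is the slow path of exitsyscall, run via mcall on the g0
// stack. It makes gp runnable and then either runs it immediately on an
// idle P, puts it on the global run queue, or, if gp is locked to this
// thread, waits for the thread to get a P back before running it; with
// nothing left to run, the M stops and re-enters the scheduler.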
4848 func exitsyscall0(gp *g) {
4849 var trace traceLocker
4850 traceExitingSyscall()
4851 trace = traceAcquire()
4852 casgstatus(gp, _Gsyscall, _Grunnable)
4853 traceExitedSyscall()
4854 if trace.ok() {
4855
4856
4857
4858
4859 trace.GoSysExit(true)
4860 traceRelease(trace)
4861 }
4862 dropg()
4863 lock(&sched.lock)
4864 var pp *p
4865 if schedEnabled(gp) {
4866 pp, _ = pidleget(0)
4867 }
4868 var locked bool
4869 if pp == nil {
4870 globrunqput(gp)
4871
4872
4873
4874
4875
4876
4877 locked = gp.lockedm != 0
4878 } else if sched.sysmonwait.Load() {
4879 sched.sysmonwait.Store(false)
4880 notewakeup(&sched.sysmonnote)
4881 }
4882 unlock(&sched.lock)
4883 if pp != nil {
4884 acquirep(pp)
4885 execute(gp, false)
4886 }
4887 if locked {
4888
4889
4890
4891
4892 stoplockedm()
4893 execute(gp, false)
4894 }
4895 stopm()
4896 schedule()
4897 }
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
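// syscall_runtime_BeforeFork is linknamed from the syscall package and
// runs just before fork. It blocks signals so the child does not start
// life inside a Go signal handler, and it poisons stackguard0 with
// stackFork so that any attempt to grow the stack between fork and exec is
// caught immediately (the child shares this stack image and must not
// allocate).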
4911 func syscall_runtime_BeforeFork() {
4912 gp := getg().m.curg
4913
4914
4915
4916
4917 gp.m.locks++
4918 sigsave(&gp.m.sigmask)
4919 sigblock(false)
4920
4921
4922
4923
4924
4925 gp.stackguard0 = stackFork
4926 }
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940 func syscall_runtime_AfterFork() {
4941 gp := getg().m.curg
4942
4943
4944 gp.stackguard0 = gp.stack.lo + stackGuard
4945
4946 msigrestore(gp.m.sigmask)
4947
4948 gp.m.locks--
4949 }
4950
4951
4952
4953 var inForkedChild bool
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974 func syscall_runtime_AfterForkInChild() {
4975
4976
4977
4978
4979 inForkedChild = true
4980
4981 clearSignalHandlers()
4982
4983
4984
4985 msigrestore(getg().m.sigmask)
4986
4987 inForkedChild = false
4988 }
4989
4990
4991
4992
4993 var pendingPreemptSignals atomic.Int32
4994
4995
4996
4997
4998 func syscall_runtime_BeforeExec() {
4999
5000 execLock.lock()
5001
5002
5003
5004 if GOOS == "darwin" || GOOS == "ios" {
5005 for pendingPreemptSignals.Load() > 0 {
5006 osyield()
5007 }
5008 }
5009 }
5010
5011
5012
5013
5014 func syscall_runtime_AfterExec() {
5015 execLock.unlock()
5016 }
5017
5018
5019 func malg(stacksize int32) *g {
5020 newg := new(g)
5021 if stacksize >= 0 {
5022 stacksize = round2(stackSystem + stacksize)
5023 systemstack(func() {
5024 newg.stack = stackalloc(uint32(stacksize))
5025 })
5026 newg.stackguard0 = newg.stack.lo + stackGuard
5027 newg.stackguard1 = ^uintptr(0)
5028
5029
5030 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5031 }
5032 return newg
5033 }
5034
5035
5036
5037
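// newproc creates a new goroutine running fn; the compiler lowers every go
// statement to a call of this function. The heavy lifting happens in
// newproc1 on the system stack; the new goroutine is put on the current
// P's run queue (as runnext) and, once the runtime is fully up, wakep may
// start another M/P to run it.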
5038 func newproc(fn *funcval) {
5039 gp := getg()
5040 pc := sys.GetCallerPC()
5041 systemstack(func() {
5042 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5043
5044 pp := getg().m.p.ptr()
5045 runqput(pp, newg, true)
5046
5047 if mainStarted {
5048 wakep()
5049 }
5050 })
5051 }
5052
5053
5054
5055
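// newproc1 produces a ready-to-run g for fn: it reuses a g from the free
// list or allocates one with a fresh stack, builds an initial frame whose
// return address is goexit so the goroutine cleans up when fn returns,
// wires up goid, ancestry, labels, and trace state, and returns it in
// _Grunnable (or _Gwaiting with the given reason if parked is set). The
// caller decides which run queue it goes on.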
5056 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5057 if fn == nil {
5058 fatal("go of nil func value")
5059 }
5060
5061 mp := acquirem()
5062 pp := mp.p.ptr()
5063 newg := gfget(pp)
5064 if newg == nil {
5065 newg = malg(stackMin)
5066 casgstatus(newg, _Gidle, _Gdead)
5067 allgadd(newg)
5068 }
5069 if newg.stack.hi == 0 {
5070 throw("newproc1: newg missing stack")
5071 }
5072
5073 if readgstatus(newg) != _Gdead {
5074 throw("newproc1: new g is not Gdead")
5075 }
5076
5077 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5078 totalSize = alignUp(totalSize, sys.StackAlign)
5079 sp := newg.stack.hi - totalSize
5080 if usesLR {
5081
5082 *(*uintptr)(unsafe.Pointer(sp)) = 0
5083 prepGoExitFrame(sp)
5084 }
5085 if GOARCH == "arm64" {
5086
5087 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5088 }
5089
5090 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5091 newg.sched.sp = sp
5092 newg.stktopsp = sp
5093 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5094 newg.sched.g = guintptr(unsafe.Pointer(newg))
5095 gostartcallfn(&newg.sched, fn)
5096 newg.parentGoid = callergp.goid
5097 newg.gopc = callerpc
5098 newg.ancestors = saveAncestors(callergp)
5099 newg.startpc = fn.fn
5100 if isSystemGoroutine(newg, false) {
5101 sched.ngsys.Add(1)
5102 } else {
5103
5104 newg.syncGroup = callergp.syncGroup
5105 if mp.curg != nil {
5106 newg.labels = mp.curg.labels
5107 }
5108 if goroutineProfile.active {
5109
5110
5111
5112
5113
5114 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5115 }
5116 }
5117
5118 newg.trackingSeq = uint8(cheaprand())
5119 if newg.trackingSeq%gTrackingPeriod == 0 {
5120 newg.tracking = true
5121 }
5122 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5123
5124
5125 trace := traceAcquire()
5126 var status uint32 = _Grunnable
5127 if parked {
5128 status = _Gwaiting
5129 newg.waitreason = waitreason
5130 }
5131 if pp.goidcache == pp.goidcacheend {
5132
5133
5134
5135 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5136 pp.goidcache -= _GoidCacheBatch - 1
5137 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5138 }
5139 newg.goid = pp.goidcache
5140 casgstatus(newg, _Gdead, status)
5141 pp.goidcache++
5142 newg.trace.reset()
5143 if trace.ok() {
5144 trace.GoCreate(newg, newg.startpc, parked)
5145 traceRelease(trace)
5146 }
5147
5148
5149 if raceenabled {
5150 newg.racectx = racegostart(callerpc)
5151 newg.raceignore = 0
5152 if newg.labels != nil {
5153
5154
5155 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5156 }
5157 }
5158 releasem(mp)
5159
5160 return newg
5161 }
5162
5163
5164
5165
5166 func saveAncestors(callergp *g) *[]ancestorInfo {
5167
5168 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5169 return nil
5170 }
5171 var callerAncestors []ancestorInfo
5172 if callergp.ancestors != nil {
5173 callerAncestors = *callergp.ancestors
5174 }
5175 n := int32(len(callerAncestors)) + 1
5176 if n > debug.tracebackancestors {
5177 n = debug.tracebackancestors
5178 }
5179 ancestors := make([]ancestorInfo, n)
5180 copy(ancestors[1:], callerAncestors)
5181
5182 var pcs [tracebackInnerFrames]uintptr
5183 npcs := gcallers(callergp, 0, pcs[:])
5184 ipcs := make([]uintptr, npcs)
5185 copy(ipcs, pcs[:])
5186 ancestors[0] = ancestorInfo{
5187 pcs: ipcs,
5188 goid: callergp.goid,
5189 gopc: callergp.gopc,
5190 }
5191
5192 ancestorsp := new([]ancestorInfo)
5193 *ancestorsp = ancestors
5194 return ancestorsp
5195 }
5196
5197
5198
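// gfput returns a dead g to pp's local free list. Stacks that are not the
// standard starting size are freed so every reuse starts from
// startingStackSize; once the local list exceeds 64 entries, entries are
// moved to the global free lists (under sched.gFree.lock) until fewer than
// 32 remain locally.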
5199 func gfput(pp *p, gp *g) {
5200 if readgstatus(gp) != _Gdead {
5201 throw("gfput: bad status (not Gdead)")
5202 }
5203
5204 stksize := gp.stack.hi - gp.stack.lo
5205
5206 if stksize != uintptr(startingStackSize) {
5207
5208 stackfree(gp.stack)
5209 gp.stack.lo = 0
5210 gp.stack.hi = 0
5211 gp.stackguard0 = 0
5212 }
5213
5214 pp.gFree.push(gp)
5215 pp.gFree.n++
5216 if pp.gFree.n >= 64 {
5217 var (
5218 inc int32
5219 stackQ gQueue
5220 noStackQ gQueue
5221 )
5222 for pp.gFree.n >= 32 {
5223 gp := pp.gFree.pop()
5224 pp.gFree.n--
5225 if gp.stack.lo == 0 {
5226 noStackQ.push(gp)
5227 } else {
5228 stackQ.push(gp)
5229 }
5230 inc++
5231 }
5232 lock(&sched.gFree.lock)
5233 sched.gFree.noStack.pushAll(noStackQ)
5234 sched.gFree.stack.pushAll(stackQ)
5235 sched.gFree.n += inc
5236 unlock(&sched.gFree.lock)
5237 }
5238 }
5239
5240
5241
5242 func gfget(pp *p) *g {
5243 retry:
5244 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5245 lock(&sched.gFree.lock)
5246
5247 for pp.gFree.n < 32 {
5248
5249 gp := sched.gFree.stack.pop()
5250 if gp == nil {
5251 gp = sched.gFree.noStack.pop()
5252 if gp == nil {
5253 break
5254 }
5255 }
5256 sched.gFree.n--
5257 pp.gFree.push(gp)
5258 pp.gFree.n++
5259 }
5260 unlock(&sched.gFree.lock)
5261 goto retry
5262 }
5263 gp := pp.gFree.pop()
5264 if gp == nil {
5265 return nil
5266 }
5267 pp.gFree.n--
5268 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5269
5270
5271
5272 systemstack(func() {
5273 stackfree(gp.stack)
5274 gp.stack.lo = 0
5275 gp.stack.hi = 0
5276 gp.stackguard0 = 0
5277 })
5278 }
5279 if gp.stack.lo == 0 {
5280
5281 systemstack(func() {
5282 gp.stack = stackalloc(startingStackSize)
5283 })
5284 gp.stackguard0 = gp.stack.lo + stackGuard
5285 } else {
5286 if raceenabled {
5287 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5288 }
5289 if msanenabled {
5290 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5291 }
5292 if asanenabled {
5293 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5294 }
5295 }
5296 return gp
5297 }
5298
5299
5300 func gfpurge(pp *p) {
5301 var (
5302 inc int32
5303 stackQ gQueue
5304 noStackQ gQueue
5305 )
5306 for !pp.gFree.empty() {
5307 gp := pp.gFree.pop()
5308 pp.gFree.n--
5309 if gp.stack.lo == 0 {
5310 noStackQ.push(gp)
5311 } else {
5312 stackQ.push(gp)
5313 }
5314 inc++
5315 }
5316 lock(&sched.gFree.lock)
5317 sched.gFree.noStack.pushAll(noStackQ)
5318 sched.gFree.stack.pushAll(stackQ)
5319 sched.gFree.n += inc
5320 unlock(&sched.gFree.lock)
5321 }
5322
5323
5324 func Breakpoint() {
5325 breakpoint()
5326 }
5327
5328
5329
5330
5331
5332
5333 func dolockOSThread() {
5334 if GOARCH == "wasm" {
5335 return
5336 }
5337 gp := getg()
5338 gp.m.lockedg.set(gp)
5339 gp.lockedm.set(gp.m)
5340 }
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
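// LockOSThread wires the calling goroutine to its current operating system
// thread: the goroutine keeps executing on that thread, and no other
// goroutine runs on it, until UnlockOSThread has been called as many times
// as LockOSThread (the count is kept in lockedExt). A goroutine that exits
// while still locked takes its thread down with it. A common pattern,
// shown here only as a sketch, is pinning the thread around a library that
// keeps per-thread state:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... calls into a thread-affine C or OS API ...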
5358 func LockOSThread() {
5359 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5360
5361
5362
5363 startTemplateThread()
5364 }
5365 gp := getg()
5366 gp.m.lockedExt++
5367 if gp.m.lockedExt == 0 {
5368 gp.m.lockedExt--
5369 panic("LockOSThread nesting overflow")
5370 }
5371 dolockOSThread()
5372 }
5373
5374
5375 func lockOSThread() {
5376 getg().m.lockedInt++
5377 dolockOSThread()
5378 }
5379
5380
5381
5382
5383
5384
5385 func dounlockOSThread() {
5386 if GOARCH == "wasm" {
5387 return
5388 }
5389 gp := getg()
5390 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5391 return
5392 }
5393 gp.m.lockedg = 0
5394 gp.lockedm = 0
5395 }
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411 func UnlockOSThread() {
5412 gp := getg()
5413 if gp.m.lockedExt == 0 {
5414 return
5415 }
5416 gp.m.lockedExt--
5417 dounlockOSThread()
5418 }
5419
5420
5421 func unlockOSThread() {
5422 gp := getg()
5423 if gp.m.lockedInt == 0 {
5424 systemstack(badunlockosthread)
5425 }
5426 gp.m.lockedInt--
5427 dounlockOSThread()
5428 }
5429
5430 func badunlockosthread() {
5431 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5432 }
5433
5434 func gcount() int32 {
5435 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5436 for _, pp := range allp {
5437 n -= pp.gFree.n
5438 }
5439
5440
5441
5442 if n < 1 {
5443 n = 1
5444 }
5445 return n
5446 }
5447
5448 func mcount() int32 {
5449 return int32(sched.mnext - sched.nmfreed)
5450 }
5451
5452 var prof struct {
5453 signalLock atomic.Uint32
5454
5455
5456
5457 hz atomic.Int32
5458 }
5459
5460 func _System() { _System() }
5461 func _ExternalCode() { _ExternalCode() }
5462 func _LostExternalCode() { _LostExternalCode() }
5463 func _GC() { _GC() }
5464 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5465 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5466 func _VDSO() { _VDSO() }
5467
5468
5469
5470
5471
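// sigprof records a CPU profile sample for the SIGPROF that interrupted
// execution at pc/sp/lr on goroutine gp and thread mp. It runs in signal
// context, so it must not allocate (mallocing++ makes any accidental
// allocation fail loudly); it unwinds the most useful stack it can,
// handling cgo callers, libcall and VDSO frames specially, substitutes
// synthetic frames such as _System, _GC, or _ExternalCode when it cannot
// unwind, and hands the stack to the CPU profiler and, when active, the
// execution tracer.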
5472 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5473 if prof.hz.Load() == 0 {
5474 return
5475 }
5476
5477
5478
5479
5480 if mp != nil && mp.profilehz == 0 {
5481 return
5482 }
5483
5484
5485
5486
5487
5488
5489
5490 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5491 if f := findfunc(pc); f.valid() {
5492 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5493 cpuprof.lostAtomic++
5494 return
5495 }
5496 }
5497 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5498
5499
5500
5501 cpuprof.lostAtomic++
5502 return
5503 }
5504 }
5505
5506
5507
5508
5509
5510
5511
5512 getg().m.mallocing++
5513
5514 var u unwinder
5515 var stk [maxCPUProfStack]uintptr
5516 n := 0
5517 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5518 cgoOff := 0
5519
5520
5521
5522
5523
5524 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5525 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5526 cgoOff++
5527 }
5528 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5529 mp.cgoCallers[0] = 0
5530 }
5531
5532
5533 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5534 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5535
5536
5537 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5538 } else if mp != nil && mp.vdsoSP != 0 {
5539
5540
5541 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5542 } else {
5543 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5544 }
5545 n += tracebackPCs(&u, 0, stk[n:])
5546
5547 if n <= 0 {
5548
5549
5550 n = 2
5551 if inVDSOPage(pc) {
5552 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5553 } else if pc > firstmoduledata.etext {
5554
5555 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5556 }
5557 stk[0] = pc
5558 if mp.preemptoff != "" {
5559 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5560 } else {
5561 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5562 }
5563 }
5564
5565 if prof.hz.Load() != 0 {
5566
5567
5568
5569 var tagPtr *unsafe.Pointer
5570 if gp != nil && gp.m != nil && gp.m.curg != nil {
5571 tagPtr = &gp.m.curg.labels
5572 }
5573 cpuprof.add(tagPtr, stk[:n])
5574
5575 gprof := gp
5576 var mp *m
5577 var pp *p
5578 if gp != nil && gp.m != nil {
5579 if gp.m.curg != nil {
5580 gprof = gp.m.curg
5581 }
5582 mp = gp.m
5583 pp = gp.m.p.ptr()
5584 }
5585 traceCPUSample(gprof, mp, pp, stk[:n])
5586 }
5587 getg().m.mallocing--
5588 }
5589
5590
5591
5592 func setcpuprofilerate(hz int32) {
5593
5594 if hz < 0 {
5595 hz = 0
5596 }
5597
5598
5599
5600 gp := getg()
5601 gp.m.locks++
5602
5603
5604
5605
5606 setThreadCPUProfiler(0)
5607
5608 for !prof.signalLock.CompareAndSwap(0, 1) {
5609 osyield()
5610 }
5611 if prof.hz.Load() != hz {
5612 setProcessCPUProfiler(hz)
5613 prof.hz.Store(hz)
5614 }
5615 prof.signalLock.Store(0)
5616
5617 lock(&sched.lock)
5618 sched.profilehz = hz
5619 unlock(&sched.lock)
5620
5621 if hz != 0 {
5622 setThreadCPUProfiler(hz)
5623 }
5624
5625 gp.m.locks--
5626 }
5627
5628
5629
5630 func (pp *p) init(id int32) {
5631 pp.id = id
5632 pp.status = _Pgcstop
5633 pp.sudogcache = pp.sudogbuf[:0]
5634 pp.deferpool = pp.deferpoolbuf[:0]
5635 pp.wbBuf.reset()
5636 if pp.mcache == nil {
5637 if id == 0 {
5638 if mcache0 == nil {
5639 throw("missing mcache?")
5640 }
5641
5642
5643 pp.mcache = mcache0
5644 } else {
5645 pp.mcache = allocmcache()
5646 }
5647 }
5648 if raceenabled && pp.raceprocctx == 0 {
5649 if id == 0 {
5650 pp.raceprocctx = raceprocctx0
5651 raceprocctx0 = 0
5652 } else {
5653 pp.raceprocctx = raceproccreate()
5654 }
5655 }
5656 lockInit(&pp.timers.mu, lockRankTimers)
5657
5658
5659
5660 timerpMask.set(id)
5661
5662
5663 idlepMask.clear(id)
5664 }
5665
5666
5667
5668
5669
5670 func (pp *p) destroy() {
5671 assertLockHeld(&sched.lock)
5672 assertWorldStopped()
5673
5674
5675 for pp.runqhead != pp.runqtail {
5676
5677 pp.runqtail--
5678 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5679
5680 globrunqputhead(gp)
5681 }
5682 if pp.runnext != 0 {
5683 globrunqputhead(pp.runnext.ptr())
5684 pp.runnext = 0
5685 }
5686
5687
5688 getg().m.p.ptr().timers.take(&pp.timers)
5689
5690
5691 if gcphase != _GCoff {
5692 wbBufFlush1(pp)
5693 pp.gcw.dispose()
5694 }
5695 for i := range pp.sudogbuf {
5696 pp.sudogbuf[i] = nil
5697 }
5698 pp.sudogcache = pp.sudogbuf[:0]
5699 pp.pinnerCache = nil
5700 for j := range pp.deferpoolbuf {
5701 pp.deferpoolbuf[j] = nil
5702 }
5703 pp.deferpool = pp.deferpoolbuf[:0]
5704 systemstack(func() {
5705 for i := 0; i < pp.mspancache.len; i++ {
5706
5707 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5708 }
5709 pp.mspancache.len = 0
5710 lock(&mheap_.lock)
5711 pp.pcache.flush(&mheap_.pages)
5712 unlock(&mheap_.lock)
5713 })
5714 freemcache(pp.mcache)
5715 pp.mcache = nil
5716 gfpurge(pp)
5717 if raceenabled {
5718 if pp.timers.raceCtx != 0 {
5719
5720
5721
5722
5723
5724 mp := getg().m
5725 phold := mp.p.ptr()
5726 mp.p.set(pp)
5727
5728 racectxend(pp.timers.raceCtx)
5729 pp.timers.raceCtx = 0
5730
5731 mp.p.set(phold)
5732 }
5733 raceprocdestroy(pp.raceprocctx)
5734 pp.raceprocctx = 0
5735 }
5736 pp.gcAssistTime = 0
5737 pp.status = _Pdead
5738 }
5739
5740
5741
5742
5743
5744
5745
5746
5747
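// procresize changes the number of Ps to nprocs (the world must be stopped
// and sched.lock held). It grows allp and the idle/timer masks as needed,
// initializes any new Ps, destroys Ps beyond the new count, makes sure the
// current M ends up attached to a valid P, and returns the chain (linked
// via p.link) of Ps that still hold local work and therefore need Ms
// started for them.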
5748 func procresize(nprocs int32) *p {
5749 assertLockHeld(&sched.lock)
5750 assertWorldStopped()
5751
5752 old := gomaxprocs
5753 if old < 0 || nprocs <= 0 {
5754 throw("procresize: invalid arg")
5755 }
5756 trace := traceAcquire()
5757 if trace.ok() {
5758 trace.Gomaxprocs(nprocs)
5759 traceRelease(trace)
5760 }
5761
5762
5763 now := nanotime()
5764 if sched.procresizetime != 0 {
5765 sched.totaltime += int64(old) * (now - sched.procresizetime)
5766 }
5767 sched.procresizetime = now
5768
5769 maskWords := (nprocs + 31) / 32
5770
5771
5772 if nprocs > int32(len(allp)) {
5773
5774
5775 lock(&allpLock)
5776 if nprocs <= int32(cap(allp)) {
5777 allp = allp[:nprocs]
5778 } else {
5779 nallp := make([]*p, nprocs)
5780
5781
5782 copy(nallp, allp[:cap(allp)])
5783 allp = nallp
5784 }
5785
5786 if maskWords <= int32(cap(idlepMask)) {
5787 idlepMask = idlepMask[:maskWords]
5788 timerpMask = timerpMask[:maskWords]
5789 } else {
5790 nidlepMask := make([]uint32, maskWords)
5791
5792 copy(nidlepMask, idlepMask)
5793 idlepMask = nidlepMask
5794
5795 ntimerpMask := make([]uint32, maskWords)
5796 copy(ntimerpMask, timerpMask)
5797 timerpMask = ntimerpMask
5798 }
5799 unlock(&allpLock)
5800 }
5801
5802
5803 for i := old; i < nprocs; i++ {
5804 pp := allp[i]
5805 if pp == nil {
5806 pp = new(p)
5807 }
5808 pp.init(i)
5809 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5810 }
5811
5812 gp := getg()
5813 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5814
5815 gp.m.p.ptr().status = _Prunning
5816 gp.m.p.ptr().mcache.prepareForSweep()
5817 } else {
5818
5819
5820
5821
5822
5823 if gp.m.p != 0 {
5824 trace := traceAcquire()
5825 if trace.ok() {
5826
5827
5828
5829 trace.GoSched()
5830 trace.ProcStop(gp.m.p.ptr())
5831 traceRelease(trace)
5832 }
5833 gp.m.p.ptr().m = 0
5834 }
5835 gp.m.p = 0
5836 pp := allp[0]
5837 pp.m = 0
5838 pp.status = _Pidle
5839 acquirep(pp)
5840 trace := traceAcquire()
5841 if trace.ok() {
5842 trace.GoStart()
5843 traceRelease(trace)
5844 }
5845 }
5846
5847
5848 mcache0 = nil
5849
5850
5851 for i := nprocs; i < old; i++ {
5852 pp := allp[i]
5853 pp.destroy()
5854
5855 }
5856
5857
5858 if int32(len(allp)) != nprocs {
5859 lock(&allpLock)
5860 allp = allp[:nprocs]
5861 idlepMask = idlepMask[:maskWords]
5862 timerpMask = timerpMask[:maskWords]
5863 unlock(&allpLock)
5864 }
5865
5866 var runnablePs *p
5867 for i := nprocs - 1; i >= 0; i-- {
5868 pp := allp[i]
5869 if gp.m.p.ptr() == pp {
5870 continue
5871 }
5872 pp.status = _Pidle
5873 if runqempty(pp) {
5874 pidleput(pp, now)
5875 } else {
5876 pp.m.set(mget())
5877 pp.link.set(runnablePs)
5878 runnablePs = pp
5879 }
5880 }
5881 stealOrder.reset(uint32(nprocs))
5882 var int32p *int32 = &gomaxprocs
5883 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5884 if old != nprocs {
5885
5886 gcCPULimiter.resetCapacity(now, nprocs)
5887 }
5888 return runnablePs
5889 }
5890
5891
5892
5893
5894
5895
5896
5897 func acquirep(pp *p) {
5898
5899 wirep(pp)
5900
5901
5902
5903
5904
5905 pp.mcache.prepareForSweep()
5906
5907 trace := traceAcquire()
5908 if trace.ok() {
5909 trace.ProcStart()
5910 traceRelease(trace)
5911 }
5912 }
5913
5914
5915
5916
5917
5918
5919
5920 func wirep(pp *p) {
5921 gp := getg()
5922
5923 if gp.m.p != 0 {
5924
5925
5926 systemstack(func() {
5927 throw("wirep: already in go")
5928 })
5929 }
5930 if pp.m != 0 || pp.status != _Pidle {
5931
5932
5933 systemstack(func() {
5934 id := int64(0)
5935 if pp.m != 0 {
5936 id = pp.m.ptr().id
5937 }
5938 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5939 throw("wirep: invalid p state")
5940 })
5941 }
5942 gp.m.p.set(pp)
5943 pp.m.set(gp.m)
5944 pp.status = _Prunning
5945 }
5946
5947
5948 func releasep() *p {
5949 trace := traceAcquire()
5950 if trace.ok() {
5951 trace.ProcStop(getg().m.p.ptr())
5952 traceRelease(trace)
5953 }
5954 return releasepNoTrace()
5955 }
5956
5957
5958 func releasepNoTrace() *p {
5959 gp := getg()
5960
5961 if gp.m.p == 0 {
5962 throw("releasep: invalid arg")
5963 }
5964 pp := gp.m.p.ptr()
5965 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5966 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5967 throw("releasep: invalid p state")
5968 }
5969 gp.m.p = 0
5970 pp.m = 0
5971 pp.status = _Pidle
5972 return pp
5973 }
5974
5975 func incidlelocked(v int32) {
5976 lock(&sched.lock)
5977 sched.nmidlelocked += v
5978 if v > 0 {
5979 checkdead()
5980 }
5981 unlock(&sched.lock)
5982 }
5983
5984
5985
5986
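// checkdead reports deadlock by throwing if it determines that no
// goroutine can ever run again: every M is idle (after discounting idle,
// locked-and-idle, and runtime-internal Ms) and every remaining goroutine
// is blocked. Under faketime it instead advances the clock to the next
// timer and wakes an M to run it; with real timers pending on some P it
// simply returns. Must be called with sched.lock held.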
5987 func checkdead() {
5988 assertLockHeld(&sched.lock)
5989
5990
5991
5992
5993
5994
5995 if (islibrary || isarchive) && GOARCH != "wasm" {
5996 return
5997 }
5998
5999
6000
6001
6002
6003 if panicking.Load() > 0 {
6004 return
6005 }
6006
6007
6008
6009
6010
6011 var run0 int32
6012 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6013 run0 = 1
6014 }
6015
6016 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6017 if run > run0 {
6018 return
6019 }
6020 if run < 0 {
6021 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6022 unlock(&sched.lock)
6023 throw("checkdead: inconsistent counts")
6024 }
6025
6026 grunning := 0
6027 forEachG(func(gp *g) {
6028 if isSystemGoroutine(gp, false) {
6029 return
6030 }
6031 s := readgstatus(gp)
6032 switch s &^ _Gscan {
6033 case _Gwaiting,
6034 _Gpreempted:
6035 grunning++
6036 case _Grunnable,
6037 _Grunning,
6038 _Gsyscall:
6039 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6040 unlock(&sched.lock)
6041 throw("checkdead: runnable g")
6042 }
6043 })
6044 if grunning == 0 {
6045 unlock(&sched.lock)
6046 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6047 }
6048
6049
6050 if faketime != 0 {
6051 if when := timeSleepUntil(); when < maxWhen {
6052 faketime = when
6053
6054
6055 pp, _ := pidleget(faketime)
6056 if pp == nil {
6057
6058
6059 unlock(&sched.lock)
6060 throw("checkdead: no p for timer")
6061 }
6062 mp := mget()
6063 if mp == nil {
6064
6065
6066 unlock(&sched.lock)
6067 throw("checkdead: no m for timer")
6068 }
6069
6070
6071
6072 sched.nmspinning.Add(1)
6073 mp.spinning = true
6074 mp.nextp.set(pp)
6075 notewakeup(&mp.park)
6076 return
6077 }
6078 }
6079
6080
6081 for _, pp := range allp {
6082 if len(pp.timers.heap) > 0 {
6083 return
6084 }
6085 }
6086
6087 unlock(&sched.lock)
6088 fatal("all goroutines are asleep - deadlock!")
6089 }
6090
6091
6092
6093
6094
6095
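// forcegcperiod is the maximum time between garbage collections: if two
// minutes pass without a GC, sysmon triggers a forced one.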
6096 var forcegcperiod int64 = 2 * 60 * 1e9
6097
6098
6099
6100 var needSysmonWorkaround bool = false
6101
6102
6103
6104
6105 const haveSysmon = GOARCH != "wasm"
6106
6107
6108
6109
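// sysmon runs on a dedicated M without a P. It loops forever, sleeping
// between 20µs and 10ms depending on how much it has recently found to do,
// and handles the scheduler's background duties: polling the network when
// nobody else has for 10ms, retaking Ps stuck in syscalls or running one
// goroutine too long (retake), waking the scavenger, triggering the
// time-based forced GC, and printing schedtrace output.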
6110 func sysmon() {
6111 lock(&sched.lock)
6112 sched.nmsys++
6113 checkdead()
6114 unlock(&sched.lock)
6115
6116 lasttrace := int64(0)
6117 idle := 0
6118 delay := uint32(0)
6119
6120 for {
6121 if idle == 0 {
6122 delay = 20
6123 } else if idle > 50 {
6124 delay *= 2
6125 }
6126 if delay > 10*1000 {
6127 delay = 10 * 1000
6128 }
6129 usleep(delay)
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146 now := nanotime()
6147 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6148 lock(&sched.lock)
6149 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6150 syscallWake := false
6151 next := timeSleepUntil()
6152 if next > now {
6153 sched.sysmonwait.Store(true)
6154 unlock(&sched.lock)
6155
6156
6157 sleep := forcegcperiod / 2
6158 if next-now < sleep {
6159 sleep = next - now
6160 }
6161 shouldRelax := sleep >= osRelaxMinNS
6162 if shouldRelax {
6163 osRelax(true)
6164 }
6165 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6166 if shouldRelax {
6167 osRelax(false)
6168 }
6169 lock(&sched.lock)
6170 sched.sysmonwait.Store(false)
6171 noteclear(&sched.sysmonnote)
6172 }
6173 if syscallWake {
6174 idle = 0
6175 delay = 20
6176 }
6177 }
6178 unlock(&sched.lock)
6179 }
6180
6181 lock(&sched.sysmonlock)
6182
6183
6184 now = nanotime()
6185
6186
6187 if *cgo_yield != nil {
6188 asmcgocall(*cgo_yield, nil)
6189 }
6190
6191 lastpoll := sched.lastpoll.Load()
6192 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6193 sched.lastpoll.CompareAndSwap(lastpoll, now)
6194 list, delta := netpoll(0)
6195 if !list.empty() {
6196
6197
6198
6199
6200
6201
6202
6203 incidlelocked(-1)
6204 injectglist(&list)
6205 incidlelocked(1)
6206 netpollAdjustWaiters(delta)
6207 }
6208 }
6209 if GOOS == "netbsd" && needSysmonWorkaround {
6210
6211
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225 if next := timeSleepUntil(); next < now {
6226 startm(nil, false, false)
6227 }
6228 }
6229 if scavenger.sysmonWake.Load() != 0 {
6230
6231 scavenger.wake()
6232 }
6233
6234
6235 if retake(now) != 0 {
6236 idle = 0
6237 } else {
6238 idle++
6239 }
6240
6241 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6242 lock(&forcegc.lock)
6243 forcegc.idle.Store(false)
6244 var list gList
6245 list.push(forcegc.g)
6246 injectglist(&list)
6247 unlock(&forcegc.lock)
6248 }
6249 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6250 lasttrace = now
6251 schedtrace(debug.scheddetail > 0)
6252 }
6253 unlock(&sched.sysmonlock)
6254 }
6255 }
6256
6257 type sysmontick struct {
6258 schedtick uint32
6259 syscalltick uint32
6260 schedwhen int64
6261 syscallwhen int64
6262 }
6263
6264
6265
6266 const forcePreemptNS = 10 * 1000 * 1000
6267
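// retake is sysmon's preemption pass. For Ps that have run the same
// goroutine for more than forcePreemptNS it requests preemption; for Ps
// sitting in _Psyscall it hands the P off to another M (handoffp) when the
// syscall has lasted a while, there is local work, or there are no spare
// spinning or idle Ps, so the rest of the program keeps running. It
// returns how many Ps were taken back from syscalls.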
6268 func retake(now int64) uint32 {
6269 n := 0
6270
6271
6272 lock(&allpLock)
6273
6274
6275
6276 for i := 0; i < len(allp); i++ {
6277 pp := allp[i]
6278 if pp == nil {
6279
6280
6281 continue
6282 }
6283 pd := &pp.sysmontick
6284 s := pp.status
6285 sysretake := false
6286 if s == _Prunning || s == _Psyscall {
6287
6288
6289
6290
6291 t := int64(pp.schedtick)
6292 if int64(pd.schedtick) != t {
6293 pd.schedtick = uint32(t)
6294 pd.schedwhen = now
6295 } else if pd.schedwhen+forcePreemptNS <= now {
6296 preemptone(pp)
6297
6298
6299 sysretake = true
6300 }
6301 }
6302 if s == _Psyscall {
6303
6304 t := int64(pp.syscalltick)
6305 if !sysretake && int64(pd.syscalltick) != t {
6306 pd.syscalltick = uint32(t)
6307 pd.syscallwhen = now
6308 continue
6309 }
6310
6311
6312
6313 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6314 continue
6315 }
6316
6317 unlock(&allpLock)
6318
6319
6320
6321
6322 incidlelocked(-1)
6323 trace := traceAcquire()
6324 if atomic.Cas(&pp.status, s, _Pidle) {
6325 if trace.ok() {
6326 trace.ProcSteal(pp, false)
6327 traceRelease(trace)
6328 }
6329 n++
6330 pp.syscalltick++
6331 handoffp(pp)
6332 } else if trace.ok() {
6333 traceRelease(trace)
6334 }
6335 incidlelocked(1)
6336 lock(&allpLock)
6337 }
6338 }
6339 unlock(&allpLock)
6340 return uint32(n)
6341 }
6342
6343
6344
6345
6346
6347
6348 func preemptall() bool {
6349 res := false
6350 for _, pp := range allp {
6351 if pp.status != _Prunning {
6352 continue
6353 }
6354 if preemptone(pp) {
6355 res = true
6356 }
6357 }
6358 return res
6359 }
6360
6361
6362
6363
6364
6365
6366
6367
6368
6369
6370
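// preemptone asks the goroutine currently running on pp to yield at its
// next safe point: it sets gp.preempt and poisons stackguard0 so the next
// stack check fires, and, when async preemption is available, signals the
// thread as well. The request is best-effort; by the time the flags are
// observed the M may be running a different goroutine, so a true return
// only means a request was issued.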
6371 func preemptone(pp *p) bool {
6372 mp := pp.m.ptr()
6373 if mp == nil || mp == getg().m {
6374 return false
6375 }
6376 gp := mp.curg
6377 if gp == nil || gp == mp.g0 {
6378 return false
6379 }
6380
6381 gp.preempt = true
6382
6383
6384
6385
6386
6387 gp.stackguard0 = stackPreempt
6388
6389
6390 if preemptMSupported && debug.asyncpreemptoff == 0 {
6391 pp.preempt = true
6392 preemptM(mp)
6393 }
6394
6395 return true
6396 }
6397
6398 var starttime int64
6399
6400 func schedtrace(detailed bool) {
6401 now := nanotime()
6402 if starttime == 0 {
6403 starttime = now
6404 }
6405
6406 lock(&sched.lock)
6407 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6408 if detailed {
6409 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6410 }
6411
6412
6413
6414 for i, pp := range allp {
6415 mp := pp.m.ptr()
6416 h := atomic.Load(&pp.runqhead)
6417 t := atomic.Load(&pp.runqtail)
6418 if detailed {
6419 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6420 if mp != nil {
6421 print(mp.id)
6422 } else {
6423 print("nil")
6424 }
6425 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6426 } else {
6427
6428
6429 print(" ")
6430 if i == 0 {
6431 print("[")
6432 }
6433 print(t - h)
6434 if i == len(allp)-1 {
6435 print("]\n")
6436 }
6437 }
6438 }
6439
6440 if !detailed {
6441 unlock(&sched.lock)
6442 return
6443 }
6444
6445 for mp := allm; mp != nil; mp = mp.alllink {
6446 pp := mp.p.ptr()
6447 print(" M", mp.id, ": p=")
6448 if pp != nil {
6449 print(pp.id)
6450 } else {
6451 print("nil")
6452 }
6453 print(" curg=")
6454 if mp.curg != nil {
6455 print(mp.curg.goid)
6456 } else {
6457 print("nil")
6458 }
6459 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6460 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6461 print(lockedg.goid)
6462 } else {
6463 print("nil")
6464 }
6465 print("\n")
6466 }
6467
6468 forEachG(func(gp *g) {
6469 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6470 if gp.m != nil {
6471 print(gp.m.id)
6472 } else {
6473 print("nil")
6474 }
6475 print(" lockedm=")
6476 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6477 print(lockedm.id)
6478 } else {
6479 print("nil")
6480 }
6481 print("\n")
6482 })
6483 unlock(&sched.lock)
6484 }
6485
6486
6487
6488
6489
6490
6491 func schedEnableUser(enable bool) {
6492 lock(&sched.lock)
6493 if sched.disable.user == !enable {
6494 unlock(&sched.lock)
6495 return
6496 }
6497 sched.disable.user = !enable
6498 if enable {
6499 n := sched.disable.n
6500 sched.disable.n = 0
6501 globrunqputbatch(&sched.disable.runnable, n)
6502 unlock(&sched.lock)
6503 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6504 startm(nil, false, false)
6505 }
6506 } else {
6507 unlock(&sched.lock)
6508 }
6509 }
6510
6511
6512
6513
6514
6515 func schedEnabled(gp *g) bool {
6516 assertLockHeld(&sched.lock)
6517
6518 if sched.disable.user {
6519 return isSystemGoroutine(gp, true)
6520 }
6521 return true
6522 }
6523
6524
6525
6526
6527
6528
6529 func mput(mp *m) {
6530 assertLockHeld(&sched.lock)
6531
6532 mp.schedlink = sched.midle
6533 sched.midle.set(mp)
6534 sched.nmidle++
6535 checkdead()
6536 }
6537
6538
6539
6540
6541
6542
6543 func mget() *m {
6544 assertLockHeld(&sched.lock)
6545
6546 mp := sched.midle.ptr()
6547 if mp != nil {
6548 sched.midle = mp.schedlink
6549 sched.nmidle--
6550 }
6551 return mp
6552 }
6553
6554
6555
6556
6557
6558
6559 func globrunqput(gp *g) {
6560 assertLockHeld(&sched.lock)
6561
6562 sched.runq.pushBack(gp)
6563 sched.runqsize++
6564 }
6565
6566
6567
6568
6569
6570
6571 func globrunqputhead(gp *g) {
6572 assertLockHeld(&sched.lock)
6573
6574 sched.runq.push(gp)
6575 sched.runqsize++
6576 }
6577
6578
6579
6580
6581
6582
6583
6584 func globrunqputbatch(batch *gQueue, n int32) {
6585 assertLockHeld(&sched.lock)
6586
6587 sched.runq.pushBackAll(*batch)
6588 sched.runqsize += n
6589 *batch = gQueue{}
6590 }
6591
6592
6593
6594 func globrunqget(pp *p, max int32) *g {
6595 assertLockHeld(&sched.lock)
6596
6597 if sched.runqsize == 0 {
6598 return nil
6599 }
6600
6601 n := sched.runqsize/gomaxprocs + 1
6602 if n > sched.runqsize {
6603 n = sched.runqsize
6604 }
6605 if max > 0 && n > max {
6606 n = max
6607 }
6608 if n > int32(len(pp.runq))/2 {
6609 n = int32(len(pp.runq)) / 2
6610 }
6611
6612 sched.runqsize -= n
6613
6614 gp := sched.runq.pop()
6615 n--
6616 for ; n > 0; n-- {
6617 gp1 := sched.runq.pop()
6618 runqput(pp, gp1, false)
6619 }
6620 return gp
6621 }
6622
6623
6624 type pMask []uint32
6625
6626
6627 func (p pMask) read(id uint32) bool {
6628 word := id / 32
6629 mask := uint32(1) << (id % 32)
6630 return (atomic.Load(&p[word]) & mask) != 0
6631 }
6632
6633
6634 func (p pMask) set(id int32) {
6635 word := id / 32
6636 mask := uint32(1) << (id % 32)
6637 atomic.Or(&p[word], mask)
6638 }
6639
6640
6641 func (p pMask) clear(id int32) {
6642 word := id / 32
6643 mask := uint32(1) << (id % 32)
6644 atomic.And(&p[word], ^mask)
6645 }
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
6657
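// pidleput puts pp on the idle P list and updates the idle and timer
// masks. pp's run queue must be empty, and the caller (holding sched.lock)
// must not use pp again except through the list. It returns a current time
// that the caller can reuse to avoid another nanotime call.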
6658 func pidleput(pp *p, now int64) int64 {
6659 assertLockHeld(&sched.lock)
6660
6661 if !runqempty(pp) {
6662 throw("pidleput: P has non-empty run queue")
6663 }
6664 if now == 0 {
6665 now = nanotime()
6666 }
6667 if pp.timers.len.Load() == 0 {
6668 timerpMask.clear(pp.id)
6669 }
6670 idlepMask.set(pp.id)
6671 pp.link = sched.pidle
6672 sched.pidle.set(pp)
6673 sched.npidle.Add(1)
6674 if !pp.limiterEvent.start(limiterEventIdle, now) {
6675 throw("must be able to track idle limiter event")
6676 }
6677 return now
6678 }
6679
6680
6681
6682
6683
6684
6685
6686
6687 func pidleget(now int64) (*p, int64) {
6688 assertLockHeld(&sched.lock)
6689
6690 pp := sched.pidle.ptr()
6691 if pp != nil {
6692
6693 if now == 0 {
6694 now = nanotime()
6695 }
6696 timerpMask.set(pp.id)
6697 idlepMask.clear(pp.id)
6698 sched.pidle = pp.link
6699 sched.npidle.Add(-1)
6700 pp.limiterEvent.stop(limiterEventIdle, now)
6701 }
6702 return pp, now
6703 }
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715 func pidlegetSpinning(now int64) (*p, int64) {
6716 assertLockHeld(&sched.lock)
6717
6718 pp, now := pidleget(now)
6719 if pp == nil {
6720
6721
6722
6723 sched.needspinning.Store(1)
6724 return nil, now
6725 }
6726
6727 return pp, now
6728 }
6729
6730
6731
6732 func runqempty(pp *p) bool {
6733
6734
6735
6736
6737 for {
6738 head := atomic.Load(&pp.runqhead)
6739 tail := atomic.Load(&pp.runqtail)
6740 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6741 if tail == atomic.Load(&pp.runqtail) {
6742 return head == tail && runnext == 0
6743 }
6744 }
6745 }
6746
6747
6748
6749
6750
6751
6752
6753
6754
6755
6756 const randomizeScheduler = raceenabled
6757
6758
6759
6760
6761
6762
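// runqput puts gp on pp's local runnable queue. With next set it goes into
// the runnext slot, kicking whatever was there back onto the queue;
// otherwise it is appended at the tail. If the local queue is full,
// runqputslow moves half of it, plus gp, to the global queue. Only the
// owner P may call runqput, and under the race detector the next
// preference is randomly dropped half the time to shake out ordering
// assumptions.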
6763 func runqput(pp *p, gp *g, next bool) {
6764 if !haveSysmon && next {
6765
6766
6767
6768
6769
6770
6771
6772
6773 next = false
6774 }
6775 if randomizeScheduler && next && randn(2) == 0 {
6776 next = false
6777 }
6778
6779 if next {
6780 retryNext:
6781 oldnext := pp.runnext
6782 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6783 goto retryNext
6784 }
6785 if oldnext == 0 {
6786 return
6787 }
6788
6789 gp = oldnext.ptr()
6790 }
6791
6792 retry:
6793 h := atomic.LoadAcq(&pp.runqhead)
6794 t := pp.runqtail
6795 if t-h < uint32(len(pp.runq)) {
6796 pp.runq[t%uint32(len(pp.runq))].set(gp)
6797 atomic.StoreRel(&pp.runqtail, t+1)
6798 return
6799 }
6800 if runqputslow(pp, gp, h, t) {
6801 return
6802 }
6803 // The queue is not full, so the put above must succeed on retry.
6804 goto retry
6805 }
6806
6807 // runqputslow puts gp and a batch of work from the local runnable queue on the global queue.
6808 // Executed only by the owner P.
6809 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6810 var batch [len(pp.runq)/2 + 1]*g
6811
6812 // First, grab a batch from the local queue.
6813 n := t - h
6814 n = n / 2
6815 if n != uint32(len(pp.runq)/2) {
6816 throw("runqputslow: queue is not full")
6817 }
6818 for i := uint32(0); i < n; i++ {
6819 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6820 }
6821 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6822 return false
6823 }
6824 batch[n] = gp
6825
6826 if randomizeScheduler {
6827 for i := uint32(1); i <= n; i++ {
6828 j := cheaprandn(i + 1)
6829 batch[i], batch[j] = batch[j], batch[i]
6830 }
6831 }
6832
6833 // Link the goroutines.
6834 for i := uint32(0); i < n; i++ {
6835 batch[i].schedlink.set(batch[i+1])
6836 }
6837 var q gQueue
6838 q.head.set(batch[0])
6839 q.tail.set(batch[n])
6840
6841 // Now put the batch on the global queue.
6842 lock(&sched.lock)
6843 globrunqputbatch(&q, int32(n+1))
6844 unlock(&sched.lock)
6845 return true
6846 }
6847
6848 // runqputbatch tries to put all the Gs on q onto the local runnable queue.
6849 // If the local queue is full, the remainder is put on the global queue;
6850 // in that case this will temporarily acquire sched.lock.
6851 // Executed only by the owner P.
6852 func runqputbatch(pp *p, q *gQueue, qsize int) {
6853 h := atomic.LoadAcq(&pp.runqhead)
6854 t := pp.runqtail
6855 n := uint32(0)
6856 for !q.empty() && t-h < uint32(len(pp.runq)) {
6857 gp := q.pop()
6858 pp.runq[t%uint32(len(pp.runq))].set(gp)
6859 t++
6860 n++
6861 }
6862 qsize -= int(n)
6863
6864 if randomizeScheduler {
6865 off := func(o uint32) uint32 {
6866 return (pp.runqtail + o) % uint32(len(pp.runq))
6867 }
6868 for i := uint32(1); i < n; i++ {
6869 j := cheaprandn(i + 1)
6870 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6871 }
6872 }
6873
6874 atomic.StoreRel(&pp.runqtail, t)
6875 if !q.empty() {
6876 lock(&sched.lock)
6877 globrunqputbatch(q, int32(qsize))
6878 unlock(&sched.lock)
6879 }
6880 }
6881
6882 // runqget gets a G from the local runnable queue.
6883 // If inheritTime is true, gp should inherit the remaining time in the
6884 // current time slice. Otherwise, it should start a new time slice.
6885 // Executed only by the owner P.
6886 func runqget(pp *p) (gp *g, inheritTime bool) {
6887 // If there's a runnext, it's the next G to run.
6888 next := pp.runnext
6889 // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
6890 // because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
6891 // Hence, there's no need to retry this CAS if it fails.
6892 if next != 0 && pp.runnext.cas(next, 0) {
6893 return next.ptr(), true
6894 }
6895
6896 for {
6897 h := atomic.LoadAcq(&pp.runqhead)
6898 t := pp.runqtail
6899 if t == h {
6900 return nil, false
6901 }
6902 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6903 if atomic.CasRel(&pp.runqhead, h, h+1) {
6904 return gp, false
6905 }
6906 }
6907 }
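
// Illustrative sketch (not part of the runtime): each P's local run queue is
// a fixed-size ring indexed modulo its length, plus the single runnext slot
// that runqget checks first. A standalone model of the ring indexing, using
// a hypothetical element type:
//
//	type ring struct {
//		buf        [256]int
//		head, tail uint32 // only ever increase; index with % len(buf)
//	}
//
//	func (r *ring) put(v int) bool {
//		if r.tail-r.head == uint32(len(r.buf)) {
//			return false // full; runqput would spill half to the global queue
//		}
//		r.buf[r.tail%uint32(len(r.buf))] = v
//		r.tail++
//		return true
//	}
//
//	func (r *ring) get() (int, bool) {
//		if r.tail == r.head {
//			return 0, false
//		}
//		v := r.buf[r.head%uint32(len(r.buf))]
//		r.head++
//		return v, true
//	}
//
// The runtime versions replace the plain increments with acquire/release
// atomics and CAS so that other Ps can steal from the head concurrently.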
6908
6909 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6910 // Executed only by the owner P.
6911 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6912 oldNext := pp.runnext
6913 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6914 drainQ.pushBack(oldNext.ptr())
6915 n++
6916 }
6917
6918 retry:
6919 h := atomic.LoadAcq(&pp.runqhead)
6920 t := pp.runqtail
6921 qn := t - h
6922 if qn == 0 {
6923 return
6924 }
6925 if qn > uint32(len(pp.runq)) {
6926 goto retry
6927 }
6928
6929 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6930 goto retry
6931 }
6932
6933 // The head pointer was advanced by the CAS above before the Gs are copied
6934 // out and linked below. Once that CAS succeeds this P owns the grabbed Gs
6935 // outright: no other P can still see them in the ring, so it is safe to
6936 // rewrite their schedlink fields here. Doing it in the opposite order
6937 // would let a concurrent runqsteal copy a G whose schedlink is being
6938 // modified.
6939
6940 for i := uint32(0); i < qn; i++ {
6941 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6942 drainQ.pushBack(gp)
6943 n++
6944 }
6945 return
6946 }
6947
6948 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
6949 // batch is a ring buffer starting at batchHead.
6950 // Returns the number of grabbed goroutines.
6951 // Can be executed by any P.
6952 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6953 for {
6954 h := atomic.LoadAcq(&pp.runqhead)
6955 t := atomic.LoadAcq(&pp.runqtail)
6956 n := t - h
6957 n = n - n/2
6958 if n == 0 {
6959 if stealRunNextG {
6960 // Try to steal from pp.runnext.
6961 if next := pp.runnext; next != 0 {
6962 if pp.status == _Prunning {
6963 // Sleep to ensure that pp isn't about to run the g
6964 // we are about to steal.
6965 // The important use case here is when the g running
6966 // on pp ready()s another g and then almost
6967 // immediately blocks. Instead of stealing runnext
6968 // in this window, back off to give pp a chance to
6969 // schedule runnext. This will avoid thrashing gs
6970 // between different Ps.
6971 // The brief delay only occurs on this contended
6972 // stealing path, so its cost is negligible.
6973 if !osHasLowResTimer {
6974 usleep(3)
6975 } else {
6976 // On some platforms system timer granularity is
6977 // 1-15ms, which is way too much for this
6978 // optimization. So just yield.
6979 osyield()
6980 }
6981 }
6982 if !pp.runnext.cas(next, 0) {
6983 continue
6984 }
6985 batch[batchHead%uint32(len(batch))] = next
6986 return 1
6987 }
6988 }
6989 return 0
6990 }
6991 if n > uint32(len(pp.runq)/2) {
6992 continue
6993 }
6994 for i := uint32(0); i < n; i++ {
6995 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6996 batch[(batchHead+i)%uint32(len(batch))] = g
6997 }
6998 if atomic.CasRel(&pp.runqhead, h, h+n) {
6999 return n
7000 }
7001 }
7002 }
7003
7004 // runqsteal steals half of the elements from the local runnable queue of p2
7005 // and puts them onto the local runnable queue of pp.
7006 // Returns one of the stolen elements (or nil if it failed).
7007 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7008 t := pp.runqtail
7009 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7010 if n == 0 {
7011 return nil
7012 }
7013 n--
7014 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7015 if n == 0 {
7016 return gp
7017 }
7018 h := atomic.LoadAcq(&pp.runqhead)
7019 if t-h+n >= uint32(len(pp.runq)) {
7020 throw("runqsteal: runq overflow")
7021 }
7022 atomic.StoreRel(&pp.runqtail, t+n)
7023 return gp
7024 }
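
// Illustrative sketch (not part of the runtime): runqgrab takes
// n = n - n/2, i.e. half of the victim's queue rounded up, so a steal always
// makes progress even when only one G is queued:
//
//	queued (t-h):  1  2  3  4  5  6  7  8
//	grabbed (n):   1  1  2  2  3  3  4  4
//
// For example, with 7 Gs queued the thief grabs 4: runqsteal returns one of
// them to run immediately and appends the other 3 to its own ring by
// advancing runqtail.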
7025
7026 // A gQueue is a dequeue of Gs linked through g.schedlink.
7027 // A G can only be on one gQueue or gList at a time.
7028 type gQueue struct {
7029 head guintptr
7030 tail guintptr
7031 }
7032
7033 // empty reports whether q is empty.
7034 func (q *gQueue) empty() bool {
7035 return q.head == 0
7036 }
7037
7038 // push adds gp to the head of q.
7039 func (q *gQueue) push(gp *g) {
7040 gp.schedlink = q.head
7041 q.head.set(gp)
7042 if q.tail == 0 {
7043 q.tail.set(gp)
7044 }
7045 }
7046
7047 // pushBack adds gp to the tail of q.
7048 func (q *gQueue) pushBack(gp *g) {
7049 gp.schedlink = 0
7050 if q.tail != 0 {
7051 q.tail.ptr().schedlink.set(gp)
7052 } else {
7053 q.head.set(gp)
7054 }
7055 q.tail.set(gp)
7056 }
7057
7058 // pushBackAll adds all Gs in q2 to the tail of q.
7059 // After this, q2 must not be used.
7060 func (q *gQueue) pushBackAll(q2 gQueue) {
7061 if q2.tail == 0 {
7062 return
7063 }
7064 q2.tail.ptr().schedlink = 0
7065 if q.tail != 0 {
7066 q.tail.ptr().schedlink = q2.head
7067 } else {
7068 q.head = q2.head
7069 }
7070 q.tail = q2.tail
7071 }
7072
7073 // pop removes and returns the head of queue q.
7074 // It returns nil if q is empty.
7075 func (q *gQueue) pop() *g {
7076 gp := q.head.ptr()
7077 if gp != nil {
7078 q.head = gp.schedlink
7079 if q.head == 0 {
7080 q.tail = 0
7081 }
7082 }
7083 return gp
7084 }
7085
7086 // popList takes all Gs in q and returns them as a gList.
7087 func (q *gQueue) popList() gList {
7088 stack := gList{q.head}
7089 *q = gQueue{}
7090 return stack
7091 }
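
// Illustrative sketch (not part of the runtime): gQueue is an intrusive FIFO,
// so pushBack/pop preserve arrival order while push prepends. A standalone
// model with a hypothetical node type shows the same linking:
//
//	type node struct {
//		next *node
//		v    int
//	}
//
//	type queue struct{ head, tail *node }
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//		}
//		return n
//	}
//
// The runtime version stores the link in g.schedlink (a guintptr rather than
// a Go pointer), so a G needs no extra allocation to sit on a queue and the
// queue can be manipulated in contexts where write barriers are not allowed.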
7092
7093 // A gList is a list of Gs linked through g.schedlink.
7094 // A G can only be on one gQueue or gList at a time.
7095 type gList struct {
7096 head guintptr
7097 }
7098
7099 // empty reports whether l is empty.
7100 func (l *gList) empty() bool {
7101 return l.head == 0
7102 }
7103
7104 // push adds gp to the head of l.
7105 func (l *gList) push(gp *g) {
7106 gp.schedlink = l.head
7107 l.head.set(gp)
7108 }
7109
7110 // pushAll prepends all Gs in q to l.
7111 func (l *gList) pushAll(q gQueue) {
7112 if !q.empty() {
7113 q.tail.ptr().schedlink = l.head
7114 l.head = q.head
7115 }
7116 }
7117
7118 // pop removes and returns the head of l. If l is empty, it returns nil.
7119 func (l *gList) pop() *g {
7120 gp := l.head.ptr()
7121 if gp != nil {
7122 l.head = gp.schedlink
7123 }
7124 return gp
7125 }
7126
7127 // setMaxThreads implements runtime/debug.SetMaxThreads; it returns the previous limit.
7128 func setMaxThreads(in int) (out int) {
7129 lock(&sched.lock)
7130 out = int(sched.maxmcount)
7131 if in > 0x7fffffff {
7132 sched.maxmcount = 0x7fffffff
7133 } else {
7134 sched.maxmcount = int32(in)
7135 }
7136 checkmcount()
7137 unlock(&sched.lock)
7138 return
7139 }
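
// Illustrative sketch (not part of the runtime): setMaxThreads is reached via
// runtime/debug.SetMaxThreads, which returns the previous limit. A minimal
// user-level program exercising it:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		prev := debug.SetMaxThreads(20000) // the default limit is 10000
//		fmt.Println("previous thread limit:", prev)
//	}
//
// If the program later tries to create more OS threads than the limit allows,
// checkmcount throws (a thread-exhaustion crash) instead of letting thread
// creation grow without bound.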
7140
7141
7142
7143
7144
7145
7146 // procPin pins the current goroutine to its P by incrementing m.locks,
7147 // which disables preemption and keeps the P from being handed off to
7148 // another M. It returns the P's id and must be paired with procUnpin.
7149 //
7150 // procPin should be an internal detail, but some widely used packages
7151 // reach it via linkname, so its signature must remain stable.
7152
7153 func procPin() int {
7154 gp := getg()
7155 mp := gp.m
7156
7157 mp.locks++
7158 return int(mp.p.ptr().id)
7159 }
7160
7161
7162
7163
7164
7165
7166
7167 // procUnpin undoes a previous procPin by decrementing m.locks,
7168 // re-enabling preemption for the current goroutine.
7169 //
7170 // Like procPin, it is reached via linkname by external packages,
7171 // so its signature must remain stable.
7172
7173 func procUnpin() {
7174 gp := getg()
7175 gp.m.locks--
7176 }
7177
7178
7179 // sync_runtime_procPin is the entry point used by package sync (via linkname).
7180 func sync_runtime_procPin() int {
7181 return procPin()
7182 }
7183
7184
7185 // sync_runtime_procUnpin is the entry point used by package sync (via linkname).
7186 func sync_runtime_procUnpin() {
7187 procUnpin()
7188 }
7189
7190
7191 // sync_atomic_runtime_procPin is the entry point used by sync/atomic (via linkname).
7192 func sync_atomic_runtime_procPin() int {
7193 return procPin()
7194 }
7195
7196
7197 // sync_atomic_runtime_procUnpin is the entry point used by sync/atomic (via linkname).
7198 func sync_atomic_runtime_procUnpin() {
7199 procUnpin()
7200 }
7201
7202
7203 // Active spinning for sync.Mutex.
7204 //
7205 // internal_sync_runtime_canSpin reports whether spinning is worthwhile at spin iteration i.
7206 func internal_sync_runtime_canSpin(i int) bool {
7207 // sync.Mutex is cooperative, so we are conservative with spinning.
7208 // Spin only a few times and only if we are running on a multicore machine,
7209 // GOMAXPROCS > 1, at least one other P is running, and the local runq is empty.
7210 // As opposed to runtime mutex we don't do passive spinning here,
7211 // because there can be work on the global runq or on other Ps.
7212 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7213 return false
7214 }
7215 if p := getg().m.p.ptr(); !runqempty(p) {
7216 return false
7217 }
7218 return true
7219 }
7220
7221
7222 // internal_sync_runtime_doSpin performs one short burst of active spinning (procyield).
7223 func internal_sync_runtime_doSpin() {
7224 procyield(active_spin_cnt)
7225 }
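
// Illustrative sketch (not part of the runtime): canSpin/doSpin let the mutex
// slow path in internal/sync burn a few iterations before blocking. The toy
// lock below mimics that spin-then-yield shape; it is not the actual
// sync.Mutex implementation, and all names in it are hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync/atomic"
//	)
//
//	const maxSpins = 4 // plays the role of active_spin
//
//	type spinMutex struct{ state int32 }
//
//	func (m *spinMutex) Lock() {
//		spins := 0
//		for !atomic.CompareAndSwapInt32(&m.state, 0, 1) {
//			if spins < maxSpins && runtime.NumCPU() > 1 {
//				spins++ // spin briefly: the holder may be about to release
//				continue
//			}
//			runtime.Gosched() // stop burning CPU; stands in for blocking
//			spins = 0
//		}
//	}
//
//	func (m *spinMutex) Unlock() { atomic.StoreInt32(&m.state, 0) }
//
//	func main() {
//		var mu spinMutex
//		mu.Lock()
//		fmt.Println("locked")
//		mu.Unlock()
//	}
//
// As canSpin encodes above, spinning is only worthwhile when another CPU can
// actually be running the lock holder at the same time.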
7226
7227
7228
7229
7230
7231
7232
7233
7234
7235 // sync_runtime_canSpin is a thin wrapper around internal_sync_runtime_canSpin.
7236 //
7237 // It should be an internal detail, but some widely used packages reach it
7238 // via linkname, so it is kept with a stable signature for compatibility.
7239
7240
7241 func sync_runtime_canSpin(i int) bool {
7242 return internal_sync_runtime_canSpin(i)
7243 }
7244
7245
7246
7247
7248
7249
7250
7251
7252 // sync_runtime_doSpin is a thin wrapper around internal_sync_runtime_doSpin.
7253 //
7254 // It should be an internal detail, but some widely used packages reach it
7255 // via linkname, so it is kept with a stable signature for compatibility.
7256
7257 func sync_runtime_doSpin() {
7258 internal_sync_runtime_doSpin()
7259 }
7260
7261 var stealOrder randomOrder
7262
7263 // randomOrder/randomEnum are helper types for randomized work stealing.
7264 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
7265 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7266 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
7267 type randomOrder struct {
7268 count uint32
7269 coprimes []uint32
7270 }
7271
7272 type randomEnum struct {
7273 i uint32
7274 count uint32
7275 pos uint32
7276 inc uint32
7277 }
7278
7279 func (ord *randomOrder) reset(count uint32) {
7280 ord.count = count
7281 ord.coprimes = ord.coprimes[:0]
7282 for i := uint32(1); i <= count; i++ {
7283 if gcd(i, count) == 1 {
7284 ord.coprimes = append(ord.coprimes, i)
7285 }
7286 }
7287 }
7288
7289 func (ord *randomOrder) start(i uint32) randomEnum {
7290 return randomEnum{
7291 count: ord.count,
7292 pos: i % ord.count,
7293 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7294 }
7295 }
7296
7297 func (enum *randomEnum) done() bool {
7298 return enum.i == enum.count
7299 }
7300
7301 func (enum *randomEnum) next() {
7302 enum.i++
7303 enum.pos = (enum.pos + enum.inc) % enum.count
7304 }
7305
7306 func (enum *randomEnum) position() uint32 {
7307 return enum.pos
7308 }
7309
7310 func gcd(a, b uint32) uint32 {
7311 for b != 0 {
7312 a, b = b, a%b
7313 }
7314 return a
7315 }
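
// Illustrative sketch (not part of the runtime): with count = 6 the coprimes
// are {1, 5}, and stepping by a value coprime to count visits every position
// exactly once before repeating. A standalone check of that property:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const count, inc = 6, 5 // inc must be coprime to count
//		pos := uint32(2)        // arbitrary starting P
//		for i := 0; i < count; i++ {
//			fmt.Print(pos, " ") // prints 2 1 0 5 4 3 : each P exactly once
//			pos = (pos + inc) % count
//		}
//		fmt.Println()
//	}
//
// Different (pos, inc) pairs give different visit orders, which is how the
// scheduler randomizes its steal order over victims without ever repeating
// or skipping a P.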
7316
7317
7318 // An initTask represents the set of initializations that need to be done for a package.
7319 type initTask struct {
7320 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7321 nfns uint32
7322 // followed by nfns pcs, one per init function to run
7323 }
7324
7325
7326 // inittrace stores statistics for init functions; it is updated by malloc and newproc while active is true.
7327 var inittrace tracestat
7328
7329 type tracestat struct {
7330 active bool
7331 id uint64
7332 allocs uint64
7333 bytes uint64
7334 }
7335
7336 func doInit(ts []*initTask) {
7337 for _, t := range ts {
7338 doInit1(t)
7339 }
7340 }
7341
7342 func doInit1(t *initTask) {
7343 switch t.state {
7344 case 2: // fully initialized
7345 return
7346 case 1: // initialization in progress
7347 throw("recursive call during initialization - linker skew")
7348 default: // not initialized yet
7349 t.state = 1 // initialization in progress
7350
7351 var (
7352 start int64
7353 before tracestat
7354 )
7355
7356 if inittrace.active {
7357 start = nanotime()
7358 // Load stats non-atomically, since inittrace is updated only by this init goroutine.
7359 before = inittrace
7360 }
7361
7362 if t.nfns == 0 {
7363 // We should have pruned all of these in the linker.
7364 throw("inittask with no functions")
7365 }
7366
7367 firstFunc := add(unsafe.Pointer(t), 8)
7368 for i := uint32(0); i < t.nfns; i++ {
7369 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7370 f := *(*func())(unsafe.Pointer(&p))
7371 f()
7372 }
7373
7374 if inittrace.active {
7375 end := nanotime()
7376 // Load stats non-atomically, since inittrace is updated only by this init goroutine.
7377 after := inittrace
7378
7379 f := *(*func())(unsafe.Pointer(&firstFunc))
7380 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7381
7382 var sbuf [24]byte
7383 print("init ", pkg, " @")
7384 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7385 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7386 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7387 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7388 print("\n")
7389 }
7390
7391 t.state = 2
7392 }
7393 }
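
// Illustrative sketch (not part of the runtime): the statistics collected
// above are printed when a program runs with GODEBUG=inittrace=1, one line
// per package init. Representative (not real) output:
//
//	$ GODEBUG=inittrace=1 go run main.go
//	init internal/bytealg @0.008 ms, 0 ms clock, 0 bytes, 0 allocs
//	init runtime @0.059 ms, 0.026 ms clock, 0 bytes, 0 allocs
//
// Each line gives the start time relative to runtime init, the wall-clock
// duration, and the heap bytes and allocation count attributed to that
// package's init functions.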
7394