src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/stringslite"
16 "unsafe"
17 )
18
19 // set using cmd/go/internal/modload.ModInfoProg
20 var modinfo string
21
22 // Goroutine scheduler
23 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
24 //
25 // The main concepts are:
26 // G - goroutine.
27 // M - worker thread, or machine.
28 // P - processor, a resource that is required to execute Go code.
29 //     M must have an associated P to execute Go code, however it can be
30 //     blocked or in a syscall w/o an associated P.
31 //
32 // Design doc at https://golang.org/s/go11sched.
33 //
34 // (The rest of this comment, discussing worker-thread parking and unparking,
35 // is omitted here.)

116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
126 var runtime_inittasks []*initTask
127
128
129
130
131
132 var main_init_done chan bool
133
134 //go:linkname main_main main.main
135 func main_main()
136
137 // mainStarted indicates that the main M has started.
138 var mainStarted bool
139
140 // runtimeInitTime is the nanotime() when the runtime started.
141 var runtimeInitTime int64
142
143 // initSigmask is the signal mask used by newly created Ms.
144 var initSigmask sigset
145
146 // The main goroutine.
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
270
271
272
273
274
275
276
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284 if raceenabled {
285 runExitHooks(0)
286 racefini()
287 }
288
289
290
291
292
293 if runningPanicDefers.Load() != 0 {
294
295 for c := 0; c < 1000; c++ {
296 if runningPanicDefers.Load() == 0 {
297 break
298 }
299 Gosched()
300 }
301 }
302 if panicking.Load() != 0 {
303 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
304 }
305 runExitHooks(0)
306
307 exit(0)
308 for {
309 var x *int32
310 *x = 0
311 }
312 }
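// runtime.main (above) is the program's first goroutine. It sets the maximum
// stack size, starts sysmon, locks the main goroutine to m0 during init, runs
// the runtime and package init tasks, enables the GC, calls the user's
// main.main, and finally calls exit(0). The nil-pointer store in the trailing
// loop is deliberate: it crashes the process if exit somehow returns.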
313
314
315
316
317 func os_beforeExit(exitCode int) {
318 runExitHooks(exitCode)
319 if exitCode == 0 && raceenabled {
320 racefini()
321 }
322 }
323
324 func init() {
325 exithook.Gosched = Gosched
326 exithook.Goid = func() uint64 { return getg().goid }
327 exithook.Throw = throw
328 }
329
330 func runExitHooks(code int) {
331 exithook.Run(code)
332 }
333
334
335 func init() {
336 go forcegchelper()
337 }
338
339 func forcegchelper() {
340 forcegc.g = getg()
341 lockInit(&forcegc.lock, lockRankForcegc)
342 for {
343 lock(&forcegc.lock)
344 if forcegc.idle.Load() {
345 throw("forcegc: phase error")
346 }
347 forcegc.idle.Store(true)
348 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
349
350 if debug.gctrace > 0 {
351 println("GC forced")
352 }
353
354 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
355 }
356 }
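// forcegchelper (above) parks until the periodic-GC check elsewhere in the
// runtime (sysmon) decides a forced GC is due and readies it; it then kicks
// off a cycle with a gcTriggerTime trigger. With GODEBUG=gctrace=1 the
// "GC forced" line is printed each time this happens.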
357
358
359
360
361
362 func Gosched() {
363 checkTimeouts()
364 mcall(gosched_m)
365 }
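// Gosched only yields; the calling goroutine is put back on a run queue and
// resumes automatically. A minimal, hypothetical user-level example
// (illustrative only; busyLoop is not part of this file):
//
//	package main
//
//	import "runtime"
//
//	func busyLoop(done <-chan struct{}) {
//		for {
//			select {
//			case <-done:
//				return
//			default:
//				runtime.Gosched() // let other goroutines run on this thread
//			}
//		}
//	}
//
//	func main() {
//		done := make(chan struct{})
//		go busyLoop(done)
//		close(done)
//	}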
366
367
368
369
370
371 func goschedguarded() {
372 mcall(goschedguarded_m)
373 }
374
375
376
377
378
379
380 func goschedIfBusy() {
381 gp := getg()
382
383
384 if !gp.preempt && sched.npidle.Load() > 0 {
385 return
386 }
387 mcall(gosched_m)
388 }
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
419 if reason != waitReasonSleep {
420 checkTimeouts()
421 }
422 mp := acquirem()
423 gp := mp.curg
424 status := readgstatus(gp)
425 if status != _Grunning && status != _Gscanrunning {
426 throw("gopark: bad g status")
427 }
428 mp.waitlock = lock
429 mp.waitunlockf = unlockf
430 gp.waitreason = reason
431 mp.waitTraceBlockReason = traceReason
432 mp.waitTraceSkip = traceskip
433 releasem(mp)
434
435 mcall(park_m)
436 }
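// gopark (above) is the runtime-internal blocking primitive. It records the
// wait reason and the optional unlockf callback, then switches to the g0
// stack via mcall(park_m), where the goroutine moves from _Grunning to
// _Gwaiting and unlockf runs; if unlockf returns false the goroutine is made
// runnable again immediately. Otherwise it stays parked until other runtime
// code calls goready on it.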
437
438
439
440 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
441 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
442 }
443
444
445
446
447
448
449
450
451
452
453
454 func goready(gp *g, traceskip int) {
455 systemstack(func() {
456 ready(gp, traceskip, true)
457 })
458 }
459
460
461 func acquireSudog() *sudog {
462
463
464
465
466
467
468
469
470 mp := acquirem()
471 pp := mp.p.ptr()
472 if len(pp.sudogcache) == 0 {
473 lock(&sched.sudoglock)
474
475 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
476 s := sched.sudogcache
477 sched.sudogcache = s.next
478 s.next = nil
479 pp.sudogcache = append(pp.sudogcache, s)
480 }
481 unlock(&sched.sudoglock)
482
483 if len(pp.sudogcache) == 0 {
484 pp.sudogcache = append(pp.sudogcache, new(sudog))
485 }
486 }
487 n := len(pp.sudogcache)
488 s := pp.sudogcache[n-1]
489 pp.sudogcache[n-1] = nil
490 pp.sudogcache = pp.sudogcache[:n-1]
491 if s.elem != nil {
492 throw("acquireSudog: found s.elem != nil in cache")
493 }
494 releasem(mp)
495 return s
496 }
497
498
499 func releaseSudog(s *sudog) {
500 if s.elem != nil {
501 throw("runtime: sudog with non-nil elem")
502 }
503 if s.isSelect {
504 throw("runtime: sudog with non-false isSelect")
505 }
506 if s.next != nil {
507 throw("runtime: sudog with non-nil next")
508 }
509 if s.prev != nil {
510 throw("runtime: sudog with non-nil prev")
511 }
512 if s.waitlink != nil {
513 throw("runtime: sudog with non-nil waitlink")
514 }
515 if s.c != nil {
516 throw("runtime: sudog with non-nil c")
517 }
518 gp := getg()
519 if gp.param != nil {
520 throw("runtime: releaseSudog with non-nil gp.param")
521 }
522 mp := acquirem()
523 pp := mp.p.ptr()
524 if len(pp.sudogcache) == cap(pp.sudogcache) {
525
526 var first, last *sudog
527 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
528 n := len(pp.sudogcache)
529 p := pp.sudogcache[n-1]
530 pp.sudogcache[n-1] = nil
531 pp.sudogcache = pp.sudogcache[:n-1]
532 if first == nil {
533 first = p
534 } else {
535 last.next = p
536 }
537 last = p
538 }
539 lock(&sched.sudoglock)
540 last.next = sched.sudogcache
541 sched.sudogcache = first
542 unlock(&sched.sudoglock)
543 }
544 pp.sudogcache = append(pp.sudogcache, s)
545 releasem(mp)
546 }
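// sudogs are cached at two levels: a per-P slice (pp.sudogcache) and a global
// linked list (sched.sudogcache) protected by sched.sudoglock. acquireSudog
// refills the local cache up to half its capacity from the global list when
// it runs dry; releaseSudog spills half of a full local cache back to the
// global list. This keeps the common path free of lock contention.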
547
548
549 func badmcall(fn func(*g)) {
550 throw("runtime: mcall called on m->g0 stack")
551 }
552
553 func badmcall2(fn func(*g)) {
554 throw("runtime: mcall function returned")
555 }
556
557 func badreflectcall() {
558 panic(plainError("arg size to reflect.call more than 1GB"))
559 }
560
561
562
563 func badmorestackg0() {
564 if !crashStackImplemented {
565 writeErrStr("fatal: morestack on g0\n")
566 return
567 }
568
569 g := getg()
570 switchToCrashStack(func() {
571 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
572 g.m.traceback = 2
573 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
574 print("\n")
575
576 throw("morestack on g0")
577 })
578 }
579
580
581
582 func badmorestackgsignal() {
583 writeErrStr("fatal: morestack on gsignal\n")
584 }
585
586
587 func badctxt() {
588 throw("ctxt != 0")
589 }
590
591
592
593 var gcrash g
594
595 var crashingG atomic.Pointer[g]
596
597
598
599
600
601
602
603
604
605 func switchToCrashStack(fn func()) {
606 me := getg()
607 if crashingG.CompareAndSwapNoWB(nil, me) {
608 switchToCrashStack0(fn)
609 abort()
610 }
611 if crashingG.Load() == me {
612
613 writeErrStr("fatal: recursive switchToCrashStack\n")
614 abort()
615 }
616
617 usleep_no_g(100)
618 writeErrStr("fatal: concurrent switchToCrashStack\n")
619 abort()
620 }
621
622
623
624
625 const crashStackImplemented = GOOS != "windows"
626
627
628 func switchToCrashStack0(fn func())
629
630 func lockedOSThread() bool {
631 gp := getg()
632 return gp.lockedm != 0 && gp.m.lockedg != 0
633 }
634
635 var (
636
637
638
639
640
641
642 allglock mutex
643 allgs []*g
644
645
646
647
648
649
650
651
652
653
654
655
656
657 allglen uintptr
658 allgptr **g
659 )
660
661 func allgadd(gp *g) {
662 if readgstatus(gp) == _Gidle {
663 throw("allgadd: bad status Gidle")
664 }
665
666 lock(&allglock)
667 allgs = append(allgs, gp)
668 if &allgs[0] != allgptr {
669 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
670 }
671 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
672 unlock(&allglock)
673 }
674
675
676
677
678 func allGsSnapshot() []*g {
679 assertWorldStoppedOrLockHeld(&allglock)
680
681
682
683
684
685
686 return allgs[:len(allgs):len(allgs)]
687 }
688
689
690 func atomicAllG() (**g, uintptr) {
691 length := atomic.Loaduintptr(&allglen)
692 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
693 return ptr, length
694 }
695
696
697 func atomicAllGIndex(ptr **g, i uintptr) *g {
698 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
699 }
700
701
702
703
704 func forEachG(fn func(gp *g)) {
705 lock(&allglock)
706 for _, gp := range allgs {
707 fn(gp)
708 }
709 unlock(&allglock)
710 }
711
712
713
714
715
716 func forEachGRace(fn func(gp *g)) {
717 ptr, length := atomicAllG()
718 for i := uintptr(0); i < length; i++ {
719 gp := atomicAllGIndex(ptr, i)
720 fn(gp)
721 }
722 return
723 }
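// allgs only grows, and allgptr/allglen are published with atomic stores in
// allgadd, so readers that load the pair see a consistent prefix of the list.
// forEachG takes allglock and observes every g; forEachGRace reads the atomic
// pair and may miss goroutines created concurrently, which its callers
// tolerate.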
724
725 const (
726
727
728 _GoidCacheBatch = 16
729 )
730
731
732
733 func cpuinit(env string) {
734 switch GOOS {
735 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
736 cpu.DebugOptions = true
737 }
738 cpu.Initialize(env)
739
740
741
742 switch GOARCH {
743 case "386", "amd64":
744 x86HasPOPCNT = cpu.X86.HasPOPCNT
745 x86HasSSE41 = cpu.X86.HasSSE41
746 x86HasFMA = cpu.X86.HasFMA
747
748 case "arm":
749 armHasVFPv4 = cpu.ARM.HasVFPv4
750
751 case "arm64":
752 arm64HasATOMICS = cpu.ARM64.HasATOMICS
753
754 case "loong64":
755 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
756 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
757 loong64HasLSX = cpu.Loong64.HasLSX
758 }
759 }
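// cpuinit (above) runs before the Go environment is set up, so it receives
// the GODEBUG value located by getGodebugEarly. internal/cpu uses it to honor
// feature toggles (for example GODEBUG=cpu.all=off on the listed systems),
// and the detected features are then copied into package-level flags consumed
// by assembly routines.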
760
761
762
763
764 func getGodebugEarly() string {
765 const prefix = "GODEBUG="
766 var env string
767 switch GOOS {
768 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
769
770
771
772 n := int32(0)
773 for argv_index(argv, argc+1+n) != nil {
774 n++
775 }
776
777 for i := int32(0); i < n; i++ {
778 p := argv_index(argv, argc+1+i)
779 s := unsafe.String(p, findnull(p))
780
781 if stringslite.HasPrefix(s, prefix) {
782 env = gostring(p)[len(prefix):]
783 break
784 }
785 }
786 }
787 return env
788 }
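// On the Unix-like systems listed above, the kernel places the environment
// strings directly after argv, so getGodebugEarly can find GODEBUG this early
// in bootstrap, before goenvs has run and before the allocator-dependent
// environment code is usable.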
789
790
791
792
793
794
795
796
797
798 func schedinit() {
799 lockInit(&sched.lock, lockRankSched)
800 lockInit(&sched.sysmonlock, lockRankSysmon)
801 lockInit(&sched.deferlock, lockRankDefer)
802 lockInit(&sched.sudoglock, lockRankSudog)
803 lockInit(&deadlock, lockRankDeadlock)
804 lockInit(&paniclk, lockRankPanic)
805 lockInit(&allglock, lockRankAllg)
806 lockInit(&allpLock, lockRankAllp)
807 lockInit(&reflectOffs.lock, lockRankReflectOffs)
808 lockInit(&finlock, lockRankFin)
809 lockInit(&cpuprof.lock, lockRankCpuprof)
810 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
811 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
812 traceLockInit()
813
814
815
816 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
817
818 lockVerifyMSize()
819
820
821
822 gp := getg()
823 if raceenabled {
824 gp.racectx, raceprocctx0 = raceinit()
825 }
826
827 sched.maxmcount = 10000
828 crashFD.Store(^uintptr(0))
829
830
831 worldStopped()
832
833 ticks.init()
834 moduledataverify()
835 stackinit()
836 mallocinit()
837 godebug := getGodebugEarly()
838 cpuinit(godebug)
839 randinit()
840 alginit()
841 mcommoninit(gp.m, -1)
842 modulesinit()
843 typelinksinit()
844 itabsinit()
845 stkobjinit()
846
847 sigsave(&gp.m.sigmask)
848 initSigmask = gp.m.sigmask
849
850 goargs()
851 goenvs()
852 secure()
853 checkfds()
854 parsedebugvars()
855 gcinit()
856
857
858
859 gcrash.stack = stackalloc(16384)
860 gcrash.stackguard0 = gcrash.stack.lo + 1000
861 gcrash.stackguard1 = gcrash.stack.lo + 1000
862
863
864
865
866
867 if disableMemoryProfiling {
868 MemProfileRate = 0
869 }
870
871
872 mProfStackInit(gp.m)
873
874 lock(&sched.lock)
875 sched.lastpoll.Store(nanotime())
876 procs := ncpu
877 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
878 procs = n
879 }
880 if procresize(procs) != nil {
881 throw("unknown runnable goroutine during bootstrap")
882 }
883 unlock(&sched.lock)
884
885
886 worldStarted()
887
888 if buildVersion == "" {
889
890
891 buildVersion = "unknown"
892 }
893 if len(modinfo) == 1 {
894
895
896 modinfo = ""
897 }
898 }
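// schedinit (above) runs on g0 during bootstrap with the world marked
// stopped. The number of Ps defaults to ncpu and is overridden by the
// GOMAXPROCS environment variable before procresize is called. A hypothetical
// user-level way to inspect or change the value afterwards:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// An argument < 1 reports the current setting without changing it.
//		fmt.Println("GOMAXPROCS =", runtime.GOMAXPROCS(0))
//	}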
899
900 func dumpgstatus(gp *g) {
901 thisg := getg()
902 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
903 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
904 }
905
906
907 func checkmcount() {
908 assertLockHeld(&sched.lock)
909
910
911
912
913
914
915
916
917
918 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
919 if count > sched.maxmcount {
920 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
921 throw("thread exhaustion")
922 }
923 }
924
925
926
927
928
929 func mReserveID() int64 {
930 assertLockHeld(&sched.lock)
931
932 if sched.mnext+1 < sched.mnext {
933 throw("runtime: thread ID overflow")
934 }
935 id := sched.mnext
936 sched.mnext++
937 checkmcount()
938 return id
939 }
940
941
942 func mcommoninit(mp *m, id int64) {
943 gp := getg()
944
945
946 if gp != gp.m.g0 {
947 callers(1, mp.createstack[:])
948 }
949
950 lock(&sched.lock)
951
952 if id >= 0 {
953 mp.id = id
954 } else {
955 mp.id = mReserveID()
956 }
957
958 mrandinit(mp)
959
960 mpreinit(mp)
961 if mp.gsignal != nil {
962 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
963 }
964
965
966
967 mp.alllink = allm
968
969
970
971 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
972 unlock(&sched.lock)
973
974
975 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
976 mp.cgoCallers = new(cgoCallers)
977 }
978 mProfStackInit(mp)
979 }
980
981
982
983
984
985 func mProfStackInit(mp *m) {
986 if debug.profstackdepth == 0 {
987
988
989 return
990 }
991 mp.profStack = makeProfStackFP()
992 mp.mLockProfile.stack = makeProfStackFP()
993 }
994
995
996
997
998 func makeProfStackFP() []uintptr {
999
1000
1001
1002
1003
1004
1005 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1006 }
1007
1008
1009
1010 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1011
1012
1013 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1014
1015 func (mp *m) becomeSpinning() {
1016 mp.spinning = true
1017 sched.nmspinning.Add(1)
1018 sched.needspinning.Store(0)
1019 }
1020
1021 func (mp *m) hasCgoOnStack() bool {
1022 return mp.ncgo > 0 || mp.isextra
1023 }
1024
1025 const (
1026
1027
1028 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1029
1030
1031
1032 osHasLowResClockInt = goos.IsWindows
1033
1034
1035
1036 osHasLowResClock = osHasLowResClockInt > 0
1037 )
1038
1039
1040 func ready(gp *g, traceskip int, next bool) {
1041 status := readgstatus(gp)
1042
1043
1044 mp := acquirem()
1045 if status&^_Gscan != _Gwaiting {
1046 dumpgstatus(gp)
1047 throw("bad g->status in ready")
1048 }
1049
1050
1051 trace := traceAcquire()
1052 casgstatus(gp, _Gwaiting, _Grunnable)
1053 if trace.ok() {
1054 trace.GoUnpark(gp, traceskip)
1055 traceRelease(trace)
1056 }
1057 runqput(mp.p.ptr(), gp, next)
1058 wakep()
1059 releasem(mp)
1060 }
1061
1062
1063
1064 const freezeStopWait = 0x7fffffff
1065
1066
1067
1068 var freezing atomic.Bool
1069
1070
1071
1072
1073 func freezetheworld() {
1074 freezing.Store(true)
1075 if debug.dontfreezetheworld > 0 {
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100 usleep(1000)
1101 return
1102 }
1103
1104
1105
1106
1107 for i := 0; i < 5; i++ {
1108
1109 sched.stopwait = freezeStopWait
1110 sched.gcwaiting.Store(true)
1111
1112 if !preemptall() {
1113 break
1114 }
1115 usleep(1000)
1116 }
1117
1118 usleep(1000)
1119 preemptall()
1120 usleep(1000)
1121 }
1122
1123
1124
1125
1126
1127 func readgstatus(gp *g) uint32 {
1128 return gp.atomicstatus.Load()
1129 }
1130
1131
1132
1133
1134
1135 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1136 success := false
1137
1138
1139 switch oldval {
1140 default:
1141 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1142 dumpgstatus(gp)
1143 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1144 case _Gscanrunnable,
1145 _Gscanwaiting,
1146 _Gscanrunning,
1147 _Gscansyscall,
1148 _Gscanpreempted:
1149 if newval == oldval&^_Gscan {
1150 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1151 }
1152 }
1153 if !success {
1154 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1155 dumpgstatus(gp)
1156 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1157 }
1158 releaseLockRankAndM(lockRankGscan)
1159 }
1160
1161
1162
1163 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1164 switch oldval {
1165 case _Grunnable,
1166 _Grunning,
1167 _Gwaiting,
1168 _Gsyscall:
1169 if newval == oldval|_Gscan {
1170 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1171 if r {
1172 acquireLockRankAndM(lockRankGscan)
1173 }
1174 return r
1175
1176 }
1177 }
1178 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1179 throw("castogscanstatus")
1180 panic("not reached")
1181 }
1182
1183
1184
1185 var casgstatusAlwaysTrack = false
1186
1187
1188
1189
1190
1191
1192
1193 func casgstatus(gp *g, oldval, newval uint32) {
1194 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1195 systemstack(func() {
1196
1197
1198 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1199 throw("casgstatus: bad incoming values")
1200 })
1201 }
1202
1203 lockWithRankMayAcquire(nil, lockRankGscan)
1204
1205
1206 const yieldDelay = 5 * 1000
1207 var nextYield int64
1208
1209
1210
1211 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1212 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1213 systemstack(func() {
1214
1215
1216 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1217 })
1218 }
1219 if i == 0 {
1220 nextYield = nanotime() + yieldDelay
1221 }
1222 if nanotime() < nextYield {
1223 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1224 procyield(1)
1225 }
1226 } else {
1227 osyield()
1228 nextYield = nanotime() + yieldDelay/2
1229 }
1230 }
1231
1232 if gp.syncGroup != nil {
1233 systemstack(func() {
1234 gp.syncGroup.changegstatus(gp, oldval, newval)
1235 })
1236 }
1237
1238 if oldval == _Grunning {
1239
1240 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1241 gp.tracking = true
1242 }
1243 gp.trackingSeq++
1244 }
1245 if !gp.tracking {
1246 return
1247 }
1248
1249
1250
1251
1252
1253
1254 switch oldval {
1255 case _Grunnable:
1256
1257
1258
1259 now := nanotime()
1260 gp.runnableTime += now - gp.trackingStamp
1261 gp.trackingStamp = 0
1262 case _Gwaiting:
1263 if !gp.waitreason.isMutexWait() {
1264
1265 break
1266 }
1267
1268
1269
1270
1271
1272 now := nanotime()
1273 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1274 gp.trackingStamp = 0
1275 }
1276 switch newval {
1277 case _Gwaiting:
1278 if !gp.waitreason.isMutexWait() {
1279
1280 break
1281 }
1282
1283 now := nanotime()
1284 gp.trackingStamp = now
1285 case _Grunnable:
1286
1287
1288 now := nanotime()
1289 gp.trackingStamp = now
1290 case _Grunning:
1291
1292
1293
1294 gp.tracking = false
1295 sched.timeToRun.record(gp.runnableTime)
1296 gp.runnableTime = 0
1297 }
1298 }
1299
1300
1301
1302
1303 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1304
1305 gp.waitreason = reason
1306 casgstatus(gp, old, _Gwaiting)
1307 }
1308
1309
1310
1311
1312
1313 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1314 if !reason.isWaitingForGC() {
1315 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1316 }
1317 casGToWaiting(gp, old, reason)
1318 }
1319
1320
1321
1322
1323
1324 func casGToPreemptScan(gp *g, old, new uint32) {
1325 if old != _Grunning || new != _Gscan|_Gpreempted {
1326 throw("bad g transition")
1327 }
1328 acquireLockRankAndM(lockRankGscan)
1329 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1330 }
1331
1332
1333
1334
1335
1336
1337 }
1338
1339
1340
1341
1342 func casGFromPreempted(gp *g, old, new uint32) bool {
1343 if old != _Gpreempted || new != _Gwaiting {
1344 throw("bad g transition")
1345 }
1346 gp.waitreason = waitReasonPreempted
1347 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1348 return false
1349 }
1350 if sg := gp.syncGroup; sg != nil {
1351 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1352 }
1353 return true
1354 }
1355
1356
1357 type stwReason uint8
1358
1359
1360
1361
1362 const (
1363 stwUnknown stwReason = iota
1364 stwGCMarkTerm
1365 stwGCSweepTerm
1366 stwWriteHeapDump
1367 stwGoroutineProfile
1368 stwGoroutineProfileCleanup
1369 stwAllGoroutinesStack
1370 stwReadMemStats
1371 stwAllThreadsSyscall
1372 stwGOMAXPROCS
1373 stwStartTrace
1374 stwStopTrace
1375 stwForTestCountPagesInUse
1376 stwForTestReadMetricsSlow
1377 stwForTestReadMemStatsSlow
1378 stwForTestPageCachePagesLeaked
1379 stwForTestResetDebugLog
1380 )
1381
1382 func (r stwReason) String() string {
1383 return stwReasonStrings[r]
1384 }
1385
1386 func (r stwReason) isGC() bool {
1387 return r == stwGCMarkTerm || r == stwGCSweepTerm
1388 }
1389
1390
1391
1392
1393 var stwReasonStrings = [...]string{
1394 stwUnknown: "unknown",
1395 stwGCMarkTerm: "GC mark termination",
1396 stwGCSweepTerm: "GC sweep termination",
1397 stwWriteHeapDump: "write heap dump",
1398 stwGoroutineProfile: "goroutine profile",
1399 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1400 stwAllGoroutinesStack: "all goroutines stack trace",
1401 stwReadMemStats: "read mem stats",
1402 stwAllThreadsSyscall: "AllThreadsSyscall",
1403 stwGOMAXPROCS: "GOMAXPROCS",
1404 stwStartTrace: "start trace",
1405 stwStopTrace: "stop trace",
1406 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1407 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1408 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1409 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1410 stwForTestResetDebugLog: "ResetDebugLog (test)",
1411 }
1412
1413
1414
1415 type worldStop struct {
1416 reason stwReason
1417 startedStopping int64
1418 finishedStopping int64
1419 stoppingCPUTime int64
1420 }
1421
1422
1423
1424
1425 var stopTheWorldContext worldStop
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444 func stopTheWorld(reason stwReason) worldStop {
1445 semacquire(&worldsema)
1446 gp := getg()
1447 gp.m.preemptoff = reason.String()
1448 systemstack(func() {
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1464 stopTheWorldContext = stopTheWorldWithSema(reason)
1465 casgstatus(gp, _Gwaiting, _Grunning)
1466 })
1467 return stopTheWorldContext
1468 }
1469
1470
1471
1472
1473 func startTheWorld(w worldStop) {
1474 systemstack(func() { startTheWorldWithSema(0, w) })
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491 mp := acquirem()
1492 mp.preemptoff = ""
1493 semrelease1(&worldsema, true, 0)
1494 releasem(mp)
1495 }
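// stopTheWorld and startTheWorld bracket operations that need every P parked.
// worldsema serializes stop-the-world sections, and the returned worldStop
// carries timing that is recorded in the STW metrics. Callers use the pair
// roughly as:
//
//	stw := stopTheWorld(stwReadMemStats)
//	// ... inspect or mutate state that requires a quiescent world ...
//	startTheWorld(stw)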
1496
1497
1498
1499
1500 func stopTheWorldGC(reason stwReason) worldStop {
1501 semacquire(&gcsema)
1502 return stopTheWorld(reason)
1503 }
1504
1505
1506
1507
1508 func startTheWorldGC(w worldStop) {
1509 startTheWorld(w)
1510 semrelease(&gcsema)
1511 }
1512
1513
1514 var worldsema uint32 = 1
1515
1516
1517
1518
1519
1520
1521
1522 var gcsema uint32 = 1
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 func stopTheWorldWithSema(reason stwReason) worldStop {
1555 trace := traceAcquire()
1556 if trace.ok() {
1557 trace.STWStart(reason)
1558 traceRelease(trace)
1559 }
1560 gp := getg()
1561
1562
1563
1564 if gp.m.locks > 0 {
1565 throw("stopTheWorld: holding locks")
1566 }
1567
1568 lock(&sched.lock)
1569 start := nanotime()
1570 sched.stopwait = gomaxprocs
1571 sched.gcwaiting.Store(true)
1572 preemptall()
1573
1574 gp.m.p.ptr().status = _Pgcstop
1575 gp.m.p.ptr().gcStopTime = start
1576 sched.stopwait--
1577
1578 trace = traceAcquire()
1579 for _, pp := range allp {
1580 s := pp.status
1581 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1582 if trace.ok() {
1583 trace.ProcSteal(pp, false)
1584 }
1585 pp.syscalltick++
1586 pp.gcStopTime = nanotime()
1587 sched.stopwait--
1588 }
1589 }
1590 if trace.ok() {
1591 traceRelease(trace)
1592 }
1593
1594
1595 now := nanotime()
1596 for {
1597 pp, _ := pidleget(now)
1598 if pp == nil {
1599 break
1600 }
1601 pp.status = _Pgcstop
1602 pp.gcStopTime = nanotime()
1603 sched.stopwait--
1604 }
1605 wait := sched.stopwait > 0
1606 unlock(&sched.lock)
1607
1608
1609 if wait {
1610 for {
1611
1612 if notetsleep(&sched.stopnote, 100*1000) {
1613 noteclear(&sched.stopnote)
1614 break
1615 }
1616 preemptall()
1617 }
1618 }
1619
1620 finish := nanotime()
1621 startTime := finish - start
1622 if reason.isGC() {
1623 sched.stwStoppingTimeGC.record(startTime)
1624 } else {
1625 sched.stwStoppingTimeOther.record(startTime)
1626 }
1627
1628
1629
1630
1631
1632 stoppingCPUTime := int64(0)
1633 bad := ""
1634 if sched.stopwait != 0 {
1635 bad = "stopTheWorld: not stopped (stopwait != 0)"
1636 } else {
1637 for _, pp := range allp {
1638 if pp.status != _Pgcstop {
1639 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1640 }
1641 if pp.gcStopTime == 0 && bad == "" {
1642 bad = "stopTheWorld: broken CPU time accounting"
1643 }
1644 stoppingCPUTime += finish - pp.gcStopTime
1645 pp.gcStopTime = 0
1646 }
1647 }
1648 if freezing.Load() {
1649
1650
1651
1652
1653 lock(&deadlock)
1654 lock(&deadlock)
1655 }
1656 if bad != "" {
1657 throw(bad)
1658 }
1659
1660 worldStopped()
1661
1662 return worldStop{
1663 reason: reason,
1664 startedStopping: start,
1665 finishedStopping: finish,
1666 stoppingCPUTime: stoppingCPUTime,
1667 }
1668 }
1669
1670
1671
1672
1673
1674
1675
1676 func startTheWorldWithSema(now int64, w worldStop) int64 {
1677 assertWorldStopped()
1678
1679 mp := acquirem()
1680 if netpollinited() {
1681 list, delta := netpoll(0)
1682 injectglist(&list)
1683 netpollAdjustWaiters(delta)
1684 }
1685 lock(&sched.lock)
1686
1687 procs := gomaxprocs
1688 if newprocs != 0 {
1689 procs = newprocs
1690 newprocs = 0
1691 }
1692 p1 := procresize(procs)
1693 sched.gcwaiting.Store(false)
1694 if sched.sysmonwait.Load() {
1695 sched.sysmonwait.Store(false)
1696 notewakeup(&sched.sysmonnote)
1697 }
1698 unlock(&sched.lock)
1699
1700 worldStarted()
1701
1702 for p1 != nil {
1703 p := p1
1704 p1 = p1.link.ptr()
1705 if p.m != 0 {
1706 mp := p.m.ptr()
1707 p.m = 0
1708 if mp.nextp != 0 {
1709 throw("startTheWorld: inconsistent mp->nextp")
1710 }
1711 mp.nextp.set(p)
1712 notewakeup(&mp.park)
1713 } else {
1714
1715 newm(nil, p, -1)
1716 }
1717 }
1718
1719
1720 if now == 0 {
1721 now = nanotime()
1722 }
1723 totalTime := now - w.startedStopping
1724 if w.reason.isGC() {
1725 sched.stwTotalTimeGC.record(totalTime)
1726 } else {
1727 sched.stwTotalTimeOther.record(totalTime)
1728 }
1729 trace := traceAcquire()
1730 if trace.ok() {
1731 trace.STWDone()
1732 traceRelease(trace)
1733 }
1734
1735
1736
1737
1738 wakep()
1739
1740 releasem(mp)
1741
1742 return now
1743 }
1744
1745
1746
1747 func usesLibcall() bool {
1748 switch GOOS {
1749 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1750 return true
1751 case "openbsd":
1752 return GOARCH != "mips64"
1753 }
1754 return false
1755 }
1756
1757
1758
1759 func mStackIsSystemAllocated() bool {
1760 switch GOOS {
1761 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1762 return true
1763 case "openbsd":
1764 return GOARCH != "mips64"
1765 }
1766 return false
1767 }
1768
1769
1770
1771 func mstart()
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782 func mstart0() {
1783 gp := getg()
1784
1785 osStack := gp.stack.lo == 0
1786 if osStack {
1787
1788
1789
1790
1791
1792
1793
1794
1795 size := gp.stack.hi
1796 if size == 0 {
1797 size = 16384 * sys.StackGuardMultiplier
1798 }
1799 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1800 gp.stack.lo = gp.stack.hi - size + 1024
1801 }
1802
1803
1804 gp.stackguard0 = gp.stack.lo + stackGuard
1805
1806
1807 gp.stackguard1 = gp.stackguard0
1808 mstart1()
1809
1810
1811 if mStackIsSystemAllocated() {
1812
1813
1814
1815 osStack = true
1816 }
1817 mexit(osStack)
1818 }
1819
1820
1821
1822
1823
1824 func mstart1() {
1825 gp := getg()
1826
1827 if gp != gp.m.g0 {
1828 throw("bad runtime·mstart")
1829 }
1830
1831
1832
1833
1834
1835
1836
1837 gp.sched.g = guintptr(unsafe.Pointer(gp))
1838 gp.sched.pc = sys.GetCallerPC()
1839 gp.sched.sp = sys.GetCallerSP()
1840
1841 asminit()
1842 minit()
1843
1844
1845
1846 if gp.m == &m0 {
1847 mstartm0()
1848 }
1849
1850 if debug.dataindependenttiming == 1 {
1851 sys.EnableDIT()
1852 }
1853
1854 if fn := gp.m.mstartfn; fn != nil {
1855 fn()
1856 }
1857
1858 if gp.m != &m0 {
1859 acquirep(gp.m.nextp.ptr())
1860 gp.m.nextp = 0
1861 }
1862 schedule()
1863 }
1864
1865
1866
1867
1868
1869
1870
1871 func mstartm0() {
1872
1873
1874
1875 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1876 cgoHasExtraM = true
1877 newextram()
1878 }
1879 initsig(false)
1880 }
1881
1882
1883
1884
1885 func mPark() {
1886 gp := getg()
1887 notesleep(&gp.m.park)
1888 noteclear(&gp.m.park)
1889 }
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901 func mexit(osStack bool) {
1902 mp := getg().m
1903
1904 if mp == &m0 {
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916 handoffp(releasep())
1917 lock(&sched.lock)
1918 sched.nmfreed++
1919 checkdead()
1920 unlock(&sched.lock)
1921 mPark()
1922 throw("locked m0 woke up")
1923 }
1924
1925 sigblock(true)
1926 unminit()
1927
1928
1929 if mp.gsignal != nil {
1930 stackfree(mp.gsignal.stack)
1931
1932
1933
1934
1935 mp.gsignal = nil
1936 }
1937
1938
1939 lock(&sched.lock)
1940 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1941 if *pprev == mp {
1942 *pprev = mp.alllink
1943 goto found
1944 }
1945 }
1946 throw("m not found in allm")
1947 found:
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962 mp.freeWait.Store(freeMWait)
1963 mp.freelink = sched.freem
1964 sched.freem = mp
1965 unlock(&sched.lock)
1966
1967 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1968 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1969
1970
1971 handoffp(releasep())
1972
1973
1974
1975
1976
1977 lock(&sched.lock)
1978 sched.nmfreed++
1979 checkdead()
1980 unlock(&sched.lock)
1981
1982 if GOOS == "darwin" || GOOS == "ios" {
1983
1984
1985 if mp.signalPending.Load() != 0 {
1986 pendingPreemptSignals.Add(-1)
1987 }
1988 }
1989
1990
1991
1992 mdestroy(mp)
1993
1994 if osStack {
1995
1996 mp.freeWait.Store(freeMRef)
1997
1998
1999
2000 return
2001 }
2002
2003
2004
2005
2006
2007 exitThread(&mp.freeWait)
2008 }
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020 func forEachP(reason waitReason, fn func(*p)) {
2021 systemstack(func() {
2022 gp := getg().m.curg
2023
2024
2025
2026
2027
2028
2029
2030
2031 casGToWaitingForGC(gp, _Grunning, reason)
2032 forEachPInternal(fn)
2033 casgstatus(gp, _Gwaiting, _Grunning)
2034 })
2035 }
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046 func forEachPInternal(fn func(*p)) {
2047 mp := acquirem()
2048 pp := getg().m.p.ptr()
2049
2050 lock(&sched.lock)
2051 if sched.safePointWait != 0 {
2052 throw("forEachP: sched.safePointWait != 0")
2053 }
2054 sched.safePointWait = gomaxprocs - 1
2055 sched.safePointFn = fn
2056
2057
2058 for _, p2 := range allp {
2059 if p2 != pp {
2060 atomic.Store(&p2.runSafePointFn, 1)
2061 }
2062 }
2063 preemptall()
2064
2065
2066
2067
2068
2069
2070
2071 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2072 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2073 fn(p)
2074 sched.safePointWait--
2075 }
2076 }
2077
2078 wait := sched.safePointWait > 0
2079 unlock(&sched.lock)
2080
2081
2082 fn(pp)
2083
2084
2085
2086 for _, p2 := range allp {
2087 s := p2.status
2088
2089
2090
2091 trace := traceAcquire()
2092 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2093 if trace.ok() {
2094
2095 trace.ProcSteal(p2, false)
2096 traceRelease(trace)
2097 }
2098 p2.syscalltick++
2099 handoffp(p2)
2100 } else if trace.ok() {
2101 traceRelease(trace)
2102 }
2103 }
2104
2105
2106 if wait {
2107 for {
2108
2109
2110
2111
2112 if notetsleep(&sched.safePointNote, 100*1000) {
2113 noteclear(&sched.safePointNote)
2114 break
2115 }
2116 preemptall()
2117 }
2118 }
2119 if sched.safePointWait != 0 {
2120 throw("forEachP: not done")
2121 }
2122 for _, p2 := range allp {
2123 if p2.runSafePointFn != 0 {
2124 throw("forEachP: P did not run fn")
2125 }
2126 }
2127
2128 lock(&sched.lock)
2129 sched.safePointFn = nil
2130 unlock(&sched.lock)
2131 releasem(mp)
2132 }
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145 func runSafePointFn() {
2146 p := getg().m.p.ptr()
2147
2148
2149
2150 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2151 return
2152 }
2153 sched.safePointFn(p)
2154 lock(&sched.lock)
2155 sched.safePointWait--
2156 if sched.safePointWait == 0 {
2157 notewakeup(&sched.safePointNote)
2158 }
2159 unlock(&sched.lock)
2160 }
2161
2162
2163
2164
2165 var cgoThreadStart unsafe.Pointer
2166
2167 type cgothreadstart struct {
2168 g guintptr
2169 tls *uint64
2170 fn unsafe.Pointer
2171 }
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182 func allocm(pp *p, fn func(), id int64) *m {
2183 allocmLock.rlock()
2184
2185
2186
2187
2188 acquirem()
2189
2190 gp := getg()
2191 if gp.m.p == 0 {
2192 acquirep(pp)
2193 }
2194
2195
2196
2197 if sched.freem != nil {
2198 lock(&sched.lock)
2199 var newList *m
2200 for freem := sched.freem; freem != nil; {
2201
2202 wait := freem.freeWait.Load()
2203 if wait == freeMWait {
2204 next := freem.freelink
2205 freem.freelink = newList
2206 newList = freem
2207 freem = next
2208 continue
2209 }
2210
2211
2212
2213 if traceEnabled() || traceShuttingDown() {
2214 traceThreadDestroy(freem)
2215 }
2216
2217
2218
2219 if wait == freeMStack {
2220
2221
2222
2223 systemstack(func() {
2224 stackfree(freem.g0.stack)
2225 })
2226 }
2227 freem = freem.freelink
2228 }
2229 sched.freem = newList
2230 unlock(&sched.lock)
2231 }
2232
2233 mp := new(m)
2234 mp.mstartfn = fn
2235 mcommoninit(mp, id)
2236
2237
2238
2239 if iscgo || mStackIsSystemAllocated() {
2240 mp.g0 = malg(-1)
2241 } else {
2242 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2243 }
2244 mp.g0.m = mp
2245
2246 if pp == gp.m.p.ptr() {
2247 releasep()
2248 }
2249
2250 releasem(gp.m)
2251 allocmLock.runlock()
2252 return mp
2253 }
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294 func needm(signal bool) {
2295 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2296
2297
2298
2299
2300
2301
2302 writeErrStr("fatal error: cgo callback before cgo call\n")
2303 exit(1)
2304 }
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314 var sigmask sigset
2315 sigsave(&sigmask)
2316 sigblock(false)
2317
2318
2319
2320
2321 mp, last := getExtraM()
2322
2323
2324
2325
2326
2327
2328
2329
2330 mp.needextram = last
2331
2332
2333 mp.sigmask = sigmask
2334
2335
2336
2337 osSetupTLS(mp)
2338
2339
2340
2341 setg(mp.g0)
2342 sp := sys.GetCallerSP()
2343 callbackUpdateSystemStack(mp, sp, signal)
2344
2345
2346
2347
2348 mp.isExtraInC = false
2349
2350
2351 asminit()
2352 minit()
2353
2354
2355
2356
2357
2358
2359 var trace traceLocker
2360 if !signal {
2361 trace = traceAcquire()
2362 }
2363
2364
2365 casgstatus(mp.curg, _Gdead, _Gsyscall)
2366 sched.ngsys.Add(-1)
2367
2368 if !signal {
2369 if trace.ok() {
2370 trace.GoCreateSyscall(mp.curg)
2371 traceRelease(trace)
2372 }
2373 }
2374 mp.isExtraInSig = signal
2375 }
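// needm (above) is the entry path for cgo callbacks and signals arriving on
// threads the Go runtime did not create. It takes an extra M from the list
// maintained below, wires up TLS and the system stack bounds, and flips the
// M's g from _Gdead to _Gsyscall so the callback can run Go code; dropm
// undoes all of this when the callback returns.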
2376
2377
2378
2379
2380 func needAndBindM() {
2381 needm(false)
2382
2383 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2384 cgoBindM()
2385 }
2386 }
2387
2388
2389
2390
2391 func newextram() {
2392 c := extraMWaiters.Swap(0)
2393 if c > 0 {
2394 for i := uint32(0); i < c; i++ {
2395 oneNewExtraM()
2396 }
2397 } else if extraMLength.Load() == 0 {
2398
2399 oneNewExtraM()
2400 }
2401 }
2402
2403
2404 func oneNewExtraM() {
2405
2406
2407
2408
2409
2410 mp := allocm(nil, nil, -1)
2411 gp := malg(4096)
2412 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2413 gp.sched.sp = gp.stack.hi
2414 gp.sched.sp -= 4 * goarch.PtrSize
2415 gp.sched.lr = 0
2416 gp.sched.g = guintptr(unsafe.Pointer(gp))
2417 gp.syscallpc = gp.sched.pc
2418 gp.syscallsp = gp.sched.sp
2419 gp.stktopsp = gp.sched.sp
2420
2421
2422
2423
2424 casgstatus(gp, _Gidle, _Gdead)
2425 gp.m = mp
2426 mp.curg = gp
2427 mp.isextra = true
2428
2429 mp.isExtraInC = true
2430 mp.lockedInt++
2431 mp.lockedg.set(gp)
2432 gp.lockedm.set(mp)
2433 gp.goid = sched.goidgen.Add(1)
2434 if raceenabled {
2435 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2436 }
2437
2438 allgadd(gp)
2439
2440
2441
2442
2443
2444 sched.ngsys.Add(1)
2445
2446
2447 addExtraM(mp)
2448 }
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483 func dropm() {
2484
2485
2486
2487 mp := getg().m
2488
2489
2490
2491
2492
2493 var trace traceLocker
2494 if !mp.isExtraInSig {
2495 trace = traceAcquire()
2496 }
2497
2498
2499 casgstatus(mp.curg, _Gsyscall, _Gdead)
2500 mp.curg.preemptStop = false
2501 sched.ngsys.Add(1)
2502
2503 if !mp.isExtraInSig {
2504 if trace.ok() {
2505 trace.GoDestroySyscall()
2506 traceRelease(trace)
2507 }
2508 }
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523 mp.syscalltick--
2524
2525
2526
2527 mp.curg.trace.reset()
2528
2529
2530
2531
2532 if traceEnabled() || traceShuttingDown() {
2533
2534
2535
2536
2537
2538
2539
2540 lock(&sched.lock)
2541 traceThreadDestroy(mp)
2542 unlock(&sched.lock)
2543 }
2544 mp.isExtraInSig = false
2545
2546
2547
2548
2549
2550 sigmask := mp.sigmask
2551 sigblock(false)
2552 unminit()
2553
2554 setg(nil)
2555
2556
2557
2558 g0 := mp.g0
2559 g0.stack.hi = 0
2560 g0.stack.lo = 0
2561 g0.stackguard0 = 0
2562 g0.stackguard1 = 0
2563 mp.g0StackAccurate = false
2564
2565 putExtraM(mp)
2566
2567 msigrestore(sigmask)
2568 }
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590 func cgoBindM() {
2591 if GOOS == "windows" || GOOS == "plan9" {
2592 fatal("bindm in unexpected GOOS")
2593 }
2594 g := getg()
2595 if g.m.g0 != g {
2596 fatal("the current g is not g0")
2597 }
2598 if _cgo_bindm != nil {
2599 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2600 }
2601 }
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614 func getm() uintptr {
2615 return uintptr(unsafe.Pointer(getg().m))
2616 }
2617
2618 var (
2619
2620
2621
2622
2623
2624
2625 extraM atomic.Uintptr
2626
2627 extraMLength atomic.Uint32
2628
2629 extraMWaiters atomic.Uint32
2630
2631
2632 extraMInUse atomic.Uint32
2633 )
2634
2635
2636
2637
2638
2639
2640
2641
2642 func lockextra(nilokay bool) *m {
2643 const locked = 1
2644
2645 incr := false
2646 for {
2647 old := extraM.Load()
2648 if old == locked {
2649 osyield_no_g()
2650 continue
2651 }
2652 if old == 0 && !nilokay {
2653 if !incr {
2654
2655
2656
2657 extraMWaiters.Add(1)
2658 incr = true
2659 }
2660 usleep_no_g(1)
2661 continue
2662 }
2663 if extraM.CompareAndSwap(old, locked) {
2664 return (*m)(unsafe.Pointer(old))
2665 }
2666 osyield_no_g()
2667 continue
2668 }
2669 }
2670
2671
2672 func unlockextra(mp *m, delta int32) {
2673 extraMLength.Add(delta)
2674 extraM.Store(uintptr(unsafe.Pointer(mp)))
2675 }
2676
2677
2678
2679
2680
2681
2682
2683
2684 func getExtraM() (mp *m, last bool) {
2685 mp = lockextra(false)
2686 extraMInUse.Add(1)
2687 unlockextra(mp.schedlink.ptr(), -1)
2688 return mp, mp.schedlink.ptr() == nil
2689 }
2690
2691
2692
2693
2694
2695 func putExtraM(mp *m) {
2696 extraMInUse.Add(-1)
2697 addExtraM(mp)
2698 }
2699
2700
2701
2702
2703 func addExtraM(mp *m) {
2704 mnext := lockextra(true)
2705 mp.schedlink.set(mnext)
2706 unlockextra(mp, 1)
2707 }
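// The extra-M list is a singly linked list whose head lives in the extraM
// atomic word; the sentinel value 1 acts as the lock. lockextra spins
// (osyield/usleep) until it can swap the lock value in, and unlockextra
// publishes the new head together with a length delta. extraMWaiters tells
// newextram how many callers were blocked waiting for an M.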
2708
2709 var (
2710
2711
2712
2713 allocmLock rwmutex
2714
2715
2716
2717
2718 execLock rwmutex
2719 )
2720
2721
2722
2723 const (
2724 failthreadcreate = "runtime: failed to create new OS thread\n"
2725 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2726 )
2727
2728
2729
2730
2731 var newmHandoff struct {
2732 lock mutex
2733
2734
2735
2736 newm muintptr
2737
2738
2739
2740 waiting bool
2741 wake note
2742
2743
2744
2745
2746 haveTemplateThread uint32
2747 }
2748
2749
2750
2751
2752
2753
2754
2755
2756 func newm(fn func(), pp *p, id int64) {
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767 acquirem()
2768
2769 mp := allocm(pp, fn, id)
2770 mp.nextp.set(pp)
2771 mp.sigmask = initSigmask
2772 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784 lock(&newmHandoff.lock)
2785 if newmHandoff.haveTemplateThread == 0 {
2786 throw("on a locked thread with no template thread")
2787 }
2788 mp.schedlink = newmHandoff.newm
2789 newmHandoff.newm.set(mp)
2790 if newmHandoff.waiting {
2791 newmHandoff.waiting = false
2792 notewakeup(&newmHandoff.wake)
2793 }
2794 unlock(&newmHandoff.lock)
2795
2796
2797
2798 releasem(getg().m)
2799 return
2800 }
2801 newm1(mp)
2802 releasem(getg().m)
2803 }
2804
2805 func newm1(mp *m) {
2806 if iscgo {
2807 var ts cgothreadstart
2808 if _cgo_thread_start == nil {
2809 throw("_cgo_thread_start missing")
2810 }
2811 ts.g.set(mp.g0)
2812 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2813 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2814 if msanenabled {
2815 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2816 }
2817 if asanenabled {
2818 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2819 }
2820 execLock.rlock()
2821 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2822 execLock.runlock()
2823 return
2824 }
2825 execLock.rlock()
2826 newosproc(mp)
2827 execLock.runlock()
2828 }
2829
2830
2831
2832
2833
2834 func startTemplateThread() {
2835 if GOARCH == "wasm" {
2836 return
2837 }
2838
2839
2840
2841 mp := acquirem()
2842 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2843 releasem(mp)
2844 return
2845 }
2846 newm(templateThread, nil, -1)
2847 releasem(mp)
2848 }
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862 func templateThread() {
2863 lock(&sched.lock)
2864 sched.nmsys++
2865 checkdead()
2866 unlock(&sched.lock)
2867
2868 for {
2869 lock(&newmHandoff.lock)
2870 for newmHandoff.newm != 0 {
2871 newm := newmHandoff.newm.ptr()
2872 newmHandoff.newm = 0
2873 unlock(&newmHandoff.lock)
2874 for newm != nil {
2875 next := newm.schedlink.ptr()
2876 newm.schedlink = 0
2877 newm1(newm)
2878 newm = next
2879 }
2880 lock(&newmHandoff.lock)
2881 }
2882 newmHandoff.waiting = true
2883 noteclear(&newmHandoff.wake)
2884 unlock(&newmHandoff.lock)
2885 notesleep(&newmHandoff.wake)
2886 }
2887 }
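// The template thread is a thread in a known-good state that is used to
// create new threads when the requesting thread is locked to a goroutine or
// is otherwise unsuitable for thread creation (see newm). Requests are queued
// on newmHandoff and drained here.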
2888
2889
2890
2891 func stopm() {
2892 gp := getg()
2893
2894 if gp.m.locks != 0 {
2895 throw("stopm holding locks")
2896 }
2897 if gp.m.p != 0 {
2898 throw("stopm holding p")
2899 }
2900 if gp.m.spinning {
2901 throw("stopm spinning")
2902 }
2903
2904 lock(&sched.lock)
2905 mput(gp.m)
2906 unlock(&sched.lock)
2907 mPark()
2908 acquirep(gp.m.nextp.ptr())
2909 gp.m.nextp = 0
2910 }
2911
2912 func mspinning() {
2913
2914 getg().m.spinning = true
2915 }
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934 func startm(pp *p, spinning, lockheld bool) {
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951 mp := acquirem()
2952 if !lockheld {
2953 lock(&sched.lock)
2954 }
2955 if pp == nil {
2956 if spinning {
2957
2958
2959
2960 throw("startm: P required for spinning=true")
2961 }
2962 pp, _ = pidleget(0)
2963 if pp == nil {
2964 if !lockheld {
2965 unlock(&sched.lock)
2966 }
2967 releasem(mp)
2968 return
2969 }
2970 }
2971 nmp := mget()
2972 if nmp == nil {
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987 id := mReserveID()
2988 unlock(&sched.lock)
2989
2990 var fn func()
2991 if spinning {
2992
2993 fn = mspinning
2994 }
2995 newm(fn, pp, id)
2996
2997 if lockheld {
2998 lock(&sched.lock)
2999 }
3000
3001
3002 releasem(mp)
3003 return
3004 }
3005 if !lockheld {
3006 unlock(&sched.lock)
3007 }
3008 if nmp.spinning {
3009 throw("startm: m is spinning")
3010 }
3011 if nmp.nextp != 0 {
3012 throw("startm: m has p")
3013 }
3014 if spinning && !runqempty(pp) {
3015 throw("startm: p has runnable gs")
3016 }
3017
3018 nmp.spinning = spinning
3019 nmp.nextp.set(pp)
3020 notewakeup(&nmp.park)
3021
3022
3023 releasem(mp)
3024 }
3025
3026
3027
3028
3029
3030 func handoffp(pp *p) {
3031
3032
3033
3034
3035 if !runqempty(pp) || sched.runqsize != 0 {
3036 startm(pp, false, false)
3037 return
3038 }
3039
3040 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3041 startm(pp, false, false)
3042 return
3043 }
3044
3045 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3046 startm(pp, false, false)
3047 return
3048 }
3049
3050
3051 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3052 sched.needspinning.Store(0)
3053 startm(pp, true, false)
3054 return
3055 }
3056 lock(&sched.lock)
3057 if sched.gcwaiting.Load() {
3058 pp.status = _Pgcstop
3059 pp.gcStopTime = nanotime()
3060 sched.stopwait--
3061 if sched.stopwait == 0 {
3062 notewakeup(&sched.stopnote)
3063 }
3064 unlock(&sched.lock)
3065 return
3066 }
3067 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3068 sched.safePointFn(pp)
3069 sched.safePointWait--
3070 if sched.safePointWait == 0 {
3071 notewakeup(&sched.safePointNote)
3072 }
3073 }
3074 if sched.runqsize != 0 {
3075 unlock(&sched.lock)
3076 startm(pp, false, false)
3077 return
3078 }
3079
3080
3081 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3082 unlock(&sched.lock)
3083 startm(pp, false, false)
3084 return
3085 }
3086
3087
3088
3089 when := pp.timers.wakeTime()
3090 pidleput(pp, 0)
3091 unlock(&sched.lock)
3092
3093 if when != 0 {
3094 wakeNetPoller(when)
3095 }
3096 }
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111 func wakep() {
3112
3113
3114 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3115 return
3116 }
3117
3118
3119
3120
3121
3122
3123 mp := acquirem()
3124
3125 var pp *p
3126 lock(&sched.lock)
3127 pp, _ = pidlegetSpinning(0)
3128 if pp == nil {
3129 if sched.nmspinning.Add(-1) < 0 {
3130 throw("wakep: negative nmspinning")
3131 }
3132 unlock(&sched.lock)
3133 releasem(mp)
3134 return
3135 }
3136
3137
3138
3139
3140 unlock(&sched.lock)
3141
3142 startm(pp, true, false)
3143
3144 releasem(mp)
3145 }
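// wakep starts at most one new spinning M when work may be available. The
// nmspinning compare-and-swap acts as a throttle: a caller proceeds only if
// it performs the 0 -> 1 transition itself, which avoids waking a herd of Ms
// for a single runnable goroutine.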
3146
3147
3148
3149 func stoplockedm() {
3150 gp := getg()
3151
3152 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3153 throw("stoplockedm: inconsistent locking")
3154 }
3155 if gp.m.p != 0 {
3156
3157 pp := releasep()
3158 handoffp(pp)
3159 }
3160 incidlelocked(1)
3161
3162 mPark()
3163 status := readgstatus(gp.m.lockedg.ptr())
3164 if status&^_Gscan != _Grunnable {
3165 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3166 dumpgstatus(gp.m.lockedg.ptr())
3167 throw("stoplockedm: not runnable")
3168 }
3169 acquirep(gp.m.nextp.ptr())
3170 gp.m.nextp = 0
3171 }
3172
3173
3174
3175
3176
3177 func startlockedm(gp *g) {
3178 mp := gp.lockedm.ptr()
3179 if mp == getg().m {
3180 throw("startlockedm: locked to me")
3181 }
3182 if mp.nextp != 0 {
3183 throw("startlockedm: m has p")
3184 }
3185
3186 incidlelocked(-1)
3187 pp := releasep()
3188 mp.nextp.set(pp)
3189 notewakeup(&mp.park)
3190 stopm()
3191 }
3192
3193
3194
3195 func gcstopm() {
3196 gp := getg()
3197
3198 if !sched.gcwaiting.Load() {
3199 throw("gcstopm: not waiting for gc")
3200 }
3201 if gp.m.spinning {
3202 gp.m.spinning = false
3203
3204
3205 if sched.nmspinning.Add(-1) < 0 {
3206 throw("gcstopm: negative nmspinning")
3207 }
3208 }
3209 pp := releasep()
3210 lock(&sched.lock)
3211 pp.status = _Pgcstop
3212 pp.gcStopTime = nanotime()
3213 sched.stopwait--
3214 if sched.stopwait == 0 {
3215 notewakeup(&sched.stopnote)
3216 }
3217 unlock(&sched.lock)
3218 stopm()
3219 }
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230 func execute(gp *g, inheritTime bool) {
3231 mp := getg().m
3232
3233 if goroutineProfile.active {
3234
3235
3236
3237 tryRecordGoroutineProfile(gp, nil, osyield)
3238 }
3239
3240
3241
3242 mp.curg = gp
3243 gp.m = mp
3244 casgstatus(gp, _Grunnable, _Grunning)
3245 gp.waitsince = 0
3246 gp.preempt = false
3247 gp.stackguard0 = gp.stack.lo + stackGuard
3248 if !inheritTime {
3249 mp.p.ptr().schedtick++
3250 }
3251
3252
3253 hz := sched.profilehz
3254 if mp.profilehz != hz {
3255 setThreadCPUProfiler(hz)
3256 }
3257
3258 trace := traceAcquire()
3259 if trace.ok() {
3260 trace.GoStart()
3261 traceRelease(trace)
3262 }
3263
3264 gogo(&gp.sched)
3265 }
3266
3267
3268
3269
3270
3271 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3272 mp := getg().m
3273
3274
3275
3276
3277
3278 top:
3279 pp := mp.p.ptr()
3280 if sched.gcwaiting.Load() {
3281 gcstopm()
3282 goto top
3283 }
3284 if pp.runSafePointFn != 0 {
3285 runSafePointFn()
3286 }
3287
3288
3289
3290
3291
3292 now, pollUntil, _ := pp.timers.check(0)
3293
3294
3295 if traceEnabled() || traceShuttingDown() {
3296 gp := traceReader()
3297 if gp != nil {
3298 trace := traceAcquire()
3299 casgstatus(gp, _Gwaiting, _Grunnable)
3300 if trace.ok() {
3301 trace.GoUnpark(gp, 0)
3302 traceRelease(trace)
3303 }
3304 return gp, false, true
3305 }
3306 }
3307
3308
3309 if gcBlackenEnabled != 0 {
3310 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3311 if gp != nil {
3312 return gp, false, true
3313 }
3314 now = tnow
3315 }
3316
3317
3318
3319
3320 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3321 lock(&sched.lock)
3322 gp := globrunqget(pp, 1)
3323 unlock(&sched.lock)
3324 if gp != nil {
3325 return gp, false, false
3326 }
3327 }
3328
3329
3330 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3331 if gp := wakefing(); gp != nil {
3332 ready(gp, 0, true)
3333 }
3334 }
3335 if *cgo_yield != nil {
3336 asmcgocall(*cgo_yield, nil)
3337 }
3338
3339
3340 if gp, inheritTime := runqget(pp); gp != nil {
3341 return gp, inheritTime, false
3342 }
3343
3344
3345 if sched.runqsize != 0 {
3346 lock(&sched.lock)
3347 gp := globrunqget(pp, 0)
3348 unlock(&sched.lock)
3349 if gp != nil {
3350 return gp, false, false
3351 }
3352 }
3353
3354
3355
3356
3357
3358
3359
3360
3361 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3362 if list, delta := netpoll(0); !list.empty() {
3363 gp := list.pop()
3364 injectglist(&list)
3365 netpollAdjustWaiters(delta)
3366 trace := traceAcquire()
3367 casgstatus(gp, _Gwaiting, _Grunnable)
3368 if trace.ok() {
3369 trace.GoUnpark(gp, 0)
3370 traceRelease(trace)
3371 }
3372 return gp, false, false
3373 }
3374 }
3375
3376
3377
3378
3379
3380
3381 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3382 if !mp.spinning {
3383 mp.becomeSpinning()
3384 }
3385
3386 gp, inheritTime, tnow, w, newWork := stealWork(now)
3387 if gp != nil {
3388
3389 return gp, inheritTime, false
3390 }
3391 if newWork {
3392
3393
3394 goto top
3395 }
3396
3397 now = tnow
3398 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3399
3400 pollUntil = w
3401 }
3402 }
3403
3404
3405
3406
3407
3408 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3409 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3410 if node != nil {
3411 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3412 gp := node.gp.ptr()
3413
3414 trace := traceAcquire()
3415 casgstatus(gp, _Gwaiting, _Grunnable)
3416 if trace.ok() {
3417 trace.GoUnpark(gp, 0)
3418 traceRelease(trace)
3419 }
3420 return gp, false, false
3421 }
3422 gcController.removeIdleMarkWorker()
3423 }
3424
3425
3426
3427
3428
3429 gp, otherReady := beforeIdle(now, pollUntil)
3430 if gp != nil {
3431 trace := traceAcquire()
3432 casgstatus(gp, _Gwaiting, _Grunnable)
3433 if trace.ok() {
3434 trace.GoUnpark(gp, 0)
3435 traceRelease(trace)
3436 }
3437 return gp, false, false
3438 }
3439 if otherReady {
3440 goto top
3441 }
3442
3443
3444
3445
3446
3447 allpSnapshot := allp
3448
3449
3450 idlepMaskSnapshot := idlepMask
3451 timerpMaskSnapshot := timerpMask
3452
3453
3454 lock(&sched.lock)
3455 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3456 unlock(&sched.lock)
3457 goto top
3458 }
3459 if sched.runqsize != 0 {
3460 gp := globrunqget(pp, 0)
3461 unlock(&sched.lock)
3462 return gp, false, false
3463 }
3464 if !mp.spinning && sched.needspinning.Load() == 1 {
3465
3466 mp.becomeSpinning()
3467 unlock(&sched.lock)
3468 goto top
3469 }
3470 if releasep() != pp {
3471 throw("findrunnable: wrong p")
3472 }
3473 now = pidleput(pp, now)
3474 unlock(&sched.lock)
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512 wasSpinning := mp.spinning
3513 if mp.spinning {
3514 mp.spinning = false
3515 if sched.nmspinning.Add(-1) < 0 {
3516 throw("findrunnable: negative nmspinning")
3517 }
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530 lock(&sched.lock)
3531 if sched.runqsize != 0 {
3532 pp, _ := pidlegetSpinning(0)
3533 if pp != nil {
3534 gp := globrunqget(pp, 0)
3535 if gp == nil {
3536 throw("global runq empty with non-zero runqsize")
3537 }
3538 unlock(&sched.lock)
3539 acquirep(pp)
3540 mp.becomeSpinning()
3541 return gp, false, false
3542 }
3543 }
3544 unlock(&sched.lock)
3545
3546 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3547 if pp != nil {
3548 acquirep(pp)
3549 mp.becomeSpinning()
3550 goto top
3551 }
3552
3553
3554 pp, gp := checkIdleGCNoP()
3555 if pp != nil {
3556 acquirep(pp)
3557 mp.becomeSpinning()
3558
3559
3560 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3561 trace := traceAcquire()
3562 casgstatus(gp, _Gwaiting, _Grunnable)
3563 if trace.ok() {
3564 trace.GoUnpark(gp, 0)
3565 traceRelease(trace)
3566 }
3567 return gp, false, false
3568 }
3569
3570
3571
3572
3573
3574
3575
3576 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3577 }
3578
3579
3580 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3581 sched.pollUntil.Store(pollUntil)
3582 if mp.p != 0 {
3583 throw("findrunnable: netpoll with p")
3584 }
3585 if mp.spinning {
3586 throw("findrunnable: netpoll with spinning")
3587 }
3588 delay := int64(-1)
3589 if pollUntil != 0 {
3590 if now == 0 {
3591 now = nanotime()
3592 }
3593 delay = pollUntil - now
3594 if delay < 0 {
3595 delay = 0
3596 }
3597 }
3598 if faketime != 0 {
3599
3600 delay = 0
3601 }
3602 list, delta := netpoll(delay)
3603
3604 now = nanotime()
3605 sched.pollUntil.Store(0)
3606 sched.lastpoll.Store(now)
3607 if faketime != 0 && list.empty() {
3608
3609
3610 stopm()
3611 goto top
3612 }
3613 lock(&sched.lock)
3614 pp, _ := pidleget(now)
3615 unlock(&sched.lock)
3616 if pp == nil {
3617 injectglist(&list)
3618 netpollAdjustWaiters(delta)
3619 } else {
3620 acquirep(pp)
3621 if !list.empty() {
3622 gp := list.pop()
3623 injectglist(&list)
3624 netpollAdjustWaiters(delta)
3625 trace := traceAcquire()
3626 casgstatus(gp, _Gwaiting, _Grunnable)
3627 if trace.ok() {
3628 trace.GoUnpark(gp, 0)
3629 traceRelease(trace)
3630 }
3631 return gp, false, false
3632 }
3633 if wasSpinning {
3634 mp.becomeSpinning()
3635 }
3636 goto top
3637 }
3638 } else if pollUntil != 0 && netpollinited() {
3639 pollerPollUntil := sched.pollUntil.Load()
3640 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3641 netpollBreak()
3642 }
3643 }
3644 stopm()
3645 goto top
3646 }
3647
3648
3649
3650
3651
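// pollWork reports whether there is non-background work this P could be
// doing right now: something on the global or local run queue, or ready
// network I/O.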
3652 func pollWork() bool {
3653 if sched.runqsize != 0 {
3654 return true
3655 }
3656 p := getg().m.p.ptr()
3657 if !runqempty(p) {
3658 return true
3659 }
3660 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3661 if list, delta := netpoll(0); !list.empty() {
3662 injectglist(&list)
3663 netpollAdjustWaiters(delta)
3664 return true
3665 }
3666 }
3667 return false
3668 }
3669
3670
3671
3672
3673
3674
3675
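// stealWork attempts to steal a runnable goroutine or timer from any P.
// It returns the stolen g (if any), whether the g should inherit the
// current time slice, updated now and pollUntil values, and whether new
// work may have been readied, in which case the caller should retry.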
3676 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3677 pp := getg().m.p.ptr()
3678
3679 ranTimer := false
3680
3681 const stealTries = 4
3682 for i := 0; i < stealTries; i++ {
3683 stealTimersOrRunNextG := i == stealTries-1
3684
3685 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3686 if sched.gcwaiting.Load() {
3687
3688 return nil, false, now, pollUntil, true
3689 }
3690 p2 := allp[enum.position()]
3691 if pp == p2 {
3692 continue
3693 }
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3709 tnow, w, ran := p2.timers.check(now)
3710 now = tnow
3711 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3712 pollUntil = w
3713 }
3714 if ran {
3715
3716
3717
3718
3719
3720
3721
3722
3723 if gp, inheritTime := runqget(pp); gp != nil {
3724 return gp, inheritTime, now, pollUntil, ranTimer
3725 }
3726 ranTimer = true
3727 }
3728 }
3729
3730
3731 if !idlepMask.read(enum.position()) {
3732 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3733 return gp, false, now, pollUntil, ranTimer
3734 }
3735 }
3736 }
3737 }
3738
3739
3740
3741
3742 return nil, false, now, pollUntil, ranTimer
3743 }
3744
3745
3746
3747
3748
3749
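// checkRunqsNoP checks all Ps for a runnable G to steal. On entry this M
// has no P. If work is found it returns an idle P for the caller to
// acquire; otherwise it returns nil.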
3750 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3751 for id, p2 := range allpSnapshot {
3752 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3753 lock(&sched.lock)
3754 pp, _ := pidlegetSpinning(0)
3755 if pp == nil {
3756
3757 unlock(&sched.lock)
3758 return nil
3759 }
3760 unlock(&sched.lock)
3761 return pp
3762 }
3763 }
3764
3765
3766 return nil
3767 }
3768
3769
3770
3771
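// checkTimersNoP checks all Ps for a timer expiring sooner than pollUntil
// and returns the updated pollUntil.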
3772 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3773 for id, p2 := range allpSnapshot {
3774 if timerpMaskSnapshot.read(uint32(id)) {
3775 w := p2.timers.wakeTime()
3776 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3777 pollUntil = w
3778 }
3779 }
3780 }
3781
3782 return pollUntil
3783 }
3784
3785
3786
3787
3788
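// checkIdleGCNoP checks for idle-priority GC mark work while this M has
// no P. If work can be started it returns an idle P and a mark worker G;
// the caller is responsible for making the G runnable.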
3789 func checkIdleGCNoP() (*p, *g) {
3790
3791
3792
3793
3794
3795
3796 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3797 return nil, nil
3798 }
3799 if !gcMarkWorkAvailable(nil) {
3800 return nil, nil
3801 }
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
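// Mark work is available, but an idle worker needs both an idle P and a
// free worker slot. Grab a P first, then re-check the GC state under
// sched.lock, returning the P to the idle list if the worker can no
// longer be started.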
3820 lock(&sched.lock)
3821 pp, now := pidlegetSpinning(0)
3822 if pp == nil {
3823 unlock(&sched.lock)
3824 return nil, nil
3825 }
3826
3827
3828 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3829 pidleput(pp, now)
3830 unlock(&sched.lock)
3831 return nil, nil
3832 }
3833
3834 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3835 if node == nil {
3836 pidleput(pp, now)
3837 unlock(&sched.lock)
3838 gcController.removeIdleMarkWorker()
3839 return nil, nil
3840 }
3841
3842 unlock(&sched.lock)
3843
3844 return pp, node.gp.ptr()
3845 }
3846
3847
3848
3849
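// wakeNetPoller ensures some thread will notice a timer at when: if a
// thread is blocked in the network poller and will not wake before when,
// it interrupts the poll; otherwise it wakes an idle P (except on plan9).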
3850 func wakeNetPoller(when int64) {
3851 if sched.lastpoll.Load() == 0 {
3852
3853
3854
3855
3856 pollerPollUntil := sched.pollUntil.Load()
3857 if pollerPollUntil == 0 || pollerPollUntil > when {
3858 netpollBreak()
3859 }
3860 } else {
3861
3862
3863 if GOOS != "plan9" {
3864 wakep()
3865 }
3866 }
3867 }
3868
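// resetspinning clears this M's spinning state after it found work, and
// wakes another P/M in case more work was submitted while it was spinning.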
3869 func resetspinning() {
3870 gp := getg()
3871 if !gp.m.spinning {
3872 throw("resetspinning: not a spinning m")
3873 }
3874 gp.m.spinning = false
3875 nmspinning := sched.nmspinning.Add(-1)
3876 if nmspinning < 0 {
3877 throw("findrunnable: negative nmspinning")
3878 }
3879
3880
3881
3882 wakep()
3883 }
3884
3885
3886
3887
3888
3889
3890
3891
3892
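// injectglist makes every G on glist runnable and puts it on a run queue,
// clearing glist. With no current P, the Gs go on the global queue and
// idle Ms are started for them; otherwise one G per idle P is pushed to
// the global queue (starting an M each) and the remainder go on the
// current P's local run queue.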
3893 func injectglist(glist *gList) {
3894 if glist.empty() {
3895 return
3896 }
3897
3898
3899
3900 head := glist.head.ptr()
3901 var tail *g
3902 qsize := 0
3903 trace := traceAcquire()
3904 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3905 tail = gp
3906 qsize++
3907 casgstatus(gp, _Gwaiting, _Grunnable)
3908 if trace.ok() {
3909 trace.GoUnpark(gp, 0)
3910 }
3911 }
3912 if trace.ok() {
3913 traceRelease(trace)
3914 }
3915
3916
3917 var q gQueue
3918 q.head.set(head)
3919 q.tail.set(tail)
3920 *glist = gList{}
3921
3922 startIdle := func(n int) {
3923 for i := 0; i < n; i++ {
3924 mp := acquirem()
3925 lock(&sched.lock)
3926
3927 pp, _ := pidlegetSpinning(0)
3928 if pp == nil {
3929 unlock(&sched.lock)
3930 releasem(mp)
3931 break
3932 }
3933
3934 startm(pp, false, true)
3935 unlock(&sched.lock)
3936 releasem(mp)
3937 }
3938 }
3939
3940 pp := getg().m.p.ptr()
3941 if pp == nil {
3942 lock(&sched.lock)
3943 globrunqputbatch(&q, int32(qsize))
3944 unlock(&sched.lock)
3945 startIdle(qsize)
3946 return
3947 }
3948
3949 npidle := int(sched.npidle.Load())
3950 var (
3951 globq gQueue
3952 n int
3953 )
3954 for n = 0; n < npidle && !q.empty(); n++ {
3955 g := q.pop()
3956 globq.pushBack(g)
3957 }
3958 if n > 0 {
3959 lock(&sched.lock)
3960 globrunqputbatch(&globq, int32(n))
3961 unlock(&sched.lock)
3962 startIdle(n)
3963 qsize -= n
3964 }
3965
3966 if !q.empty() {
3967 runqputbatch(pp, &q, qsize)
3968 }
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983 wakep()
3984 }
3985
3986
3987
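// schedule performs one round of scheduling: find a runnable goroutine
// and execute it. It never returns.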
3988 func schedule() {
3989 mp := getg().m
3990
3991 if mp.locks != 0 {
3992 throw("schedule: holding locks")
3993 }
3994
3995 if mp.lockedg != 0 {
3996 stoplockedm()
3997 execute(mp.lockedg.ptr(), false)
3998 }
3999
4000
4001
4002 if mp.incgo {
4003 throw("schedule: in cgo")
4004 }
4005
4006 top:
4007 pp := mp.p.ptr()
4008 pp.preempt = false
4009
4010
4011
4012
4013 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4014 throw("schedule: spinning with local work")
4015 }
4016
4017 gp, inheritTime, tryWakeP := findRunnable()
4018
4019 if debug.dontfreezetheworld > 0 && freezing.Load() {
4020
4021
4022
4023
4024
4025
4026
4027 lock(&deadlock)
4028 lock(&deadlock)
4029 }
4030
4031
4032
4033
4034 if mp.spinning {
4035 resetspinning()
4036 }
4037
4038 if sched.disable.user && !schedEnabled(gp) {
4039
4040
4041
4042 lock(&sched.lock)
4043 if schedEnabled(gp) {
4044
4045
4046 unlock(&sched.lock)
4047 } else {
4048 sched.disable.runnable.pushBack(gp)
4049 sched.disable.n++
4050 unlock(&sched.lock)
4051 goto top
4052 }
4053 }
4054
4055
4056
4057 if tryWakeP {
4058 wakep()
4059 }
4060 if gp.lockedm != 0 {
4061
4062
4063 startlockedm(gp)
4064 goto top
4065 }
4066
4067 execute(gp, inheritTime)
4068 }
4069
4070
4071
4072
4073
4074
4075
4076
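// dropg removes the association between the current M and its current
// goroutine (m.curg). Callers typically change the G's status away from
// _Grunning before calling dropg to finish detaching it from the M.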
4077 func dropg() {
4078 gp := getg()
4079
4080 setMNoWB(&gp.m.curg.m, nil)
4081 setGNoWB(&gp.m.curg, nil)
4082 }
4083
4084 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4085 unlock((*mutex)(lock))
4086 return true
4087 }
4088
4089
4090 func park_m(gp *g) {
4091 mp := getg().m
4092
4093 trace := traceAcquire()
4094
4095
4096
4097
4098
4099 sg := gp.syncGroup
4100 if sg != nil {
4101 sg.incActive()
4102 }
4103
4104 if trace.ok() {
4105
4106
4107
4108 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4109 }
4110
4111
4112 casgstatus(gp, _Grunning, _Gwaiting)
4113 if trace.ok() {
4114 traceRelease(trace)
4115 }
4116
4117 dropg()
4118
4119 if fn := mp.waitunlockf; fn != nil {
4120 ok := fn(gp, mp.waitlock)
4121 mp.waitunlockf = nil
4122 mp.waitlock = nil
4123 if !ok {
4124 trace := traceAcquire()
4125 casgstatus(gp, _Gwaiting, _Grunnable)
4126 if sg != nil {
4127 sg.decActive()
4128 }
4129 if trace.ok() {
4130 trace.GoUnpark(gp, 2)
4131 traceRelease(trace)
4132 }
4133 execute(gp, true)
4134 }
4135 }
4136
4137 if sg != nil {
4138 sg.decActive()
4139 }
4140
4141 schedule()
4142 }
4143
4144 func goschedImpl(gp *g, preempted bool) {
4145 trace := traceAcquire()
4146 status := readgstatus(gp)
4147 if status&^_Gscan != _Grunning {
4148 dumpgstatus(gp)
4149 throw("bad g status")
4150 }
4151 if trace.ok() {
4152
4153
4154
4155 if preempted {
4156 trace.GoPreempt()
4157 } else {
4158 trace.GoSched()
4159 }
4160 }
4161 casgstatus(gp, _Grunning, _Grunnable)
4162 if trace.ok() {
4163 traceRelease(trace)
4164 }
4165
4166 dropg()
4167 lock(&sched.lock)
4168 globrunqput(gp)
4169 unlock(&sched.lock)
4170
4171 if mainStarted {
4172 wakep()
4173 }
4174
4175 schedule()
4176 }
4177
4178
4179 func gosched_m(gp *g) {
4180 goschedImpl(gp, false)
4181 }
4182
4183
4184 func goschedguarded_m(gp *g) {
4185 if !canPreemptM(gp.m) {
4186 gogo(&gp.sched)
4187 }
4188 goschedImpl(gp, false)
4189 }
4190
4191 func gopreempt_m(gp *g) {
4192 goschedImpl(gp, true)
4193 }
4194
4195
4196
4197
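// preemptPark parks gp at an asynchronous preemption point, transitions
// it to _Gpreempted, and schedules something else on this M.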
4198 func preemptPark(gp *g) {
4199 status := readgstatus(gp)
4200 if status&^_Gscan != _Grunning {
4201 dumpgstatus(gp)
4202 throw("bad g status")
4203 }
4204
4205 if gp.asyncSafePoint {
4206
4207
4208
4209 f := findfunc(gp.sched.pc)
4210 if !f.valid() {
4211 throw("preempt at unknown pc")
4212 }
4213 if f.flag&abi.FuncFlagSPWrite != 0 {
4214 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4215 throw("preempt SPWRITE")
4216 }
4217 }
4218
4219
4220
4221
4222
4223
4224
4225 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4226 dropg()
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243 trace := traceAcquire()
4244 if trace.ok() {
4245 trace.GoPark(traceBlockPreempted, 0)
4246 }
4247 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4248 if trace.ok() {
4249 traceRelease(trace)
4250 }
4251 schedule()
4252 }
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
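// goyield is like Gosched, but it emits a preemption trace event and puts
// the current goroutine on the local run queue of the current P rather
// than on the global queue.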
4268 func goyield() {
4269 checkTimeouts()
4270 mcall(goyield_m)
4271 }
4272
4273 func goyield_m(gp *g) {
4274 trace := traceAcquire()
4275 pp := gp.m.p.ptr()
4276 if trace.ok() {
4277
4278
4279
4280 trace.GoPreempt()
4281 }
4282 casgstatus(gp, _Grunning, _Grunnable)
4283 if trace.ok() {
4284 traceRelease(trace)
4285 }
4286 dropg()
4287 runqput(pp, gp, false)
4288 schedule()
4289 }
4290
4291
4292 func goexit1() {
4293 if raceenabled {
4294 if gp := getg(); gp.syncGroup != nil {
4295 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4296 }
4297 racegoend()
4298 }
4299 trace := traceAcquire()
4300 if trace.ok() {
4301 trace.GoEnd()
4302 traceRelease(trace)
4303 }
4304 mcall(goexit0)
4305 }
4306
4307
4308 func goexit0(gp *g) {
4309 gdestroy(gp)
4310 schedule()
4311 }
4312
4313 func gdestroy(gp *g) {
4314 mp := getg().m
4315 pp := mp.p.ptr()
4316
4317 casgstatus(gp, _Grunning, _Gdead)
4318 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4319 if isSystemGoroutine(gp, false) {
4320 sched.ngsys.Add(-1)
4321 }
4322 gp.m = nil
4323 locked := gp.lockedm != 0
4324 gp.lockedm = 0
4325 mp.lockedg = 0
4326 gp.preemptStop = false
4327 gp.paniconfault = false
4328 gp._defer = nil
4329 gp._panic = nil
4330 gp.writebuf = nil
4331 gp.waitreason = waitReasonZero
4332 gp.param = nil
4333 gp.labels = nil
4334 gp.timer = nil
4335 gp.syncGroup = nil
4336
4337 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4338
4339
4340
4341 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4342 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4343 gcController.bgScanCredit.Add(scanCredit)
4344 gp.gcAssistBytes = 0
4345 }
4346
4347 dropg()
4348
4349 if GOARCH == "wasm" {
4350 gfput(pp, gp)
4351 return
4352 }
4353
4354 if locked && mp.lockedInt != 0 {
4355 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4356 if mp.isextra {
4357 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4358 }
4359 throw("exited a goroutine internally locked to the OS thread")
4360 }
4361 gfput(pp, gp)
4362 if locked {
4363
4364
4365
4366
4367
4368
4369 if GOOS != "plan9" {
4370 gogo(&mp.g0.sched)
4371 } else {
4372
4373
4374 mp.lockedExt = 0
4375 }
4376 }
4377 }
4378
4379
4380
4381
4382
4383
4384
4385
4386
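// save updates getg().sched to refer to pc, sp, and bp so that a later
// gogo can restore them. It must not be called for g0 or gsignal, and
// sched.ctxt must already be nil.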
4387 func save(pc, sp, bp uintptr) {
4388 gp := getg()
4389
4390 if gp == gp.m.g0 || gp == gp.m.gsignal {
4391
4392
4393
4394
4395
4396 throw("save on system g not allowed")
4397 }
4398
4399 gp.sched.pc = pc
4400 gp.sched.sp = sp
4401 gp.sched.lr = 0
4402 gp.sched.ret = 0
4403 gp.sched.bp = bp
4404
4405
4406
4407 if gp.sched.ctxt != nil {
4408 badctxt()
4409 }
4410 }
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
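// reentersyscall is the common syscall entry path. It saves the caller's
// pc/sp/bp so the stack can be scanned and resumed, moves the G to
// _Gsyscall, and leaves the P in _Psyscall so sysmon can retake it if the
// call blocks for too long. Nothing here may grow the stack.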
4436 func reentersyscall(pc, sp, bp uintptr) {
4437 trace := traceAcquire()
4438 gp := getg()
4439
4440
4441
4442 gp.m.locks++
4443
4444
4445
4446
4447
4448 gp.stackguard0 = stackPreempt
4449 gp.throwsplit = true
4450
4451
4452 save(pc, sp, bp)
4453 gp.syscallsp = sp
4454 gp.syscallpc = pc
4455 gp.syscallbp = bp
4456 casgstatus(gp, _Grunning, _Gsyscall)
4457 if staticLockRanking {
4458
4459
4460 save(pc, sp, bp)
4461 }
4462 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4463 systemstack(func() {
4464 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4465 throw("entersyscall")
4466 })
4467 }
4468 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4469 systemstack(func() {
4470 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4471 throw("entersyscall")
4472 })
4473 }
4474
4475 if trace.ok() {
4476 systemstack(func() {
4477 trace.GoSysCall()
4478 traceRelease(trace)
4479 })
4480
4481
4482
4483 save(pc, sp, bp)
4484 }
4485
4486 if sched.sysmonwait.Load() {
4487 systemstack(entersyscall_sysmon)
4488 save(pc, sp, bp)
4489 }
4490
4491 if gp.m.p.ptr().runSafePointFn != 0 {
4492
4493 systemstack(runSafePointFn)
4494 save(pc, sp, bp)
4495 }
4496
4497 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4498 pp := gp.m.p.ptr()
4499 pp.m = 0
4500 gp.m.oldp.set(pp)
4501 gp.m.p = 0
4502 atomic.Store(&pp.status, _Psyscall)
4503 if sched.gcwaiting.Load() {
4504 systemstack(entersyscall_gcwait)
4505 save(pc, sp, bp)
4506 }
4507
4508 gp.m.locks--
4509 }
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
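// entersyscall is the standard syscall entry used by the syscall package
// and by normal cgo calls; it captures the caller's pc/sp/bp and defers
// to reentersyscall.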
4525 func entersyscall() {
4526
4527
4528
4529
4530 fp := getcallerfp()
4531 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4532 }
4533
4534 func entersyscall_sysmon() {
4535 lock(&sched.lock)
4536 if sched.sysmonwait.Load() {
4537 sched.sysmonwait.Store(false)
4538 notewakeup(&sched.sysmonnote)
4539 }
4540 unlock(&sched.lock)
4541 }
4542
4543 func entersyscall_gcwait() {
4544 gp := getg()
4545 pp := gp.m.oldp.ptr()
4546
4547 lock(&sched.lock)
4548 trace := traceAcquire()
4549 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4550 if trace.ok() {
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560 trace.ProcSteal(pp, true)
4561 traceRelease(trace)
4562 }
4563 pp.gcStopTime = nanotime()
4564 pp.syscalltick++
4565 if sched.stopwait--; sched.stopwait == 0 {
4566 notewakeup(&sched.stopnote)
4567 }
4568 } else if trace.ok() {
4569 traceRelease(trace)
4570 }
4571 unlock(&sched.lock)
4572 }
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
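// entersyscallblock is like entersyscall for calls that are expected to
// block: it hands the P off to another M immediately instead of leaving
// it parked in _Psyscall.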
4586 func entersyscallblock() {
4587 gp := getg()
4588
4589 gp.m.locks++
4590 gp.throwsplit = true
4591 gp.stackguard0 = stackPreempt
4592 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4593 gp.m.p.ptr().syscalltick++
4594
4595
4596 pc := sys.GetCallerPC()
4597 sp := sys.GetCallerSP()
4598 bp := getcallerfp()
4599 save(pc, sp, bp)
4600 gp.syscallsp = gp.sched.sp
4601 gp.syscallpc = gp.sched.pc
4602 gp.syscallbp = gp.sched.bp
4603 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4604 sp1 := sp
4605 sp2 := gp.sched.sp
4606 sp3 := gp.syscallsp
4607 systemstack(func() {
4608 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4609 throw("entersyscallblock")
4610 })
4611 }
4612 casgstatus(gp, _Grunning, _Gsyscall)
4613 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4614 systemstack(func() {
4615 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4616 throw("entersyscallblock")
4617 })
4618 }
4619 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4620 systemstack(func() {
4621 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4622 throw("entersyscallblock")
4623 })
4624 }
4625
4626 systemstack(entersyscallblock_handoff)
4627
4628
4629 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4630
4631 gp.m.locks--
4632 }
4633
4634 func entersyscallblock_handoff() {
4635 trace := traceAcquire()
4636 if trace.ok() {
4637 trace.GoSysCall()
4638 traceRelease(trace)
4639 }
4640 handoffp(releasep())
4641 }
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
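// exitsyscall is called when a goroutine returns from its system call. It
// first tries to reacquire a P on the fast path (exitsyscallfast);
// failing that it switches to g0 and takes the slow path (exitsyscall0),
// which may block until a P is available.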
4663 func exitsyscall() {
4664 gp := getg()
4665
4666 gp.m.locks++
4667 if sys.GetCallerSP() > gp.syscallsp {
4668 throw("exitsyscall: syscall frame is no longer valid")
4669 }
4670
4671 gp.waitsince = 0
4672 oldp := gp.m.oldp.ptr()
4673 gp.m.oldp = 0
4674 if exitsyscallfast(oldp) {
4675
4676
4677 if goroutineProfile.active {
4678
4679
4680
4681 systemstack(func() {
4682 tryRecordGoroutineProfileWB(gp)
4683 })
4684 }
4685 trace := traceAcquire()
4686 if trace.ok() {
4687 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4688 systemstack(func() {
4689
4690
4691
4692
4693 trace.GoSysExit(lostP)
4694 if lostP {
4695
4696
4697
4698
4699 trace.GoStart()
4700 }
4701 })
4702 }
4703
4704 gp.m.p.ptr().syscalltick++
4705
4706 casgstatus(gp, _Gsyscall, _Grunning)
4707 if trace.ok() {
4708 traceRelease(trace)
4709 }
4710
4711
4712
4713 gp.syscallsp = 0
4714 gp.m.locks--
4715 if gp.preempt {
4716
4717 gp.stackguard0 = stackPreempt
4718 } else {
4719
4720 gp.stackguard0 = gp.stack.lo + stackGuard
4721 }
4722 gp.throwsplit = false
4723
4724 if sched.disable.user && !schedEnabled(gp) {
4725
4726 Gosched()
4727 }
4728
4729 return
4730 }
4731
4732 gp.m.locks--
4733
4734
4735 mcall(exitsyscall0)
4736
4737
4738
4739
4740
4741
4742
4743 gp.syscallsp = 0
4744 gp.m.p.ptr().syscalltick++
4745 gp.throwsplit = false
4746 }
4747
4748
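// exitsyscallfast tries to reacquire a P without blocking: first the P
// this M used before the syscall (if it is still in _Psyscall), then any
// idle P.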
4749 func exitsyscallfast(oldp *p) bool {
4750
4751 if sched.stopwait == freezeStopWait {
4752 return false
4753 }
4754
4755
4756 trace := traceAcquire()
4757 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4758
4759 wirep(oldp)
4760 exitsyscallfast_reacquired(trace)
4761 if trace.ok() {
4762 traceRelease(trace)
4763 }
4764 return true
4765 }
4766 if trace.ok() {
4767 traceRelease(trace)
4768 }
4769
4770
4771 if sched.pidle != 0 {
4772 var ok bool
4773 systemstack(func() {
4774 ok = exitsyscallfast_pidle()
4775 })
4776 if ok {
4777 return true
4778 }
4779 }
4780 return false
4781 }
4782
4783
4784
4785
4786
4787
4788 func exitsyscallfast_reacquired(trace traceLocker) {
4789 gp := getg()
4790 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4791 if trace.ok() {
4792
4793
4794
4795 systemstack(func() {
4796
4797
4798 trace.ProcSteal(gp.m.p.ptr(), true)
4799 trace.ProcStart()
4800 })
4801 }
4802 gp.m.p.ptr().syscalltick++
4803 }
4804 }
4805
4806 func exitsyscallfast_pidle() bool {
4807 lock(&sched.lock)
4808 pp, _ := pidleget(0)
4809 if pp != nil && sched.sysmonwait.Load() {
4810 sched.sysmonwait.Store(false)
4811 notewakeup(&sched.sysmonnote)
4812 }
4813 unlock(&sched.lock)
4814 if pp != nil {
4815 acquirep(pp)
4816 return true
4817 }
4818 return false
4819 }
4820
4821
4822
4823
4824
4825
4826
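// exitsyscall0 is the slow path of exitsyscall, running on g0 after the
// fast path failed. It makes gp runnable, runs it immediately if an idle
// P can be found, and otherwise puts it on the global queue and stops
// this M.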
4827 func exitsyscall0(gp *g) {
4828 var trace traceLocker
4829 traceExitingSyscall()
4830 trace = traceAcquire()
4831 casgstatus(gp, _Gsyscall, _Grunnable)
4832 traceExitedSyscall()
4833 if trace.ok() {
4834
4835
4836
4837
4838 trace.GoSysExit(true)
4839 traceRelease(trace)
4840 }
4841 dropg()
4842 lock(&sched.lock)
4843 var pp *p
4844 if schedEnabled(gp) {
4845 pp, _ = pidleget(0)
4846 }
4847 var locked bool
4848 if pp == nil {
4849 globrunqput(gp)
4850
4851
4852
4853
4854
4855
4856 locked = gp.lockedm != 0
4857 } else if sched.sysmonwait.Load() {
4858 sched.sysmonwait.Store(false)
4859 notewakeup(&sched.sysmonnote)
4860 }
4861 unlock(&sched.lock)
4862 if pp != nil {
4863 acquirep(pp)
4864 execute(gp, false)
4865 }
4866 if locked {
4867
4868
4869
4870
4871 stoplockedm()
4872 execute(gp, false)
4873 }
4874 stopm()
4875 schedule()
4876 }
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890 func syscall_runtime_BeforeFork() {
4891 gp := getg().m.curg
4892
4893
4894
4895
4896 gp.m.locks++
4897 sigsave(&gp.m.sigmask)
4898 sigblock(false)
4899
4900
4901
4902
4903
4904 gp.stackguard0 = stackFork
4905 }
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919 func syscall_runtime_AfterFork() {
4920 gp := getg().m.curg
4921
4922
4923 gp.stackguard0 = gp.stack.lo + stackGuard
4924
4925 msigrestore(gp.m.sigmask)
4926
4927 gp.m.locks--
4928 }
4929
4930
4931
4932 var inForkedChild bool
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953 func syscall_runtime_AfterForkInChild() {
4954
4955
4956
4957
4958 inForkedChild = true
4959
4960 clearSignalHandlers()
4961
4962
4963
4964 msigrestore(getg().m.sigmask)
4965
4966 inForkedChild = false
4967 }
4968
4969
4970
4971
4972 var pendingPreemptSignals atomic.Int32
4973
4974
4975
4976
4977 func syscall_runtime_BeforeExec() {
4978
4979 execLock.lock()
4980
4981
4982
4983 if GOOS == "darwin" || GOOS == "ios" {
4984 for pendingPreemptSignals.Load() > 0 {
4985 osyield()
4986 }
4987 }
4988 }
4989
4990
4991
4992
4993 func syscall_runtime_AfterExec() {
4994 execLock.unlock()
4995 }
4996
4997
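// malg allocates a new g with a stack large enough for stacksize bytes.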
4998 func malg(stacksize int32) *g {
4999 newg := new(g)
5000 if stacksize >= 0 {
5001 stacksize = round2(stackSystem + stacksize)
5002 systemstack(func() {
5003 newg.stack = stackalloc(uint32(stacksize))
5004 })
5005 newg.stackguard0 = newg.stack.lo + stackGuard
5006 newg.stackguard1 = ^uintptr(0)
5007
5008
5009 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5010 }
5011 return newg
5012 }
5013
5014
5015
5016
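// newproc creates a new g running fn and puts it on the current P's run
// queue; the compiler lowers a go statement to a call of this function.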
5017 func newproc(fn *funcval) {
5018 gp := getg()
5019 pc := sys.GetCallerPC()
5020 systemstack(func() {
5021 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5022
5023 pp := getg().m.p.ptr()
5024 runqput(pp, newg, true)
5025
5026 if mainStarted {
5027 wakep()
5028 }
5029 })
5030 }
5031
5032
5033
5034
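// newproc1 creates a new g starting at fn in state _Grunnable, or
// _Gwaiting with the given waitreason if parked is true. callerpc is the
// address of the go statement that created it; the caller is responsible
// for scheduling the new g.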
5035 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5036 if fn == nil {
5037 fatal("go of nil func value")
5038 }
5039
5040 mp := acquirem()
5041 pp := mp.p.ptr()
5042 newg := gfget(pp)
5043 if newg == nil {
5044 newg = malg(stackMin)
5045 casgstatus(newg, _Gidle, _Gdead)
5046 allgadd(newg)
5047 }
5048 if newg.stack.hi == 0 {
5049 throw("newproc1: newg missing stack")
5050 }
5051
5052 if readgstatus(newg) != _Gdead {
5053 throw("newproc1: new g is not Gdead")
5054 }
5055
5056 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5057 totalSize = alignUp(totalSize, sys.StackAlign)
5058 sp := newg.stack.hi - totalSize
5059 if usesLR {
5060
5061 *(*uintptr)(unsafe.Pointer(sp)) = 0
5062 prepGoExitFrame(sp)
5063 }
5064 if GOARCH == "arm64" {
5065
5066 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5067 }
5068
5069 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5070 newg.sched.sp = sp
5071 newg.stktopsp = sp
5072 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5073 newg.sched.g = guintptr(unsafe.Pointer(newg))
5074 gostartcallfn(&newg.sched, fn)
5075 newg.parentGoid = callergp.goid
5076 newg.gopc = callerpc
5077 newg.ancestors = saveAncestors(callergp)
5078 newg.startpc = fn.fn
5079 if isSystemGoroutine(newg, false) {
5080 sched.ngsys.Add(1)
5081 } else {
5082
5083 newg.syncGroup = callergp.syncGroup
5084 if mp.curg != nil {
5085 newg.labels = mp.curg.labels
5086 }
5087 if goroutineProfile.active {
5088
5089
5090
5091
5092
5093 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5094 }
5095 }
5096
5097 newg.trackingSeq = uint8(cheaprand())
5098 if newg.trackingSeq%gTrackingPeriod == 0 {
5099 newg.tracking = true
5100 }
5101 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5102
5103
5104 trace := traceAcquire()
5105 var status uint32 = _Grunnable
5106 if parked {
5107 status = _Gwaiting
5108 newg.waitreason = waitreason
5109 }
5110 if pp.goidcache == pp.goidcacheend {
5111
5112
5113
5114 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5115 pp.goidcache -= _GoidCacheBatch - 1
5116 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5117 }
5118 newg.goid = pp.goidcache
5119 casgstatus(newg, _Gdead, status)
5120 pp.goidcache++
5121 newg.trace.reset()
5122 if trace.ok() {
5123 trace.GoCreate(newg, newg.startpc, parked)
5124 traceRelease(trace)
5125 }
5126
5127
5128 if raceenabled {
5129 newg.racectx = racegostart(callerpc)
5130 newg.raceignore = 0
5131 if newg.labels != nil {
5132
5133
5134 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5135 }
5136 }
5137 releasem(mp)
5138
5139 return newg
5140 }
5141
5142
5143
5144
5145 func saveAncestors(callergp *g) *[]ancestorInfo {
5146
5147 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5148 return nil
5149 }
5150 var callerAncestors []ancestorInfo
5151 if callergp.ancestors != nil {
5152 callerAncestors = *callergp.ancestors
5153 }
5154 n := int32(len(callerAncestors)) + 1
5155 if n > debug.tracebackancestors {
5156 n = debug.tracebackancestors
5157 }
5158 ancestors := make([]ancestorInfo, n)
5159 copy(ancestors[1:], callerAncestors)
5160
5161 var pcs [tracebackInnerFrames]uintptr
5162 npcs := gcallers(callergp, 0, pcs[:])
5163 ipcs := make([]uintptr, npcs)
5164 copy(ipcs, pcs[:])
5165 ancestors[0] = ancestorInfo{
5166 pcs: ipcs,
5167 goid: callergp.goid,
5168 gopc: callergp.gopc,
5169 }
5170
5171 ancestorsp := new([]ancestorInfo)
5172 *ancestorsp = ancestors
5173 return ancestorsp
5174 }
5175
5176
5177
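// gfput puts a dead g on the per-P free list, first dropping any
// non-standard-size stack, and moves a batch to the global free list when
// the local list gets long.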
5178 func gfput(pp *p, gp *g) {
5179 if readgstatus(gp) != _Gdead {
5180 throw("gfput: bad status (not Gdead)")
5181 }
5182
5183 stksize := gp.stack.hi - gp.stack.lo
5184
5185 if stksize != uintptr(startingStackSize) {
5186
5187 stackfree(gp.stack)
5188 gp.stack.lo = 0
5189 gp.stack.hi = 0
5190 gp.stackguard0 = 0
5191 }
5192
5193 pp.gFree.push(gp)
5194 pp.gFree.n++
5195 if pp.gFree.n >= 64 {
5196 var (
5197 inc int32
5198 stackQ gQueue
5199 noStackQ gQueue
5200 )
5201 for pp.gFree.n >= 32 {
5202 gp := pp.gFree.pop()
5203 pp.gFree.n--
5204 if gp.stack.lo == 0 {
5205 noStackQ.push(gp)
5206 } else {
5207 stackQ.push(gp)
5208 }
5209 inc++
5210 }
5211 lock(&sched.gFree.lock)
5212 sched.gFree.noStack.pushAll(noStackQ)
5213 sched.gFree.stack.pushAll(stackQ)
5214 sched.gFree.n += inc
5215 unlock(&sched.gFree.lock)
5216 }
5217 }
5218
5219
5220
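// gfget takes a g from the per-P free list, refilling from the global
// free list if the local one is empty, and allocates a fresh stack for
// the g if it has none.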
5221 func gfget(pp *p) *g {
5222 retry:
5223 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5224 lock(&sched.gFree.lock)
5225
5226 for pp.gFree.n < 32 {
5227
5228 gp := sched.gFree.stack.pop()
5229 if gp == nil {
5230 gp = sched.gFree.noStack.pop()
5231 if gp == nil {
5232 break
5233 }
5234 }
5235 sched.gFree.n--
5236 pp.gFree.push(gp)
5237 pp.gFree.n++
5238 }
5239 unlock(&sched.gFree.lock)
5240 goto retry
5241 }
5242 gp := pp.gFree.pop()
5243 if gp == nil {
5244 return nil
5245 }
5246 pp.gFree.n--
5247 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5248
5249
5250
5251 systemstack(func() {
5252 stackfree(gp.stack)
5253 gp.stack.lo = 0
5254 gp.stack.hi = 0
5255 gp.stackguard0 = 0
5256 })
5257 }
5258 if gp.stack.lo == 0 {
5259
5260 systemstack(func() {
5261 gp.stack = stackalloc(startingStackSize)
5262 })
5263 gp.stackguard0 = gp.stack.lo + stackGuard
5264 } else {
5265 if raceenabled {
5266 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5267 }
5268 if msanenabled {
5269 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5270 }
5271 if asanenabled {
5272 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5273 }
5274 }
5275 return gp
5276 }
5277
5278
5279 func gfpurge(pp *p) {
5280 var (
5281 inc int32
5282 stackQ gQueue
5283 noStackQ gQueue
5284 )
5285 for !pp.gFree.empty() {
5286 gp := pp.gFree.pop()
5287 pp.gFree.n--
5288 if gp.stack.lo == 0 {
5289 noStackQ.push(gp)
5290 } else {
5291 stackQ.push(gp)
5292 }
5293 inc++
5294 }
5295 lock(&sched.gFree.lock)
5296 sched.gFree.noStack.pushAll(noStackQ)
5297 sched.gFree.stack.pushAll(stackQ)
5298 sched.gFree.n += inc
5299 unlock(&sched.gFree.lock)
5300 }
5301
5302
5303 func Breakpoint() {
5304 breakpoint()
5305 }
5306
5307
5308
5309
5310
5311
5312 func dolockOSThread() {
5313 if GOARCH == "wasm" {
5314 return
5315 }
5316 gp := getg()
5317 gp.m.lockedg.set(gp)
5318 gp.lockedm.set(gp.m)
5319 }
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337 func LockOSThread() {
5338 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5339
5340
5341
5342 startTemplateThread()
5343 }
5344 gp := getg()
5345 gp.m.lockedExt++
5346 if gp.m.lockedExt == 0 {
5347 gp.m.lockedExt--
5348 panic("LockOSThread nesting overflow")
5349 }
5350 dolockOSThread()
5351 }
5352
5353
5354 func lockOSThread() {
5355 getg().m.lockedInt++
5356 dolockOSThread()
5357 }
5358
5359
5360
5361
5362
5363
5364 func dounlockOSThread() {
5365 if GOARCH == "wasm" {
5366 return
5367 }
5368 gp := getg()
5369 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5370 return
5371 }
5372 gp.m.lockedg = 0
5373 gp.lockedm = 0
5374 }
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390 func UnlockOSThread() {
5391 gp := getg()
5392 if gp.m.lockedExt == 0 {
5393 return
5394 }
5395 gp.m.lockedExt--
5396 dounlockOSThread()
5397 }
5398
5399
5400 func unlockOSThread() {
5401 gp := getg()
5402 if gp.m.lockedInt == 0 {
5403 systemstack(badunlockosthread)
5404 }
5405 gp.m.lockedInt--
5406 dounlockOSThread()
5407 }
5408
5409 func badunlockosthread() {
5410 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5411 }
5412
5413 func gcount() int32 {
5414 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5415 for _, pp := range allp {
5416 n -= pp.gFree.n
5417 }
5418
5419
5420
5421 if n < 1 {
5422 n = 1
5423 }
5424 return n
5425 }
5426
5427 func mcount() int32 {
5428 return int32(sched.mnext - sched.nmfreed)
5429 }
5430
5431 var prof struct {
5432 signalLock atomic.Uint32
5433
5434
5435
5436 hz atomic.Int32
5437 }
5438
5439 func _System() { _System() }
5440 func _ExternalCode() { _ExternalCode() }
5441 func _LostExternalCode() { _LostExternalCode() }
5442 func _GC() { _GC() }
5443 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5444 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5445 func _VDSO() { _VDSO() }
5446
5447
5448
5449
5450
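// sigprof records a CPU profiling sample for the interrupted code. It is
// invoked from the SIGPROF handler, chooses the right stack to unwind
// (cgo, libcall, VDSO, or ordinary Go), and falls back to synthetic
// frames such as _System or _ExternalCode when no frames are available.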
5451 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5452 if prof.hz.Load() == 0 {
5453 return
5454 }
5455
5456
5457
5458
5459 if mp != nil && mp.profilehz == 0 {
5460 return
5461 }
5462
5463
5464
5465
5466
5467
5468
5469 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5470 if f := findfunc(pc); f.valid() {
5471 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5472 cpuprof.lostAtomic++
5473 return
5474 }
5475 }
5476 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5477
5478
5479
5480 cpuprof.lostAtomic++
5481 return
5482 }
5483 }
5484
5485
5486
5487
5488
5489
5490
5491 getg().m.mallocing++
5492
5493 var u unwinder
5494 var stk [maxCPUProfStack]uintptr
5495 n := 0
5496 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5497 cgoOff := 0
5498
5499
5500
5501
5502
5503 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5504 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5505 cgoOff++
5506 }
5507 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5508 mp.cgoCallers[0] = 0
5509 }
5510
5511
5512 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5513 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5514
5515
5516 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5517 } else if mp != nil && mp.vdsoSP != 0 {
5518
5519
5520 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5521 } else {
5522 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5523 }
5524 n += tracebackPCs(&u, 0, stk[n:])
5525
5526 if n <= 0 {
5527
5528
5529 n = 2
5530 if inVDSOPage(pc) {
5531 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5532 } else if pc > firstmoduledata.etext {
5533
5534 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5535 }
5536 stk[0] = pc
5537 if mp.preemptoff != "" {
5538 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5539 } else {
5540 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5541 }
5542 }
5543
5544 if prof.hz.Load() != 0 {
5545
5546
5547
5548 var tagPtr *unsafe.Pointer
5549 if gp != nil && gp.m != nil && gp.m.curg != nil {
5550 tagPtr = &gp.m.curg.labels
5551 }
5552 cpuprof.add(tagPtr, stk[:n])
5553
5554 gprof := gp
5555 var mp *m
5556 var pp *p
5557 if gp != nil && gp.m != nil {
5558 if gp.m.curg != nil {
5559 gprof = gp.m.curg
5560 }
5561 mp = gp.m
5562 pp = gp.m.p.ptr()
5563 }
5564 traceCPUSample(gprof, mp, pp, stk[:n])
5565 }
5566 getg().m.mallocing--
5567 }
5568
5569
5570
5571 func setcpuprofilerate(hz int32) {
5572
5573 if hz < 0 {
5574 hz = 0
5575 }
5576
5577
5578
5579 gp := getg()
5580 gp.m.locks++
5581
5582
5583
5584
5585 setThreadCPUProfiler(0)
5586
5587 for !prof.signalLock.CompareAndSwap(0, 1) {
5588 osyield()
5589 }
5590 if prof.hz.Load() != hz {
5591 setProcessCPUProfiler(hz)
5592 prof.hz.Store(hz)
5593 }
5594 prof.signalLock.Store(0)
5595
5596 lock(&sched.lock)
5597 sched.profilehz = hz
5598 unlock(&sched.lock)
5599
5600 if hz != 0 {
5601 setThreadCPUProfiler(hz)
5602 }
5603
5604 gp.m.locks--
5605 }
5606
5607
5608
5609 func (pp *p) init(id int32) {
5610 pp.id = id
5611 pp.status = _Pgcstop
5612 pp.sudogcache = pp.sudogbuf[:0]
5613 pp.deferpool = pp.deferpoolbuf[:0]
5614 pp.wbBuf.reset()
5615 if pp.mcache == nil {
5616 if id == 0 {
5617 if mcache0 == nil {
5618 throw("missing mcache?")
5619 }
5620
5621
5622 pp.mcache = mcache0
5623 } else {
5624 pp.mcache = allocmcache()
5625 }
5626 }
5627 if raceenabled && pp.raceprocctx == 0 {
5628 if id == 0 {
5629 pp.raceprocctx = raceprocctx0
5630 raceprocctx0 = 0
5631 } else {
5632 pp.raceprocctx = raceproccreate()
5633 }
5634 }
5635 lockInit(&pp.timers.mu, lockRankTimers)
5636
5637
5638
5639 timerpMask.set(id)
5640
5641
5642 idlepMask.clear(id)
5643 }
5644
5645
5646
5647
5648
5649 func (pp *p) destroy() {
5650 assertLockHeld(&sched.lock)
5651 assertWorldStopped()
5652
5653
5654 for pp.runqhead != pp.runqtail {
5655
5656 pp.runqtail--
5657 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5658
5659 globrunqputhead(gp)
5660 }
5661 if pp.runnext != 0 {
5662 globrunqputhead(pp.runnext.ptr())
5663 pp.runnext = 0
5664 }
5665
5666
5667 getg().m.p.ptr().timers.take(&pp.timers)
5668
5669
5670 if gcphase != _GCoff {
5671 wbBufFlush1(pp)
5672 pp.gcw.dispose()
5673 }
5674 for i := range pp.sudogbuf {
5675 pp.sudogbuf[i] = nil
5676 }
5677 pp.sudogcache = pp.sudogbuf[:0]
5678 pp.pinnerCache = nil
5679 for j := range pp.deferpoolbuf {
5680 pp.deferpoolbuf[j] = nil
5681 }
5682 pp.deferpool = pp.deferpoolbuf[:0]
5683 systemstack(func() {
5684 for i := 0; i < pp.mspancache.len; i++ {
5685
5686 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5687 }
5688 pp.mspancache.len = 0
5689 lock(&mheap_.lock)
5690 pp.pcache.flush(&mheap_.pages)
5691 unlock(&mheap_.lock)
5692 })
5693 freemcache(pp.mcache)
5694 pp.mcache = nil
5695 gfpurge(pp)
5696 if raceenabled {
5697 if pp.timers.raceCtx != 0 {
5698
5699
5700
5701
5702
5703 mp := getg().m
5704 phold := mp.p.ptr()
5705 mp.p.set(pp)
5706
5707 racectxend(pp.timers.raceCtx)
5708 pp.timers.raceCtx = 0
5709
5710 mp.p.set(phold)
5711 }
5712 raceprocdestroy(pp.raceprocctx)
5713 pp.raceprocctx = 0
5714 }
5715 pp.gcAssistTime = 0
5716 pp.status = _Pdead
5717 }
5718
5719
5720
5721
5722
5723
5724
5725
5726
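// procresize changes the number of Ps to nprocs. sched.lock must be held
// and the world must be stopped. It returns the list of Ps that still
// have local work, which the caller must schedule.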
5727 func procresize(nprocs int32) *p {
5728 assertLockHeld(&sched.lock)
5729 assertWorldStopped()
5730
5731 old := gomaxprocs
5732 if old < 0 || nprocs <= 0 {
5733 throw("procresize: invalid arg")
5734 }
5735 trace := traceAcquire()
5736 if trace.ok() {
5737 trace.Gomaxprocs(nprocs)
5738 traceRelease(trace)
5739 }
5740
5741
5742 now := nanotime()
5743 if sched.procresizetime != 0 {
5744 sched.totaltime += int64(old) * (now - sched.procresizetime)
5745 }
5746 sched.procresizetime = now
5747
5748 maskWords := (nprocs + 31) / 32
5749
5750
5751 if nprocs > int32(len(allp)) {
5752
5753
5754 lock(&allpLock)
5755 if nprocs <= int32(cap(allp)) {
5756 allp = allp[:nprocs]
5757 } else {
5758 nallp := make([]*p, nprocs)
5759
5760
5761 copy(nallp, allp[:cap(allp)])
5762 allp = nallp
5763 }
5764
5765 if maskWords <= int32(cap(idlepMask)) {
5766 idlepMask = idlepMask[:maskWords]
5767 timerpMask = timerpMask[:maskWords]
5768 } else {
5769 nidlepMask := make([]uint32, maskWords)
5770
5771 copy(nidlepMask, idlepMask)
5772 idlepMask = nidlepMask
5773
5774 ntimerpMask := make([]uint32, maskWords)
5775 copy(ntimerpMask, timerpMask)
5776 timerpMask = ntimerpMask
5777 }
5778 unlock(&allpLock)
5779 }
5780
5781
5782 for i := old; i < nprocs; i++ {
5783 pp := allp[i]
5784 if pp == nil {
5785 pp = new(p)
5786 }
5787 pp.init(i)
5788 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5789 }
5790
5791 gp := getg()
5792 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5793
5794 gp.m.p.ptr().status = _Prunning
5795 gp.m.p.ptr().mcache.prepareForSweep()
5796 } else {
5797
5798
5799
5800
5801
5802 if gp.m.p != 0 {
5803 trace := traceAcquire()
5804 if trace.ok() {
5805
5806
5807
5808 trace.GoSched()
5809 trace.ProcStop(gp.m.p.ptr())
5810 traceRelease(trace)
5811 }
5812 gp.m.p.ptr().m = 0
5813 }
5814 gp.m.p = 0
5815 pp := allp[0]
5816 pp.m = 0
5817 pp.status = _Pidle
5818 acquirep(pp)
5819 trace := traceAcquire()
5820 if trace.ok() {
5821 trace.GoStart()
5822 traceRelease(trace)
5823 }
5824 }
5825
5826
5827 mcache0 = nil
5828
5829
5830 for i := nprocs; i < old; i++ {
5831 pp := allp[i]
5832 pp.destroy()
5833
5834 }
5835
5836
5837 if int32(len(allp)) != nprocs {
5838 lock(&allpLock)
5839 allp = allp[:nprocs]
5840 idlepMask = idlepMask[:maskWords]
5841 timerpMask = timerpMask[:maskWords]
5842 unlock(&allpLock)
5843 }
5844
5845 var runnablePs *p
5846 for i := nprocs - 1; i >= 0; i-- {
5847 pp := allp[i]
5848 if gp.m.p.ptr() == pp {
5849 continue
5850 }
5851 pp.status = _Pidle
5852 if runqempty(pp) {
5853 pidleput(pp, now)
5854 } else {
5855 pp.m.set(mget())
5856 pp.link.set(runnablePs)
5857 runnablePs = pp
5858 }
5859 }
5860 stealOrder.reset(uint32(nprocs))
5861 var int32p *int32 = &gomaxprocs
5862 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5863 if old != nprocs {
5864
5865 gcCPULimiter.resetCapacity(now, nprocs)
5866 }
5867 return runnablePs
5868 }
5869
5870
5871
5872
5873
5874
5875
5876 func acquirep(pp *p) {
5877
5878 wirep(pp)
5879
5880
5881
5882
5883
5884 pp.mcache.prepareForSweep()
5885
5886 trace := traceAcquire()
5887 if trace.ok() {
5888 trace.ProcStart()
5889 traceRelease(trace)
5890 }
5891 }
5892
5893
5894
5895
5896
5897
5898
5899 func wirep(pp *p) {
5900 gp := getg()
5901
5902 if gp.m.p != 0 {
5903
5904
5905 systemstack(func() {
5906 throw("wirep: already in go")
5907 })
5908 }
5909 if pp.m != 0 || pp.status != _Pidle {
5910
5911
5912 systemstack(func() {
5913 id := int64(0)
5914 if pp.m != 0 {
5915 id = pp.m.ptr().id
5916 }
5917 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5918 throw("wirep: invalid p state")
5919 })
5920 }
5921 gp.m.p.set(pp)
5922 pp.m.set(gp.m)
5923 pp.status = _Prunning
5924 }
5925
5926
5927 func releasep() *p {
5928 trace := traceAcquire()
5929 if trace.ok() {
5930 trace.ProcStop(getg().m.p.ptr())
5931 traceRelease(trace)
5932 }
5933 return releasepNoTrace()
5934 }
5935
5936
5937 func releasepNoTrace() *p {
5938 gp := getg()
5939
5940 if gp.m.p == 0 {
5941 throw("releasep: invalid arg")
5942 }
5943 pp := gp.m.p.ptr()
5944 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5945 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5946 throw("releasep: invalid p state")
5947 }
5948 gp.m.p = 0
5949 pp.m = 0
5950 pp.status = _Pidle
5951 return pp
5952 }
5953
5954 func incidlelocked(v int32) {
5955 lock(&sched.lock)
5956 sched.nmidlelocked += v
5957 if v > 0 {
5958 checkdead()
5959 }
5960 unlock(&sched.lock)
5961 }
5962
5963
5964
5965
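// checkdead detects the "all goroutines are asleep" deadlock: if no M can
// make progress and no timer will fire, it reports a fatal error.
// sched.lock must be held.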
5966 func checkdead() {
5967 assertLockHeld(&sched.lock)
5968
5969
5970
5971
5972
5973
5974 if (islibrary || isarchive) && GOARCH != "wasm" {
5975 return
5976 }
5977
5978
5979
5980
5981
5982 if panicking.Load() > 0 {
5983 return
5984 }
5985
5986
5987
5988
5989
5990 var run0 int32
5991 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5992 run0 = 1
5993 }
5994
5995 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5996 if run > run0 {
5997 return
5998 }
5999 if run < 0 {
6000 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6001 unlock(&sched.lock)
6002 throw("checkdead: inconsistent counts")
6003 }
6004
6005 grunning := 0
6006 forEachG(func(gp *g) {
6007 if isSystemGoroutine(gp, false) {
6008 return
6009 }
6010 s := readgstatus(gp)
6011 switch s &^ _Gscan {
6012 case _Gwaiting,
6013 _Gpreempted:
6014 grunning++
6015 case _Grunnable,
6016 _Grunning,
6017 _Gsyscall:
6018 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6019 unlock(&sched.lock)
6020 throw("checkdead: runnable g")
6021 }
6022 })
6023 if grunning == 0 {
6024 unlock(&sched.lock)
6025 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6026 }
6027
6028
6029 if faketime != 0 {
6030 if when := timeSleepUntil(); when < maxWhen {
6031 faketime = when
6032
6033
6034 pp, _ := pidleget(faketime)
6035 if pp == nil {
6036
6037
6038 unlock(&sched.lock)
6039 throw("checkdead: no p for timer")
6040 }
6041 mp := mget()
6042 if mp == nil {
6043
6044
6045 unlock(&sched.lock)
6046 throw("checkdead: no m for timer")
6047 }
6048
6049
6050
6051 sched.nmspinning.Add(1)
6052 mp.spinning = true
6053 mp.nextp.set(pp)
6054 notewakeup(&mp.park)
6055 return
6056 }
6057 }
6058
6059
6060 for _, pp := range allp {
6061 if len(pp.timers.heap) > 0 {
6062 return
6063 }
6064 }
6065
6066 unlock(&sched.lock)
6067 fatal("all goroutines are asleep - deadlock!")
6068 }
6069
6070
6071
6072
6073
6074
6075 var forcegcperiod int64 = 2 * 60 * 1e9
6076
6077
6078
6079 var needSysmonWorkaround bool = false
6080
6081
6082
6083
6084 const haveSysmon = GOARCH != "wasm"
6085
6086
6087
6088
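// sysmon is the system monitor, running on a dedicated M without a P. It
// periodically polls the network if no one else has, retakes Ps that are
// stuck in syscalls or running too long, wakes the scavenger, and forces
// GCs that are overdue.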
6089 func sysmon() {
6090 lock(&sched.lock)
6091 sched.nmsys++
6092 checkdead()
6093 unlock(&sched.lock)
6094
6095 lasttrace := int64(0)
6096 idle := 0
6097 delay := uint32(0)
6098
6099 for {
6100 if idle == 0 {
6101 delay = 20
6102 } else if idle > 50 {
6103 delay *= 2
6104 }
6105 if delay > 10*1000 {
6106 delay = 10 * 1000
6107 }
6108 usleep(delay)
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125 now := nanotime()
6126 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6127 lock(&sched.lock)
6128 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6129 syscallWake := false
6130 next := timeSleepUntil()
6131 if next > now {
6132 sched.sysmonwait.Store(true)
6133 unlock(&sched.lock)
6134
6135
6136 sleep := forcegcperiod / 2
6137 if next-now < sleep {
6138 sleep = next - now
6139 }
6140 shouldRelax := sleep >= osRelaxMinNS
6141 if shouldRelax {
6142 osRelax(true)
6143 }
6144 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6145 if shouldRelax {
6146 osRelax(false)
6147 }
6148 lock(&sched.lock)
6149 sched.sysmonwait.Store(false)
6150 noteclear(&sched.sysmonnote)
6151 }
6152 if syscallWake {
6153 idle = 0
6154 delay = 20
6155 }
6156 }
6157 unlock(&sched.lock)
6158 }
6159
6160 lock(&sched.sysmonlock)
6161
6162
6163 now = nanotime()
6164
6165
6166 if *cgo_yield != nil {
6167 asmcgocall(*cgo_yield, nil)
6168 }
6169
6170 lastpoll := sched.lastpoll.Load()
6171 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6172 sched.lastpoll.CompareAndSwap(lastpoll, now)
6173 list, delta := netpoll(0)
6174 if !list.empty() {
6175
6176
6177
6178
6179
6180
6181
6182 incidlelocked(-1)
6183 injectglist(&list)
6184 incidlelocked(1)
6185 netpollAdjustWaiters(delta)
6186 }
6187 }
6188 if GOOS == "netbsd" && needSysmonWorkaround {
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204 if next := timeSleepUntil(); next < now {
6205 startm(nil, false, false)
6206 }
6207 }
6208 if scavenger.sysmonWake.Load() != 0 {
6209
6210 scavenger.wake()
6211 }
6212
6213
6214 if retake(now) != 0 {
6215 idle = 0
6216 } else {
6217 idle++
6218 }
6219
6220 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6221 lock(&forcegc.lock)
6222 forcegc.idle.Store(false)
6223 var list gList
6224 list.push(forcegc.g)
6225 injectglist(&list)
6226 unlock(&forcegc.lock)
6227 }
6228 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6229 lasttrace = now
6230 schedtrace(debug.scheddetail > 0)
6231 }
6232 unlock(&sched.sysmonlock)
6233 }
6234 }
6235
6236 type sysmontick struct {
6237 schedtick uint32
6238 syscalltick uint32
6239 schedwhen int64
6240 syscallwhen int64
6241 }
6242
6243
6244
6245 const forcePreemptNS = 10 * 1000 * 1000
6246
6247 func retake(now int64) uint32 {
6248 n := 0
6249
6250
6251 lock(&allpLock)
6252
6253
6254
6255 for i := 0; i < len(allp); i++ {
6256 pp := allp[i]
6257 if pp == nil {
6258
6259
6260 continue
6261 }
6262 pd := &pp.sysmontick
6263 s := pp.status
6264 sysretake := false
6265 if s == _Prunning || s == _Psyscall {
6266
6267
6268
6269
6270 t := int64(pp.schedtick)
6271 if int64(pd.schedtick) != t {
6272 pd.schedtick = uint32(t)
6273 pd.schedwhen = now
6274 } else if pd.schedwhen+forcePreemptNS <= now {
6275 preemptone(pp)
6276
6277
6278 sysretake = true
6279 }
6280 }
6281 if s == _Psyscall {
6282
6283 t := int64(pp.syscalltick)
6284 if !sysretake && int64(pd.syscalltick) != t {
6285 pd.syscalltick = uint32(t)
6286 pd.syscallwhen = now
6287 continue
6288 }
6289
6290
6291
6292 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6293 continue
6294 }
6295
6296 unlock(&allpLock)
6297
6298
6299
6300
6301 incidlelocked(-1)
6302 trace := traceAcquire()
6303 if atomic.Cas(&pp.status, s, _Pidle) {
6304 if trace.ok() {
6305 trace.ProcSteal(pp, false)
6306 traceRelease(trace)
6307 }
6308 n++
6309 pp.syscalltick++
6310 handoffp(pp)
6311 } else if trace.ok() {
6312 traceRelease(trace)
6313 }
6314 incidlelocked(1)
6315 lock(&allpLock)
6316 }
6317 }
6318 unlock(&allpLock)
6319 return uint32(n)
6320 }
6321
6322
6323
6324
6325
6326
6327 func preemptall() bool {
6328 res := false
6329 for _, pp := range allp {
6330 if pp.status != _Prunning {
6331 continue
6332 }
6333 if preemptone(pp) {
6334 res = true
6335 }
6336 }
6337 return res
6338 }
6339
6340
6341
6342
6343
6344
6345
6346
6347
6348
6349
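// preemptone requests preemption of the goroutine currently running on pp
// by setting its preempt flag and poisoned stack guard, and by sending an
// asynchronous preemption request to the M when supported. The request is
// best-effort and may be missed.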
6350 func preemptone(pp *p) bool {
6351 mp := pp.m.ptr()
6352 if mp == nil || mp == getg().m {
6353 return false
6354 }
6355 gp := mp.curg
6356 if gp == nil || gp == mp.g0 {
6357 return false
6358 }
6359
6360 gp.preempt = true
6361
6362
6363
6364
6365
6366 gp.stackguard0 = stackPreempt
6367
6368
6369 if preemptMSupported && debug.asyncpreemptoff == 0 {
6370 pp.preempt = true
6371 preemptM(mp)
6372 }
6373
6374 return true
6375 }
6376
6377 var starttime int64
6378
6379 func schedtrace(detailed bool) {
6380 now := nanotime()
6381 if starttime == 0 {
6382 starttime = now
6383 }
6384
6385 lock(&sched.lock)
6386 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6387 if detailed {
6388 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6389 }
6390
6391
6392
6393 for i, pp := range allp {
6394 mp := pp.m.ptr()
6395 h := atomic.Load(&pp.runqhead)
6396 t := atomic.Load(&pp.runqtail)
6397 if detailed {
6398 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6399 if mp != nil {
6400 print(mp.id)
6401 } else {
6402 print("nil")
6403 }
6404 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6405 } else {
6406
6407
6408 print(" ")
6409 if i == 0 {
6410 print("[")
6411 }
6412 print(t - h)
6413 if i == len(allp)-1 {
6414 print("]\n")
6415 }
6416 }
6417 }
6418
6419 if !detailed {
6420 unlock(&sched.lock)
6421 return
6422 }
6423
6424 for mp := allm; mp != nil; mp = mp.alllink {
6425 pp := mp.p.ptr()
6426 print(" M", mp.id, ": p=")
6427 if pp != nil {
6428 print(pp.id)
6429 } else {
6430 print("nil")
6431 }
6432 print(" curg=")
6433 if mp.curg != nil {
6434 print(mp.curg.goid)
6435 } else {
6436 print("nil")
6437 }
6438 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6439 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6440 print(lockedg.goid)
6441 } else {
6442 print("nil")
6443 }
6444 print("\n")
6445 }
6446
6447 forEachG(func(gp *g) {
6448 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6449 if gp.m != nil {
6450 print(gp.m.id)
6451 } else {
6452 print("nil")
6453 }
6454 print(" lockedm=")
6455 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6456 print(lockedm.id)
6457 } else {
6458 print("nil")
6459 }
6460 print("\n")
6461 })
6462 unlock(&sched.lock)
6463 }
6464
6465
6466
6467
6468
6469
6470 func schedEnableUser(enable bool) {
6471 lock(&sched.lock)
6472 if sched.disable.user == !enable {
6473 unlock(&sched.lock)
6474 return
6475 }
6476 sched.disable.user = !enable
6477 if enable {
6478 n := sched.disable.n
6479 sched.disable.n = 0
6480 globrunqputbatch(&sched.disable.runnable, n)
6481 unlock(&sched.lock)
6482 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6483 startm(nil, false, false)
6484 }
6485 } else {
6486 unlock(&sched.lock)
6487 }
6488 }
6489
6490
6491
6492
6493
6494 func schedEnabled(gp *g) bool {
6495 assertLockHeld(&sched.lock)
6496
6497 if sched.disable.user {
6498 return isSystemGoroutine(gp, true)
6499 }
6500 return true
6501 }
6502
6503
6504
6505
6506
6507
6508 func mput(mp *m) {
6509 assertLockHeld(&sched.lock)
6510
6511 mp.schedlink = sched.midle
6512 sched.midle.set(mp)
6513 sched.nmidle++
6514 checkdead()
6515 }
6516
6517
6518
6519
6520
6521
6522 func mget() *m {
6523 assertLockHeld(&sched.lock)
6524
6525 mp := sched.midle.ptr()
6526 if mp != nil {
6527 sched.midle = mp.schedlink
6528 sched.nmidle--
6529 }
6530 return mp
6531 }
6532
6533
6534
6535
6536
6537
6538 func globrunqput(gp *g) {
6539 assertLockHeld(&sched.lock)
6540
6541 sched.runq.pushBack(gp)
6542 sched.runqsize++
6543 }
6544
6545
6546
6547
6548
6549
6550 func globrunqputhead(gp *g) {
6551 assertLockHeld(&sched.lock)
6552
6553 sched.runq.push(gp)
6554 sched.runqsize++
6555 }
6556
6557
6558
6559
6560
6561
6562
6563 func globrunqputbatch(batch *gQueue, n int32) {
6564 assertLockHeld(&sched.lock)
6565
6566 sched.runq.pushBackAll(*batch)
6567 sched.runqsize += n
6568 *batch = gQueue{}
6569 }
6570
6571
6572
6573 func globrunqget(pp *p, max int32) *g {
6574 assertLockHeld(&sched.lock)
6575
6576 if sched.runqsize == 0 {
6577 return nil
6578 }
6579
6580 n := sched.runqsize/gomaxprocs + 1
6581 if n > sched.runqsize {
6582 n = sched.runqsize
6583 }
6584 if max > 0 && n > max {
6585 n = max
6586 }
6587 if n > int32(len(pp.runq))/2 {
6588 n = int32(len(pp.runq)) / 2
6589 }
6590
6591 sched.runqsize -= n
6592
6593 gp := sched.runq.pop()
6594 n--
6595 for ; n > 0; n-- {
6596 gp1 := sched.runq.pop()
6597 runqput(pp, gp1, false)
6598 }
6599 return gp
6600 }
6601
6602
6603 type pMask []uint32
6604
6605
6606 func (p pMask) read(id uint32) bool {
6607 word := id / 32
6608 mask := uint32(1) << (id % 32)
6609 return (atomic.Load(&p[word]) & mask) != 0
6610 }
6611
6612
6613 func (p pMask) set(id int32) {
6614 word := id / 32
6615 mask := uint32(1) << (id % 32)
6616 atomic.Or(&p[word], mask)
6617 }
6618
6619
6620 func (p pMask) clear(id int32) {
6621 word := id / 32
6622 mask := uint32(1) << (id % 32)
6623 atomic.And(&p[word], ^mask)
6624 }
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637 func pidleput(pp *p, now int64) int64 {
6638 assertLockHeld(&sched.lock)
6639
6640 if !runqempty(pp) {
6641 throw("pidleput: P has non-empty run queue")
6642 }
6643 if now == 0 {
6644 now = nanotime()
6645 }
6646 if pp.timers.len.Load() == 0 {
6647 timerpMask.clear(pp.id)
6648 }
6649 idlepMask.set(pp.id)
6650 pp.link = sched.pidle
6651 sched.pidle.set(pp)
6652 sched.npidle.Add(1)
6653 if !pp.limiterEvent.start(limiterEventIdle, now) {
6654 throw("must be able to track idle limiter event")
6655 }
6656 return now
6657 }
6658
6659
6660
6661
6662
6663
6664
6665
6666 func pidleget(now int64) (*p, int64) {
6667 assertLockHeld(&sched.lock)
6668
6669 pp := sched.pidle.ptr()
6670 if pp != nil {
6671
6672 if now == 0 {
6673 now = nanotime()
6674 }
6675 timerpMask.set(pp.id)
6676 idlepMask.clear(pp.id)
6677 sched.pidle = pp.link
6678 sched.npidle.Add(-1)
6679 pp.limiterEvent.stop(limiterEventIdle, now)
6680 }
6681 return pp, now
6682 }
6683
6684
6685
6686
6687
6688
6689
6690
6691
6692
6693
6694 func pidlegetSpinning(now int64) (*p, int64) {
6695 assertLockHeld(&sched.lock)
6696
6697 pp, now := pidleget(now)
6698 if pp == nil {
6699
6700
6701
6702 sched.needspinning.Store(1)
6703 return nil, now
6704 }
6705
6706 return pp, now
6707 }
6708
6709
6710
6711 func runqempty(pp *p) bool {
6712
6713
6714
6715
6716 for {
6717 head := atomic.Load(&pp.runqhead)
6718 tail := atomic.Load(&pp.runqtail)
6719 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6720 if tail == atomic.Load(&pp.runqtail) {
6721 return head == tail && runnext == 0
6722 }
6723 }
6724 }
6725
6726
6727
6728
6729
6730
6731
6732
6733
6734
6735 const randomizeScheduler = raceenabled
6736
6737
6738
6739
6740
6741
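// runqput tries to put g on pp's local runnable queue. With next set, g
// is placed in pp.runnext, displacing any previous runnext g to the tail
// of the queue. If the queue is full, half of it plus g are moved to the
// global queue by runqputslow.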
6742 func runqput(pp *p, gp *g, next bool) {
6743 if !haveSysmon && next {
6744
6745
6746
6747
6748
6749
6750
6751
6752 next = false
6753 }
6754 if randomizeScheduler && next && randn(2) == 0 {
6755 next = false
6756 }
6757
6758 if next {
6759 retryNext:
6760 oldnext := pp.runnext
6761 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6762 goto retryNext
6763 }
6764 if oldnext == 0 {
6765 return
6766 }
6767
6768 gp = oldnext.ptr()
6769 }
6770
6771 retry:
6772 h := atomic.LoadAcq(&pp.runqhead)
6773 t := pp.runqtail
6774 if t-h < uint32(len(pp.runq)) {
6775 pp.runq[t%uint32(len(pp.runq))].set(gp)
6776 atomic.StoreRel(&pp.runqtail, t+1)
6777 return
6778 }
6779 if runqputslow(pp, gp, h, t) {
6780 return
6781 }
// The queue is not full, so the put above must succeed on retry.
6783 goto retry
6784 }
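// Index arithmetic sketch (illustrative numbers, not from the source):
// runqhead and runqtail are free-running uint32 counters, so with
// len(pp.runq) = 256, h = 300 and t = 555, the queue holds t-h = 255
// goroutines and the next put lands in slot 555%256 = 43; the counters wrap
// naturally in uint32 arithmetic.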
6785
// runqputslow puts gp and a batch of work from the local runnable queue on the
// global queue. Executed only by the owner P.
6788 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6789 var batch [len(pp.runq)/2 + 1]*g
// First, grab a batch from the local queue.
6792 n := t - h
6793 n = n / 2
6794 if n != uint32(len(pp.runq)/2) {
6795 throw("runqputslow: queue is not full")
6796 }
6797 for i := uint32(0); i < n; i++ {
6798 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6799 }
6800 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6801 return false
6802 }
6803 batch[n] = gp
6804
6805 if randomizeScheduler {
6806 for i := uint32(1); i <= n; i++ {
6807 j := cheaprandn(i + 1)
6808 batch[i], batch[j] = batch[j], batch[i]
6809 }
6810 }
// Link the goroutines.
6813 for i := uint32(0); i < n; i++ {
6814 batch[i].schedlink.set(batch[i+1])
6815 }
6816 var q gQueue
6817 q.head.set(batch[0])
6818 q.tail.set(batch[n])
// Now put the batch on the global queue.
6821 lock(&sched.lock)
6822 globrunqputbatch(&q, int32(n+1))
6823 unlock(&sched.lock)
6824 return true
6825 }
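// Overflow sketch (illustrative, assuming the usual 256-entry local queue):
// runqputslow only runs when the queue is full, so n = 256/2 = 128 and the
// batch of n+1 = 129 goroutines (half the local queue plus gp) is linked into
// a gQueue and moved to the global queue under a single sched.lock
// acquisition.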
6826
// runqputbatch tries to put all the Gs on q onto the local runnable queue.
// If the local queue is full, the remainder is put on the global queue.
// Executed only by the owner P.
6831 func runqputbatch(pp *p, q *gQueue, qsize int) {
6832 h := atomic.LoadAcq(&pp.runqhead)
6833 t := pp.runqtail
6834 n := uint32(0)
6835 for !q.empty() && t-h < uint32(len(pp.runq)) {
6836 gp := q.pop()
6837 pp.runq[t%uint32(len(pp.runq))].set(gp)
6838 t++
6839 n++
6840 }
6841 qsize -= int(n)
6842
6843 if randomizeScheduler {
6844 off := func(o uint32) uint32 {
6845 return (pp.runqtail + o) % uint32(len(pp.runq))
6846 }
6847 for i := uint32(1); i < n; i++ {
6848 j := cheaprandn(i + 1)
6849 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6850 }
6851 }
6852
6853 atomic.StoreRel(&pp.runqtail, t)
6854 if !q.empty() {
6855 lock(&sched.lock)
6856 globrunqputbatch(q, int32(qsize))
6857 unlock(&sched.lock)
6858 }
6859 }
6860
// runqget gets a g from the local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the current
// time slice; otherwise, it should start a new time slice.
// Executed only by the owner P.
6865 func runqget(pp *p) (gp *g, inheritTime bool) {
// If there's a runnext, it's the next G to run.
6867 next := pp.runnext
// If the runnext cas fails, it could only have been stolen by another P,
// because other Ps can only CAS it to zero, so there is no need to retry.
6871 if next != 0 && pp.runnext.cas(next, 0) {
6872 return next.ptr(), true
6873 }
6874
6875 for {
6876 h := atomic.LoadAcq(&pp.runqhead)
6877 t := pp.runqtail
6878 if t == h {
6879 return nil, false
6880 }
6881 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6882 if atomic.CasRel(&pp.runqhead, h, h+1) {
6883 return gp, false
6884 }
6885 }
6886 }
6887
// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.
6890 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6891 oldNext := pp.runnext
6892 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6893 drainQ.pushBack(oldNext.ptr())
6894 n++
6895 }
6896
6897 retry:
6898 h := atomic.LoadAcq(&pp.runqhead)
6899 t := pp.runqtail
6900 qn := t - h
6901 if qn == 0 {
6902 return
6903 }
6904 if qn > uint32(len(pp.runq)) {
6905 goto retry
6906 }
6907
6908 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6909 goto retry
6910 }
6911
// The CAS above has already claimed the slots in [h, h+qn) against other
// consumers, and because runqdrain is executed only by the owner P, no new
// goroutines can be appended to the ring concurrently, so it is safe to read
// the claimed slots out after runqhead has been advanced.
6919 for i := uint32(0); i < qn; i++ {
6920 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6921 drainQ.pushBack(gp)
6922 n++
6923 }
6924 return
6925 }
6926
6927
// runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
// batch is a ring buffer starting at batchHead.
// Returns the number of grabbed goroutines.
// Can be executed by any P.
6931 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6932 for {
6933 h := atomic.LoadAcq(&pp.runqhead)
6934 t := atomic.LoadAcq(&pp.runqtail)
6935 n := t - h
6936 n = n - n/2
6937 if n == 0 {
6938 if stealRunNextG {
// Try to steal from pp.runnext.
6940 if next := pp.runnext; next != 0 {
6941 if pp.status == _Prunning {
// Sleep to ensure that pp isn't about to run the g we are about to steal.
// The important use case here is when the g running on pp ready()s another g
// and then almost immediately blocks. Instead of stealing runnext in this
// window, back off to give pp a chance to schedule runnext. This avoids
// thrashing gs between different Ps.
// A sync chan send/recv takes ~50ns as of time of writing, so 3us gives
// ~50x overshoot.
6952 if !osHasLowResTimer {
6953 usleep(3)
6954 } else {
// On some platforms system timer granularity is 1-15ms, which is way too much
// for this optimization, so just yield.
6958 osyield()
6959 }
6960 }
6961 if !pp.runnext.cas(next, 0) {
6962 continue
6963 }
6964 batch[batchHead%uint32(len(batch))] = next
6965 return 1
6966 }
6967 }
6968 return 0
6969 }
6970 if n > uint32(len(pp.runq)/2) {
6971 continue
6972 }
6973 for i := uint32(0); i < n; i++ {
6974 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6975 batch[(batchHead+i)%uint32(len(batch))] = g
6976 }
6977 if atomic.CasRel(&pp.runqhead, h, h+n) {
6978 return n
6979 }
6980 }
6981 }
6982
// runqsteal steals half of the elements from the local runnable queue of p2
// and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if the steal failed).
6986 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6987 t := pp.runqtail
6988 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6989 if n == 0 {
6990 return nil
6991 }
6992 n--
6993 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6994 if n == 0 {
6995 return gp
6996 }
6997 h := atomic.LoadAcq(&pp.runqhead)
6998 if t-h+n >= uint32(len(pp.runq)) {
6999 throw("runqsteal: runq overflow")
7000 }
7001 atomic.StoreRel(&pp.runqtail, t+n)
7002 return gp
7003 }
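// Steal sketch (illustrative numbers): runqgrab takes half of the victim's
// queue rounded up, n = (t-h) - (t-h)/2, so a victim with 5 runnable gs loses
// 3 of them; runqsteal then returns the last grabbed g and publishes the other
// 2 on pp's local queue by advancing runqtail.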
7004
// A gQueue is a dequeue of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.
7007 type gQueue struct {
7008 head guintptr
7009 tail guintptr
7010 }
7011
// empty reports whether q is empty.
7013 func (q *gQueue) empty() bool {
7014 return q.head == 0
7015 }
7016
// push adds gp to the head of q.
7018 func (q *gQueue) push(gp *g) {
7019 gp.schedlink = q.head
7020 q.head.set(gp)
7021 if q.tail == 0 {
7022 q.tail.set(gp)
7023 }
7024 }
7025
// pushBack adds gp to the tail of q.
7027 func (q *gQueue) pushBack(gp *g) {
7028 gp.schedlink = 0
7029 if q.tail != 0 {
7030 q.tail.ptr().schedlink.set(gp)
7031 } else {
7032 q.head.set(gp)
7033 }
7034 q.tail.set(gp)
7035 }
7036
// pushBackAll adds all Gs in q2 to the tail of q.
// After this, q2 must not be used.
7039 func (q *gQueue) pushBackAll(q2 gQueue) {
7040 if q2.tail == 0 {
7041 return
7042 }
7043 q2.tail.ptr().schedlink = 0
7044 if q.tail != 0 {
7045 q.tail.ptr().schedlink = q2.head
7046 } else {
7047 q.head = q2.head
7048 }
7049 q.tail = q2.tail
7050 }
7051
// pop removes and returns the head of queue q.
// It returns nil if q is empty.
7054 func (q *gQueue) pop() *g {
7055 gp := q.head.ptr()
7056 if gp != nil {
7057 q.head = gp.schedlink
7058 if q.head == 0 {
7059 q.tail = 0
7060 }
7061 }
7062 return gp
7063 }
7064
// popList takes all Gs in q and returns them as a gList.
7066 func (q *gQueue) popList() gList {
7067 stack := gList{q.head}
7068 *q = gQueue{}
7069 return stack
7070 }
7071
// A gList is a list of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.
7074 type gList struct {
7075 head guintptr
7076 }
7077
// empty reports whether l is empty.
7079 func (l *gList) empty() bool {
7080 return l.head == 0
7081 }
7082
// push adds gp to the head of l.
7084 func (l *gList) push(gp *g) {
7085 gp.schedlink = l.head
7086 l.head.set(gp)
7087 }
7088
// pushAll prepends all Gs in q to l.
7090 func (l *gList) pushAll(q gQueue) {
7091 if !q.empty() {
7092 q.tail.ptr().schedlink = l.head
7093 l.head = q.head
7094 }
7095 }
7096
// pop removes and returns the head of l. If l is empty, it returns nil.
7098 func (l *gList) pop() *g {
7099 gp := l.head.ptr()
7100 if gp != nil {
7101 l.head = gp.schedlink
7102 }
7103 return gp
7104 }
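// Ordering sketch (illustrative): pushBack'ing g1, g2, g3 onto a gQueue and
// popping yields g1, g2, g3 (FIFO), while push'ing the same gs onto a gList
// and popping yields g3, g2, g1 (LIFO). Both types are intrusive lists built
// on g.schedlink, which is why a g may be on at most one of them at a time.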
// setMaxThreads implements runtime/debug.SetMaxThreads: it sets the limit on
// the number of OS threads and returns the previous limit.
//
//go:linkname setMaxThreads runtime/debug.setMaxThreads
7107 func setMaxThreads(in int) (out int) {
7108 lock(&sched.lock)
7109 out = int(sched.maxmcount)
7110 if in > 0x7fffffff {
7111 sched.maxmcount = 0x7fffffff
7112 } else {
7113 sched.maxmcount = int32(in)
7114 }
7115 checkmcount()
7116 unlock(&sched.lock)
7117 return
7118 }
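// Usage sketch (illustrative values): runtime/debug.SetMaxThreads(20000)
// reaches this function via linkname; a request above 0x7fffffff is clamped to
// that value, and the previous limit (10000 by default, set in schedinit) is
// returned to the caller.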
7119
7120
7121
7122
7123
// procPin disables preemption for the current goroutine by incrementing
// m.locks and returns the id of the P it is running on. It should be an
// internal detail, but widely used packages access it using linkname.
// Do not remove or change the type signature.
//
//go:linkname procPin
//go:nosplit
7132 func procPin() int {
7133 gp := getg()
7134 mp := gp.m
7135
7136 mp.locks++
7137 return int(mp.p.ptr().id)
7138 }
7139
7140
7141
7142
7143
7144
// procUnpin undoes the effect of procPin by decrementing m.locks. It should be
// an internal detail, but widely used packages access it using linkname.
// Do not remove or change the type signature.
//
//go:linkname procUnpin
//go:nosplit
7152 func procUnpin() {
7153 gp := getg()
7154 gp.m.locks--
7155 }
7156
//go:linkname sync_runtime_procPin sync.runtime_procPin
//go:nosplit
7159 func sync_runtime_procPin() int {
7160 return procPin()
7161 }
7162
//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
7165 func sync_runtime_procUnpin() {
7166 procUnpin()
7167 }
7168
//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
//go:nosplit
7171 func sync_atomic_runtime_procPin() int {
7172 return procPin()
7173 }
7174
//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
7177 func sync_atomic_runtime_procUnpin() {
7178 procUnpin()
7179 }
7180
// Active spinning for sync.Mutex.
//
//go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin
//go:nosplit
7185 func internal_sync_runtime_canSpin(i int) bool {
// sync.Mutex is cooperative, so we are conservative with spinning.
// Spin only a few times and only if running on a multicore machine,
// GOMAXPROCS > 1, there is at least one other running P, and the local runq
// is empty. As opposed to runtime mutexes we don't do passive spinning here,
// because there can be work on the global runq or on other Ps.
7191 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7192 return false
7193 }
7194 if p := getg().m.p.ptr(); !runqempty(p) {
7195 return false
7196 }
7197 return true
7198 }
7199
//go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin
//go:nosplit
7202 func internal_sync_runtime_doSpin() {
7203 procyield(active_spin_cnt)
7204 }
7205
7206
7207
7208
7209
7210
// sync_runtime_canSpin should be an internal detail (see
// internal_sync_runtime_canSpin above), but widely used packages access it
// using linkname. Do not remove or change the type signature.
7220 func sync_runtime_canSpin(i int) bool {
7221 return internal_sync_runtime_canSpin(i)
7222 }
7223
7224
7225
7226
7227
7228
7229
7230
// sync_runtime_doSpin should be an internal detail (see
// internal_sync_runtime_doSpin above), but widely used packages access it
// using linkname. Do not remove or change the type signature.
7236 func sync_runtime_doSpin() {
7237 internal_sync_runtime_doSpin()
7238 }
7239
7240 var stealOrder randomOrder
7241
// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow to enumerate all Ps in different pseudo-random orders without
// repetitions. The algorithm is based on the fact that if we have X such that
// X and GOMAXPROCS are coprime, then the sequence (i + X) % GOMAXPROCS gives
// the required enumeration.
7246 type randomOrder struct {
7247 count uint32
7248 coprimes []uint32
7249 }
7250
7251 type randomEnum struct {
7252 i uint32
7253 count uint32
7254 pos uint32
7255 inc uint32
7256 }
7257
7258 func (ord *randomOrder) reset(count uint32) {
7259 ord.count = count
7260 ord.coprimes = ord.coprimes[:0]
7261 for i := uint32(1); i <= count; i++ {
7262 if gcd(i, count) == 1 {
7263 ord.coprimes = append(ord.coprimes, i)
7264 }
7265 }
7266 }
7267
7268 func (ord *randomOrder) start(i uint32) randomEnum {
7269 return randomEnum{
7270 count: ord.count,
7271 pos: i % ord.count,
7272 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7273 }
7274 }
7275
7276 func (enum *randomEnum) done() bool {
7277 return enum.i == enum.count
7278 }
7279
7280 func (enum *randomEnum) next() {
7281 enum.i++
7282 enum.pos = (enum.pos + enum.inc) % enum.count
7283 }
7284
7285 func (enum *randomEnum) position() uint32 {
7286 return enum.pos
7287 }
7288
7289 func gcd(a, b uint32) uint32 {
7290 for b != 0 {
7291 a, b = b, a%b
7292 }
7293 return a
7294 }
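// randomOrderExample is an illustrative sketch (hypothetical helper, not part
// of the runtime) of how randomOrder enumerates Ps: with count = 6 the
// coprimes are [1 5]; start(9) picks pos = 9%6 = 3 and inc = 5, so the
// enumeration visits 3 2 1 0 5 4, touching every position exactly once
// because the increment is coprime with the count.
func randomOrderExample() []uint32 {
	var ord randomOrder
	ord.reset(6)
	order := make([]uint32, 0, 6)
	for enum := ord.start(9); !enum.done(); enum.next() {
		order = append(order, enum.position())
	}
	return order // [3 2 1 0 5 4]
}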
7295
// An initTask represents the set of initializations that need to be done for
// a package.
7298 type initTask struct {
7299 state uint32
7300 nfns uint32
// followed by nfns pcs, uintptr sized, one per init function to run
7302 }
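// Layout sketch (illustrative): an initTask for a package with 3 init
// functions occupies 8 + 3*PtrSize bytes: the state and nfns words above,
// followed by 3 PC-sized entries. doInit1 below walks those entries at
// offsets 8 + i*goarch.PtrSize and calls each one.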
7303
// inittrace stores statistics for init functions which are
// updated by malloc and newproc when active is true.
7306 var inittrace tracestat
7307
7308 type tracestat struct {
7309 active bool
7310 id uint64
7311 allocs uint64
7312 bytes uint64
7313 }
7314
7315 func doInit(ts []*initTask) {
7316 for _, t := range ts {
7317 doInit1(t)
7318 }
7319 }
7320
7321 func doInit1(t *initTask) {
7322 switch t.state {
7323 case 2:
7324 return
7325 case 1:
7326 throw("recursive call during initialization - linker skew")
7327 default:
7328 t.state = 1
7329
7330 var (
7331 start int64
7332 before tracestat
7333 )
7334
7335 if inittrace.active {
7336 start = nanotime()
// Load stats non-atomically since they are updated only by this init goroutine.
7338 before = inittrace
7339 }
7340
7341 if t.nfns == 0 {
// We should have pruned all of these in the linker.
7343 throw("inittask with no functions")
7344 }
7345
7346 firstFunc := add(unsafe.Pointer(t), 8)
7347 for i := uint32(0); i < t.nfns; i++ {
7348 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7349 f := *(*func())(unsafe.Pointer(&p))
7350 f()
7351 }
7352
7353 if inittrace.active {
7354 end := nanotime()
7355
7356 after := inittrace
7357
7358 f := *(*func())(unsafe.Pointer(&firstFunc))
7359 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7360
7361 var sbuf [24]byte
7362 print("init ", pkg, " @")
7363 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7364 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7365 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7366 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7367 print("\n")
7368 }
7369
7370 t.state = 2
7371 }
7372 }
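// Output sketch (illustrative values): with GODEBUG=inittrace=1 the block
// above prints one line per package init, e.g.
//
//	init crypto/tls @1.2 ms, 0.45 ms clock, 131072 bytes, 1024 allocs
//
// where the first figure is the start time relative to runtimeInitTime and
// the rest are the wall-clock time and heap cost of that package's init
// functions.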
7373