Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/stringslite"
16 "unsafe"
17 )
18
19
20 var modinfo string
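
// Goroutine scheduler: the scheduler's job is to distribute ready-to-run
// goroutines (G) over worker threads (M). An M must hold a processor (P) to
// execute Go code, though it may be blocked or in a system call without one.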
116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
126 var runtime_inittasks []*initTask
127
128
129
130
131
132 var main_init_done chan bool
133
134
135 func main_main()
136
137
138 var mainStarted bool
139
140
141 var runtimeInitTime int64
142
143
144 var initSigmask sigset
145
146
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
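
// Lock the main goroutine onto this, the main OS thread, during
// initialization; some runtime initialization and some programs require
// certain calls to be made from the main thread.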
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284 if raceenabled {
285 runExitHooks(0)
286 racefini()
287 }
288
289
290
291
292
293 if runningPanicDefers.Load() != 0 {
294
295 for c := 0; c < 1000; c++ {
296 if runningPanicDefers.Load() == 0 {
297 break
298 }
299 Gosched()
300 }
301 }
302 if panicking.Load() != 0 {
303 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
304 }
305 runExitHooks(0)
306
307 exit(0)
308 for {
309 var x *int32
310 *x = 0
311 }
312 }
313
314
315
316
317 func os_beforeExit(exitCode int) {
318 runExitHooks(exitCode)
319 if exitCode == 0 && raceenabled {
320 racefini()
321 }
322 }
323
324 func init() {
325 exithook.Gosched = Gosched
326 exithook.Goid = func() uint64 { return getg().goid }
327 exithook.Throw = throw
328 }
329
330 func runExitHooks(code int) {
331 exithook.Run(code)
332 }
333
334
335 func init() {
336 go forcegchelper()
337 }
338
339 func forcegchelper() {
340 forcegc.g = getg()
341 lockInit(&forcegc.lock, lockRankForcegc)
342 for {
343 lock(&forcegc.lock)
344 if forcegc.idle.Load() {
345 throw("forcegc: phase error")
346 }
347 forcegc.idle.Store(true)
348 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
349
350 if debug.gctrace > 0 {
351 println("GC forced")
352 }
353
354 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
355 }
356 }
357
358
359
360
361
362 func Gosched() {
363 checkTimeouts()
364 mcall(gosched_m)
365 }
366
367
368
369
370
371 func goschedguarded() {
372 mcall(goschedguarded_m)
373 }
374
375
376
377
378
379
380 func goschedIfBusy() {
381 gp := getg()
382
383
384 if !gp.preempt && sched.npidle.Load() > 0 {
385 return
386 }
387 mcall(gosched_m)
388 }
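
// gopark parks the current goroutine in a waiting state with the given wait
// reason and switches to the scheduler via mcall(park_m). The unlockf
// callback and lock are stashed on the M; park_m (defined elsewhere) calls
// unlockf after the status change and, if it returns false, resumes the
// goroutine immediately. Otherwise the goroutine must later be woken with
// goready.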
418 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
419 if reason != waitReasonSleep {
420 checkTimeouts()
421 }
422 mp := acquirem()
423 gp := mp.curg
424 status := readgstatus(gp)
425 if status != _Grunning && status != _Gscanrunning {
426 throw("gopark: bad g status")
427 }
428 mp.waitlock = lock
429 mp.waitunlockf = unlockf
430 gp.waitreason = reason
431 mp.waitTraceBlockReason = traceReason
432 mp.waitTraceSkip = traceskip
433 releasem(mp)
434
435 mcall(park_m)
436 }
437
438
439
440 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
441 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
442 }
454 func goready(gp *g, traceskip int) {
455 systemstack(func() {
456 ready(gp, traceskip, true)
457 })
458 }
459
460
461 func acquireSudog() *sudog {
470 mp := acquirem()
471 pp := mp.p.ptr()
472 if len(pp.sudogcache) == 0 {
473 lock(&sched.sudoglock)
474
475 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
476 s := sched.sudogcache
477 sched.sudogcache = s.next
478 s.next = nil
479 pp.sudogcache = append(pp.sudogcache, s)
480 }
481 unlock(&sched.sudoglock)
482
483 if len(pp.sudogcache) == 0 {
484 pp.sudogcache = append(pp.sudogcache, new(sudog))
485 }
486 }
487 n := len(pp.sudogcache)
488 s := pp.sudogcache[n-1]
489 pp.sudogcache[n-1] = nil
490 pp.sudogcache = pp.sudogcache[:n-1]
491 if s.elem != nil {
492 throw("acquireSudog: found s.elem != nil in cache")
493 }
494 releasem(mp)
495 return s
496 }
497
498
499 func releaseSudog(s *sudog) {
500 if s.elem != nil {
501 throw("runtime: sudog with non-nil elem")
502 }
503 if s.isSelect {
504 throw("runtime: sudog with non-false isSelect")
505 }
506 if s.next != nil {
507 throw("runtime: sudog with non-nil next")
508 }
509 if s.prev != nil {
510 throw("runtime: sudog with non-nil prev")
511 }
512 if s.waitlink != nil {
513 throw("runtime: sudog with non-nil waitlink")
514 }
515 if s.c != nil {
516 throw("runtime: sudog with non-nil c")
517 }
518 gp := getg()
519 if gp.param != nil {
520 throw("runtime: releaseSudog with non-nil gp.param")
521 }
522 mp := acquirem()
523 pp := mp.p.ptr()
524 if len(pp.sudogcache) == cap(pp.sudogcache) {
525
526 var first, last *sudog
527 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
528 n := len(pp.sudogcache)
529 p := pp.sudogcache[n-1]
530 pp.sudogcache[n-1] = nil
531 pp.sudogcache = pp.sudogcache[:n-1]
532 if first == nil {
533 first = p
534 } else {
535 last.next = p
536 }
537 last = p
538 }
539 lock(&sched.sudoglock)
540 last.next = sched.sudogcache
541 sched.sudogcache = first
542 unlock(&sched.sudoglock)
543 }
544 pp.sudogcache = append(pp.sudogcache, s)
545 releasem(mp)
546 }
547
548
549 func badmcall(fn func(*g)) {
550 throw("runtime: mcall called on m->g0 stack")
551 }
552
553 func badmcall2(fn func(*g)) {
554 throw("runtime: mcall function returned")
555 }
556
557 func badreflectcall() {
558 panic(plainError("arg size to reflect.call more than 1GB"))
559 }
560
561
562
563 func badmorestackg0() {
564 if !crashStackImplemented {
565 writeErrStr("fatal: morestack on g0\n")
566 return
567 }
568
569 g := getg()
570 switchToCrashStack(func() {
571 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
572 g.m.traceback = 2
573 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
574 print("\n")
575
576 throw("morestack on g0")
577 })
578 }
579
580
581
582 func badmorestackgsignal() {
583 writeErrStr("fatal: morestack on gsignal\n")
584 }
585
586
587 func badctxt() {
588 throw("ctxt != 0")
589 }
590
591
592
593 var gcrash g
594
595 var crashingG atomic.Pointer[g]
605 func switchToCrashStack(fn func()) {
606 me := getg()
607 if crashingG.CompareAndSwapNoWB(nil, me) {
608 switchToCrashStack0(fn)
609 abort()
610 }
611 if crashingG.Load() == me {
612
613 writeErrStr("fatal: recursive switchToCrashStack\n")
614 abort()
615 }
616
617 usleep_no_g(100)
618 writeErrStr("fatal: concurrent switchToCrashStack\n")
619 abort()
620 }
621
622
623
624
625 const crashStackImplemented = GOOS != "windows"
626
627
628 func switchToCrashStack0(fn func())
629
630 func lockedOSThread() bool {
631 gp := getg()
632 return gp.lockedm != 0 && gp.m.lockedg != 0
633 }
634
635 var (
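// allglock protects allgs. allgs is the slice of all gs ever created; it
// only grows, and gs are never removed from it. allglen and allgptr are
// atomically updated copies of len(allgs) and &allgs[0] for lock-free
// readers.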
642 allglock mutex
643 allgs []*g
657 allglen uintptr
658 allgptr **g
659 )
660
661 func allgadd(gp *g) {
662 if readgstatus(gp) == _Gidle {
663 throw("allgadd: bad status Gidle")
664 }
665
666 lock(&allglock)
667 allgs = append(allgs, gp)
668 if &allgs[0] != allgptr {
669 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
670 }
671 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
672 unlock(&allglock)
673 }
674
675
676
677
678 func allGsSnapshot() []*g {
679 assertWorldStoppedOrLockHeld(&allglock)
686 return allgs[:len(allgs):len(allgs)]
687 }
688
689
690 func atomicAllG() (**g, uintptr) {
691 length := atomic.Loaduintptr(&allglen)
692 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
693 return ptr, length
694 }
695
696
697 func atomicAllGIndex(ptr **g, i uintptr) *g {
698 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
699 }
700
701
702
703
704 func forEachG(fn func(gp *g)) {
705 lock(&allglock)
706 for _, gp := range allgs {
707 fn(gp)
708 }
709 unlock(&allglock)
710 }
711
712
713
714
715
716 func forEachGRace(fn func(gp *g)) {
717 ptr, length := atomicAllG()
718 for i := uintptr(0); i < length; i++ {
719 gp := atomicAllGIndex(ptr, i)
720 fn(gp)
721 }
722 return
723 }
724
725 const (
726
727
728 _GoidCacheBatch = 16
729 )
730
731
732
733 func cpuinit(env string) {
734 switch GOOS {
735 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
736 cpu.DebugOptions = true
737 }
738 cpu.Initialize(env)
739
740
741
742 switch GOARCH {
743 case "386", "amd64":
744 x86HasPOPCNT = cpu.X86.HasPOPCNT
745 x86HasSSE41 = cpu.X86.HasSSE41
746 x86HasFMA = cpu.X86.HasFMA
747
748 case "arm":
749 armHasVFPv4 = cpu.ARM.HasVFPv4
750
751 case "arm64":
752 arm64HasATOMICS = cpu.ARM64.HasATOMICS
753
754 case "loong64":
755 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
756 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
757 loong64HasLSX = cpu.Loong64.HasLSX
758 }
759 }
760
761
762
763
764 func getGodebugEarly() string {
765 const prefix = "GODEBUG="
766 var env string
767 switch GOOS {
768 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
769
770
771
772 n := int32(0)
773 for argv_index(argv, argc+1+n) != nil {
774 n++
775 }
776
777 for i := int32(0); i < n; i++ {
778 p := argv_index(argv, argc+1+i)
779 s := unsafe.String(p, findnull(p))
780
781 if stringslite.HasPrefix(s, prefix) {
782 env = gostring(p)[len(prefix):]
783 break
784 }
785 }
786 }
787 return env
788 }
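
// schedinit performs the bulk of runtime initialization. The bootstrap
// sequence is: call osinit, call schedinit, make and queue the main
// goroutine, call mstart; the new goroutine then calls runtime·main.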
798 func schedinit() {
799 lockInit(&sched.lock, lockRankSched)
800 lockInit(&sched.sysmonlock, lockRankSysmon)
801 lockInit(&sched.deferlock, lockRankDefer)
802 lockInit(&sched.sudoglock, lockRankSudog)
803 lockInit(&deadlock, lockRankDeadlock)
804 lockInit(&paniclk, lockRankPanic)
805 lockInit(&allglock, lockRankAllg)
806 lockInit(&allpLock, lockRankAllp)
807 lockInit(&reflectOffs.lock, lockRankReflectOffs)
808 lockInit(&finlock, lockRankFin)
809 lockInit(&cpuprof.lock, lockRankCpuprof)
810 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
811 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
812 traceLockInit()
813
814
815
816 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
817
818 lockVerifyMSize()
819
820
821
822 gp := getg()
823 if raceenabled {
824 gp.racectx, raceprocctx0 = raceinit()
825 }
826
827 sched.maxmcount = 10000
828 crashFD.Store(^uintptr(0))
829
830
831 worldStopped()
832
833 ticks.init()
834 moduledataverify()
835 stackinit()
836 mallocinit()
837 godebug := getGodebugEarly()
838 cpuinit(godebug)
839 randinit()
840 alginit()
841 mcommoninit(gp.m, -1)
842 modulesinit()
843 typelinksinit()
844 itabsinit()
845 stkobjinit()
846
847 sigsave(&gp.m.sigmask)
848 initSigmask = gp.m.sigmask
849
850 goargs()
851 goenvs()
852 secure()
853 checkfds()
854 parsedebugvars()
855 gcinit()
856
857
858
859 gcrash.stack = stackalloc(16384)
860 gcrash.stackguard0 = gcrash.stack.lo + 1000
861 gcrash.stackguard1 = gcrash.stack.lo + 1000
862
863
864
865
866
867 if disableMemoryProfiling {
868 MemProfileRate = 0
869 }
870
871
872 mProfStackInit(gp.m)
873
874 lock(&sched.lock)
875 sched.lastpoll.Store(nanotime())
876 procs := ncpu
877 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
878 procs = n
879 }
880 if procresize(procs) != nil {
881 throw("unknown runnable goroutine during bootstrap")
882 }
883 unlock(&sched.lock)
884
885
886 worldStarted()
887
888 if buildVersion == "" {
889
890
891 buildVersion = "unknown"
892 }
893 if len(modinfo) == 1 {
894
895
896 modinfo = ""
897 }
898 }
899
900 func dumpgstatus(gp *g) {
901 thisg := getg()
902 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
903 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
904 }
905
906
907 func checkmcount() {
908 assertLockHeld(&sched.lock)
918 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
919 if count > sched.maxmcount {
920 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
921 throw("thread exhaustion")
922 }
923 }
924
925
926
927
928
929 func mReserveID() int64 {
930 assertLockHeld(&sched.lock)
931
932 if sched.mnext+1 < sched.mnext {
933 throw("runtime: thread ID overflow")
934 }
935 id := sched.mnext
936 sched.mnext++
937 checkmcount()
938 return id
939 }
940
941
942 func mcommoninit(mp *m, id int64) {
943 gp := getg()
944
945
946 if gp != gp.m.g0 {
947 callers(1, mp.createstack[:])
948 }
949
950 lock(&sched.lock)
951
952 if id >= 0 {
953 mp.id = id
954 } else {
955 mp.id = mReserveID()
956 }
957
958 mrandinit(mp)
959
960 mpreinit(mp)
961 if mp.gsignal != nil {
962 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
963 }
964
965
966
967 mp.alllink = allm
968
969
970
971 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
972 unlock(&sched.lock)
973
974
975 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
976 mp.cgoCallers = new(cgoCallers)
977 }
978 mProfStackInit(mp)
979 }
980
981
982
983
984
985 func mProfStackInit(mp *m) {
986 if debug.profstackdepth == 0 {
987
988
989 return
990 }
991 mp.profStack = makeProfStackFP()
992 mp.mLockProfile.stack = makeProfStackFP()
993 }
994
995
996
997
998 func makeProfStackFP() []uintptr {
1005 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1006 }
1007
1008
1009
1010 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1011
1012
1013 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1014
1015 func (mp *m) becomeSpinning() {
1016 mp.spinning = true
1017 sched.nmspinning.Add(1)
1018 sched.needspinning.Store(0)
1019 }
1028 func (mp *m) snapshotAllp() []*p {
1029 mp.allpSnapshot = allp
1030 return mp.allpSnapshot
1031 }
1039 func (mp *m) clearAllpSnapshot() {
1040 mp.allpSnapshot = nil
1041 }
1042
1043 func (mp *m) hasCgoOnStack() bool {
1044 return mp.ncgo > 0 || mp.isextra
1045 }
1046
1047 const (
1048
1049
1050 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1051
1052
1053
1054 osHasLowResClockInt = goos.IsWindows
1055
1056
1057
1058 osHasLowResClock = osHasLowResClockInt > 0
1059 )
1060
1061
1062 func ready(gp *g, traceskip int, next bool) {
1063 status := readgstatus(gp)
1064
1065
1066 mp := acquirem()
1067 if status&^_Gscan != _Gwaiting {
1068 dumpgstatus(gp)
1069 throw("bad g->status in ready")
1070 }
1071
1072
1073 trace := traceAcquire()
1074 casgstatus(gp, _Gwaiting, _Grunnable)
1075 if trace.ok() {
1076 trace.GoUnpark(gp, traceskip)
1077 traceRelease(trace)
1078 }
1079 runqput(mp.p.ptr(), gp, next)
1080 wakep()
1081 releasem(mp)
1082 }
1083
1084
1085
1086 const freezeStopWait = 0x7fffffff
1087
1088
1089
1090 var freezing atomic.Bool
1091
1092
1093
1094
1095 func freezetheworld() {
1096 freezing.Store(true)
1097 if debug.dontfreezetheworld > 0 {
1122 usleep(1000)
1123 return
1124 }
1125
1126
1127
1128
1129 for i := 0; i < 5; i++ {
1130
1131 sched.stopwait = freezeStopWait
1132 sched.gcwaiting.Store(true)
1133
1134 if !preemptall() {
1135 break
1136 }
1137 usleep(1000)
1138 }
1139
1140 usleep(1000)
1141 preemptall()
1142 usleep(1000)
1143 }
1144
1145
1146
1147
1148
1149 func readgstatus(gp *g) uint32 {
1150 return gp.atomicstatus.Load()
1151 }
1152
1153
1154
1155
1156
1157 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1158 success := false
1159
1160
1161 switch oldval {
1162 default:
1163 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1164 dumpgstatus(gp)
1165 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1166 case _Gscanrunnable,
1167 _Gscanwaiting,
1168 _Gscanrunning,
1169 _Gscansyscall,
1170 _Gscanpreempted:
1171 if newval == oldval&^_Gscan {
1172 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1173 }
1174 }
1175 if !success {
1176 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1177 dumpgstatus(gp)
1178 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1179 }
1180 releaseLockRankAndM(lockRankGscan)
1181 }
1182
1183
1184
1185 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1186 switch oldval {
1187 case _Grunnable,
1188 _Grunning,
1189 _Gwaiting,
1190 _Gsyscall:
1191 if newval == oldval|_Gscan {
1192 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1193 if r {
1194 acquireLockRankAndM(lockRankGscan)
1195 }
1196 return r
1197
1198 }
1199 }
1200 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1201 throw("castogscanstatus")
1202 panic("not reached")
1203 }
1204
1205
1206
1207 var casgstatusAlwaysTrack = false
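
// casgstatus atomically transitions gp's status from oldval to newval,
// looping until the swap succeeds (the status may be held in a _Gscan state
// by a concurrent scan). When tracking is enabled it also accumulates
// runnable-time and mutex-wait statistics for scheduling-latency metrics.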
1215 func casgstatus(gp *g, oldval, newval uint32) {
1216 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1217 systemstack(func() {
1218
1219
1220 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1221 throw("casgstatus: bad incoming values")
1222 })
1223 }
1224
1225 lockWithRankMayAcquire(nil, lockRankGscan)
1226
1227
1228 const yieldDelay = 5 * 1000
1229 var nextYield int64
1230
1231
1232
1233 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1234 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1235 systemstack(func() {
1236
1237
1238 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1239 })
1240 }
1241 if i == 0 {
1242 nextYield = nanotime() + yieldDelay
1243 }
1244 if nanotime() < nextYield {
1245 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1246 procyield(1)
1247 }
1248 } else {
1249 osyield()
1250 nextYield = nanotime() + yieldDelay/2
1251 }
1252 }
1253
1254 if gp.syncGroup != nil {
1255 systemstack(func() {
1256 gp.syncGroup.changegstatus(gp, oldval, newval)
1257 })
1258 }
1259
1260 if oldval == _Grunning {
1261
1262 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1263 gp.tracking = true
1264 }
1265 gp.trackingSeq++
1266 }
1267 if !gp.tracking {
1268 return
1269 }
1276 switch oldval {
1277 case _Grunnable:
1278
1279
1280
1281 now := nanotime()
1282 gp.runnableTime += now - gp.trackingStamp
1283 gp.trackingStamp = 0
1284 case _Gwaiting:
1285 if !gp.waitreason.isMutexWait() {
1286
1287 break
1288 }
1289
1290
1291
1292
1293
1294 now := nanotime()
1295 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1296 gp.trackingStamp = 0
1297 }
1298 switch newval {
1299 case _Gwaiting:
1300 if !gp.waitreason.isMutexWait() {
1301
1302 break
1303 }
1304
1305 now := nanotime()
1306 gp.trackingStamp = now
1307 case _Grunnable:
1308
1309
1310 now := nanotime()
1311 gp.trackingStamp = now
1312 case _Grunning:
1313
1314
1315
1316 gp.tracking = false
1317 sched.timeToRun.record(gp.runnableTime)
1318 gp.runnableTime = 0
1319 }
1320 }
1321
1322
1323
1324
1325 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1326
1327 gp.waitreason = reason
1328 casgstatus(gp, old, _Gwaiting)
1329 }
1330
1331
1332
1333
1334
1335 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1336 if !reason.isWaitingForSuspendG() {
1337 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1338 }
1339 casGToWaiting(gp, old, reason)
1340 }
1341
1342
1343
1344
1345
1346 func casGToPreemptScan(gp *g, old, new uint32) {
1347 if old != _Grunning || new != _Gscan|_Gpreempted {
1348 throw("bad g transition")
1349 }
1350 acquireLockRankAndM(lockRankGscan)
1351 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1352 }
1359 }
1360
1361
1362
1363
1364 func casGFromPreempted(gp *g, old, new uint32) bool {
1365 if old != _Gpreempted || new != _Gwaiting {
1366 throw("bad g transition")
1367 }
1368 gp.waitreason = waitReasonPreempted
1369 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1370 return false
1371 }
1372 if sg := gp.syncGroup; sg != nil {
1373 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1374 }
1375 return true
1376 }
1377
1378
1379 type stwReason uint8
1380
1381
1382
1383
1384 const (
1385 stwUnknown stwReason = iota
1386 stwGCMarkTerm
1387 stwGCSweepTerm
1388 stwWriteHeapDump
1389 stwGoroutineProfile
1390 stwGoroutineProfileCleanup
1391 stwAllGoroutinesStack
1392 stwReadMemStats
1393 stwAllThreadsSyscall
1394 stwGOMAXPROCS
1395 stwStartTrace
1396 stwStopTrace
1397 stwForTestCountPagesInUse
1398 stwForTestReadMetricsSlow
1399 stwForTestReadMemStatsSlow
1400 stwForTestPageCachePagesLeaked
1401 stwForTestResetDebugLog
1402 )
1403
1404 func (r stwReason) String() string {
1405 return stwReasonStrings[r]
1406 }
1407
1408 func (r stwReason) isGC() bool {
1409 return r == stwGCMarkTerm || r == stwGCSweepTerm
1410 }
1411
1412
1413
1414
1415 var stwReasonStrings = [...]string{
1416 stwUnknown: "unknown",
1417 stwGCMarkTerm: "GC mark termination",
1418 stwGCSweepTerm: "GC sweep termination",
1419 stwWriteHeapDump: "write heap dump",
1420 stwGoroutineProfile: "goroutine profile",
1421 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1422 stwAllGoroutinesStack: "all goroutines stack trace",
1423 stwReadMemStats: "read mem stats",
1424 stwAllThreadsSyscall: "AllThreadsSyscall",
1425 stwGOMAXPROCS: "GOMAXPROCS",
1426 stwStartTrace: "start trace",
1427 stwStopTrace: "stop trace",
1428 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1429 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1430 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1431 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1432 stwForTestResetDebugLog: "ResetDebugLog (test)",
1433 }
1434
1435
1436
1437 type worldStop struct {
1438 reason stwReason
1439 startedStopping int64
1440 finishedStopping int64
1441 stoppingCPUTime int64
1442 }
1443
1444
1445
1446
1447 var stopTheWorldContext worldStop
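
// stopTheWorld acquires worldsema, then stops all Ps via
// stopTheWorldWithSema on the system stack. The returned worldStop must be
// passed to startTheWorld to resume execution.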
1466 func stopTheWorld(reason stwReason) worldStop {
1467 semacquire(&worldsema)
1468 gp := getg()
1469 gp.m.preemptoff = reason.String()
1470 systemstack(func() {
1471 stopTheWorldContext = stopTheWorldWithSema(reason)
1472 })
1473 return stopTheWorldContext
1474 }
1475
1476
1477
1478
1479 func startTheWorld(w worldStop) {
1480 systemstack(func() { startTheWorldWithSema(0, w) })
1497 mp := acquirem()
1498 mp.preemptoff = ""
1499 semrelease1(&worldsema, true, 0)
1500 releasem(mp)
1501 }
1502
1503
1504
1505
1506 func stopTheWorldGC(reason stwReason) worldStop {
1507 semacquire(&gcsema)
1508 return stopTheWorld(reason)
1509 }
1510
1511
1512
1513
1514 func startTheWorldGC(w worldStop) {
1515 startTheWorld(w)
1516 semrelease(&gcsema)
1517 }
1518
1519
1520 var worldsema uint32 = 1
1528 var gcsema uint32 = 1
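
// stopTheWorldWithSema is the core implementation of stopping the world.
// The caller must hold worldsema. On return, every P is in _Pgcstop and no
// goroutine is executing Go code.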
1562 func stopTheWorldWithSema(reason stwReason) worldStop {
1582 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1583
1584 trace := traceAcquire()
1585 if trace.ok() {
1586 trace.STWStart(reason)
1587 traceRelease(trace)
1588 }
1589 gp := getg()
1590
1591
1592
1593 if gp.m.locks > 0 {
1594 throw("stopTheWorld: holding locks")
1595 }
1596
1597 lock(&sched.lock)
1598 start := nanotime()
1599 sched.stopwait = gomaxprocs
1600 sched.gcwaiting.Store(true)
1601 preemptall()
1602
1603 gp.m.p.ptr().status = _Pgcstop
1604 gp.m.p.ptr().gcStopTime = start
1605 sched.stopwait--
1606
1607 trace = traceAcquire()
1608 for _, pp := range allp {
1609 s := pp.status
1610 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1611 if trace.ok() {
1612 trace.ProcSteal(pp, false)
1613 }
1614 pp.syscalltick++
1615 pp.gcStopTime = nanotime()
1616 sched.stopwait--
1617 }
1618 }
1619 if trace.ok() {
1620 traceRelease(trace)
1621 }
1622
1623
1624 now := nanotime()
1625 for {
1626 pp, _ := pidleget(now)
1627 if pp == nil {
1628 break
1629 }
1630 pp.status = _Pgcstop
1631 pp.gcStopTime = nanotime()
1632 sched.stopwait--
1633 }
1634 wait := sched.stopwait > 0
1635 unlock(&sched.lock)
1636
1637
1638 if wait {
1639 for {
1640
1641 if notetsleep(&sched.stopnote, 100*1000) {
1642 noteclear(&sched.stopnote)
1643 break
1644 }
1645 preemptall()
1646 }
1647 }
1648
1649 finish := nanotime()
1650 startTime := finish - start
1651 if reason.isGC() {
1652 sched.stwStoppingTimeGC.record(startTime)
1653 } else {
1654 sched.stwStoppingTimeOther.record(startTime)
1655 }
1656
1657
1658
1659
1660
1661 stoppingCPUTime := int64(0)
1662 bad := ""
1663 if sched.stopwait != 0 {
1664 bad = "stopTheWorld: not stopped (stopwait != 0)"
1665 } else {
1666 for _, pp := range allp {
1667 if pp.status != _Pgcstop {
1668 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1669 }
1670 if pp.gcStopTime == 0 && bad == "" {
1671 bad = "stopTheWorld: broken CPU time accounting"
1672 }
1673 stoppingCPUTime += finish - pp.gcStopTime
1674 pp.gcStopTime = 0
1675 }
1676 }
1677 if freezing.Load() {
1678
1679
1680
1681
1682 lock(&deadlock)
1683 lock(&deadlock)
1684 }
1685 if bad != "" {
1686 throw(bad)
1687 }
1688
1689 worldStopped()
1690
1691
1692 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1693
1694 return worldStop{
1695 reason: reason,
1696 startedStopping: start,
1697 finishedStopping: finish,
1698 stoppingCPUTime: stoppingCPUTime,
1699 }
1700 }
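
// startTheWorldWithSema undoes stopTheWorldWithSema: it re-runs procresize,
// restarts the Ms for each runnable P (creating new ones as needed), records
// stop-the-world latency metrics, and returns the timestamp used for them.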
1708 func startTheWorldWithSema(now int64, w worldStop) int64 {
1709 assertWorldStopped()
1710
1711 mp := acquirem()
1712 if netpollinited() {
1713 list, delta := netpoll(0)
1714 injectglist(&list)
1715 netpollAdjustWaiters(delta)
1716 }
1717 lock(&sched.lock)
1718
1719 procs := gomaxprocs
1720 if newprocs != 0 {
1721 procs = newprocs
1722 newprocs = 0
1723 }
1724 p1 := procresize(procs)
1725 sched.gcwaiting.Store(false)
1726 if sched.sysmonwait.Load() {
1727 sched.sysmonwait.Store(false)
1728 notewakeup(&sched.sysmonnote)
1729 }
1730 unlock(&sched.lock)
1731
1732 worldStarted()
1733
1734 for p1 != nil {
1735 p := p1
1736 p1 = p1.link.ptr()
1737 if p.m != 0 {
1738 mp := p.m.ptr()
1739 p.m = 0
1740 if mp.nextp != 0 {
1741 throw("startTheWorld: inconsistent mp->nextp")
1742 }
1743 mp.nextp.set(p)
1744 notewakeup(&mp.park)
1745 } else {
1746
1747 newm(nil, p, -1)
1748 }
1749 }
1750
1751
1752 if now == 0 {
1753 now = nanotime()
1754 }
1755 totalTime := now - w.startedStopping
1756 if w.reason.isGC() {
1757 sched.stwTotalTimeGC.record(totalTime)
1758 } else {
1759 sched.stwTotalTimeOther.record(totalTime)
1760 }
1761 trace := traceAcquire()
1762 if trace.ok() {
1763 trace.STWDone()
1764 traceRelease(trace)
1765 }
1766
1767
1768
1769
1770 wakep()
1771
1772 releasem(mp)
1773
1774 return now
1775 }
1776
1777
1778
1779 func usesLibcall() bool {
1780 switch GOOS {
1781 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1782 return true
1783 case "openbsd":
1784 return GOARCH != "mips64"
1785 }
1786 return false
1787 }
1788
1789
1790
1791 func mStackIsSystemAllocated() bool {
1792 switch GOOS {
1793 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1794 return true
1795 case "openbsd":
1796 return GOARCH != "mips64"
1797 }
1798 return false
1799 }
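
// mstart is the assembly entry point for new Ms; it calls mstart0, which
// sets up g0's stack bounds (conservatively, when running on an OS-allocated
// stack whose exact bounds are unknown) and calls mstart1. If mstart1
// returns, the thread is exiting and mstart0 calls mexit.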
1803 func mstart()
1814 func mstart0() {
1815 gp := getg()
1816
1817 osStack := gp.stack.lo == 0
1818 if osStack {
1827 size := gp.stack.hi
1828 if size == 0 {
1829 size = 16384 * sys.StackGuardMultiplier
1830 }
1831 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1832 gp.stack.lo = gp.stack.hi - size + 1024
1833 }
1834
1835
1836 gp.stackguard0 = gp.stack.lo + stackGuard
1837
1838
1839 gp.stackguard1 = gp.stackguard0
1840 mstart1()
1841
1842
1843 if mStackIsSystemAllocated() {
1844
1845
1846
1847 osStack = true
1848 }
1849 mexit(osStack)
1850 }
1851
1852
1853
1854
1855
1856 func mstart1() {
1857 gp := getg()
1858
1859 if gp != gp.m.g0 {
1860 throw("bad runtime·mstart")
1861 }
1869 gp.sched.g = guintptr(unsafe.Pointer(gp))
1870 gp.sched.pc = sys.GetCallerPC()
1871 gp.sched.sp = sys.GetCallerSP()
1872
1873 asminit()
1874 minit()
1875
1876
1877
1878 if gp.m == &m0 {
1879 mstartm0()
1880 }
1881
1882 if debug.dataindependenttiming == 1 {
1883 sys.EnableDIT()
1884 }
1885
1886 if fn := gp.m.mstartfn; fn != nil {
1887 fn()
1888 }
1889
1890 if gp.m != &m0 {
1891 acquirep(gp.m.nextp.ptr())
1892 gp.m.nextp = 0
1893 }
1894 schedule()
1895 }
1903 func mstartm0() {
1904
1905
1906
1907 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1908 cgoHasExtraM = true
1909 newextram()
1910 }
1911 initsig(false)
1912 }
1913
1914
1915
1916
1917 func mPark() {
1918 gp := getg()
1919 notesleep(&gp.m.park)
1920 noteclear(&gp.m.park)
1921 }
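
// mexit tears down and exits the current thread. The main thread (m0) cannot
// be exited, so it hands off its P and parks forever instead. osStack
// reports whether g0's stack is OS-allocated.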
1933 func mexit(osStack bool) {
1934 mp := getg().m
1935
1936 if mp == &m0 {
1948 handoffp(releasep())
1949 lock(&sched.lock)
1950 sched.nmfreed++
1951 checkdead()
1952 unlock(&sched.lock)
1953 mPark()
1954 throw("locked m0 woke up")
1955 }
1956
1957 sigblock(true)
1958 unminit()
1959
1960
1961 if mp.gsignal != nil {
1962 stackfree(mp.gsignal.stack)
1963
1964
1965
1966
1967 mp.gsignal = nil
1968 }
1969
1970
1971 vgetrandomDestroy(mp)
1972
1973
1974 lock(&sched.lock)
1975 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1976 if *pprev == mp {
1977 *pprev = mp.alllink
1978 goto found
1979 }
1980 }
1981 throw("m not found in allm")
1982 found:
1997 mp.freeWait.Store(freeMWait)
1998 mp.freelink = sched.freem
1999 sched.freem = mp
2000 unlock(&sched.lock)
2001
2002 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2003 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2004
2005
2006 handoffp(releasep())
2007
2008
2009
2010
2011
2012 lock(&sched.lock)
2013 sched.nmfreed++
2014 checkdead()
2015 unlock(&sched.lock)
2016
2017 if GOOS == "darwin" || GOOS == "ios" {
2018
2019
2020 if mp.signalPending.Load() != 0 {
2021 pendingPreemptSignals.Add(-1)
2022 }
2023 }
2024
2025
2026
2027 mdestroy(mp)
2028
2029 if osStack {
2030
2031 mp.freeWait.Store(freeMRef)
2032
2033
2034
2035 return
2036 }
2037
2038
2039
2040
2041
2042 exitThread(&mp.freeWait)
2043 }
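
// forEachP calls fn(p) for every P at a GC safe point. It runs on the system
// stack with the caller's goroutine parked in a waiting state for the
// duration.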
2055 func forEachP(reason waitReason, fn func(*p)) {
2056 systemstack(func() {
2057 gp := getg().m.curg
2074 casGToWaitingForSuspendG(gp, _Grunning, reason)
2075 forEachPInternal(fn)
2076 casgstatus(gp, _Gwaiting, _Grunning)
2077 })
2078 }
2089 func forEachPInternal(fn func(*p)) {
2090 mp := acquirem()
2091 pp := getg().m.p.ptr()
2092
2093 lock(&sched.lock)
2094 if sched.safePointWait != 0 {
2095 throw("forEachP: sched.safePointWait != 0")
2096 }
2097 sched.safePointWait = gomaxprocs - 1
2098 sched.safePointFn = fn
2099
2100
2101 for _, p2 := range allp {
2102 if p2 != pp {
2103 atomic.Store(&p2.runSafePointFn, 1)
2104 }
2105 }
2106 preemptall()
2114 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2115 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2116 fn(p)
2117 sched.safePointWait--
2118 }
2119 }
2120
2121 wait := sched.safePointWait > 0
2122 unlock(&sched.lock)
2123
2124
2125 fn(pp)
2126
2127
2128
2129 for _, p2 := range allp {
2130 s := p2.status
2131
2132
2133
2134 trace := traceAcquire()
2135 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2136 if trace.ok() {
2137
2138 trace.ProcSteal(p2, false)
2139 traceRelease(trace)
2140 }
2141 p2.syscalltick++
2142 handoffp(p2)
2143 } else if trace.ok() {
2144 traceRelease(trace)
2145 }
2146 }
2147
2148
2149 if wait {
2150 for {
2151
2152
2153
2154
2155 if notetsleep(&sched.safePointNote, 100*1000) {
2156 noteclear(&sched.safePointNote)
2157 break
2158 }
2159 preemptall()
2160 }
2161 }
2162 if sched.safePointWait != 0 {
2163 throw("forEachP: not done")
2164 }
2165 for _, p2 := range allp {
2166 if p2.runSafePointFn != 0 {
2167 throw("forEachP: P did not run fn")
2168 }
2169 }
2170
2171 lock(&sched.lock)
2172 sched.safePointFn = nil
2173 unlock(&sched.lock)
2174 releasem(mp)
2175 }
2188 func runSafePointFn() {
2189 p := getg().m.p.ptr()
2190
2191
2192
2193 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2194 return
2195 }
2196 sched.safePointFn(p)
2197 lock(&sched.lock)
2198 sched.safePointWait--
2199 if sched.safePointWait == 0 {
2200 notewakeup(&sched.safePointNote)
2201 }
2202 unlock(&sched.lock)
2203 }
2204
2205
2206
2207
2208 var cgoThreadStart unsafe.Pointer
2209
2210 type cgothreadstart struct {
2211 g guintptr
2212 tls *uint64
2213 fn unsafe.Pointer
2214 }
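
// allocm allocates a new m unassociated with any OS thread. fn becomes the
// new m's mstartfn and id, if non-negative, becomes its ID. pp is used as
// the allocation context if the caller has no P. It also frees the g0
// stacks of previously exited Ms queued on sched.freem.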
2225 func allocm(pp *p, fn func(), id int64) *m {
2226 allocmLock.rlock()
2227
2228
2229
2230
2231 acquirem()
2232
2233 gp := getg()
2234 if gp.m.p == 0 {
2235 acquirep(pp)
2236 }
2237
2238
2239
2240 if sched.freem != nil {
2241 lock(&sched.lock)
2242 var newList *m
2243 for freem := sched.freem; freem != nil; {
2244
2245 wait := freem.freeWait.Load()
2246 if wait == freeMWait {
2247 next := freem.freelink
2248 freem.freelink = newList
2249 newList = freem
2250 freem = next
2251 continue
2252 }
2253
2254
2255
2256 if traceEnabled() || traceShuttingDown() {
2257 traceThreadDestroy(freem)
2258 }
2259
2260
2261
2262 if wait == freeMStack {
2263
2264
2265
2266 systemstack(func() {
2267 stackfree(freem.g0.stack)
2268 })
2269 }
2270 freem = freem.freelink
2271 }
2272 sched.freem = newList
2273 unlock(&sched.lock)
2274 }
2275
2276 mp := new(m)
2277 mp.mstartfn = fn
2278 mcommoninit(mp, id)
2279
2280
2281
2282 if iscgo || mStackIsSystemAllocated() {
2283 mp.g0 = malg(-1)
2284 } else {
2285 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2286 }
2287 mp.g0.m = mp
2288
2289 if pp == gp.m.p.ptr() {
2290 releasep()
2291 }
2292
2293 releasem(gp.m)
2294 allocmLock.runlock()
2295 return mp
2296 }
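
// needm is called on a thread that has no m (for example, a cgo callback
// from a C-created thread). It takes an m from the extra-M list, installs it
// on the thread, and moves its curg from _Gdead to _Gsyscall so the callback
// can run Go code. signal reports whether the caller is a signal handler.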
2337 func needm(signal bool) {
2338 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2339
2340
2341
2342
2343
2344
2345 writeErrStr("fatal error: cgo callback before cgo call\n")
2346 exit(1)
2347 }
2357 var sigmask sigset
2358 sigsave(&sigmask)
2359 sigblock(false)
2360
2361
2362
2363
2364 mp, last := getExtraM()
2373 mp.needextram = last
2374
2375
2376 mp.sigmask = sigmask
2377
2378
2379
2380 osSetupTLS(mp)
2381
2382
2383
2384 setg(mp.g0)
2385 sp := sys.GetCallerSP()
2386 callbackUpdateSystemStack(mp, sp, signal)
2387
2388
2389
2390
2391 mp.isExtraInC = false
2392
2393
2394 asminit()
2395 minit()
2402 var trace traceLocker
2403 if !signal {
2404 trace = traceAcquire()
2405 }
2406
2407
2408 casgstatus(mp.curg, _Gdead, _Gsyscall)
2409 sched.ngsys.Add(-1)
2410
2411 if !signal {
2412 if trace.ok() {
2413 trace.GoCreateSyscall(mp.curg)
2414 traceRelease(trace)
2415 }
2416 }
2417 mp.isExtraInSig = signal
2418 }
2419
2420
2421
2422
2423 func needAndBindM() {
2424 needm(false)
2425
2426 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2427 cgoBindM()
2428 }
2429 }
2430
2431
2432
2433
2434 func newextram() {
2435 c := extraMWaiters.Swap(0)
2436 if c > 0 {
2437 for i := uint32(0); i < c; i++ {
2438 oneNewExtraM()
2439 }
2440 } else if extraMLength.Load() == 0 {
2441
2442 oneNewExtraM()
2443 }
2444 }
2445
2446
2447 func oneNewExtraM() {
2448
2449
2450
2451
2452
2453 mp := allocm(nil, nil, -1)
2454 gp := malg(4096)
2455 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2456 gp.sched.sp = gp.stack.hi
2457 gp.sched.sp -= 4 * goarch.PtrSize
2458 gp.sched.lr = 0
2459 gp.sched.g = guintptr(unsafe.Pointer(gp))
2460 gp.syscallpc = gp.sched.pc
2461 gp.syscallsp = gp.sched.sp
2462 gp.stktopsp = gp.sched.sp
2463
2464
2465
2466
2467 casgstatus(gp, _Gidle, _Gdead)
2468 gp.m = mp
2469 mp.curg = gp
2470 mp.isextra = true
2471
2472 mp.isExtraInC = true
2473 mp.lockedInt++
2474 mp.lockedg.set(gp)
2475 gp.lockedm.set(mp)
2476 gp.goid = sched.goidgen.Add(1)
2477 if raceenabled {
2478 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2479 }
2480
2481 allgadd(gp)
2482
2483
2484
2485
2486
2487 sched.ngsys.Add(1)
2488
2489
2490 addExtraM(mp)
2491 }
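
// dropm undoes needm: it marks curg dead again, detaches the m from the
// thread, returns the m to the extra-M list, and restores the signal mask
// saved by needm.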
2526 func dropm() {
2527
2528
2529
2530 mp := getg().m
2531
2532
2533
2534
2535
2536 var trace traceLocker
2537 if !mp.isExtraInSig {
2538 trace = traceAcquire()
2539 }
2540
2541
2542 casgstatus(mp.curg, _Gsyscall, _Gdead)
2543 mp.curg.preemptStop = false
2544 sched.ngsys.Add(1)
2545
2546 if !mp.isExtraInSig {
2547 if trace.ok() {
2548 trace.GoDestroySyscall()
2549 traceRelease(trace)
2550 }
2551 }
2566 mp.syscalltick--
2567
2568
2569
2570 mp.curg.trace.reset()
2571
2572
2573
2574
2575 if traceEnabled() || traceShuttingDown() {
2576
2577
2578
2579
2580
2581
2582
2583 lock(&sched.lock)
2584 traceThreadDestroy(mp)
2585 unlock(&sched.lock)
2586 }
2587 mp.isExtraInSig = false
2588
2589
2590
2591
2592
2593 sigmask := mp.sigmask
2594 sigblock(false)
2595 unminit()
2596
2597 setg(nil)
2598
2599
2600
2601 g0 := mp.g0
2602 g0.stack.hi = 0
2603 g0.stack.lo = 0
2604 g0.stackguard0 = 0
2605 g0.stackguard1 = 0
2606 mp.g0StackAccurate = false
2607
2608 putExtraM(mp)
2609
2610 msigrestore(sigmask)
2611 }
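
// cgoBindM stores g0 of the current m into the cgo-created thread-local
// storage (via _cgo_bindm) so that later callbacks on this C thread can
// reuse the same m instead of taking one from the extra-M list.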
2633 func cgoBindM() {
2634 if GOOS == "windows" || GOOS == "plan9" {
2635 fatal("bindm in unexpected GOOS")
2636 }
2637 g := getg()
2638 if g.m.g0 != g {
2639 fatal("the current g is not g0")
2640 }
2641 if _cgo_bindm != nil {
2642 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2643 }
2644 }
2657 func getm() uintptr {
2658 return uintptr(unsafe.Pointer(getg().m))
2659 }
2660
2661 var (
2668 extraM atomic.Uintptr
2669
2670 extraMLength atomic.Uint32
2671
2672 extraMWaiters atomic.Uint32
2673
2674
2675 extraMInUse atomic.Uint32
2676 )
2685 func lockextra(nilokay bool) *m {
2686 const locked = 1
2687
2688 incr := false
2689 for {
2690 old := extraM.Load()
2691 if old == locked {
2692 osyield_no_g()
2693 continue
2694 }
2695 if old == 0 && !nilokay {
2696 if !incr {
2697
2698
2699
2700 extraMWaiters.Add(1)
2701 incr = true
2702 }
2703 usleep_no_g(1)
2704 continue
2705 }
2706 if extraM.CompareAndSwap(old, locked) {
2707 return (*m)(unsafe.Pointer(old))
2708 }
2709 osyield_no_g()
2710 continue
2711 }
2712 }
2713
2714
2715 func unlockextra(mp *m, delta int32) {
2716 extraMLength.Add(delta)
2717 extraM.Store(uintptr(unsafe.Pointer(mp)))
2718 }
2727 func getExtraM() (mp *m, last bool) {
2728 mp = lockextra(false)
2729 extraMInUse.Add(1)
2730 unlockextra(mp.schedlink.ptr(), -1)
2731 return mp, mp.schedlink.ptr() == nil
2732 }
2733
2734
2735
2736
2737
2738 func putExtraM(mp *m) {
2739 extraMInUse.Add(-1)
2740 addExtraM(mp)
2741 }
2742
2743
2744
2745
2746 func addExtraM(mp *m) {
2747 mnext := lockextra(true)
2748 mp.schedlink.set(mnext)
2749 unlockextra(mp, 1)
2750 }
2751
2752 var (
2753
2754
2755
2756 allocmLock rwmutex
2757
2758
2759
2760
2761 execLock rwmutex
2762 )
2763
2764
2765
2766 const (
2767 failthreadcreate = "runtime: failed to create new OS thread\n"
2768 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2769 )
2770
2771
2772
2773
2774 var newmHandoff struct {
2775 lock mutex
2776
2777
2778
2779 newm muintptr
2780
2781
2782
2783 waiting bool
2784 wake note
2785
2786
2787
2788
2789 haveTemplateThread uint32
2790 }
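
// newm creates a new m that will start by running fn (or the scheduler if fn
// is nil), pre-assigned to pp. If the calling thread is locked or was
// started by C, thread creation is handed off to the template thread so the
// new OS thread starts from a known-good state.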
2799 func newm(fn func(), pp *p, id int64) {
2810 acquirem()
2811
2812 mp := allocm(pp, fn, id)
2813 mp.nextp.set(pp)
2814 mp.sigmask = initSigmask
2815 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2827 lock(&newmHandoff.lock)
2828 if newmHandoff.haveTemplateThread == 0 {
2829 throw("on a locked thread with no template thread")
2830 }
2831 mp.schedlink = newmHandoff.newm
2832 newmHandoff.newm.set(mp)
2833 if newmHandoff.waiting {
2834 newmHandoff.waiting = false
2835 notewakeup(&newmHandoff.wake)
2836 }
2837 unlock(&newmHandoff.lock)
2838
2839
2840
2841 releasem(getg().m)
2842 return
2843 }
2844 newm1(mp)
2845 releasem(getg().m)
2846 }
2847
2848 func newm1(mp *m) {
2849 if iscgo {
2850 var ts cgothreadstart
2851 if _cgo_thread_start == nil {
2852 throw("_cgo_thread_start missing")
2853 }
2854 ts.g.set(mp.g0)
2855 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2856 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2857 if msanenabled {
2858 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2859 }
2860 if asanenabled {
2861 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2862 }
2863 execLock.rlock()
2864 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2865 execLock.runlock()
2866 return
2867 }
2868 execLock.rlock()
2869 newosproc(mp)
2870 execLock.runlock()
2871 }
2872
2873
2874
2875
2876
2877 func startTemplateThread() {
2878 if GOARCH == "wasm" {
2879 return
2880 }
2881
2882
2883
2884 mp := acquirem()
2885 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2886 releasem(mp)
2887 return
2888 }
2889 newm(templateThread, nil, -1)
2890 releasem(mp)
2891 }
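
// templateThread runs on an m whose thread state is known to be clean; it
// exists solely to create new OS threads on behalf of Ms that cannot safely
// do so themselves (see newm). It sleeps on newmHandoff.wake between
// requests.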
2905 func templateThread() {
2906 lock(&sched.lock)
2907 sched.nmsys++
2908 checkdead()
2909 unlock(&sched.lock)
2910
2911 for {
2912 lock(&newmHandoff.lock)
2913 for newmHandoff.newm != 0 {
2914 newm := newmHandoff.newm.ptr()
2915 newmHandoff.newm = 0
2916 unlock(&newmHandoff.lock)
2917 for newm != nil {
2918 next := newm.schedlink.ptr()
2919 newm.schedlink = 0
2920 newm1(newm)
2921 newm = next
2922 }
2923 lock(&newmHandoff.lock)
2924 }
2925 newmHandoff.waiting = true
2926 noteclear(&newmHandoff.wake)
2927 unlock(&newmHandoff.lock)
2928 notesleep(&newmHandoff.wake)
2929 }
2930 }
2931
2932
2933
2934 func stopm() {
2935 gp := getg()
2936
2937 if gp.m.locks != 0 {
2938 throw("stopm holding locks")
2939 }
2940 if gp.m.p != 0 {
2941 throw("stopm holding p")
2942 }
2943 if gp.m.spinning {
2944 throw("stopm spinning")
2945 }
2946
2947 lock(&sched.lock)
2948 mput(gp.m)
2949 unlock(&sched.lock)
2950 mPark()
2951 acquirep(gp.m.nextp.ptr())
2952 gp.m.nextp = 0
2953 }
2954
2955 func mspinning() {
2956
2957 getg().m.spinning = true
2958 }
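
// startm schedules some m to run pp (creating a new m if none is idle).
// If pp is nil, an idle P is taken; if none is available, startm returns.
// spinning indicates the new m should start out spinning, looking for work;
// lockheld indicates the caller already holds sched.lock.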
2977 func startm(pp *p, spinning, lockheld bool) {
2994 mp := acquirem()
2995 if !lockheld {
2996 lock(&sched.lock)
2997 }
2998 if pp == nil {
2999 if spinning {
3000
3001
3002
3003 throw("startm: P required for spinning=true")
3004 }
3005 pp, _ = pidleget(0)
3006 if pp == nil {
3007 if !lockheld {
3008 unlock(&sched.lock)
3009 }
3010 releasem(mp)
3011 return
3012 }
3013 }
3014 nmp := mget()
3015 if nmp == nil {
3030 id := mReserveID()
3031 unlock(&sched.lock)
3032
3033 var fn func()
3034 if spinning {
3035
3036 fn = mspinning
3037 }
3038 newm(fn, pp, id)
3039
3040 if lockheld {
3041 lock(&sched.lock)
3042 }
3043
3044
3045 releasem(mp)
3046 return
3047 }
3048 if !lockheld {
3049 unlock(&sched.lock)
3050 }
3051 if nmp.spinning {
3052 throw("startm: m is spinning")
3053 }
3054 if nmp.nextp != 0 {
3055 throw("startm: m has p")
3056 }
3057 if spinning && !runqempty(pp) {
3058 throw("startm: p has runnable gs")
3059 }
3060
3061 nmp.spinning = spinning
3062 nmp.nextp.set(pp)
3063 notewakeup(&nmp.park)
3064
3065
3066 releasem(mp)
3067 }
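
// handoffp hands off ownership of a P from a syscall or locked m. It starts
// an m to run the P if there is local, global, or GC work, or if a
// stop-the-world is pending; otherwise the P goes on the idle list.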
3073 func handoffp(pp *p) {
3074
3075
3076
3077
3078 if !runqempty(pp) || sched.runqsize != 0 {
3079 startm(pp, false, false)
3080 return
3081 }
3082
3083 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3084 startm(pp, false, false)
3085 return
3086 }
3087
3088 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3089 startm(pp, false, false)
3090 return
3091 }
3092
3093
3094 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3095 sched.needspinning.Store(0)
3096 startm(pp, true, false)
3097 return
3098 }
3099 lock(&sched.lock)
3100 if sched.gcwaiting.Load() {
3101 pp.status = _Pgcstop
3102 pp.gcStopTime = nanotime()
3103 sched.stopwait--
3104 if sched.stopwait == 0 {
3105 notewakeup(&sched.stopnote)
3106 }
3107 unlock(&sched.lock)
3108 return
3109 }
3110 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3111 sched.safePointFn(pp)
3112 sched.safePointWait--
3113 if sched.safePointWait == 0 {
3114 notewakeup(&sched.safePointNote)
3115 }
3116 }
3117 if sched.runqsize != 0 {
3118 unlock(&sched.lock)
3119 startm(pp, false, false)
3120 return
3121 }
3122
3123
3124 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3125 unlock(&sched.lock)
3126 startm(pp, false, false)
3127 return
3128 }
3129
3130
3131
3132 when := pp.timers.wakeTime()
3133 pidleput(pp, 0)
3134 unlock(&sched.lock)
3135
3136 if when != 0 {
3137 wakeNetPoller(when)
3138 }
3139 }
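
// wakep tries to add one more spinning m so that a newly readied goroutine
// is noticed. It is a no-op if a spinning m already exists or no idle P can
// be found.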
3154 func wakep() {
3155
3156
3157 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3158 return
3159 }
3160
3161
3162
3163
3164
3165
3166 mp := acquirem()
3167
3168 var pp *p
3169 lock(&sched.lock)
3170 pp, _ = pidlegetSpinning(0)
3171 if pp == nil {
3172 if sched.nmspinning.Add(-1) < 0 {
3173 throw("wakep: negative nmspinning")
3174 }
3175 unlock(&sched.lock)
3176 releasem(mp)
3177 return
3178 }
3179
3180
3181
3182
3183 unlock(&sched.lock)
3184
3185 startm(pp, true, false)
3186
3187 releasem(mp)
3188 }
3189
3190
3191
3192 func stoplockedm() {
3193 gp := getg()
3194
3195 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3196 throw("stoplockedm: inconsistent locking")
3197 }
3198 if gp.m.p != 0 {
3199
3200 pp := releasep()
3201 handoffp(pp)
3202 }
3203 incidlelocked(1)
3204
3205 mPark()
3206 status := readgstatus(gp.m.lockedg.ptr())
3207 if status&^_Gscan != _Grunnable {
3208 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3209 dumpgstatus(gp.m.lockedg.ptr())
3210 throw("stoplockedm: not runnable")
3211 }
3212 acquirep(gp.m.nextp.ptr())
3213 gp.m.nextp = 0
3214 }
3215
3216
3217
3218
3219
3220 func startlockedm(gp *g) {
3221 mp := gp.lockedm.ptr()
3222 if mp == getg().m {
3223 throw("startlockedm: locked to me")
3224 }
3225 if mp.nextp != 0 {
3226 throw("startlockedm: m has p")
3227 }
3228
3229 incidlelocked(-1)
3230 pp := releasep()
3231 mp.nextp.set(pp)
3232 notewakeup(&mp.park)
3233 stopm()
3234 }
3235
3236
3237
3238 func gcstopm() {
3239 gp := getg()
3240
3241 if !sched.gcwaiting.Load() {
3242 throw("gcstopm: not waiting for gc")
3243 }
3244 if gp.m.spinning {
3245 gp.m.spinning = false
3246
3247
3248 if sched.nmspinning.Add(-1) < 0 {
3249 throw("gcstopm: negative nmspinning")
3250 }
3251 }
3252 pp := releasep()
3253 lock(&sched.lock)
3254 pp.status = _Pgcstop
3255 pp.gcStopTime = nanotime()
3256 sched.stopwait--
3257 if sched.stopwait == 0 {
3258 notewakeup(&sched.stopnote)
3259 }
3260 unlock(&sched.lock)
3261 stopm()
3262 }
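
// execute runs gp on the current m: it marks gp _Grunning and jumps to its
// saved context with gogo, never returning. inheritTime is true if gp should
// inherit the remaining time in the current time slice rather than starting
// a new one.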
3273 func execute(gp *g, inheritTime bool) {
3274 mp := getg().m
3275
3276 if goroutineProfile.active {
3277
3278
3279
3280 tryRecordGoroutineProfile(gp, nil, osyield)
3281 }
3282
3283
3284
3285 mp.curg = gp
3286 gp.m = mp
3287 casgstatus(gp, _Grunnable, _Grunning)
3288 gp.waitsince = 0
3289 gp.preempt = false
3290 gp.stackguard0 = gp.stack.lo + stackGuard
3291 if !inheritTime {
3292 mp.p.ptr().schedtick++
3293 }
3294
3295
3296 hz := sched.profilehz
3297 if mp.profilehz != hz {
3298 setThreadCPUProfiler(hz)
3299 }
3300
3301 trace := traceAcquire()
3302 if trace.ok() {
3303 trace.GoStart()
3304 traceRelease(trace)
3305 }
3306
3307 gogo(&gp.sched)
3308 }
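
// findRunnable blocks until it finds work to run, checking in order: the
// trace reader, GC mark workers, the local run queue, the global run queue,
// netpoll, and work stealing from other Ps. When nothing is found it
// releases its P and parks the m. tryWakeP indicates the returned goroutine
// is special (e.g. the trace reader) and the caller should consider waking
// an additional P.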
3314 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3315 mp := getg().m
3316
3317
3318
3319
3320
3321 top:
3322
3323
3324
3325 mp.clearAllpSnapshot()
3326
3327 pp := mp.p.ptr()
3328 if sched.gcwaiting.Load() {
3329 gcstopm()
3330 goto top
3331 }
3332 if pp.runSafePointFn != 0 {
3333 runSafePointFn()
3334 }
3335
3336
3337
3338
3339
3340 now, pollUntil, _ := pp.timers.check(0)
3341
3342
3343 if traceEnabled() || traceShuttingDown() {
3344 gp := traceReader()
3345 if gp != nil {
3346 trace := traceAcquire()
3347 casgstatus(gp, _Gwaiting, _Grunnable)
3348 if trace.ok() {
3349 trace.GoUnpark(gp, 0)
3350 traceRelease(trace)
3351 }
3352 return gp, false, true
3353 }
3354 }
3355
3356
3357 if gcBlackenEnabled != 0 {
3358 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3359 if gp != nil {
3360 return gp, false, true
3361 }
3362 now = tnow
3363 }
3364
3365
3366
3367
3368 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3369 lock(&sched.lock)
3370 gp := globrunqget(pp, 1)
3371 unlock(&sched.lock)
3372 if gp != nil {
3373 return gp, false, false
3374 }
3375 }
3376
3377
3378 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3379 if gp := wakefing(); gp != nil {
3380 ready(gp, 0, true)
3381 }
3382 }
3383 if *cgo_yield != nil {
3384 asmcgocall(*cgo_yield, nil)
3385 }
3386
3387
3388 if gp, inheritTime := runqget(pp); gp != nil {
3389 return gp, inheritTime, false
3390 }
3391
3392
3393 if sched.runqsize != 0 {
3394 lock(&sched.lock)
3395 gp := globrunqget(pp, 0)
3396 unlock(&sched.lock)
3397 if gp != nil {
3398 return gp, false, false
3399 }
3400 }
3409 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3410 if list, delta := netpoll(0); !list.empty() {
3411 gp := list.pop()
3412 injectglist(&list)
3413 netpollAdjustWaiters(delta)
3414 trace := traceAcquire()
3415 casgstatus(gp, _Gwaiting, _Grunnable)
3416 if trace.ok() {
3417 trace.GoUnpark(gp, 0)
3418 traceRelease(trace)
3419 }
3420 return gp, false, false
3421 }
3422 }
3429 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3430 if !mp.spinning {
3431 mp.becomeSpinning()
3432 }
3433
3434 gp, inheritTime, tnow, w, newWork := stealWork(now)
3435 if gp != nil {
3436
3437 return gp, inheritTime, false
3438 }
3439 if newWork {
3440
3441
3442 goto top
3443 }
3444
3445 now = tnow
3446 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3447
3448 pollUntil = w
3449 }
3450 }
3451
3452
3453
3454
3455
3456 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3457 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3458 if node != nil {
3459 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3460 gp := node.gp.ptr()
3461
3462 trace := traceAcquire()
3463 casgstatus(gp, _Gwaiting, _Grunnable)
3464 if trace.ok() {
3465 trace.GoUnpark(gp, 0)
3466 traceRelease(trace)
3467 }
3468 return gp, false, false
3469 }
3470 gcController.removeIdleMarkWorker()
3471 }
3472
3473
3474
3475
3476
3477 gp, otherReady := beforeIdle(now, pollUntil)
3478 if gp != nil {
3479 trace := traceAcquire()
3480 casgstatus(gp, _Gwaiting, _Grunnable)
3481 if trace.ok() {
3482 trace.GoUnpark(gp, 0)
3483 traceRelease(trace)
3484 }
3485 return gp, false, false
3486 }
3487 if otherReady {
3488 goto top
3489 }
3499 allpSnapshot := mp.snapshotAllp()
3500
3501
3502 idlepMaskSnapshot := idlepMask
3503 timerpMaskSnapshot := timerpMask
3504
3505
3506 lock(&sched.lock)
3507 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3508 unlock(&sched.lock)
3509 goto top
3510 }
3511 if sched.runqsize != 0 {
3512 gp := globrunqget(pp, 0)
3513 unlock(&sched.lock)
3514 return gp, false, false
3515 }
3516 if !mp.spinning && sched.needspinning.Load() == 1 {
3517
3518 mp.becomeSpinning()
3519 unlock(&sched.lock)
3520 goto top
3521 }
3522 if releasep() != pp {
3523 throw("findrunnable: wrong p")
3524 }
3525 now = pidleput(pp, now)
3526 unlock(&sched.lock)
3564 wasSpinning := mp.spinning
3565 if mp.spinning {
3566 mp.spinning = false
3567 if sched.nmspinning.Add(-1) < 0 {
3568 throw("findrunnable: negative nmspinning")
3569 }
3582 lock(&sched.lock)
3583 if sched.runqsize != 0 {
3584 pp, _ := pidlegetSpinning(0)
3585 if pp != nil {
3586 gp := globrunqget(pp, 0)
3587 if gp == nil {
3588 throw("global runq empty with non-zero runqsize")
3589 }
3590 unlock(&sched.lock)
3591 acquirep(pp)
3592 mp.becomeSpinning()
3593 return gp, false, false
3594 }
3595 }
3596 unlock(&sched.lock)
3597
3598 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3599 if pp != nil {
3600 acquirep(pp)
3601 mp.becomeSpinning()
3602 goto top
3603 }
3604
3605
3606 pp, gp := checkIdleGCNoP()
3607 if pp != nil {
3608 acquirep(pp)
3609 mp.becomeSpinning()
3610
3611
3612 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3613 trace := traceAcquire()
3614 casgstatus(gp, _Gwaiting, _Grunnable)
3615 if trace.ok() {
3616 trace.GoUnpark(gp, 0)
3617 traceRelease(trace)
3618 }
3619 return gp, false, false
3620 }
3621
3622
3623
3624
3625
3626
3627
3628 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3629 }
3630
3631
3632
3633
3634
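// Poll the network until the next timer deadline (or indefinitely if
// there are no timers), since no other M is currently blocked in the poller.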
3635 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3636 sched.pollUntil.Store(pollUntil)
3637 if mp.p != 0 {
3638 throw("findrunnable: netpoll with p")
3639 }
3640 if mp.spinning {
3641 throw("findrunnable: netpoll with spinning")
3642 }
3643 delay := int64(-1)
3644 if pollUntil != 0 {
3645 if now == 0 {
3646 now = nanotime()
3647 }
3648 delay = pollUntil - now
3649 if delay < 0 {
3650 delay = 0
3651 }
3652 }
3653 if faketime != 0 {
3654
3655 delay = 0
3656 }
3657 list, delta := netpoll(delay)
3658
3659 now = nanotime()
3660 sched.pollUntil.Store(0)
3661 sched.lastpoll.Store(now)
3662 if faketime != 0 && list.empty() {
3663
3664
3665 stopm()
3666 goto top
3667 }
3668 lock(&sched.lock)
3669 pp, _ := pidleget(now)
3670 unlock(&sched.lock)
3671 if pp == nil {
3672 injectglist(&list)
3673 netpollAdjustWaiters(delta)
3674 } else {
3675 acquirep(pp)
3676 if !list.empty() {
3677 gp := list.pop()
3678 injectglist(&list)
3679 netpollAdjustWaiters(delta)
3680 trace := traceAcquire()
3681 casgstatus(gp, _Gwaiting, _Grunnable)
3682 if trace.ok() {
3683 trace.GoUnpark(gp, 0)
3684 traceRelease(trace)
3685 }
3686 return gp, false, false
3687 }
3688 if wasSpinning {
3689 mp.becomeSpinning()
3690 }
3691 goto top
3692 }
3693 } else if pollUntil != 0 && netpollinited() {
3694 pollerPollUntil := sched.pollUntil.Load()
3695 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3696 netpollBreak()
3697 }
3698 }
3699 stopm()
3700 goto top
3701 }
3702
3703
3704
3705
3706
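// pollWork reports whether there is non-background work this P could be
// doing. It is a fairly lightweight check used by background work loops
// such as the idle GC worker; it checks only a subset of the conditions
// the scheduler proper checks.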
3707 func pollWork() bool {
3708 if sched.runqsize != 0 {
3709 return true
3710 }
3711 p := getg().m.p.ptr()
3712 if !runqempty(p) {
3713 return true
3714 }
3715 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3716 if list, delta := netpoll(0); !list.empty() {
3717 injectglist(&list)
3718 netpollAdjustWaiters(delta)
3719 return true
3720 }
3721 }
3722 return false
3723 }
3724
3725
3726
3727
3728
3729
3730
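// stealWork attempts to steal a runnable goroutine or timer from any P.
// If newWork is true, new work may have been readied and the caller
// should retry. rnow returns the passed time, updated if timers were run.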
3731 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3732 pp := getg().m.p.ptr()
3733
3734 ranTimer := false
3735
3736 const stealTries = 4
3737 for i := 0; i < stealTries; i++ {
3738 stealTimersOrRunNextG := i == stealTries-1
3739
3740 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3741 if sched.gcwaiting.Load() {
3742
3743 return nil, false, now, pollUntil, true
3744 }
3745 p2 := allp[enum.position()]
3746 if pp == p2 {
3747 continue
3748 }
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3764 tnow, w, ran := p2.timers.check(now)
3765 now = tnow
3766 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3767 pollUntil = w
3768 }
3769 if ran {
3770
3771
3772
3773
3774
3775
3776
3777
3778 if gp, inheritTime := runqget(pp); gp != nil {
3779 return gp, inheritTime, now, pollUntil, ranTimer
3780 }
3781 ranTimer = true
3782 }
3783 }
3784
3785
3786 if !idlepMask.read(enum.position()) {
3787 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3788 return gp, false, now, pollUntil, ranTimer
3789 }
3790 }
3791 }
3792 }
3793
3794
3795
3796
3797 return nil, false, now, pollUntil, ranTimer
3798 }
3799
3800
3801
3802
3803
3804
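// checkRunqsNoP scans the run queues of all Ps in the snapshot while
// holding no P. If work is found, an idle P is returned for the caller to
// acquire before retrying; the work itself is not stolen here.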
3805 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3806 for id, p2 := range allpSnapshot {
3807 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3808 lock(&sched.lock)
3809 pp, _ := pidlegetSpinning(0)
3810 if pp == nil {
3811
3812 unlock(&sched.lock)
3813 return nil
3814 }
3815 unlock(&sched.lock)
3816 return pp
3817 }
3818 }
3819
3820
3821 return nil
3822 }
3823
3824
3825
3826
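// checkTimersNoP scans the timers of all Ps in the snapshot while holding
// no P and returns the earliest wake time found, or the passed pollUntil
// if that is sooner.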
3827 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3828 for id, p2 := range allpSnapshot {
3829 if timerpMaskSnapshot.read(uint32(id)) {
3830 w := p2.timers.wakeTime()
3831 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3832 pollUntil = w
3833 }
3834 }
3835 }
3836
3837 return pollUntil
3838 }
3839
3840
3841
3842
3843
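// checkIdleGCNoP checks for idle-priority GC mark work while holding no
// P. If GC work, an idle P, and a worker G are all available, the P and G
// are returned; the P has not yet been wired.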
3844 func checkIdleGCNoP() (*p, *g) {
3845
3846
3847
3848
3849
3850
3851 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3852 return nil, nil
3853 }
3854 if !gcMarkWorkAvailable(nil) {
3855 return nil, nil
3856 }
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875 lock(&sched.lock)
3876 pp, now := pidlegetSpinning(0)
3877 if pp == nil {
3878 unlock(&sched.lock)
3879 return nil, nil
3880 }
3881
3882
3883 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3884 pidleput(pp, now)
3885 unlock(&sched.lock)
3886 return nil, nil
3887 }
3888
3889 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3890 if node == nil {
3891 pidleput(pp, now)
3892 unlock(&sched.lock)
3893 gcController.removeIdleMarkWorker()
3894 return nil, nil
3895 }
3896
3897 unlock(&sched.lock)
3898
3899 return pp, node.gp.ptr()
3900 }
3901
3902
3903
3904
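// wakeNetPoller wakes the thread sleeping in the network poller if it is
// not going to wake up before when; or, if no thread is blocked in the
// poller, it wakes an idle P to service timers and the network poller.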
3905 func wakeNetPoller(when int64) {
3906 if sched.lastpoll.Load() == 0 {
3907
3908
3909
3910
3911 pollerPollUntil := sched.pollUntil.Load()
3912 if pollerPollUntil == 0 || pollerPollUntil > when {
3913 netpollBreak()
3914 }
3915 } else {
3916
3917
3918 if GOOS != "plan9" {
3919 wakep()
3920 }
3921 }
3922 }
3923
3924 func resetspinning() {
3925 gp := getg()
3926 if !gp.m.spinning {
3927 throw("resetspinning: not a spinning m")
3928 }
3929 gp.m.spinning = false
3930 nmspinning := sched.nmspinning.Add(-1)
3931 if nmspinning < 0 {
3932 throw("findrunnable: negative nmspinning")
3933 }
3934
3935
3936
3937 wakep()
3938 }
3939
3940
3941
3942
3943
3944
3945
3946
3947
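// injectglist adds each runnable G on the list to some run queue and
// clears glist. If there is no current P, the Gs go on the global queue
// and up to that many idle Ms are started. Otherwise, one G per idle P is
// pushed to the global queue (starting an M for each) and the remainder
// go on the current P's local run queue. May temporarily acquire
// sched.lock; can run concurrently with GC.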
3948 func injectglist(glist *gList) {
3949 if glist.empty() {
3950 return
3951 }
3952
3953
3954
3955 head := glist.head.ptr()
3956 var tail *g
3957 qsize := 0
3958 trace := traceAcquire()
3959 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3960 tail = gp
3961 qsize++
3962 casgstatus(gp, _Gwaiting, _Grunnable)
3963 if trace.ok() {
3964 trace.GoUnpark(gp, 0)
3965 }
3966 }
3967 if trace.ok() {
3968 traceRelease(trace)
3969 }
3970
3971
3972 var q gQueue
3973 q.head.set(head)
3974 q.tail.set(tail)
3975 *glist = gList{}
3976
3977 startIdle := func(n int) {
3978 for i := 0; i < n; i++ {
3979 mp := acquirem()
3980 lock(&sched.lock)
3981
3982 pp, _ := pidlegetSpinning(0)
3983 if pp == nil {
3984 unlock(&sched.lock)
3985 releasem(mp)
3986 break
3987 }
3988
3989 startm(pp, false, true)
3990 unlock(&sched.lock)
3991 releasem(mp)
3992 }
3993 }
3994
3995 pp := getg().m.p.ptr()
3996 if pp == nil {
3997 lock(&sched.lock)
3998 globrunqputbatch(&q, int32(qsize))
3999 unlock(&sched.lock)
4000 startIdle(qsize)
4001 return
4002 }
4003
4004 npidle := int(sched.npidle.Load())
4005 var (
4006 globq gQueue
4007 n int
4008 )
4009 for n = 0; n < npidle && !q.empty(); n++ {
4010 g := q.pop()
4011 globq.pushBack(g)
4012 }
4013 if n > 0 {
4014 lock(&sched.lock)
4015 globrunqputbatch(&globq, int32(n))
4016 unlock(&sched.lock)
4017 startIdle(n)
4018 qsize -= n
4019 }
4020
4021 if !q.empty() {
4022 runqputbatch(pp, &q, qsize)
4023 }
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038 wakep()
4039 }
4040
4041
4042
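// schedule performs one round of scheduling: find a runnable goroutine
// and execute it. It never returns.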
4043 func schedule() {
4044 mp := getg().m
4045
4046 if mp.locks != 0 {
4047 throw("schedule: holding locks")
4048 }
4049
4050 if mp.lockedg != 0 {
4051 stoplockedm()
4052 execute(mp.lockedg.ptr(), false)
4053 }
4054
4055
4056
4057 if mp.incgo {
4058 throw("schedule: in cgo")
4059 }
4060
4061 top:
4062 pp := mp.p.ptr()
4063 pp.preempt = false
4064
4065
4066
4067
4068 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4069 throw("schedule: spinning with local work")
4070 }
4071
4072 gp, inheritTime, tryWakeP := findRunnable()
4073
4074
4075
4076
4077 mp.clearAllpSnapshot()
4078
4079 if debug.dontfreezetheworld > 0 && freezing.Load() {
4080
4081
4082
4083
4084
4085
4086
4087 lock(&deadlock)
4088 lock(&deadlock)
4089 }
4090
4091
4092
4093
4094 if mp.spinning {
4095 resetspinning()
4096 }
4097
4098 if sched.disable.user && !schedEnabled(gp) {
4099
4100
4101
4102 lock(&sched.lock)
4103 if schedEnabled(gp) {
4104
4105
4106 unlock(&sched.lock)
4107 } else {
4108 sched.disable.runnable.pushBack(gp)
4109 sched.disable.n++
4110 unlock(&sched.lock)
4111 goto top
4112 }
4113 }
4114
4115
4116
4117 if tryWakeP {
4118 wakep()
4119 }
4120 if gp.lockedm != 0 {
4121
4122
4123 startlockedm(gp)
4124 goto top
4125 }
4126
4127 execute(gp, inheritTime)
4128 }
4129
4130
4131
4132
4133
4134
4135
4136
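// dropg removes the association between the current M and its curg.
// Typically the caller sets the G's status away from _Grunning and then
// immediately calls dropg; the caller is also responsible for arranging
// for the G to be restarted at an appropriate time.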
4137 func dropg() {
4138 gp := getg()
4139
4140 setMNoWB(&gp.m.curg.m, nil)
4141 setGNoWB(&gp.m.curg, nil)
4142 }
4143
4144 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4145 unlock((*mutex)(lock))
4146 return true
4147 }
4148
4149
4150 func park_m(gp *g) {
4151 mp := getg().m
4152
4153 trace := traceAcquire()
4154
4155
4156
4157
4158
4159 sg := gp.syncGroup
4160 if sg != nil {
4161 sg.incActive()
4162 }
4163
4164 if trace.ok() {
4165
4166
4167
4168 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4169 }
4170
4171
4172 casgstatus(gp, _Grunning, _Gwaiting)
4173 if trace.ok() {
4174 traceRelease(trace)
4175 }
4176
4177 dropg()
4178
4179 if fn := mp.waitunlockf; fn != nil {
4180 ok := fn(gp, mp.waitlock)
4181 mp.waitunlockf = nil
4182 mp.waitlock = nil
4183 if !ok {
4184 trace := traceAcquire()
4185 casgstatus(gp, _Gwaiting, _Grunnable)
4186 if sg != nil {
4187 sg.decActive()
4188 }
4189 if trace.ok() {
4190 trace.GoUnpark(gp, 2)
4191 traceRelease(trace)
4192 }
4193 execute(gp, true)
4194 }
4195 }
4196
4197 if sg != nil {
4198 sg.decActive()
4199 }
4200
4201 schedule()
4202 }
4203
4204 func goschedImpl(gp *g, preempted bool) {
4205 trace := traceAcquire()
4206 status := readgstatus(gp)
4207 if status&^_Gscan != _Grunning {
4208 dumpgstatus(gp)
4209 throw("bad g status")
4210 }
4211 if trace.ok() {
4212
4213
4214
4215 if preempted {
4216 trace.GoPreempt()
4217 } else {
4218 trace.GoSched()
4219 }
4220 }
4221 casgstatus(gp, _Grunning, _Grunnable)
4222 if trace.ok() {
4223 traceRelease(trace)
4224 }
4225
4226 dropg()
4227 lock(&sched.lock)
4228 globrunqput(gp)
4229 unlock(&sched.lock)
4230
4231 if mainStarted {
4232 wakep()
4233 }
4234
4235 schedule()
4236 }
4237
4238
4239 func gosched_m(gp *g) {
4240 goschedImpl(gp, false)
4241 }
4242
4243
4244 func goschedguarded_m(gp *g) {
4245 if !canPreemptM(gp.m) {
4246 gogo(&gp.sched)
4247 }
4248 goschedImpl(gp, false)
4249 }
4250
4251 func gopreempt_m(gp *g) {
4252 goschedImpl(gp, true)
4253 }
4254
4255
4256
4257
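// preemptPark parks gp and transitions it to _Gpreempted.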
4258 func preemptPark(gp *g) {
4259 status := readgstatus(gp)
4260 if status&^_Gscan != _Grunning {
4261 dumpgstatus(gp)
4262 throw("bad g status")
4263 }
4264
4265 if gp.asyncSafePoint {
4266
4267
4268
4269 f := findfunc(gp.sched.pc)
4270 if !f.valid() {
4271 throw("preempt at unknown pc")
4272 }
4273 if f.flag&abi.FuncFlagSPWrite != 0 {
4274 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4275 throw("preempt SPWRITE")
4276 }
4277 }
4278
4279
4280
4281
4282
4283
4284
4285 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4286 dropg()
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303 trace := traceAcquire()
4304 if trace.ok() {
4305 trace.GoPark(traceBlockPreempted, 0)
4306 }
4307 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4308 if trace.ok() {
4309 traceRelease(trace)
4310 }
4311 schedule()
4312 }
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
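// goyield is like Gosched, but it emits a GoPreempt trace event instead
// of GoSched and puts the current G on the local run queue of the current
// P rather than on the global queue.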
4328 func goyield() {
4329 checkTimeouts()
4330 mcall(goyield_m)
4331 }
4332
4333 func goyield_m(gp *g) {
4334 trace := traceAcquire()
4335 pp := gp.m.p.ptr()
4336 if trace.ok() {
4337
4338
4339
4340 trace.GoPreempt()
4341 }
4342 casgstatus(gp, _Grunning, _Grunnable)
4343 if trace.ok() {
4344 traceRelease(trace)
4345 }
4346 dropg()
4347 runqput(pp, gp, false)
4348 schedule()
4349 }
4350
4351
4352 func goexit1() {
4353 if raceenabled {
4354 if gp := getg(); gp.syncGroup != nil {
4355 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4356 }
4357 racegoend()
4358 }
4359 trace := traceAcquire()
4360 if trace.ok() {
4361 trace.GoEnd()
4362 traceRelease(trace)
4363 }
4364 mcall(goexit0)
4365 }
4366
4367
4368 func goexit0(gp *g) {
4369 gdestroy(gp)
4370 schedule()
4371 }
4372
4373 func gdestroy(gp *g) {
4374 mp := getg().m
4375 pp := mp.p.ptr()
4376
4377 casgstatus(gp, _Grunning, _Gdead)
4378 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4379 if isSystemGoroutine(gp, false) {
4380 sched.ngsys.Add(-1)
4381 }
4382 gp.m = nil
4383 locked := gp.lockedm != 0
4384 gp.lockedm = 0
4385 mp.lockedg = 0
4386 gp.preemptStop = false
4387 gp.paniconfault = false
4388 gp._defer = nil
4389 gp._panic = nil
4390 gp.writebuf = nil
4391 gp.waitreason = waitReasonZero
4392 gp.param = nil
4393 gp.labels = nil
4394 gp.timer = nil
4395 gp.syncGroup = nil
4396
4397 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4398
4399
4400
4401 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4402 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4403 gcController.bgScanCredit.Add(scanCredit)
4404 gp.gcAssistBytes = 0
4405 }
4406
4407 dropg()
4408
4409 if GOARCH == "wasm" {
4410 gfput(pp, gp)
4411 return
4412 }
4413
4414 if locked && mp.lockedInt != 0 {
4415 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4416 if mp.isextra {
4417 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4418 }
4419 throw("exited a goroutine internally locked to the OS thread")
4420 }
4421 gfput(pp, gp)
4422 if locked {
4423
4424
4425
4426
4427
4428
4429 if GOOS != "plan9" {
4430 gogo(&mp.g0.sched)
4431 } else {
4432
4433
4434 mp.lockedExt = 0
4435 }
4436 }
4437 }
4438
4439
4440
4441
4442
4443
4444
4445
4446
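// save updates getg().sched to refer to pc, sp, and bp so that a
// following gogo will restore them. save must not have write barriers
// because invoking one can block, and the write barrier is not
// re-entrant.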
4447 func save(pc, sp, bp uintptr) {
4448 gp := getg()
4449
4450 if gp == gp.m.g0 || gp == gp.m.gsignal {
4451
4452
4453
4454
4455
4456 throw("save on system g not allowed")
4457 }
4458
4459 gp.sched.pc = pc
4460 gp.sched.sp = sp
4461 gp.sched.lr = 0
4462 gp.sched.ret = 0
4463 gp.sched.bp = bp
4464
4465
4466
4467 if gp.sched.ctxt != nil {
4468 badctxt()
4469 }
4470 }
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
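// reentersyscall is the common syscall entry path: it saves the caller's
// pc/sp/bp, moves the goroutine to _Gsyscall, and leaves the P in
// _Psyscall so that sysmon can retake it if the syscall runs long.
// Nothing on this path may split the stack (throwsplit is set).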
4496 func reentersyscall(pc, sp, bp uintptr) {
4497 trace := traceAcquire()
4498 gp := getg()
4499
4500
4501
4502 gp.m.locks++
4503
4504
4505
4506
4507
4508 gp.stackguard0 = stackPreempt
4509 gp.throwsplit = true
4510
4511
4512 save(pc, sp, bp)
4513 gp.syscallsp = sp
4514 gp.syscallpc = pc
4515 gp.syscallbp = bp
4516 casgstatus(gp, _Grunning, _Gsyscall)
4517 if staticLockRanking {
4518
4519
4520 save(pc, sp, bp)
4521 }
4522 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4523 systemstack(func() {
4524 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4525 throw("entersyscall")
4526 })
4527 }
4528 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4529 systemstack(func() {
4530 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4531 throw("entersyscall")
4532 })
4533 }
4534
4535 if trace.ok() {
4536 systemstack(func() {
4537 trace.GoSysCall()
4538 traceRelease(trace)
4539 })
4540
4541
4542
4543 save(pc, sp, bp)
4544 }
4545
4546 if sched.sysmonwait.Load() {
4547 systemstack(entersyscall_sysmon)
4548 save(pc, sp, bp)
4549 }
4550
4551 if gp.m.p.ptr().runSafePointFn != 0 {
4552
4553 systemstack(runSafePointFn)
4554 save(pc, sp, bp)
4555 }
4556
4557 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4558 pp := gp.m.p.ptr()
4559 pp.m = 0
4560 gp.m.oldp.set(pp)
4561 gp.m.p = 0
4562 atomic.Store(&pp.status, _Psyscall)
4563 if sched.gcwaiting.Load() {
4564 systemstack(entersyscall_gcwait)
4565 save(pc, sp, bp)
4566 }
4567
4568 gp.m.locks--
4569 }
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
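// entersyscall is the standard syscall entry used by the syscall package
// and by normal cgo calls.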
4585 func entersyscall() {
4586
4587
4588
4589
4590 fp := getcallerfp()
4591 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4592 }
4593
4594 func entersyscall_sysmon() {
4595 lock(&sched.lock)
4596 if sched.sysmonwait.Load() {
4597 sched.sysmonwait.Store(false)
4598 notewakeup(&sched.sysmonnote)
4599 }
4600 unlock(&sched.lock)
4601 }
4602
4603 func entersyscall_gcwait() {
4604 gp := getg()
4605 pp := gp.m.oldp.ptr()
4606
4607 lock(&sched.lock)
4608 trace := traceAcquire()
4609 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4610 if trace.ok() {
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620 trace.ProcSteal(pp, true)
4621 traceRelease(trace)
4622 }
4623 pp.gcStopTime = nanotime()
4624 pp.syscalltick++
4625 if sched.stopwait--; sched.stopwait == 0 {
4626 notewakeup(&sched.stopnote)
4627 }
4628 } else if trace.ok() {
4629 traceRelease(trace)
4630 }
4631 unlock(&sched.lock)
4632 }
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
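// entersyscallblock is like entersyscall, but with a hint that the
// syscall is blocking: the P is handed off immediately instead of being
// left in _Psyscall.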
4646 func entersyscallblock() {
4647 gp := getg()
4648
4649 gp.m.locks++
4650 gp.throwsplit = true
4651 gp.stackguard0 = stackPreempt
4652 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4653 gp.m.p.ptr().syscalltick++
4654
4655
4656 pc := sys.GetCallerPC()
4657 sp := sys.GetCallerSP()
4658 bp := getcallerfp()
4659 save(pc, sp, bp)
4660 gp.syscallsp = gp.sched.sp
4661 gp.syscallpc = gp.sched.pc
4662 gp.syscallbp = gp.sched.bp
4663 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4664 sp1 := sp
4665 sp2 := gp.sched.sp
4666 sp3 := gp.syscallsp
4667 systemstack(func() {
4668 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4669 throw("entersyscallblock")
4670 })
4671 }
4672 casgstatus(gp, _Grunning, _Gsyscall)
4673 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4674 systemstack(func() {
4675 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4676 throw("entersyscallblock")
4677 })
4678 }
4679 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4680 systemstack(func() {
4681 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4682 throw("entersyscallblock")
4683 })
4684 }
4685
4686 systemstack(entersyscallblock_handoff)
4687
4688
4689 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4690
4691 gp.m.locks--
4692 }
4693
4694 func entersyscallblock_handoff() {
4695 trace := traceAcquire()
4696 if trace.ok() {
4697 trace.GoSysCall()
4698 traceRelease(trace)
4699 }
4700 handoffp(releasep())
4701 }
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
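// exitsyscall is called when the goroutine exits its system call and
// arranges for it to run on a P again. The fast path reacquires the old P
// (or an idle one) without the scheduler lock; otherwise it falls back to
// exitsyscall0 via mcall.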
4723 func exitsyscall() {
4724 gp := getg()
4725
4726 gp.m.locks++
4727 if sys.GetCallerSP() > gp.syscallsp {
4728 throw("exitsyscall: syscall frame is no longer valid")
4729 }
4730
4731 gp.waitsince = 0
4732 oldp := gp.m.oldp.ptr()
4733 gp.m.oldp = 0
4734 if exitsyscallfast(oldp) {
4735
4736
4737 if goroutineProfile.active {
4738
4739
4740
4741 systemstack(func() {
4742 tryRecordGoroutineProfileWB(gp)
4743 })
4744 }
4745 trace := traceAcquire()
4746 if trace.ok() {
4747 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4748 systemstack(func() {
4749
4750
4751
4752
4753 trace.GoSysExit(lostP)
4754 if lostP {
4755
4756
4757
4758
4759 trace.GoStart()
4760 }
4761 })
4762 }
4763
4764 gp.m.p.ptr().syscalltick++
4765
4766 casgstatus(gp, _Gsyscall, _Grunning)
4767 if trace.ok() {
4768 traceRelease(trace)
4769 }
4770
4771
4772
4773 gp.syscallsp = 0
4774 gp.m.locks--
4775 if gp.preempt {
4776
4777 gp.stackguard0 = stackPreempt
4778 } else {
4779
4780 gp.stackguard0 = gp.stack.lo + stackGuard
4781 }
4782 gp.throwsplit = false
4783
4784 if sched.disable.user && !schedEnabled(gp) {
4785
4786 Gosched()
4787 }
4788
4789 return
4790 }
4791
4792 gp.m.locks--
4793
4794
4795 mcall(exitsyscall0)
4796
4797
4798
4799
4800
4801
4802
4803 gp.syscallsp = 0
4804 gp.m.p.ptr().syscalltick++
4805 gp.throwsplit = false
4806 }
4807
4808
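// exitsyscallfast tries to reacquire a P without blocking: first the P
// this goroutine ran on before the syscall (if it is still in _Psyscall),
// then any idle P. It reports whether a P was acquired.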
4809 func exitsyscallfast(oldp *p) bool {
4810
4811 if sched.stopwait == freezeStopWait {
4812 return false
4813 }
4814
4815
4816 trace := traceAcquire()
4817 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4818
4819 wirep(oldp)
4820 exitsyscallfast_reacquired(trace)
4821 if trace.ok() {
4822 traceRelease(trace)
4823 }
4824 return true
4825 }
4826 if trace.ok() {
4827 traceRelease(trace)
4828 }
4829
4830
4831 if sched.pidle != 0 {
4832 var ok bool
4833 systemstack(func() {
4834 ok = exitsyscallfast_pidle()
4835 })
4836 if ok {
4837 return true
4838 }
4839 }
4840 return false
4841 }
4842
4843
4844
4845
4846
4847
4848 func exitsyscallfast_reacquired(trace traceLocker) {
4849 gp := getg()
4850 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4851 if trace.ok() {
4852
4853
4854
4855 systemstack(func() {
4856
4857
4858 trace.ProcSteal(gp.m.p.ptr(), true)
4859 trace.ProcStart()
4860 })
4861 }
4862 gp.m.p.ptr().syscalltick++
4863 }
4864 }
4865
4866 func exitsyscallfast_pidle() bool {
4867 lock(&sched.lock)
4868 pp, _ := pidleget(0)
4869 if pp != nil && sched.sysmonwait.Load() {
4870 sched.sysmonwait.Store(false)
4871 notewakeup(&sched.sysmonnote)
4872 }
4873 unlock(&sched.lock)
4874 if pp != nil {
4875 acquirep(pp)
4876 return true
4877 }
4878 return false
4879 }
4880
4881
4882
4883
4884
4885
4886
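// exitsyscall0 is the slow path of exitsyscall, running on g0: the M
// failed to reacquire a P, so gp is made _Grunnable and either handed to
// an idle P (if one can be had and scheduling is enabled) or put on the
// global run queue, after which this M blocks.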
4887 func exitsyscall0(gp *g) {
4888 var trace traceLocker
4889 traceExitingSyscall()
4890 trace = traceAcquire()
4891 casgstatus(gp, _Gsyscall, _Grunnable)
4892 traceExitedSyscall()
4893 if trace.ok() {
4894
4895
4896
4897
4898 trace.GoSysExit(true)
4899 traceRelease(trace)
4900 }
4901 dropg()
4902 lock(&sched.lock)
4903 var pp *p
4904 if schedEnabled(gp) {
4905 pp, _ = pidleget(0)
4906 }
4907 var locked bool
4908 if pp == nil {
4909 globrunqput(gp)
4910
4911
4912
4913
4914
4915
4916 locked = gp.lockedm != 0
4917 } else if sched.sysmonwait.Load() {
4918 sched.sysmonwait.Store(false)
4919 notewakeup(&sched.sysmonnote)
4920 }
4921 unlock(&sched.lock)
4922 if pp != nil {
4923 acquirep(pp)
4924 execute(gp, false)
4925 }
4926 if locked {
4927
4928
4929
4930
4931 stoplockedm()
4932 execute(gp, false)
4933 }
4934 stopm()
4935 schedule()
4936 }
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
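// syscall_runtime_BeforeFork is called from the syscall package before
// fork. It blocks signals and spoils the stack guard so that any attempt
// to grow the stack between fork and exec is reliably detected.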
4950 func syscall_runtime_BeforeFork() {
4951 gp := getg().m.curg
4952
4953
4954
4955
4956 gp.m.locks++
4957 sigsave(&gp.m.sigmask)
4958 sigblock(false)
4959
4960
4961
4962
4963
4964 gp.stackguard0 = stackFork
4965 }
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979 func syscall_runtime_AfterFork() {
4980 gp := getg().m.curg
4981
4982
4983 gp.stackguard0 = gp.stack.lo + stackGuard
4984
4985 msigrestore(gp.m.sigmask)
4986
4987 gp.m.locks--
4988 }
4989
4990
4991
4992 var inForkedChild bool
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013 func syscall_runtime_AfterForkInChild() {
5014
5015
5016
5017
5018 inForkedChild = true
5019
5020 clearSignalHandlers()
5021
5022
5023
5024 msigrestore(getg().m.sigmask)
5025
5026 inForkedChild = false
5027 }
5028
5029
5030
5031
5032 var pendingPreemptSignals atomic.Int32
5033
5034
5035
5036
5037 func syscall_runtime_BeforeExec() {
5038
5039 execLock.lock()
5040
5041
5042
5043 if GOOS == "darwin" || GOOS == "ios" {
5044 for pendingPreemptSignals.Load() > 0 {
5045 osyield()
5046 }
5047 }
5048 }
5049
5050
5051
5052
5053 func syscall_runtime_AfterExec() {
5054 execLock.unlock()
5055 }
5056
5057
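// malg allocates a new g with a stack big enough for stacksize bytes.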
5058 func malg(stacksize int32) *g {
5059 newg := new(g)
5060 if stacksize >= 0 {
5061 stacksize = round2(stackSystem + stacksize)
5062 systemstack(func() {
5063 newg.stack = stackalloc(uint32(stacksize))
5064 })
5065 newg.stackguard0 = newg.stack.lo + stackGuard
5066 newg.stackguard1 = ^uintptr(0)
5067
5068
5069 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5070 }
5071 return newg
5072 }
5073
5074
5075
5076
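// newproc creates a new g running fn and puts it on the run queue of the
// current P. The compiler turns a go statement into a call to this.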
5077 func newproc(fn *funcval) {
5078 gp := getg()
5079 pc := sys.GetCallerPC()
5080 systemstack(func() {
5081 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5082
5083 pp := getg().m.p.ptr()
5084 runqput(pp, newg, true)
5085
5086 if mainStarted {
5087 wakep()
5088 }
5089 })
5090 }
5091
5092
5093
5094
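// newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is
// true), starting at fn. callerpc is the address of the go statement that
// created it. The caller is responsible for adding the new g to the
// scheduler.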
5095 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5096 if fn == nil {
5097 fatal("go of nil func value")
5098 }
5099
5100 mp := acquirem()
5101 pp := mp.p.ptr()
5102 newg := gfget(pp)
5103 if newg == nil {
5104 newg = malg(stackMin)
5105 casgstatus(newg, _Gidle, _Gdead)
5106 allgadd(newg)
5107 }
5108 if newg.stack.hi == 0 {
5109 throw("newproc1: newg missing stack")
5110 }
5111
5112 if readgstatus(newg) != _Gdead {
5113 throw("newproc1: new g is not Gdead")
5114 }
5115
5116 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5117 totalSize = alignUp(totalSize, sys.StackAlign)
5118 sp := newg.stack.hi - totalSize
5119 if usesLR {
5120
5121 *(*uintptr)(unsafe.Pointer(sp)) = 0
5122 prepGoExitFrame(sp)
5123 }
5124 if GOARCH == "arm64" {
5125
5126 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5127 }
5128
5129 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5130 newg.sched.sp = sp
5131 newg.stktopsp = sp
5132 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5133 newg.sched.g = guintptr(unsafe.Pointer(newg))
5134 gostartcallfn(&newg.sched, fn)
5135 newg.parentGoid = callergp.goid
5136 newg.gopc = callerpc
5137 newg.ancestors = saveAncestors(callergp)
5138 newg.startpc = fn.fn
5139 if isSystemGoroutine(newg, false) {
5140 sched.ngsys.Add(1)
5141 } else {
5142
5143 newg.syncGroup = callergp.syncGroup
5144 if mp.curg != nil {
5145 newg.labels = mp.curg.labels
5146 }
5147 if goroutineProfile.active {
5148
5149
5150
5151
5152
5153 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5154 }
5155 }
5156
5157 newg.trackingSeq = uint8(cheaprand())
5158 if newg.trackingSeq%gTrackingPeriod == 0 {
5159 newg.tracking = true
5160 }
5161 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5162
5163
5164 trace := traceAcquire()
5165 var status uint32 = _Grunnable
5166 if parked {
5167 status = _Gwaiting
5168 newg.waitreason = waitreason
5169 }
5170 if pp.goidcache == pp.goidcacheend {
5171
5172
5173
5174 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5175 pp.goidcache -= _GoidCacheBatch - 1
5176 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5177 }
5178 newg.goid = pp.goidcache
5179 casgstatus(newg, _Gdead, status)
5180 pp.goidcache++
5181 newg.trace.reset()
5182 if trace.ok() {
5183 trace.GoCreate(newg, newg.startpc, parked)
5184 traceRelease(trace)
5185 }
5186
5187
5188 if raceenabled {
5189 newg.racectx = racegostart(callerpc)
5190 newg.raceignore = 0
5191 if newg.labels != nil {
5192
5193
5194 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5195 }
5196 }
5197 releasem(mp)
5198
5199 return newg
5200 }
5201
5202
5203
5204
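// saveAncestors copies the ancestor goroutine information of callergp and
// prepends an entry for callergp itself, for use when the
// tracebackancestors debug option is enabled.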
5205 func saveAncestors(callergp *g) *[]ancestorInfo {
5206
5207 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5208 return nil
5209 }
5210 var callerAncestors []ancestorInfo
5211 if callergp.ancestors != nil {
5212 callerAncestors = *callergp.ancestors
5213 }
5214 n := int32(len(callerAncestors)) + 1
5215 if n > debug.tracebackancestors {
5216 n = debug.tracebackancestors
5217 }
5218 ancestors := make([]ancestorInfo, n)
5219 copy(ancestors[1:], callerAncestors)
5220
5221 var pcs [tracebackInnerFrames]uintptr
5222 npcs := gcallers(callergp, 0, pcs[:])
5223 ipcs := make([]uintptr, npcs)
5224 copy(ipcs, pcs[:])
5225 ancestors[0] = ancestorInfo{
5226 pcs: ipcs,
5227 goid: callergp.goid,
5228 gopc: callergp.gopc,
5229 }
5230
5231 ancestorsp := new([]ancestorInfo)
5232 *ancestorsp = ancestors
5233 return ancestorsp
5234 }
5235
5236
5237
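// gfput puts gp on the per-P free list, freeing any non-standard-sized
// stack. If the local list grows too long, a batch is transferred to the
// global free list.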
5238 func gfput(pp *p, gp *g) {
5239 if readgstatus(gp) != _Gdead {
5240 throw("gfput: bad status (not Gdead)")
5241 }
5242
5243 stksize := gp.stack.hi - gp.stack.lo
5244
5245 if stksize != uintptr(startingStackSize) {
5246
5247 stackfree(gp.stack)
5248 gp.stack.lo = 0
5249 gp.stack.hi = 0
5250 gp.stackguard0 = 0
5251 }
5252
5253 pp.gFree.push(gp)
5254 pp.gFree.n++
5255 if pp.gFree.n >= 64 {
5256 var (
5257 inc int32
5258 stackQ gQueue
5259 noStackQ gQueue
5260 )
5261 for pp.gFree.n >= 32 {
5262 gp := pp.gFree.pop()
5263 pp.gFree.n--
5264 if gp.stack.lo == 0 {
5265 noStackQ.push(gp)
5266 } else {
5267 stackQ.push(gp)
5268 }
5269 inc++
5270 }
5271 lock(&sched.gFree.lock)
5272 sched.gFree.noStack.pushAll(noStackQ)
5273 sched.gFree.stack.pushAll(stackQ)
5274 sched.gFree.n += inc
5275 unlock(&sched.gFree.lock)
5276 }
5277 }
5278
5279
5280
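// gfget takes a g from the per-P free list, refilling a batch from the
// global list if the local list is empty, and allocating a fresh stack if
// the g has none.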
5281 func gfget(pp *p) *g {
5282 retry:
5283 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5284 lock(&sched.gFree.lock)
5285
5286 for pp.gFree.n < 32 {
5287
5288 gp := sched.gFree.stack.pop()
5289 if gp == nil {
5290 gp = sched.gFree.noStack.pop()
5291 if gp == nil {
5292 break
5293 }
5294 }
5295 sched.gFree.n--
5296 pp.gFree.push(gp)
5297 pp.gFree.n++
5298 }
5299 unlock(&sched.gFree.lock)
5300 goto retry
5301 }
5302 gp := pp.gFree.pop()
5303 if gp == nil {
5304 return nil
5305 }
5306 pp.gFree.n--
5307 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5308
5309
5310
5311 systemstack(func() {
5312 stackfree(gp.stack)
5313 gp.stack.lo = 0
5314 gp.stack.hi = 0
5315 gp.stackguard0 = 0
5316 })
5317 }
5318 if gp.stack.lo == 0 {
5319
5320 systemstack(func() {
5321 gp.stack = stackalloc(startingStackSize)
5322 })
5323 gp.stackguard0 = gp.stack.lo + stackGuard
5324 } else {
5325 if raceenabled {
5326 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5327 }
5328 if msanenabled {
5329 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5330 }
5331 if asanenabled {
5332 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5333 }
5334 }
5335 return gp
5336 }
5337
5338
5339 func gfpurge(pp *p) {
5340 var (
5341 inc int32
5342 stackQ gQueue
5343 noStackQ gQueue
5344 )
5345 for !pp.gFree.empty() {
5346 gp := pp.gFree.pop()
5347 pp.gFree.n--
5348 if gp.stack.lo == 0 {
5349 noStackQ.push(gp)
5350 } else {
5351 stackQ.push(gp)
5352 }
5353 inc++
5354 }
5355 lock(&sched.gFree.lock)
5356 sched.gFree.noStack.pushAll(noStackQ)
5357 sched.gFree.stack.pushAll(stackQ)
5358 sched.gFree.n += inc
5359 unlock(&sched.gFree.lock)
5360 }
5361
5362
5363 func Breakpoint() {
5364 breakpoint()
5365 }
5366
5367
5368
5369
5370
5371
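// dolockOSThread is called by LockOSThread and lockOSThread below after
// they update m.lockedExt/m.lockedInt. Preemption must not occur during
// this call, or else the m might be different here than in the caller.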
5372 func dolockOSThread() {
5373 if GOARCH == "wasm" {
5374 return
5375 }
5376 gp := getg()
5377 gp.m.lockedg.set(gp)
5378 gp.lockedm.set(gp.m)
5379 }
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
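// LockOSThread wires the calling goroutine to its current operating
// system thread. The calling goroutine will always execute on that
// thread, and no other goroutine will execute on it, until the calling
// goroutine has made as many calls to UnlockOSThread as to LockOSThread.
// If the calling goroutine exits without unlocking the thread, the thread
// will terminate.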
5397 func LockOSThread() {
5398 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5399
5400
5401
5402 startTemplateThread()
5403 }
5404 gp := getg()
5405 gp.m.lockedExt++
5406 if gp.m.lockedExt == 0 {
5407 gp.m.lockedExt--
5408 panic("LockOSThread nesting overflow")
5409 }
5410 dolockOSThread()
5411 }
5412
5413
5414 func lockOSThread() {
5415 getg().m.lockedInt++
5416 dolockOSThread()
5417 }
5418
5419
5420
5421
5422
5423
5424 func dounlockOSThread() {
5425 if GOARCH == "wasm" {
5426 return
5427 }
5428 gp := getg()
5429 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5430 return
5431 }
5432 gp.m.lockedg = 0
5433 gp.lockedm = 0
5434 }
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
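// UnlockOSThread undoes an earlier call to LockOSThread. If this drops
// the number of active LockOSThread calls on the calling goroutine to
// zero, it unwires the calling goroutine from its fixed operating system
// thread. If there are no active LockOSThread calls, this is a no-op.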
5450 func UnlockOSThread() {
5451 gp := getg()
5452 if gp.m.lockedExt == 0 {
5453 return
5454 }
5455 gp.m.lockedExt--
5456 dounlockOSThread()
5457 }
5458
5459
5460 func unlockOSThread() {
5461 gp := getg()
5462 if gp.m.lockedInt == 0 {
5463 systemstack(badunlockosthread)
5464 }
5465 gp.m.lockedInt--
5466 dounlockOSThread()
5467 }
5468
5469 func badunlockosthread() {
5470 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5471 }
5472
5473 func gcount() int32 {
5474 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5475 for _, pp := range allp {
5476 n -= pp.gFree.n
5477 }
5478
5479
5480
5481 if n < 1 {
5482 n = 1
5483 }
5484 return n
5485 }
5486
5487 func mcount() int32 {
5488 return int32(sched.mnext - sched.nmfreed)
5489 }
5490
5491 var prof struct {
5492 signalLock atomic.Uint32
5493
5494
5495
5496 hz atomic.Int32
5497 }
5498
5499 func _System() { _System() }
5500 func _ExternalCode() { _ExternalCode() }
5501 func _LostExternalCode() { _LostExternalCode() }
5502 func _GC() { _GC() }
5503 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5504 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5505 func _VDSO() { _VDSO() }
5506
5507
5508
5509
5510
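// sigprof is called on receipt of a SIGPROF signal. It runs from the
// signal handler, possibly during stop-the-world, and records a CPU
// profile sample for the interrupted code.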
5511 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5512 if prof.hz.Load() == 0 {
5513 return
5514 }
5515
5516
5517
5518
5519 if mp != nil && mp.profilehz == 0 {
5520 return
5521 }
5522
5523
5524
5525
5526
5527
5528
5529 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5530 if f := findfunc(pc); f.valid() {
5531 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5532 cpuprof.lostAtomic++
5533 return
5534 }
5535 }
5536 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5537
5538
5539
5540 cpuprof.lostAtomic++
5541 return
5542 }
5543 }
5544
5545
5546
5547
5548
5549
5550
5551 getg().m.mallocing++
5552
5553 var u unwinder
5554 var stk [maxCPUProfStack]uintptr
5555 n := 0
5556 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5557 cgoOff := 0
5558
5559
5560
5561
5562
5563 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5564 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5565 cgoOff++
5566 }
5567 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5568 mp.cgoCallers[0] = 0
5569 }
5570
5571
5572 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5573 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5574
5575
5576 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5577 } else if mp != nil && mp.vdsoSP != 0 {
5578
5579
5580 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5581 } else {
5582 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5583 }
5584 n += tracebackPCs(&u, 0, stk[n:])
5585
5586 if n <= 0 {
5587
5588
5589 n = 2
5590 if inVDSOPage(pc) {
5591 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5592 } else if pc > firstmoduledata.etext {
5593
5594 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5595 }
5596 stk[0] = pc
5597 if mp.preemptoff != "" {
5598 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5599 } else {
5600 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5601 }
5602 }
5603
5604 if prof.hz.Load() != 0 {
5605
5606
5607
5608 var tagPtr *unsafe.Pointer
5609 if gp != nil && gp.m != nil && gp.m.curg != nil {
5610 tagPtr = &gp.m.curg.labels
5611 }
5612 cpuprof.add(tagPtr, stk[:n])
5613
5614 gprof := gp
5615 var mp *m
5616 var pp *p
5617 if gp != nil && gp.m != nil {
5618 if gp.m.curg != nil {
5619 gprof = gp.m.curg
5620 }
5621 mp = gp.m
5622 pp = gp.m.p.ptr()
5623 }
5624 traceCPUSample(gprof, mp, pp, stk[:n])
5625 }
5626 getg().m.mallocing--
5627 }
5628
5629
5630
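// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, it turns off CPU profiling.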
5631 func setcpuprofilerate(hz int32) {
5632
5633 if hz < 0 {
5634 hz = 0
5635 }
5636
5637
5638
5639 gp := getg()
5640 gp.m.locks++
5641
5642
5643
5644
5645 setThreadCPUProfiler(0)
5646
5647 for !prof.signalLock.CompareAndSwap(0, 1) {
5648 osyield()
5649 }
5650 if prof.hz.Load() != hz {
5651 setProcessCPUProfiler(hz)
5652 prof.hz.Store(hz)
5653 }
5654 prof.signalLock.Store(0)
5655
5656 lock(&sched.lock)
5657 sched.profilehz = hz
5658 unlock(&sched.lock)
5659
5660 if hz != 0 {
5661 setThreadCPUProfiler(hz)
5662 }
5663
5664 gp.m.locks--
5665 }
5666
5667
5668
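// init initializes pp, which may be a freshly allocated p or one that was
// previously destroyed, and transitions it to status _Pgcstop.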
5669 func (pp *p) init(id int32) {
5670 pp.id = id
5671 pp.status = _Pgcstop
5672 pp.sudogcache = pp.sudogbuf[:0]
5673 pp.deferpool = pp.deferpoolbuf[:0]
5674 pp.wbBuf.reset()
5675 if pp.mcache == nil {
5676 if id == 0 {
5677 if mcache0 == nil {
5678 throw("missing mcache?")
5679 }
5680
5681
5682 pp.mcache = mcache0
5683 } else {
5684 pp.mcache = allocmcache()
5685 }
5686 }
5687 if raceenabled && pp.raceprocctx == 0 {
5688 if id == 0 {
5689 pp.raceprocctx = raceprocctx0
5690 raceprocctx0 = 0
5691 } else {
5692 pp.raceprocctx = raceproccreate()
5693 }
5694 }
5695 lockInit(&pp.timers.mu, lockRankTimers)
5696
5697
5698
5699 timerpMask.set(id)
5700
5701
5702 idlepMask.clear(id)
5703 }
5704
5705
5706
5707
5708
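// destroy releases all resources associated with pp and transitions it to
// _Pdead. sched.lock must be held and the world must be stopped.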
5709 func (pp *p) destroy() {
5710 assertLockHeld(&sched.lock)
5711 assertWorldStopped()
5712
5713
5714 for pp.runqhead != pp.runqtail {
5715
5716 pp.runqtail--
5717 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5718
5719 globrunqputhead(gp)
5720 }
5721 if pp.runnext != 0 {
5722 globrunqputhead(pp.runnext.ptr())
5723 pp.runnext = 0
5724 }
5725
5726
5727 getg().m.p.ptr().timers.take(&pp.timers)
5728
5729
5730 if gcphase != _GCoff {
5731 wbBufFlush1(pp)
5732 pp.gcw.dispose()
5733 }
5734 for i := range pp.sudogbuf {
5735 pp.sudogbuf[i] = nil
5736 }
5737 pp.sudogcache = pp.sudogbuf[:0]
5738 pp.pinnerCache = nil
5739 for j := range pp.deferpoolbuf {
5740 pp.deferpoolbuf[j] = nil
5741 }
5742 pp.deferpool = pp.deferpoolbuf[:0]
5743 systemstack(func() {
5744 for i := 0; i < pp.mspancache.len; i++ {
5745
5746 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5747 }
5748 pp.mspancache.len = 0
5749 lock(&mheap_.lock)
5750 pp.pcache.flush(&mheap_.pages)
5751 unlock(&mheap_.lock)
5752 })
5753 freemcache(pp.mcache)
5754 pp.mcache = nil
5755 gfpurge(pp)
5756 if raceenabled {
5757 if pp.timers.raceCtx != 0 {
5758
5759
5760
5761
5762
5763 mp := getg().m
5764 phold := mp.p.ptr()
5765 mp.p.set(pp)
5766
5767 racectxend(pp.timers.raceCtx)
5768 pp.timers.raceCtx = 0
5769
5770 mp.p.set(phold)
5771 }
5772 raceprocdestroy(pp.raceprocctx)
5773 pp.raceprocctx = 0
5774 }
5775 pp.gcAssistTime = 0
5776 pp.status = _Pdead
5777 }
5778
5779
5780
5781
5782
5783
5784
5785
5786
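// procresize changes the number of processors to nprocs. sched.lock must
// be held and the world must be stopped. It returns the list of Ps that
// have local work; the caller must schedule them.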
5787 func procresize(nprocs int32) *p {
5788 assertLockHeld(&sched.lock)
5789 assertWorldStopped()
5790
5791 old := gomaxprocs
5792 if old < 0 || nprocs <= 0 {
5793 throw("procresize: invalid arg")
5794 }
5795 trace := traceAcquire()
5796 if trace.ok() {
5797 trace.Gomaxprocs(nprocs)
5798 traceRelease(trace)
5799 }
5800
5801
5802 now := nanotime()
5803 if sched.procresizetime != 0 {
5804 sched.totaltime += int64(old) * (now - sched.procresizetime)
5805 }
5806 sched.procresizetime = now
5807
5808 maskWords := (nprocs + 31) / 32
5809
5810
5811 if nprocs > int32(len(allp)) {
5812
5813
5814 lock(&allpLock)
5815 if nprocs <= int32(cap(allp)) {
5816 allp = allp[:nprocs]
5817 } else {
5818 nallp := make([]*p, nprocs)
5819
5820
5821 copy(nallp, allp[:cap(allp)])
5822 allp = nallp
5823 }
5824
5825 if maskWords <= int32(cap(idlepMask)) {
5826 idlepMask = idlepMask[:maskWords]
5827 timerpMask = timerpMask[:maskWords]
5828 } else {
5829 nidlepMask := make([]uint32, maskWords)
5830
5831 copy(nidlepMask, idlepMask)
5832 idlepMask = nidlepMask
5833
5834 ntimerpMask := make([]uint32, maskWords)
5835 copy(ntimerpMask, timerpMask)
5836 timerpMask = ntimerpMask
5837 }
5838 unlock(&allpLock)
5839 }
5840
5841
5842 for i := old; i < nprocs; i++ {
5843 pp := allp[i]
5844 if pp == nil {
5845 pp = new(p)
5846 }
5847 pp.init(i)
5848 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5849 }
5850
5851 gp := getg()
5852 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5853
5854 gp.m.p.ptr().status = _Prunning
5855 gp.m.p.ptr().mcache.prepareForSweep()
5856 } else {
5857
5858
5859
5860
5861
5862 if gp.m.p != 0 {
5863 trace := traceAcquire()
5864 if trace.ok() {
5865
5866
5867
5868 trace.GoSched()
5869 trace.ProcStop(gp.m.p.ptr())
5870 traceRelease(trace)
5871 }
5872 gp.m.p.ptr().m = 0
5873 }
5874 gp.m.p = 0
5875 pp := allp[0]
5876 pp.m = 0
5877 pp.status = _Pidle
5878 acquirep(pp)
5879 trace := traceAcquire()
5880 if trace.ok() {
5881 trace.GoStart()
5882 traceRelease(trace)
5883 }
5884 }
5885
5886
5887 mcache0 = nil
5888
5889
5890 for i := nprocs; i < old; i++ {
5891 pp := allp[i]
5892 pp.destroy()
5893
5894 }
5895
5896
5897 if int32(len(allp)) != nprocs {
5898 lock(&allpLock)
5899 allp = allp[:nprocs]
5900 idlepMask = idlepMask[:maskWords]
5901 timerpMask = timerpMask[:maskWords]
5902 unlock(&allpLock)
5903 }
5904
5905 var runnablePs *p
5906 for i := nprocs - 1; i >= 0; i-- {
5907 pp := allp[i]
5908 if gp.m.p.ptr() == pp {
5909 continue
5910 }
5911 pp.status = _Pidle
5912 if runqempty(pp) {
5913 pidleput(pp, now)
5914 } else {
5915 pp.m.set(mget())
5916 pp.link.set(runnablePs)
5917 runnablePs = pp
5918 }
5919 }
5920 stealOrder.reset(uint32(nprocs))
5921 var int32p *int32 = &gomaxprocs
5922 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5923 if old != nprocs {
5924
5925 gcCPULimiter.resetCapacity(now, nprocs)
5926 }
5927 return runnablePs
5928 }
5929
5930
5931
5932
5933
5934
5935
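// acquirep associates pp with the current M and prepares pp's mcache for
// use.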
5936 func acquirep(pp *p) {
5937
5938 wirep(pp)
5939
5940
5941
5942
5943
5944 pp.mcache.prepareForSweep()
5945
5946 trace := traceAcquire()
5947 if trace.ok() {
5948 trace.ProcStart()
5949 traceRelease(trace)
5950 }
5951 }
5952
5953
5954
5955
5956
5957
5958
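// wirep is the first step of acquirep: it wires the current M to pp and
// marks pp _Prunning. It is split out so this part can run without write
// barriers, since the M does not yet have a P.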
5959 func wirep(pp *p) {
5960 gp := getg()
5961
5962 if gp.m.p != 0 {
5963
5964
5965 systemstack(func() {
5966 throw("wirep: already in go")
5967 })
5968 }
5969 if pp.m != 0 || pp.status != _Pidle {
5970
5971
5972 systemstack(func() {
5973 id := int64(0)
5974 if pp.m != 0 {
5975 id = pp.m.ptr().id
5976 }
5977 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5978 throw("wirep: invalid p state")
5979 })
5980 }
5981 gp.m.p.set(pp)
5982 pp.m.set(gp.m)
5983 pp.status = _Prunning
5984 }
5985
5986
5987 func releasep() *p {
5988 trace := traceAcquire()
5989 if trace.ok() {
5990 trace.ProcStop(getg().m.p.ptr())
5991 traceRelease(trace)
5992 }
5993 return releasepNoTrace()
5994 }
5995
5996
5997 func releasepNoTrace() *p {
5998 gp := getg()
5999
6000 if gp.m.p == 0 {
6001 throw("releasep: invalid arg")
6002 }
6003 pp := gp.m.p.ptr()
6004 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6005 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6006 throw("releasep: invalid p state")
6007 }
6008 gp.m.p = 0
6009 pp.m = 0
6010 pp.status = _Pidle
6011 return pp
6012 }
6013
6014 func incidlelocked(v int32) {
6015 lock(&sched.lock)
6016 sched.nmidlelocked += v
6017 if v > 0 {
6018 checkdead()
6019 }
6020 unlock(&sched.lock)
6021 }
6022
6023
6024
6025
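// checkdead reports a fatal error if no goroutine can make progress
// (every goroutine is blocked). sched.lock must be held.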
6026 func checkdead() {
6027 assertLockHeld(&sched.lock)
6028
6029
6030
6031
6032
6033
6034 if (islibrary || isarchive) && GOARCH != "wasm" {
6035 return
6036 }
6037
6038
6039
6040
6041
6042 if panicking.Load() > 0 {
6043 return
6044 }
6045
6046
6047
6048
6049
6050 var run0 int32
6051 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6052 run0 = 1
6053 }
6054
6055 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6056 if run > run0 {
6057 return
6058 }
6059 if run < 0 {
6060 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6061 unlock(&sched.lock)
6062 throw("checkdead: inconsistent counts")
6063 }
6064
6065 grunning := 0
6066 forEachG(func(gp *g) {
6067 if isSystemGoroutine(gp, false) {
6068 return
6069 }
6070 s := readgstatus(gp)
6071 switch s &^ _Gscan {
6072 case _Gwaiting,
6073 _Gpreempted:
6074 grunning++
6075 case _Grunnable,
6076 _Grunning,
6077 _Gsyscall:
6078 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6079 unlock(&sched.lock)
6080 throw("checkdead: runnable g")
6081 }
6082 })
6083 if grunning == 0 {
6084 unlock(&sched.lock)
6085 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6086 }
6087
6088
6089 if faketime != 0 {
6090 if when := timeSleepUntil(); when < maxWhen {
6091 faketime = when
6092
6093
6094 pp, _ := pidleget(faketime)
6095 if pp == nil {
6096
6097
6098 unlock(&sched.lock)
6099 throw("checkdead: no p for timer")
6100 }
6101 mp := mget()
6102 if mp == nil {
6103
6104
6105 unlock(&sched.lock)
6106 throw("checkdead: no m for timer")
6107 }
6108
6109
6110
6111 sched.nmspinning.Add(1)
6112 mp.spinning = true
6113 mp.nextp.set(pp)
6114 notewakeup(&mp.park)
6115 return
6116 }
6117 }
6118
6119
6120 for _, pp := range allp {
6121 if len(pp.timers.heap) > 0 {
6122 return
6123 }
6124 }
6125
6126 unlock(&sched.lock)
6127 fatal("all goroutines are asleep - deadlock!")
6128 }
6129
6130
6131
6132
6133
6134
6135 var forcegcperiod int64 = 2 * 60 * 1e9
6136
6137
6138
6139 var needSysmonWorkaround bool = false
6140
6141
6142
6143
6144 const haveSysmon = GOARCH != "wasm"
6145
6146
6147
6148
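// sysmon is the system monitor, running without a P on a dedicated M. It
// retakes Ps blocked in syscalls, preempts long-running goroutines, polls
// the network when nobody else has recently, wakes the scavenger, and
// triggers forced GCs.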
6149 func sysmon() {
6150 lock(&sched.lock)
6151 sched.nmsys++
6152 checkdead()
6153 unlock(&sched.lock)
6154
6155 lasttrace := int64(0)
6156 idle := 0
6157 delay := uint32(0)
6158
6159 for {
6160 if idle == 0 {
6161 delay = 20
6162 } else if idle > 50 {
6163 delay *= 2
6164 }
6165 if delay > 10*1000 {
6166 delay = 10 * 1000
6167 }
6168 usleep(delay)
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
6180
6181
6182
6183
6184
6185 now := nanotime()
6186 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6187 lock(&sched.lock)
6188 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6189 syscallWake := false
6190 next := timeSleepUntil()
6191 if next > now {
6192 sched.sysmonwait.Store(true)
6193 unlock(&sched.lock)
6194
6195
6196 sleep := forcegcperiod / 2
6197 if next-now < sleep {
6198 sleep = next - now
6199 }
6200 shouldRelax := sleep >= osRelaxMinNS
6201 if shouldRelax {
6202 osRelax(true)
6203 }
6204 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6205 if shouldRelax {
6206 osRelax(false)
6207 }
6208 lock(&sched.lock)
6209 sched.sysmonwait.Store(false)
6210 noteclear(&sched.sysmonnote)
6211 }
6212 if syscallWake {
6213 idle = 0
6214 delay = 20
6215 }
6216 }
6217 unlock(&sched.lock)
6218 }
6219
6220 lock(&sched.sysmonlock)
6221
6222
6223 now = nanotime()
6224
6225
6226 if *cgo_yield != nil {
6227 asmcgocall(*cgo_yield, nil)
6228 }
6229
6230 lastpoll := sched.lastpoll.Load()
6231 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6232 sched.lastpoll.CompareAndSwap(lastpoll, now)
6233 list, delta := netpoll(0)
6234 if !list.empty() {
6235
6236
6237
6238
6239
6240
6241
6242 incidlelocked(-1)
6243 injectglist(&list)
6244 incidlelocked(1)
6245 netpollAdjustWaiters(delta)
6246 }
6247 }
6248 if GOOS == "netbsd" && needSysmonWorkaround {
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264 if next := timeSleepUntil(); next < now {
6265 startm(nil, false, false)
6266 }
6267 }
6268 if scavenger.sysmonWake.Load() != 0 {
6269
6270 scavenger.wake()
6271 }
6272
6273
6274 if retake(now) != 0 {
6275 idle = 0
6276 } else {
6277 idle++
6278 }
6279
6280 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6281 lock(&forcegc.lock)
6282 forcegc.idle.Store(false)
6283 var list gList
6284 list.push(forcegc.g)
6285 injectglist(&list)
6286 unlock(&forcegc.lock)
6287 }
6288 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6289 lasttrace = now
6290 schedtrace(debug.scheddetail > 0)
6291 }
6292 unlock(&sched.sysmonlock)
6293 }
6294 }
6295
6296 type sysmontick struct {
6297 schedtick uint32
6298 syscalltick uint32
6299 schedwhen int64
6300 syscallwhen int64
6301 }
6302
6303
6304
6305 const forcePreemptNS = 10 * 1000 * 1000
6306
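// retake preempts goroutines that have run on the same P for too long and
// retakes Ps that have been blocked in a syscall, handing them off to
// other Ms. It returns the number of Ps retaken.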
6307 func retake(now int64) uint32 {
6308 n := 0
6309
6310
6311 lock(&allpLock)
6312
6313
6314
6315 for i := 0; i < len(allp); i++ {
6316 pp := allp[i]
6317 if pp == nil {
6318
6319
6320 continue
6321 }
6322 pd := &pp.sysmontick
6323 s := pp.status
6324 sysretake := false
6325 if s == _Prunning || s == _Psyscall {
6326
6327
6328
6329
6330 t := int64(pp.schedtick)
6331 if int64(pd.schedtick) != t {
6332 pd.schedtick = uint32(t)
6333 pd.schedwhen = now
6334 } else if pd.schedwhen+forcePreemptNS <= now {
6335 preemptone(pp)
6336
6337
6338 sysretake = true
6339 }
6340 }
6341 if s == _Psyscall {
6342
6343 t := int64(pp.syscalltick)
6344 if !sysretake && int64(pd.syscalltick) != t {
6345 pd.syscalltick = uint32(t)
6346 pd.syscallwhen = now
6347 continue
6348 }
6349
6350
6351
6352 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6353 continue
6354 }
6355
6356 unlock(&allpLock)
6357
6358
6359
6360
6361 incidlelocked(-1)
6362 trace := traceAcquire()
6363 if atomic.Cas(&pp.status, s, _Pidle) {
6364 if trace.ok() {
6365 trace.ProcSteal(pp, false)
6366 traceRelease(trace)
6367 }
6368 n++
6369 pp.syscalltick++
6370 handoffp(pp)
6371 } else if trace.ok() {
6372 traceRelease(trace)
6373 }
6374 incidlelocked(1)
6375 lock(&allpLock)
6376 }
6377 }
6378 unlock(&allpLock)
6379 return uint32(n)
6380 }
6381
6382
6383
6384
6385
6386
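// preemptall requests preemption of all running goroutines. It is purely
// best-effort and reports whether a request was issued to at least one
// goroutine.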
6387 func preemptall() bool {
6388 res := false
6389 for _, pp := range allp {
6390 if pp.status != _Prunning {
6391 continue
6392 }
6393 if preemptone(pp) {
6394 res = true
6395 }
6396 }
6397 return res
6398 }
6399
6400
6401
6402
6403
6404
6405
6406
6407
6408
6409
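// preemptone requests preemption of the goroutine running on pp. It is
// purely best-effort: it may fail to inform the goroutine or inform the
// wrong one. It reports whether a request was issued; the preemption
// itself happens later, when the target next checks its stack guard or
// receives the preemption signal.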
6410 func preemptone(pp *p) bool {
6411 mp := pp.m.ptr()
6412 if mp == nil || mp == getg().m {
6413 return false
6414 }
6415 gp := mp.curg
6416 if gp == nil || gp == mp.g0 {
6417 return false
6418 }
6419
6420 gp.preempt = true
6421
6422
6423
6424
6425
6426 gp.stackguard0 = stackPreempt
6427
6428
6429 if preemptMSupported && debug.asyncpreemptoff == 0 {
6430 pp.preempt = true
6431 preemptM(mp)
6432 }
6433
6434 return true
6435 }
6436
6437 var starttime int64
6438
6439 func schedtrace(detailed bool) {
6440 now := nanotime()
6441 if starttime == 0 {
6442 starttime = now
6443 }
6444
6445 lock(&sched.lock)
6446 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6447 if detailed {
6448 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6449 }
6450
6451
6452
6453 for i, pp := range allp {
6454 mp := pp.m.ptr()
6455 h := atomic.Load(&pp.runqhead)
6456 t := atomic.Load(&pp.runqtail)
6457 if detailed {
6458 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6459 if mp != nil {
6460 print(mp.id)
6461 } else {
6462 print("nil")
6463 }
6464 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6465 } else {
6466
6467
6468 print(" ")
6469 if i == 0 {
6470 print("[")
6471 }
6472 print(t - h)
6473 if i == len(allp)-1 {
6474 print("]\n")
6475 }
6476 }
6477 }
6478
6479 if !detailed {
6480 unlock(&sched.lock)
6481 return
6482 }
6483
6484 for mp := allm; mp != nil; mp = mp.alllink {
6485 pp := mp.p.ptr()
6486 print(" M", mp.id, ": p=")
6487 if pp != nil {
6488 print(pp.id)
6489 } else {
6490 print("nil")
6491 }
6492 print(" curg=")
6493 if mp.curg != nil {
6494 print(mp.curg.goid)
6495 } else {
6496 print("nil")
6497 }
6498 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6499 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6500 print(lockedg.goid)
6501 } else {
6502 print("nil")
6503 }
6504 print("\n")
6505 }
6506
6507 forEachG(func(gp *g) {
6508 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6509 if gp.m != nil {
6510 print(gp.m.id)
6511 } else {
6512 print("nil")
6513 }
6514 print(" lockedm=")
6515 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6516 print(lockedm.id)
6517 } else {
6518 print("nil")
6519 }
6520 print("\n")
6521 })
6522 unlock(&sched.lock)
6523 }
6524
6525
6526
6527
6528
6529
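// schedEnableUser enables or disables scheduling of user goroutines. It
// does not stop user goroutines that are already running, so the caller
// should stop the world first when disabling them.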
6530 func schedEnableUser(enable bool) {
6531 lock(&sched.lock)
6532 if sched.disable.user == !enable {
6533 unlock(&sched.lock)
6534 return
6535 }
6536 sched.disable.user = !enable
6537 if enable {
6538 n := sched.disable.n
6539 sched.disable.n = 0
6540 globrunqputbatch(&sched.disable.runnable, n)
6541 unlock(&sched.lock)
6542 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6543 startm(nil, false, false)
6544 }
6545 } else {
6546 unlock(&sched.lock)
6547 }
6548 }
6549
6550
6551
6552
6553
6554 func schedEnabled(gp *g) bool {
6555 assertLockHeld(&sched.lock)
6556
6557 if sched.disable.user {
6558 return isSystemGoroutine(gp, true)
6559 }
6560 return true
6561 }
6562
6563
6564
6565
6566
6567
6568 func mput(mp *m) {
6569 assertLockHeld(&sched.lock)
6570
6571 mp.schedlink = sched.midle
6572 sched.midle.set(mp)
6573 sched.nmidle++
6574 checkdead()
6575 }
6576
6577
6578
6579
6580
6581
6582 func mget() *m {
6583 assertLockHeld(&sched.lock)
6584
6585 mp := sched.midle.ptr()
6586 if mp != nil {
6587 sched.midle = mp.schedlink
6588 sched.nmidle--
6589 }
6590 return mp
6591 }
6592
6593
6594
6595
6596
6597
6598 func globrunqput(gp *g) {
6599 assertLockHeld(&sched.lock)
6600
6601 sched.runq.pushBack(gp)
6602 sched.runqsize++
6603 }
6604
6605
6606
6607
6608
6609
6610 func globrunqputhead(gp *g) {
6611 assertLockHeld(&sched.lock)
6612
6613 sched.runq.push(gp)
6614 sched.runqsize++
6615 }
6616
6617
6618
6619
6620
6621
6622
6623 func globrunqputbatch(batch *gQueue, n int32) {
6624 assertLockHeld(&sched.lock)
6625
6626 sched.runq.pushBackAll(*batch)
6627 sched.runqsize += n
6628 *batch = gQueue{}
6629 }
6630
6631
6632
6633 func globrunqget(pp *p, max int32) *g {
6634 assertLockHeld(&sched.lock)
6635
6636 if sched.runqsize == 0 {
6637 return nil
6638 }
6639
6640 n := sched.runqsize/gomaxprocs + 1
6641 if n > sched.runqsize {
6642 n = sched.runqsize
6643 }
6644 if max > 0 && n > max {
6645 n = max
6646 }
6647 if n > int32(len(pp.runq))/2 {
6648 n = int32(len(pp.runq)) / 2
6649 }
6650
6651 sched.runqsize -= n
6652
6653 gp := sched.runq.pop()
6654 n--
6655 for ; n > 0; n-- {
6656 gp1 := sched.runq.pop()
6657 runqput(pp, gp1, false)
6658 }
6659 return gp
6660 }
6661
6662
6663 type pMask []uint32
6664
6665
6666 func (p pMask) read(id uint32) bool {
6667 word := id / 32
6668 mask := uint32(1) << (id % 32)
6669 return (atomic.Load(&p[word]) & mask) != 0
6670 }
6671
6672
6673 func (p pMask) set(id int32) {
6674 word := id / 32
6675 mask := uint32(1) << (id % 32)
6676 atomic.Or(&p[word], mask)
6677 }
6678
6679
6680 func (p pMask) clear(id int32) {
6681 word := id / 32
6682 mask := uint32(1) << (id % 32)
6683 atomic.And(&p[word], ^mask)
6684 }
6685
6686
6687
6688
6689
6690
6691
6692
6693
6694
6695
6696
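// pidleput puts pp on the idle P list and updates the idle and timer
// masks. now must be a relatively recent call to nanotime, or 0; the
// (possibly updated) time is returned. This transfers ownership of pp:
// once sched.lock is released it is no longer safe to use pp.
// sched.lock must be held.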
6697 func pidleput(pp *p, now int64) int64 {
6698 assertLockHeld(&sched.lock)
6699
6700 if !runqempty(pp) {
6701 throw("pidleput: P has non-empty run queue")
6702 }
6703 if now == 0 {
6704 now = nanotime()
6705 }
6706 if pp.timers.len.Load() == 0 {
6707 timerpMask.clear(pp.id)
6708 }
6709 idlepMask.set(pp.id)
6710 pp.link = sched.pidle
6711 sched.pidle.set(pp)
6712 sched.npidle.Add(1)
6713 if !pp.limiterEvent.start(limiterEventIdle, now) {
6714 throw("must be able to track idle limiter event")
6715 }
6716 return now
6717 }
6718
6719
6720
6721
6722
6723
6724
6725
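// pidleget tries to take a P off the idle P list, acquiring ownership of
// it and updating the idle and timer masks. sched.lock must be held.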
6726 func pidleget(now int64) (*p, int64) {
6727 assertLockHeld(&sched.lock)
6728
6729 pp := sched.pidle.ptr()
6730 if pp != nil {
6731
6732 if now == 0 {
6733 now = nanotime()
6734 }
6735 timerpMask.set(pp.id)
6736 idlepMask.clear(pp.id)
6737 sched.pidle = pp.link
6738 sched.npidle.Add(-1)
6739 pp.limiterEvent.stop(limiterEventIdle, now)
6740 }
6741 return pp, now
6742 }
6743
6744
6745
6746
6747
6748
6749
6750
6751
6752
6753
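// pidlegetSpinning is like pidleget, but when no P is available it sets
// sched.needspinning so that an M in the process of giving up its
// spinning state re-checks for this work before parking.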
6754 func pidlegetSpinning(now int64) (*p, int64) {
6755 assertLockHeld(&sched.lock)
6756
6757 pp, now := pidleget(now)
6758 if pp == nil {
6759
6760
6761
6762 sched.needspinning.Store(1)
6763 return nil, now
6764 }
6765
6766 return pp, now
6767 }
6768
6769
6770
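// runqempty reports whether pp has no Gs on its local run queue. It never
// returns true spuriously.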
6771 func runqempty(pp *p) bool {
6772
6773
6774
6775
6776 for {
6777 head := atomic.Load(&pp.runqhead)
6778 tail := atomic.Load(&pp.runqtail)
6779 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6780 if tail == atomic.Load(&pp.runqtail) {
6781 return head == tail && runnext == 0
6782 }
6783 }
6784 }
6785
6786
6787
6788
6789
6790
6791
6792
6793
6794
6795 const randomizeScheduler = raceenabled
6796
6797
6798
6799
6800
6801
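// runqput tries to put g on the local runnable queue. If next is true, g
// goes into the pp.runnext slot, displacing any previous occupant into
// the queue. If the queue is full, half of it plus g are moved to the
// global queue. Executed only by the owner P.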
6802 func runqput(pp *p, gp *g, next bool) {
6803 if !haveSysmon && next {
6804 // A runnext goroutine shares the same time slice as the
6805 // current goroutine (inheritTime from runqget). To prevent a
6806 // ping-pong pair of goroutines from starving all others, we
6807 // depend on sysmon to preempt "long-running goroutines". That
6808 // is, any set of goroutines sharing the same time slice.
6809 //
6810 // If there is no sysmon, we must avoid runnext entirely or
6811 // risk starvation.
6812 next = false
6813 }
6814 if randomizeScheduler && next && randn(2) == 0 {
6815 next = false
6816 }
6817
6818 if next {
6819 retryNext:
6820 oldnext := pp.runnext
6821 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6822 goto retryNext
6823 }
6824 if oldnext == 0 {
6825 return
6826 }
6827
6828 gp = oldnext.ptr()
6829 }
6830
6831 retry:
6832 h := atomic.LoadAcq(&pp.runqhead)
6833 t := pp.runqtail
6834 if t-h < uint32(len(pp.runq)) {
6835 pp.runq[t%uint32(len(pp.runq))].set(gp)
6836 atomic.StoreRel(&pp.runqtail, t+1)
6837 return
6838 }
6839 if runqputslow(pp, gp, h, t) {
6840 return
6841 }
6842
6843 goto retry
6844 }
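
// An illustrative trace of the runnext handling above, assuming
// randomizeScheduler is false, sysmon is available, and the local queue has
// room (g1, g2, g3 are hypothetical goroutines):
//
//	runqput(pp, g1, true)  // runnext: g1, runq: []
//	runqput(pp, g2, true)  // runnext: g2, runq: [g1]  (old runnext kicked out)
//	runqput(pp, g3, false) // runnext: g2, runq: [g1 g3]
//	runqget(pp)            // returns g2 with inheritTime == true
//
// Only when the 256-entry local queue is full does runqputslow move half of
// it (plus gp) to the global queue.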
6845
6846 // runqputslow puts gp and a batch of work from the local runnable queue on
6847 // the global queue. Executed only by the owner P.
6848 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6849 var batch [len(pp.runq)/2 + 1]*g
6850
6851
6852 n := t - h
6853 n = n / 2
6854 if n != uint32(len(pp.runq)/2) {
6855 throw("runqputslow: queue is not full")
6856 }
6857 for i := uint32(0); i < n; i++ {
6858 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6859 }
6860 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6861 return false
6862 }
6863 batch[n] = gp
6864
6865 if randomizeScheduler {
6866 for i := uint32(1); i <= n; i++ {
6867 j := cheaprandn(i + 1)
6868 batch[i], batch[j] = batch[j], batch[i]
6869 }
6870 }
6871
6872
6873 for i := uint32(0); i < n; i++ {
6874 batch[i].schedlink.set(batch[i+1])
6875 }
6876 var q gQueue
6877 q.head.set(batch[0])
6878 q.tail.set(batch[n])
6879
6880
6881 lock(&sched.lock)
6882 globrunqputbatch(&q, int32(n+1))
6883 unlock(&sched.lock)
6884 return true
6885 }
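
// Worked numbers for the slow path, assuming the local queue is full and
// len(pp.runq) == 256 (the head/tail values are hypothetical):
//
//	h = 0, t = 256, so n = (t-h)/2 = 128
//	batch holds runq[0..127] plus gp: 129 G's in total
//	CasRel advances runqhead from 0 to 128, freeing half of the local queue
//	globrunqputbatch(&q, 129) then links all 129 G's into the global queue
//	under a single sched.lock acquisition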
6886
6887 // runqputbatch tries to put all the G's on q on the local runnable queue.
6888 // If the queue is full, they are put on the global queue; in that case
6889 // this will temporarily acquire the scheduler lock.
6890 // Executed only by the owner P.
6891 func runqputbatch(pp *p, q *gQueue, qsize int) {
6892 h := atomic.LoadAcq(&pp.runqhead)
6893 t := pp.runqtail
6894 n := uint32(0)
6895 for !q.empty() && t-h < uint32(len(pp.runq)) {
6896 gp := q.pop()
6897 pp.runq[t%uint32(len(pp.runq))].set(gp)
6898 t++
6899 n++
6900 }
6901 qsize -= int(n)
6902
6903 if randomizeScheduler {
6904 off := func(o uint32) uint32 {
6905 return (pp.runqtail + o) % uint32(len(pp.runq))
6906 }
6907 for i := uint32(1); i < n; i++ {
6908 j := cheaprandn(i + 1)
6909 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6910 }
6911 }
6912
6913 atomic.StoreRel(&pp.runqtail, t)
6914 if !q.empty() {
6915 lock(&sched.lock)
6916 globrunqputbatch(q, int32(qsize))
6917 unlock(&sched.lock)
6918 }
6919 }
6920
6921 // runqget gets a g from the local runnable queue.
6922 // If inheritTime is true, gp should inherit the remaining time in the
6923 // current time slice. Otherwise, it should start a new time slice.
6924 // Executed only by the owner P.
6925 func runqget(pp *p) (gp *g, inheritTime bool) {
6926
6927 next := pp.runnext
6928 // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
6929 // because other Ps can CAS this slot to 0, but not to non-zero.
6930 // Hence, there's no need to retry this CAS if it fails.
6931 if next != 0 && pp.runnext.cas(next, 0) {
6932 return next.ptr(), true
6933 }
6934
6935 for {
6936 h := atomic.LoadAcq(&pp.runqhead)
6937 t := pp.runqtail
6938 if t == h {
6939 return nil, false
6940 }
6941 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6942 if atomic.CasRel(&pp.runqhead, h, h+1) {
6943 return gp, false
6944 }
6945 }
6946 }
6947
6948 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6949 // Executed only by the owner P.
6950 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6951 oldNext := pp.runnext
6952 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6953 drainQ.pushBack(oldNext.ptr())
6954 n++
6955 }
6956
6957 retry:
6958 h := atomic.LoadAcq(&pp.runqhead)
6959 t := pp.runqtail
6960 qn := t - h
6961 if qn == 0 {
6962 return
6963 }
6964 if qn > uint32(len(pp.runq)) {
6965 goto retry
6966 }
6967
6968 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6969 goto retry
6970 }
6971
6972
6973
6974 // Advance the head pointer before copying the G's out (rather than after):
6975 // runqdrain and runqsteal can run in parallel, so we must take full
6976 // ownership of these slots before touching gp.schedlink. Once runqhead is
6977 // advanced, other P's can no longer observe or steal these entries, and
6978 // only then is it safe to link the G's into drainQ.
6979 for i := uint32(0); i < qn; i++ {
6980 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6981 drainQ.pushBack(gp)
6982 n++
6983 }
6984 return
6985 }
6986
6987 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
6988 // batch is a ring buffer starting at batchHead.
6989 // Returns the number of grabbed goroutines.
6990 // Can be executed by any P.
6991 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6992 for {
6993 h := atomic.LoadAcq(&pp.runqhead)
6994 t := atomic.LoadAcq(&pp.runqtail)
6995 n := t - h
6996 n = n - n/2
6997 if n == 0 {
6998 if stealRunNextG {
6999
7000 if next := pp.runnext; next != 0 {
7001 if pp.status == _Prunning {
7002 // Sleep to ensure that pp isn't about to run the g
7003 // we are about to steal.
7004 // The important use case here is when the g running
7005 // on pp ready()s another g and then almost
7006 // immediately blocks. Instead of stealing runnext
7007 // in this window, back off to give pp a chance to
7008 // schedule runnext. This will avoid thrashing gs
7009 // between different Ps.
7010 // A sync chan send/recv takes ~50ns as of time of
7011 // writing, so 3us gives ~50x overshoot.
7012 if !osHasLowResTimer {
7013 usleep(3)
7014 } else {
7015 // On some platforms system timer granularity is
7016 // 1-15ms, which is way too much for this
7017 // optimization. So just yield.
7018 osyield()
7019 }
7020 }
7021 if !pp.runnext.cas(next, 0) {
7022 continue
7023 }
7024 batch[batchHead%uint32(len(batch))] = next
7025 return 1
7026 }
7027 }
7028 return 0
7029 }
7030 if n > uint32(len(pp.runq)/2) {
7031 continue
7032 }
7033 for i := uint32(0); i < n; i++ {
7034 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7035 batch[(batchHead+i)%uint32(len(batch))] = g
7036 }
7037 if atomic.CasRel(&pp.runqhead, h, h+n) {
7038 return n
7039 }
7040 }
7041 }
7042
7043 // runqsteal steals half of the elements from the local runnable queue of p2
7044 // and puts them onto the local runnable queue of pp.
7045 // Returns one of the stolen elements (or nil if failed).
7046 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7047 t := pp.runqtail
7048 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7049 if n == 0 {
7050 return nil
7051 }
7052 n--
7053 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7054 if n == 0 {
7055 return gp
7056 }
7057 h := atomic.LoadAcq(&pp.runqhead)
7058 if t-h+n >= uint32(len(pp.runq)) {
7059 throw("runqsteal: runq overflow")
7060 }
7061 atomic.StoreRel(&pp.runqtail, t+n)
7062 return gp
7063 }
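
// An illustrative steal, with hypothetical queue positions:
//
//	victim p2: runqhead = 10, runqtail = 22  =>  12 runnable G's
//	runqgrab copies n = 12 - 12/2 = 6 of them into pp.runq starting at
//	pp.runqtail and advances p2's runqhead by 6
//	runqsteal keeps the last grabbed G to return (n becomes 5) and, since
//	n != 0, publishes the remaining 5 by advancing pp.runqtail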
7064
7065 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only
7066 // be on one gQueue or gList at a time.
7067 type gQueue struct {
7068 head guintptr
7069 tail guintptr
7070 }
7071
7072
7073 func (q *gQueue) empty() bool {
7074 return q.head == 0
7075 }
7076
7077
7078 func (q *gQueue) push(gp *g) {
7079 gp.schedlink = q.head
7080 q.head.set(gp)
7081 if q.tail == 0 {
7082 q.tail.set(gp)
7083 }
7084 }
7085
7086
7087 func (q *gQueue) pushBack(gp *g) {
7088 gp.schedlink = 0
7089 if q.tail != 0 {
7090 q.tail.ptr().schedlink.set(gp)
7091 } else {
7092 q.head.set(gp)
7093 }
7094 q.tail.set(gp)
7095 }
7096
7097 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
7098 // not be used.
7099 func (q *gQueue) pushBackAll(q2 gQueue) {
7100 if q2.tail == 0 {
7101 return
7102 }
7103 q2.tail.ptr().schedlink = 0
7104 if q.tail != 0 {
7105 q.tail.ptr().schedlink = q2.head
7106 } else {
7107 q.head = q2.head
7108 }
7109 q.tail = q2.tail
7110 }
7111
7112
7113
7114 func (q *gQueue) pop() *g {
7115 gp := q.head.ptr()
7116 if gp != nil {
7117 q.head = gp.schedlink
7118 if q.head == 0 {
7119 q.tail = 0
7120 }
7121 }
7122 return gp
7123 }
7124
7125
7126 func (q *gQueue) popList() gList {
7127 stack := gList{q.head}
7128 *q = gQueue{}
7129 return stack
7130 }
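
// gQueue is an intrusive FIFO threaded through g.schedlink, so no allocation
// is needed to queue a G. A short usage trace (g0, g1, g2 are hypothetical):
//
//	var q gQueue
//	q.pushBack(g1) // head: g1, tail: g1
//	q.pushBack(g2) // head: g1, tail: g2, g1.schedlink -> g2
//	q.push(g0)     // head: g0, tail: g2, g0.schedlink -> g1
//	q.pop()        // returns g0; head: g1
//	q.popList()    // returns gList{g1 -> g2}; q is now empty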
7131
7132 // A gList is a list of Gs linked through g.schedlink. A G can only be
7133 // on one gQueue or gList at a time.
7134 type gList struct {
7135 head guintptr
7136 }
7137
7138
7139 func (l *gList) empty() bool {
7140 return l.head == 0
7141 }
7142
7143
7144 func (l *gList) push(gp *g) {
7145 gp.schedlink = l.head
7146 l.head.set(gp)
7147 }
7148
7149
7150 func (l *gList) pushAll(q gQueue) {
7151 if !q.empty() {
7152 q.tail.ptr().schedlink = l.head
7153 l.head = q.head
7154 }
7155 }
7156
7157
7158 func (l *gList) pop() *g {
7159 gp := l.head.ptr()
7160 if gp != nil {
7161 l.head = gp.schedlink
7162 }
7163 return gp
7164 }
7165 // setMaxThreads updates sched.maxmcount, the limit on the number of OS
7166 // threads the runtime may create, and returns the previous limit.
7167 func setMaxThreads(in int) (out int) {
7168 lock(&sched.lock)
7169 out = int(sched.maxmcount)
7170 if in > 0x7fffffff {
7171 sched.maxmcount = 0x7fffffff
7172 } else {
7173 sched.maxmcount = int32(in)
7174 }
7175 checkmcount()
7176 unlock(&sched.lock)
7177 return
7178 }
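
// setMaxThreads backs runtime/debug.SetMaxThreads. A minimal sketch of how
// user code reaches it (the 20000 limit is an arbitrary example value):
//
//	import "runtime/debug"
//
//	prev := debug.SetMaxThreads(20000) // raise the OS thread limit
//	defer debug.SetMaxThreads(prev)    // restore the previous limit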
7179
7180
7181
7182
7183
7184
7185 // procPin pins the current goroutine to its P by incrementing m.locks,
7186 // which disables preemption, and returns the id of that P. Callers must
7187 // pair it with procUnpin.
7188 //
7189 // procPin should be an internal detail, but widely used packages access
7190 // it using linkname, so do not remove it or change its type signature.
7191 // See go.dev/issue/67401.
7192 func procPin() int {
7193 gp := getg()
7194 mp := gp.m
7195
7196 mp.locks++
7197 return int(mp.p.ptr().id)
7198 }
7199
7200
7201
7202
7203
7204
7205
7206 // procUnpin undoes the effect of a prior procPin, decrementing m.locks
7207 // and re-enabling preemption of the current goroutine.
7208 //
7209 // procUnpin should be an internal detail, but widely used packages access
7210 // it using linkname, so do not remove it or change its type signature.
7211 // See go.dev/issue/67401.
7212 func procUnpin() {
7213 gp := getg()
7214 gp.m.locks--
7215 }
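
// A sketch of the intended pin/unpin pattern, as used for per-P sharded data
// structures (table is a hypothetical slice with one shard per P):
//
//	pid := procPin()    // preemption disabled; pid is stable while pinned
//	table[pid].count++  // touch only this P's shard
//	procUnpin()         // preemption re-enabled; pid may now go stale
//
// The P id must not be used after procUnpin, since the goroutine can then be
// rescheduled onto a different P.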
7216
7217
7218
7219 func sync_runtime_procPin() int {
7220 return procPin()
7221 }
7222
7223
7224
7225 func sync_runtime_procUnpin() {
7226 procUnpin()
7227 }
7228
7229
7230
7231 func sync_atomic_runtime_procPin() int {
7232 return procPin()
7233 }
7234
7235
7236
7237 func sync_atomic_runtime_procUnpin() {
7238 procUnpin()
7239 }
7240
7241
7242 // internal_sync_runtime_canSpin reports whether active spinning is still
7243 // worthwhile for a mutex slow path at spin iteration i; it is the spinning
7244 // heuristic behind the sync package's mutex implementations.
7245 func internal_sync_runtime_canSpin(i int) bool {
7246 // sync.Mutex is cooperative, so we are conservative with spinning.
7247 // Spin only a few times and only if running on a multicore machine,
7248 // GOMAXPROCS > 1, there is at least one other running P, and the local
7249 // runq is empty. As opposed to runtime mutexes we don't do passive
7250 // spinning here, because there can be work on the global runq or on other Ps.
7251 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7252 return false
7253 }
7254 if p := getg().m.p.ptr(); !runqempty(p) {
7255 return false
7256 }
7257 return true
7258 }
7259
7260 // internal_sync_runtime_doSpin performs one short burst of active spinning
7261 // by calling procyield for active_spin_cnt iterations.
7262 func internal_sync_runtime_doSpin() {
7263 procyield(active_spin_cnt)
7264 }
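
// A hypothetical lock slow path combining the two helpers above (tryAcquire
// is an assumed fast-path attempt, not a real runtime or sync function):
//
//	for i := 0; internal_sync_runtime_canSpin(i); i++ {
//		if tryAcquire() {
//			return
//		}
//		internal_sync_runtime_doSpin() // active_spin_cnt PAUSE-style iterations
//	}
//	// spinning no longer pays off: fall back to blocking (e.g. a semaphore)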
7265
7266
7267
7268
7269
7270
7271
7272
7273
7274
7275
7276
7277
7278
7279
7280 func sync_runtime_canSpin(i int) bool {
7281 return internal_sync_runtime_canSpin(i)
7282 }
7283
7284
7285
7286
7287
7288
7289
7290
7291
7292
7293
7294
7295
7296 func sync_runtime_doSpin() {
7297 internal_sync_runtime_doSpin()
7298 }
7299
7300 var stealOrder randomOrder
7301
7302 // randomOrder/randomEnum are helper types for randomized work stealing.
7303 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
7304 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7305 // are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.
7306 type randomOrder struct {
7307 count uint32
7308 coprimes []uint32
7309 }
7310
7311 type randomEnum struct {
7312 i uint32
7313 count uint32
7314 pos uint32
7315 inc uint32
7316 }
7317
7318 func (ord *randomOrder) reset(count uint32) {
7319 ord.count = count
7320 ord.coprimes = ord.coprimes[:0]
7321 for i := uint32(1); i <= count; i++ {
7322 if gcd(i, count) == 1 {
7323 ord.coprimes = append(ord.coprimes, i)
7324 }
7325 }
7326 }
7327
7328 func (ord *randomOrder) start(i uint32) randomEnum {
7329 return randomEnum{
7330 count: ord.count,
7331 pos: i % ord.count,
7332 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7333 }
7334 }
7335
7336 func (enum *randomEnum) done() bool {
7337 return enum.i == enum.count
7338 }
7339
7340 func (enum *randomEnum) next() {
7341 enum.i++
7342 enum.pos = (enum.pos + enum.inc) % enum.count
7343 }
7344
7345 func (enum *randomEnum) position() uint32 {
7346 return enum.pos
7347 }
7348
7349 func gcd(a, b uint32) uint32 {
7350 for b != 0 {
7351 a, b = b, a%b
7352 }
7353 return a
7354 }
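
// A worked enumeration, for a hypothetical count of 6 Ps. reset(6) keeps only
// increments coprime with 6, so every offset is visited exactly once:
//
//	reset(6)  // coprimes = [1 5]
//	start(3)  // pos = 3, inc = coprimes[3/6%2] = 1  -> 3 4 5 0 1 2
//	start(10) // pos = 4, inc = coprimes[10/6%2] = 5 -> 4 3 2 1 0 5
//
// Different starting values give different orders, but each order is a full
// permutation of the P ids, which is what work stealing relies on.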
7355
7356 // An initTask represents the set of initializations that need to be done
7357 // for a package.
7358 type initTask struct {
7359 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7360 nfns uint32
7361 // followed by nfns pcs, uintptr sized, one per init function to run
7362 }
7363
7364 // inittrace stores statistics for init functions; the counters are
7365 // updated by the allocator and by newproc while active is true.
7366 var inittrace tracestat
7367
7368 type tracestat struct {
7369 active bool
7370 id uint64
7371 allocs uint64
7372 bytes uint64
7373 }
7374
7375 func doInit(ts []*initTask) {
7376 for _, t := range ts {
7377 doInit1(t)
7378 }
7379 }
7380
7381 func doInit1(t *initTask) {
7382 switch t.state {
7383 case 2:
7384 return
7385 case 1:
7386 throw("recursive call during initialization - linker skew")
7387 default:
7388 t.state = 1
7389
7390 var (
7391 start int64
7392 before tracestat
7393 )
7394
7395 if inittrace.active {
7396 start = nanotime()
7397
7398 before = inittrace
7399 }
7400
7401 if t.nfns == 0 {
7402
7403 throw("inittask with no functions")
7404 }
7405
7406 firstFunc := add(unsafe.Pointer(t), 8)
7407 for i := uint32(0); i < t.nfns; i++ {
7408 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7409 f := *(*func())(unsafe.Pointer(&p))
7410 f()
7411 }
7412
7413 if inittrace.active {
7414 end := nanotime()
7415
7416 after := inittrace
7417
7418 f := *(*func())(unsafe.Pointer(&firstFunc))
7419 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7420
7421 var sbuf [24]byte
7422 print("init ", pkg, " @")
7423 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7424 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7425 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7426 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7427 print("\n")
7428 }
7429
7430 t.state = 2
7431 }
7432 }
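
// The memory layout that doInit1 walks, sketched for a hypothetical package
// with three init functions (offsets assume a 64-bit PtrSize of 8):
//
//	offset 0:  state uint32   // 0 -> 1 while running -> 2 when done
//	offset 4:  nfns  uint32   // 3
//	offset 8:  pc of init function 0
//	offset 16: pc of init function 1
//	offset 24: pc of init function 2
//
// firstFunc := add(unsafe.Pointer(t), 8) points at the first pc; each slot's
// address is converted to a func() value and called in order.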
7433