Source file
src/runtime/mgcmark.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1-2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span
	// root at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// potentially increase the amount of work done by each root
	// marking job, which may slow down mutators.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}

		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the
	// time the mark phase started, i.e. markArenas) for in-use spans
	// which have specials. Break up the work into arenas, and
	// further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe
	// because allArenas is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.stackRoots = allGsSnapshot()
	work.nStackRoots = len(work.stackRoots)

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)

	// Calculate base indexes of each root type.
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}
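
// To make the layout computed above concrete: markroot job indices are
// handed out contiguously, so the job space looks like
//
//	[0, fixedRootCount)        fixed roots (finalizers, free G stacks)
//	[baseData, baseBSS)        data segment blocks
//	[baseBSS, baseSpans)       BSS segment blocks
//	[baseSpans, baseStacks)    span (specials) shards
//	[baseStacks, baseEnd)      goroutine stacks
//
// markroot below dispatches on exactly these ranges.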

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have scanned.
	// Since we don't care about newer Gs (see comment in
	// gcMarkRootPrepare), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work credit produced by the operation.
// If flushBgCredit is true, then that credit is also flushed
// to the background credit pool.
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	var workDone int64
	var workCounter *atomic.Int64
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// the rest is scanning goroutine stacks
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// remember when we've first observed the G blocked
		// needed only to output in traceback
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan)
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// (1) scan the stacks we can and ask running
			// goroutines to scan themselves; (2) wait.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	if workCounter != nil && workDone != 0 {
		workCounter.Add(workDone)
		if flushBgCredit {
			gcFlushBgCredit(workDone)
		}
	}
	return workDone
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
	return int64(n)
}
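
// A worked example of the shard arithmetic above: with rootBlockBytes =
// 256 KiB, a 1 MiB data segment is covered by four shards. Shard 2 scans
// [b0+512KiB, b0+768KiB), and a shard whose block would extend past
// b0+n0 is clipped to the segment end.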

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// Objects with weak handles have only one invariant related
	// to this function: weak handle specials (which are not in the
	// garbage collected heap) are roots. In practice, this means
	// the handle field must be scanned.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				switch sp.kind {
				case _KindSpecialFinalizer:
					// don't mark finalized object, but scan it so we
					// retain everything it points to.
					spf := (*specialfinalizer)(unsafe.Pointer(sp))
					// A finalizer can be set for an inner byte of an object, find object beginning.
					p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

					// Mark everything that can be reached from
					// the object (but *not* the object itself or
					// we'll never collect it).
					if !s.spanclass.noscan() {
						scanobject(p, gcw)
					}

					// The special itself is a root.
					scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				case _KindSpecialWeakHandle:
					// The special itself is a root.
					spw := (*specialWeakHandle)(unsafe.Pointer(sp))
					scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				case _KindSpecialCleanup:
					spc := (*specialCleanup)(unsafe.Pointer(sp))
					// The special itself is a root.
					scanblock(uintptr(unsafe.Pointer(&spc.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				}
			}
			unlock(&s.speciallock)
		}
	}
}
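
// A sketch of the shard-to-page mapping above, assuming pagesPerArena =
// 8192 (the value on most 64-bit platforms): each arena contributes
// 8192/512 = 16 span-root shards, so shard 17 maps to markArenas[1] at
// arena page 17*512 % 8192 = 512.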

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	if gp := getg(); gp.syncGroup != nil {
		// Disassociate the G from its synctest bubble while allocating.
		// This is necessary because the runtime locks the bubble during
		// the GC mark phase, and may deadlock if this code called gopark.
		sg := gp.syncGroup
		gp.syncGroup = nil
		defer func() {
			gp.syncGroup = sg
		}()
	}

	// This extremely verbose boolean indicates whether we've
	// entered mark assist from the perspective of the tracer.
	//
	// In the tracer, this is just before we call gcAssistAlloc1
	// *regardless* of whether tracing is enabled. This is because
	// the tracer allows for tracing to begin (and advance
	// generations) in the middle of a GC mark phase, so we need to
	// record some state so that the tracer can pick it up to ensure
	// a consistent trace result.
	//
	// TODO(mknyszek): Hide the details of inMarkAssist in tracer
	// functions and simplify all the state tracking. This is a lot.
	enteredMarkAssistForTracing := false
retry:
	if gcCPULimiter.limiting() {
		// If the CPU limiter is enabled, intentionally don't
		// assist to reduce the amount of CPU time spent in the GC.
		if enteredMarkAssistForTracing {
			trace := traceAcquire()
			if trace.ok() {
				trace.GCMarkAssistDone()
				// Set this *after* we trace the end to make sure
				// that we emit an in-progress event if this is
				// the first event for the goroutine in the trace
				// or trace generation. Also, do this between
				// acquire/release because this is part of the
				// goroutine's trace state, and it must be atomic
				// with respect to the tracer.
				gp.inMarkAssist = false
				traceRelease(trace)
			} else {
				// This state is tracked even if tracing isn't enabled.
				// It's only used by the new tracer.
				// See the comment on enteredMarkAssistForTracing.
				gp.inMarkAssist = false
			}
		}
		return
	}

	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the credit below
	// zero if two mutators steal at the same time. This will
	// just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := gcController.bgScanCredit.Load()
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		gcController.bgScanCredit.Add(-stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if enteredMarkAssistForTracing {
				trace := traceAcquire()
				if trace.ok() {
					trace.GCMarkAssistDone()
					// Set this *after* we trace the end to make sure
					// that we emit an in-progress event if this is
					// the first event for the goroutine in the trace
					// or trace generation. Also, do this between
					// acquire/release because this is part of the
					// goroutine's trace state, and it must be atomic
					// with respect to the tracer.
					gp.inMarkAssist = false
					traceRelease(trace)
				} else {
					// This state is tracked even if tracing isn't enabled.
					// It's only used by the new tracer.
					// See the comment on enteredMarkAssistForTracing.
					gp.inMarkAssist = false
				}
			}
			return
		}
	}
	if !enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistStart()
			// Set this *after* we trace the start, otherwise we may
			// emit an in-progress event for an assist we're about to
			// start.
			gp.inMarkAssist = true
			traceRelease(trace)
		} else {
			gp.inMarkAssist = true
		}
		// In the new tracer, set enteredMarkAssistForTracing if we
		// ever pass this point, because we must manage inMarkAssist
		// correctly.
		//
		// See the comment on enteredMarkAssistForTracing above.
		enteredMarkAssistForTracing = true
	}

	// Perform assist work.
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistDone()
			// Set this *after* we trace the end to make sure
			// that we emit an in-progress event if this is
			// the first event for the goroutine in the trace
			// or trace generation. Also, do this between
			// acquire/release because this is part of the
			// goroutine's trace state, and it must be atomic
			// with respect to the tracer.
			gp.inMarkAssist = false
			traceRelease(trace)
		} else {
			// This state is tracked even if tracing isn't enabled.
			// It's only used by the new tracer.
			// See the comment on enteredMarkAssistForTracing.
			gp.inMarkAssist = false
		}
	}
}
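
// A worked example of the assist arithmetic above: if assistWorkPerByte
// is 0.5 units of scan work per byte and a goroutine's gcAssistBytes is
// -4096, its debt converts to 0.5 * 4096 = 2048 units of scan work
// (raised to gcOverAssistWork if smaller). Background credit, when
// available, is stolen first at the inverse rate assistBytesPerWork;
// only the remainder is worked off by gcAssistAlloc1.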

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	//
	// Limiter event tracking might be disabled if we end up here
	// while on a mark worker.
	startTime := nanotime()
	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking)

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	now := nanotime()
	duration := now - startTime
	pp := gp.m.p.ptr()
	pp.gcAssistTime += duration
	if trackLimiterEvent {
		pp.limiterEvent.stop(limiterEventMarkAssist, now)
	}
	if pp.gcAssistTime > gcAssistTimeSlack {
		gcController.assistTime.Add(pp.gcAssistTime)
		gcCPULimiter.update(now)
		pp.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if gcController.bgScanCredit.Load() > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		gcController.bgScanCredit.Add(scanWork)
		return
	}

	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		gcController.bgScanCredit.Add(scanWork)
	}
	unlock(&work.assistQueue.lock)
}
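
// For instance, flushing 1000 units of scan work when assistBytesPerWork
// is 2 yields 2000 bytes of credit: queued assists are satisfied in FIFO
// order until the bytes run out, and only the remainder is converted
// back into scan work and banked in gcController.bgScanCredit.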

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background credit produced
// by this function should be flushed by its caller. scanstack itself can't
// safely flush because it may result in trying to wake up a goroutine that
// was just scanned, resulting in a self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// scannedSize is the amount of work we'll be reporting.
	//
	// It is less than the allocated size (which is hi-lo).
	var sp uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp // If in a system call, this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
	} else {
		sp = gp.sched.sp
	}
	scannedSize := gp.stack.hi - sp

	// Keep statistics for initial stack size calculation.
	// Note that this accumulates the scanned size, not the allocated size.
	p := getg().m.p.ptr()
	p.scannedStackSize += uint64(scannedSize)
	p.scannedStacks++

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that's not otherwise visible on the stack.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		scanframeworker(&u.frame, &state, gcw)
	}

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack allocated closure.
			// See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain defer records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of size", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		ptrBytes, gcData := r.gcdata()
		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, ptrBytes, gcData, gcw, &state)
		} else {
			scanblock(b, ptrBytes, gcData, gcw, &state)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil {
					continue
				}
				println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(scannedSize)
}

// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if n := frame.argBytes(); n != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, n, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := frame.getStackMap(false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account
// mark time in the profiler.
func gcDrainMarkWorkerIdle(gcw *gcWork) {
	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account
// mark time in the profiler.
func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
	flags := gcDrainFlushBgCredit
	if untilPreempt {
		flags |= gcDrainUntilPreempt
	}
	gcDrain(gcw, flags)
}

// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account
// mark time in the profiler.
func gcDrainMarkWorkerFractional(gcw *gcWork) {
	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW or forEachP.
//
// Disabling write barriers is acceptable here because we do not
// discover pointers while draining work buffers.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.enabled {
		throw("gcDrain phase incorrect")
	}

	// N.B. We must be running in a non-preemptible context, so it's
	// safe to hold a reference to our P here.
	gp := getg().m.curg
	pp := gp.m.p.ptr()
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible, if someone wants to STW, or if
		// someone is calling forEachP.
		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	//
	// Stop if we're preemptible, if someone wants to STW, or if
	// someone is calling forEachP.
	//
	// TODO(mknyszek): Consider always checking gp.preempt instead
	// of having the preempt flag, and making an exception for certain
	// mark workers.
	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}
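
// On the self-preemption cadence above: idle and fractional workers
// re-run their check function roughly every drainCheckThreshold
// (100,000) units of scan work, which is on the order of 100 µs at the
// assumed 1 MB/ms scan rate.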

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.enabled {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	// In addition to backing out because of a preemption, back out
	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					workFlushed += markroot(gcw, job, false)
					continue
				}
			}
			// No heap or root jobs.
			break
		}

		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.
	return workFlushed + gcw.heapScanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}
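
// The ptrmask consumed above encodes one bit per pointer-sized word,
// eight words per mask byte. For example, a mask byte of 0b00000101
// means words 0 and 2 of that group hold pointers; a zero byte lets the
// loop skip a whole group of 8*PtrSize bytes at once.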

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanobject of a noscan object")
	}

	var tp typePointers
	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		n = min(n, maxObletBytes)
		tp = s.typePointersOfUnchecked(s.base())
		tp = tp.fastForward(b-tp.addr, b+n)
	} else {
		tp = s.typePointersOfUnchecked(b)
	}

	var scanSize uintptr
	for {
		var addr uintptr
		if tp, addr = tp.nextFast(); addr == 0 {
			if tp, addr = tp.next(b + n); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork. TODO: is there a better metric,
		// now that we can skip scalar portions pretty efficiently?
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
				greyobject(obj, b, addr-b, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
}
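
// Oblet math, for illustration: with maxObletBytes = 128 KiB, scanning a
// 1 MiB object from its base enqueues seven additional oblets at
// base+128KiB through base+896KiB, while the current call scans only the
// first 128 KiB (n is clamped to maxObletBytes).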

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our paranoia.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead or not.
			//
			// Hence, if val points to a stack object,
			// mark it conservatively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// greyobject shades the object at obj, which was found at *(base+off).
// It marks the object and, unless the object is in a noscan span,
// enqueues it on gcw for scanning. In checkmark mode it verifies and
// sets the checkmark bit instead.
//
// obj, span, and objIndex must describe the same object.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)
	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	if gcphase == _GCmarktermination {
		// Check this here instead of on the hot path.
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(span.elemsize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}