Source file
src/runtime/stack.go

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds fixedStack0 up to a power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of
	// NOSPLIT functions can use.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of
	// NOSPLIT calls plus stackSystem bytes for the OS plus one
	// small frame below the check.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
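
// A worked example of the power-of-two rounding above, assuming the
// Windows values (stackSystem = 4096, so fixedStack0 = 6144): subtracting
// one gives 6143 (0b1_0111_1111_1111), the shift-and-OR steps smear the
// top bit into every lower position, yielding 8191 (0b1_1111_1111_1111),
// and adding one produces fixedStack = 8192. Where stackSystem is 0,
// fixedStack0 is 2048, already a power of two, and the rounding leaves
// it unchanged.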

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stacks with poison during copy
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/fixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}
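
// To make the orders concrete: assuming _NumStackOrders is 4 and
// fixedStack is 2048, as on linux/amd64, the pool holds free lists for
// 2 KiB, 4 KiB, 8 KiB, and 16 KiB stacks (fixedStack << order for
// orders 0 through 3). Larger stacks bypass stackpool and go through
// stackLarge or the heap directly.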

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
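
// For example, a 64 KiB stack is 8 pages, assuming the runtime's usual
// 8 KiB page size, and stacklog2(8) == 3, so a freed span of that size
// lands in stackLarge.free[3].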

// stackpoolalloc allocates a stack from the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// No free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// All stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// stackpoolfree adds stack x to the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

// stackcacherelease returns stacks from c's local cache to the global
// pool until the cache holds at most half of its capacity.
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

// stackcache_clear returns all of c's cached stacks to the global pool.
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
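
// The refill/release thresholds give the per-P cache hysteresis: assuming
// _StackCacheSize is 32 KiB, a refill for order 0 (2 KiB stacks) pulls
// 8 stacks (16 KiB) from the global pool in one lock acquisition, and a
// release is triggered only once the cache again exceeds 32 KiB, draining
// it back down to 16 KiB. Transfers between the local caches and the
// global pool are therefore batched rather than per-allocation.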

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
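
// For example (hypothetical request sizes): a 4 KiB stack on a platform
// where fixedStack is 2 KiB halves n2 once, giving order 1, so the stack
// comes from stackpool[1] or the per-P cache for that order. A 32 KiB
// request fails the small-stack test above and instead takes the
// stackLarge path with npage = 4 and log2npage = 2, assuming the
// runtime's 8 KiB page size.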

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n)
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span to
			// the heap because it could be reused as a heap span,
			// and this state change would race with GC. Add it to
			// the large stack cache instead; freeStackSpans returns
			// it to the heap once GC is done.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// An adjustinfo describes a stack move: every pointer that points into
// the old stack range must be rebased into the new stack by adding delta.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
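
// As a concrete (hypothetical) example: if the old stack spans
// [0xc000100000, 0xc000102000) and the new one [0xc000180000, 0xc000182000),
// then delta is 0x80000. A slot holding 0xc000101ff0 is rewritten to
// 0xc000181ff0, while a slot holding an address outside the old range is
// left untouched.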

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-performance walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
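
// For instance, ptrbit(10) reads byte 10/8 = 1 of bytedata and tests
// bit 10%8 = 2: a result of 1 means the 11th pointer-sized word of the
// region described by the bitmap holds a pointer.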

// adjustpointers rebases, into the new stack, every pointer-sized word in
// the region starting at scanp whose bit is set in the pointer bitmap bv.
// f is the frame's function, used only for diagnostics.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)

	// If this region lies below adjinfo.sghi, it may contain channel
	// receive slots that a concurrent channel operation can write to
	// (see copystack and syncadjustsudogs). Use CAS there so that a
	// racing write is never clobbered: if the slot changes underfoot,
	// re-read it and start over.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in the current
		// frame. On ARM64, it is the frame pointer of the caller's caller,
		// saved by the caller in its own frame.
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// The object hasn't been allocated yet and so it
				// doesn't exist; there is nothing to adjust.
				continue
			}
			ptrBytes, gcData := obj.gcdata()
			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
		}
	}
}
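
// The bitmap walk above indexes gcData by pointer-sized word: for a byte
// offset i into the object, i/goarch.PtrSize is the word index, so the
// relevant bitmap byte is word/8 = i/(8*goarch.PtrSize) and the bit within
// it is word&7. For example, on a 64-bit platform the word at byte offset
// 72 is word 9, which is byte 1, bit 1 of the bitmap.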

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word below the SP,
		// which is not copied or adjusted in any frame. Do it
		// explicitly here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

// fillstack fills every byte of stack stk with b; used for stack poisoning.
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// findsghi returns the address just past the highest sudog.elem slot on
// gp's stack stk, or 0 if none of gp's sudogs point into stk.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// The channel locks are taken with lockRankHchanLeaf, a
			// leaf rank that lets us acquire several channel locks
			// in a row here without violating the lock ordering
			// rules, as long as nothing else is locked after them.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// copystack copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Tell the GC pacer that the amount of scannable stack is changing
	// by the difference between the new and old sizes.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
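
// To make the bookkeeping concrete with hypothetical numbers: growing an
// 8 KiB stack [0xc000100000, 0xc000102000) to 16 KiB with 3 KiB in use
// gives delta = new.hi - old.hi, the 3 KiB in use are copied so that they
// end at new.hi, and gp.sched.sp becomes new.hi - 3 KiB. The goroutine's
// SP therefore keeps the same distance from the top of the stack, while
// stackguard0 moves down to new.lo + stackGuard.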

// round2 rounds x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
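
// For example, round2(3) == 4 and round2(4096) == 4096: the result is the
// smallest power of two that is >= x. gcComputeStartingStackSize below
// relies on this to keep startingStackSize a power of two.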

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp.preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched)
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy
	// since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
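
// A hypothetical growth step: a goroutine on an 8 KiB stack with 4 KiB in
// use faults into morestack while entering a function whose frame needs up
// to 16 KiB (funcMaxSPDelta). Doubling gives newsize = 16 KiB, but
// 16 KiB - 4 KiB is still less than 16 KiB + stackGuard (928 bytes on
// linux/amd64, where stackNosplit is 800 and abi.StackSmall is 128), so
// the loop doubles again and the goroutine moves to a 32 KiB stack.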

// nilfunc intentionally faults; it stands in for a nil function value
// in gostartcallfn below.
func nilfunc() {
	*(*uint8)(nil) = 0
}

// gostartcallfn adjusts gobuf as if it executed a call to fv and then
// stopped before the first instruction in fv. If fv is nil, the goroutine
// is set up to call nilfunc, which faults.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	if gp.asyncSafePoint {
		return false
	}
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and the moment
	// it sets gp.activeStackChans.
	if gp.parkingOnChan.Load() {
		return false
	}
	// We also can't copy the stack while tracing is enabled, and
	// gp is in _Gwaiting solely to make itself available to the GC.
	// In this case, the G is actually executing on the system stack,
	// and the execution tracer may want to take a stack trace of the
	// G's stack.
	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() {
		return false
	}
	return true
}

// shrinkstack shrinks gp's stack if it has grown much larger than the
// goroutine currently needs. gp must be stopped at a safe point and its
// stack must be owned by the caller, either via the _Gscan bit or by
// running on the system stack on behalf of our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker comment).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object
// in a stack frame. This record must match the layout emitted by the
// compiler's liveness pass.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	ptrBytes  int32
	gcdataoff uint32 // offset to gc data from moduledata.rodata
}

// gcdata returns the number of bytes that contain pointers, and
// a ptr/nonptr bitmask covering those bytes.
func (r *stackObjectRecord) gcdata() (uintptr, *byte) {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// Stack object records are always generated into a module's gofunc
	// region, so mod should have been found above; if it wasn't, the nil
	// dereference below will point at the problem.
	res := mod.rodata + uintptr(r.gcdataoff)
	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res))
}

// morestackc is reached (from assembly) when code that must run on the
// system stack is entered on a user goroutine stack; it only throws.
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// Track the average stack space scanned per goroutine during the last
	// GC, add the guard space, round up to a power of two, and use that,
	// clamped to [fixedStack, maxstacksize], as the starting stack size
	// for new goroutines.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset the accumulators for the next GC cycle.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	// stackGuard is added so that a goroutine using the average amount of
	// stack does not immediately trigger a growth.
	avg := scannedStackSize/scannedStacks + stackGuard
	// Note: maxstacksize fits in 30 bits, so avg also does after clamping.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}

	startingStackSize = uint32(round2(int32(avg)))
}
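
// A worked (hypothetical) example: if the last GC scanned 300 goroutine
// stacks totalling 1 MiB, the per-goroutine average is about 3495 bytes;
// adding a 928-byte stackGuard (the linux/amd64 value) gives 4423, and
// round2 raises that to 8192, so new goroutines would start on 8 KiB
// stacks for the next cycle.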