// Source file: src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
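
// Goroutine stacks start small and grow by copying: every function
// prologue compares the stack pointer against g.stackguard0 and calls
// morestack when it is too low. morestack calls newstack, which
// allocates a stack twice as large, copies the old stack into it
// (copystack), adjusts every pointer that points into the old stack,
// and frees the original. Freed stacks are recycled through per-P
// caches and the global stackpool/stackLarge pools defined below.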

const (
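	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.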
	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
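
	// The minimum size of stack used by Go code.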
	stackMin = 2048
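
	// The minimum stack size to allocate.
	// The hackery below rounds fixedStack0 up to a power of 2:
	// each line ORs in a shifted copy of the high bits, so
	// fixedStack6 has every bit below its highest set bit set and
	// fixedStack6+1 is the next power of two >= fixedStack0.
	// A worked example, assuming windows/amd64 (PtrSize = 8):
	// stackSystem = 512*8 = 4096, so fixedStack0 = 2048+4096 = 6144
	// and fixedStack rounds up to 8192. On linux/amd64, stackSystem
	// is 0 and fixedStack stays at 2048.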
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1
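
	// stackNosplit is the maximum number of bytes that a chain of
	// NOSPLIT functions can use.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.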
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier
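
	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of
	// NOSPLIT calls plus one stackSmall frame, plus stackSystem
	// bytes for the OS.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.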
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)

const (
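	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads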
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches
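
	// check the BP links during traceback.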
	debugCheckBP = false
)
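
// stackPoisonCopy != 0 fills stacks with poison bytes during copystack,
// to help catch stale pointers into the old stack (debug only; see
// fillstack and copystack below).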
var (
	stackPoisonCopy = 0
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1
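
	// The values below can be stored to g.stackguard0 to force the
	// next stack check to fail. They are all larger than any real SP.

	// Goroutine preemption request: stored into g.stackguard0 to cause
	// a split stack check failure so the goroutine enters newstack and
	// is preempted there. Must be greater than any real sp.
	// (-1314 truncated to the pointer size ends in 0x...fade.)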
	stackPreempt = uintptrMask & -1314
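
	// Thread is forking. Stored into g.stackguard0 to cause a split
	// stack check failure; stack growth is not allowed after fork
	// (see newstack).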
	stackFork = uintptrMask & -1234
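
	// Force a stack movement while keeping the stack the same size
	// (see newstack). Used for debugging.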
	stackForceMove = uintptrMask & -275
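
	// stackPoisonMin is the lowest allowed stack poison value.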
	stackPoisonMin = uintptrMask & -4096
)
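
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size:
//
//	order = log_2(size/fixedStack)
//
// There is a free list for each order.
// For example, with fixedStack == 2048: order 0 holds 2 KiB stacks,
// order 1 holds 4 KiB, order 2 holds 8 KiB, and order 3 holds 16 KiB.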
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}
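
// Global pool of large stack spans.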
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}
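
// stacklog2 returns ⌊log_2(n)⌋.
// For example, stacklog2(8) == 3 and stacklog2(9) == 3.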
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
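
// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.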
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
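
// Adds stack x to the free pool. Must be called with
// stackpool[order].item.mu held.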
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
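		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG pointing into a stack span
		// 2) The stack that sudog points into is copied, which frees the span
		// 3) The stack span is reused for another goroutine's stack
		// 4) GC attempts to mark the SudoG referent, which now points into garbage
		//
		// By not freeing, we prevent step 3 until GC is done.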
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
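
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack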
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to set up a hysteresis).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
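
// stackcacherelease returns stacks from c's local cache to the global
// pool until the cache is back down to half of its allowed capacity.
//
//go:systemstack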
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
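
// stackcache_clear returns all of c's cached stacks to the global
// pool, for example when the mcache is being flushed or destroyed.
//
//go:systemstack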
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
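
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack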
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}
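
	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.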
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
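			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.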
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
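
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack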
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could mark
			// free objects as in-use. Add it to the large
			// stack cache instead; freeStackSpans returns
			// it to the heap at the end of the GC cycle.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
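
// Stack frame layout, roughly (the compiler's frame layout code is the
// authoritative reference):
//
// Going down from frame.argp: arguments from the caller; then (on x86)
// the return address; then the caller's saved frame pointer, if frame
// pointers are enabled and argp-varp == 2*PtrSize, at frame.varp.
// Below frame.varp are this frame's locals, then outgoing arguments to
// callees, down to frame.sp. On link-register machines such as arm64,
// the return address instead sits at the bottom of the frame, at
// frame.sp.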
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
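
// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rebases *vpp to be in the new stack.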
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
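
// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.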
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}
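
// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk.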
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
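
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.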
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
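
	// If this frame might contain channel receive slots, use CAS
	// to avoid racing with send operations. A frame below
	// adjinfo.sghi may be written concurrently by a channel
	// operation on another goroutine (see syncadjustsudogs).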
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
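
// adjustframe adjusts pointers in the frame's saved frame pointer,
// locals, arguments, and stack objects so that pointers into the old
// stack point into the new one.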
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved base pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next
			// higher frame on the Go stack (or be nil, for the
			// top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in
		// this frame. On ARM64, this is the frame pointer of the
		// caller's caller saved by the caller in its frame.
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(&adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}
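
	// Adjust pointers in all stack objects (whether they are live or
	// not). See comments in mgcmark.go:scanframeworker.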
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word below the
		// caller's SP, in a slot that is not otherwise copied or
		// adjusted. Fix it up explicitly here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this manually since nothing else adjusts
	// heap-allocated Defer structs.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
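
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.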
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur because we're holding the gscan bit: no other
			// goroutine is trying to lock the full channel set.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channel locks.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
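
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.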
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// Account for the size change in the GC controller's notion
	// of how much scannable stack exists.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if needed.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
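
// round x up to a power of 2.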
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
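
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec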
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // we want a traceback from here
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing
	// the copy since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

// nilfunc faults if it is ever called; gostartcallfn uses its PC when
// fv is nil.
func nilfunc() {
	*(*uint8)(nil) = 0
}
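
// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.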
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
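
// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.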
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the
	// innermost frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
}
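
// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack, either via the _Gscan
// bit or by being gp's own M on the system stack (checked below).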
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Don't shrink our own stack while it is in use by a libcall:
	// non-Go code may hold pointers into it and the stack must not
	// move.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for details).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
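
// freeStackSpans frees unused stack spans at the end of GC.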
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
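
// A stackObjectRecord is generated by the compiler for each stack
// object in a stack frame. This record must match the generator code
// in cmd/compile/internal/liveness/plive.go:emitStackObjects.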
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC prog is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}
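
// gcdata returns the pointer map or GC program for this stack object.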
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}
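
// morestackc is called when code that must run on the system stack
// is executed on a user goroutine stack instead.
//
//go:nosplit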
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}
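
// startingStackSize is the amount of stack that new goroutines are
// allocated. It is a power of 2, and between fixedStack and
// maxstacksize, inclusive.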
var startingStackSize uint32 = fixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
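
	// Compute the average scanned stack size over the last GC cycle
	// (per-P counters are accumulated by the GC) and use it, rounded
	// up to a power of 2 and clamped to [fixedStack, maxstacksize],
	// as the starting stack size for new goroutines. This lets
	// workloads whose goroutines routinely grow their stacks skip
	// the first few grow/copy cycles.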
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + stackGuard
	// Note: we add stackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}