Source file src/runtime/panic.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
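//
// As an illustrative sketch (hypothetical user code, not part of the
// runtime), the bounds check compiled for an index expression branches
// to one of these entry points on failure:
//
//	func get(s []int, i int) int {
//		return s[i] // if i is out of range, the generated code
//		            // calls panicIndex(i, len(s)), which tail calls
//		            // goPanicIndex below
//	}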
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by a
// negative amount.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
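
// For example (hypothetical user code, not part of the runtime), a
// slice-to-array conversion reaches goPanicSliceConvert when the slice
// is shorter than the array type:
//
//	s := make([]byte, 3)
//	a := [4]byte(s) // panics: len(s) is 3, need at least 4
//	_ = a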

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and no results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
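
// A plain defer in user code (hypothetical sketch, not part of the
// runtime) such as:
//
//	defer fmt.Println("done")
//
// is compiled, roughly, into:
//
//	deferproc(func() { fmt.Println("done") })
//	...
//	deferreturn()
//	return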

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
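//
// For illustration (hypothetical user code, not part of the runtime), in
// a loop such as:
//
//	for x := range seq {
//		defer f(x)
//	}
//
// the loop body is compiled into a synthesized func literal; the
// enclosing function calls deferrangefunc() once to obtain the token,
// and each defer in the body becomes, roughly,
// deferprocat(func() { f(x) }, token).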
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	fn := findfunc(sys.GetCallerPC())
	if fn.deferreturn == 0 {
		throw("no deferreturn")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = fn.entry() + uintptr(fn.deferreturn)
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}
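
// As a rough sketch of when each entry point is used (a summary of
// compiler behavior, stated informally): open-coded defers handle
// functions whose defers are not in loops, deferprocStack below is used
// when the defer record itself can live on the stack, and deferproc
// heap-allocates as the general fallback, e.g. for a defer inside a
// loop (hypothetical example):
//
//	for _, f := range files {
//		defer f.Close() // a defer in a loop gets a heap-allocated record
//	}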

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//	d.panic = nil
	//	d.fd = nil
	//	d.link = gp._defer
	//	d.head = nil
	//	gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
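//
// For example (hypothetical user code, not part of the runtime):
//
//	go func() {
//		defer fmt.Println("runs before the goroutine exits")
//		runtime.Goexit() // deferred calls run; recover() in them returns nil
//		fmt.Println("never reached")
//	}()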
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}

var panicnil = &godebugInc{name: "panicnil"}
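
// For example (hypothetical user code, not part of the runtime), with
// the default GODEBUG setting:
//
//	defer func() {
//		r := recover()
//		// r is a *runtime.PanicNilError here, not nil.
//	}()
//	panic(nil)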

// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - go.undefinedlabs.com/scopeagent
//   - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}
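
// Note that a nil deferred function is only detected when it is about
// to run; queuing it succeeds (hypothetical user code, not part of the
// runtime):
//
//	var f func()
//	defer f() // deferproc succeeds; the call to f panics at function exit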

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			// TODO(mdempsky): Instead of having each deferproc call have
			// its own "deferreturn(); return" sequence, we should just make
			// them reuse the one we emit for open-coded defers.
			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}
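
// Schematically, the open-coded defer bookkeeping consumed above works
// like this (informal sketch, not literal compiler output): at each
// defer site the compiler stores the function and sets a bit,
//
//	slots[2] = fn
//	deferBits |= 1 << 2
//
// and at exit (or here, during panic/Goexit processing) pending defers
// run from the highest set bit down, i.e. in reverse source order:
//
//	for deferBits != 0 {
//		i := 7 - LeadingZeros8(deferBits) // most recently queued defer
//		deferBits &^= 1 << i
//		slots[i]()
//	}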

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
	throw(s)
}

//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
	fatal(s)
}
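
// The argp comparison in gorecover is why recover stops a panic only
// when called directly by a deferred function (hypothetical user code,
// not part of the runtime):
//
//	defer func() {
//		recover() // stops the panic: caller's argp matches p.argp
//	}()
//
//	defer func() {
//		helper() // a recover() inside helper returns nil: argp differs
//	}()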
1078 // Notable members of the hall of shame include: 1079 // - github.com/bytedance/sonic 1080 // - github.com/cockroachdb/pebble 1081 // - github.com/dgraph-io/ristretto 1082 // - github.com/outcaste-io/ristretto 1083 // - github.com/pingcap/br 1084 // - gvisor.dev/gvisor 1085 // - github.com/sagernet/gvisor 1086 // 1087 // Do not remove or change the type signature. 1088 // See go.dev/issue/67401. 1089 // 1090 //go:linkname throw 1091 //go:nosplit 1092 func throw(s string) { 1093 // Everything throw does should be recursively nosplit so it 1094 // can be called even when it's unsafe to grow the stack. 1095 systemstack(func() { 1096 print("fatal error: ") 1097 printindented(s) // logically printpanicval(s), but avoids convTstring write barrier 1098 print("\n") 1099 }) 1100 1101 fatalthrow(throwTypeRuntime) 1102 } 1103 1104 // fatal triggers a fatal error that dumps a stack trace and exits. 1105 // 1106 // fatal is equivalent to throw, but is used when user code is expected to be 1107 // at fault for the failure, such as racing map writes. 1108 // 1109 // fatal does not include runtime frames, system goroutines, or frame metadata 1110 // (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher. 1111 // 1112 //go:nosplit 1113 func fatal(s string) { 1114 // Everything fatal does should be recursively nosplit so it 1115 // can be called even when it's unsafe to grow the stack. 1116 printlock() // Prevent multiple interleaved fatal reports. See issue 69447. 1117 systemstack(func() { 1118 print("fatal error: ") 1119 printindented(s) // logically printpanicval(s), but avoids convTstring write barrier 1120 print("\n") 1121 }) 1122 1123 fatalthrow(throwTypeUser) 1124 printunlock() 1125 } 1126 1127 // runningPanicDefers is non-zero while running deferred functions for panic. 1128 // This is used to try hard to get a panic stack trace out when exiting. 1129 var runningPanicDefers atomic.Uint32 1130 1131 // panicking is non-zero when crashing the program for an unrecovered panic. 1132 var panicking atomic.Uint32 1133 1134 // paniclk is held while printing the panic information and stack trace, 1135 // so that two concurrent panics don't overlap their output. 1136 var paniclk mutex 1137 1138 // Unwind the stack after a deferred function calls recover 1139 // after a panic. Then arrange to continue running as though 1140 // the caller of the deferred function returned normally. 1141 // 1142 // However, if unwinding the stack would skip over a Goexit call, we 1143 // return into the Goexit loop instead, so it can continue processing 1144 // defers instead. 1145 func recovery(gp *g) { 1146 p := gp._panic 1147 pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp) 1148 p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0 1149 1150 // Unwind the panic stack. 1151 for ; p != nil && uintptr(p.startSP) < sp; p = p.link { 1152 // Don't allow jumping past a pending Goexit. 1153 // Instead, have its _panic.start() call return again. 1154 // 1155 // TODO(mdempsky): In this case, Goexit will resume walking the 1156 // stack where it left off, which means it will need to rewalk 1157 // frames that we've already processed. 1158 // 1159 // There's a similar issue with nested panics, when the inner 1160 // panic supersedes the outer panic. Again, we end up needing to 1161 // walk the same stack frames. 1162 // 1163 // These are probably pretty rare occurrences in practice, and 1164 // they don't seem any worse than the existing logic. 

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers instead.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			pc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	// The value in ret is delivered IN A REGISTER, even if there is a
	// stack ABI.
	gp.sched.ret = 1
	gogo(&gp.sched)
}
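
// The net effect for user code (hypothetical example, not part of the
// runtime) is that after recover, execution continues as if the
// function containing the defer had returned normally:
//
//	func safe() (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("recovered: %v", r)
//			}
//		}()
//		panic("boom") // safe returns with err set; its caller keeps running
//	}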

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
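
// How much of this output the user sees is controlled by GOTRACEBACK
// (summarized informally here; see the runtime package docs):
//
//	GOTRACEBACK=none    // omit goroutine stack traces entirely
//	GOTRACEBACK=single  // default: only the crashing goroutine
//	GOTRACEBACK=all     // all user-created goroutines
//	GOTRACEBACK=system  // also runtime frames and system goroutines
//	GOTRACEBACK=crash   // like system, then abort for a core dump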

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}
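
// For example (hypothetical user code, not part of the runtime), a nil
// pointer dereference:
//
//	var p *int
//	_ = *p
//
// raises a memory fault; the signal handler consults canpanic, and if
// it reports true, injects a call to sigpanic, which ultimately reaches
// panicmem above and turns the fault into a regular Go panic.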

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}