Source file src/runtime/signal_unix.go
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix

package runtime

import (
	"internal/abi"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// sigTabT is the type of an entry in the global sigtable array.
// sigtable is inherently system dependent, and appears in OS-specific files,
// but sigTabT is the same for all Unixy systems.
// The sigtable array is indexed by a system signal number to get the flags
// and printable name of each signal.
type sigTabT struct {
	flags int32
	name  string
}

//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
	systemstack(sigpipe)
}

func signame(sig uint32) string {
	if sig >= uint32(len(sigtable)) {
		return ""
	}
	return sigtable[sig].name
}

const (
	_SIG_DFL uintptr = 0
	_SIG_IGN uintptr = 1
)

// sigPreempt is the signal used for non-cooperative preemption.
//
// There's no good way to choose this signal, but there are some
// heuristics:
//
// 1. It should be a signal that's passed-through by debuggers by
// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
//
// 2. It shouldn't be used internally by libc in mixed Go/C binaries
// because libc may assume it's the only thing that can handle these
// signals. For example SIGCANCEL or SIGSETXID.
//
// 3. It should be a signal that can happen spuriously without
// consequences. For example, SIGALRM is a bad choice because the
// signal handler can't tell if it was caused by the real process
// alarm or not (arguably this means the signal is broken, but I
// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
// used in meaningful ways by applications.
//
// 4. We need to deal with platforms without real-time signals (like
// macOS), so those are out.
//
// We use SIGURG because it meets all of these criteria, is extremely
// unlikely to be used by an application for its "real" meaning (both
// because out-of-band data is basically unused and because SIGURG
// doesn't report which socket has the condition, making it pretty
// useless), and even if it is, the application has to be ready for
// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
// likely to be used for real.
const sigPreempt = _SIGURG
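
// For illustration (user-side sketch, not runtime code): a program that asks
// for SIGURG via the os/signal and syscall packages will also observe the
// runtime's preemption signals, so it must treat every delivery as
// potentially spurious:
//
//	ch := make(chan os.Signal, 1)
//	signal.Notify(ch, syscall.SIGURG) // also receives runtime preemption SIGURGs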

// Stores the signal handlers registered before Go installed its own.
// These signal handlers will be invoked in cases where Go doesn't want to
// handle a particular signal (e.g., signal occurred on a non-Go thread).
// See sigfwdgo for more information on when the signals are forwarded.
//
// This is read by the signal handler; accesses should use
// atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr

// handlingSig is indexed by signal number and is non-zero if we are
// currently handling the signal. Or, to put it another way, whether
// the signal handler is currently set to the Go signal handler or not.
// This is uint32 rather than bool so that we can use atomic instructions.
var handlingSig [_NSIG]uint32

// channels for synchronizing signal mask updates with the signal mask
// thread
var (
	disableSigChan  chan uint32
	enableSigChan   chan uint32
	maskUpdatedChan chan struct{}
)

func init() {
	// _NSIG is the number of signals on this operating system.
	// sigtable should describe what to do for all the possible signals.
	if len(sigtable) != _NSIG {
		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
		throw("bad sigtable len")
	}
}

var signalsOK bool

// Initialize signals.
// Called by libpreinit so runtime may not be initialized.
//
//go:nosplit
//go:nowritebarrierrec
func initsig(preinit bool) {
	if !preinit {
		// It's now OK for signal handlers to run.
		signalsOK = true
	}

	// For c-archive/c-shared this is called by libpreinit with
	// preinit == true.
	if (isarchive || islibrary) && !preinit {
		return
	}

	for i := uint32(0); i < _NSIG; i++ {
		t := &sigtable[i]
		if t.flags == 0 || t.flags&_SigDefault != 0 {
			continue
		}

		// We don't need to use atomic operations here because
		// there shouldn't be any other goroutines running yet.
		fwdSig[i] = getsig(i)

		if !sigInstallGoHandler(i) {
			// Even if we are not installing a signal handler,
			// set SA_ONSTACK if necessary.
			if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
				setsigstack(i)
			} else if fwdSig[i] == _SIG_IGN {
				sigInitIgnored(i)
			}
			continue
		}

		handlingSig[i] = 1
		setsig(i, abi.FuncPCABIInternal(sighandler))
	}
}

//go:nosplit
//go:nowritebarrierrec
func sigInstallGoHandler(sig uint32) bool {
	// For some signals, we respect an inherited SIG_IGN handler
	// rather than insist on installing our own default handler.
	// Even these signals can be fetched using the os/signal package.
	switch sig {
	case _SIGHUP, _SIGINT:
		if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
			return false
		}
	}

	if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries.
		return true
	}

	t := &sigtable[sig]
	if t.flags&_SigSetStack != 0 {
		return false
	}

	// When built using c-archive or c-shared, only install signal
	// handlers for synchronous signals and SIGPIPE and sigPreempt.
	if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
		return false
	}

	return true
}

// sigenable enables the Go signal handler to catch the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		enableSigChan <- sig
		<-maskUpdatedChan
		if atomic.Cas(&handlingSig[sig], 0, 1) {
			atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
			setsig(sig, abi.FuncPCABIInternal(sighandler))
		}
	}
}
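
// For illustration (user-side sketch, not runtime code): sigenable and its
// counterparts below, sigdisable and sigignore, are reached indirectly from
// the os/signal package, roughly as
//
//	ch := make(chan os.Signal, 1)
//	signal.Notify(ch, syscall.SIGHUP) // eventually reaches sigenable for SIGHUP
//	signal.Stop(ch)                   // eventually reaches sigdisable for SIGHUP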

// sigdisable disables the Go signal handler for the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		disableSigChan <- sig
		<-maskUpdatedChan

		// If initsig does not install a signal handler for a
		// signal, then to go back to the state before Notify
		// we should remove the one we installed.
		if !sigInstallGoHandler(sig) {
			atomic.Store(&handlingSig[sig], 0)
			setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
		}
	}
}

// sigignore ignores the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		atomic.Store(&handlingSig[sig], 0)
		setsig(sig, _SIG_IGN)
	}
}

// clearSignalHandlers clears all signal handlers that are not ignored
// back to the default. This is called by the child after a fork, so that
// we can enable the signal mask for the exec without worrying about
// running a signal handler in the child.
//
//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
	for i := uint32(0); i < _NSIG; i++ {
		if atomic.Load(&handlingSig[i]) != 0 {
			setsig(i, _SIG_DFL)
		}
	}
}

// setProcessCPUProfilerTimer is called when the profiling timer changes.
// It is called with prof.signalLock held. hz is the new timer, and is 0 if
// profiling is being disabled. Enable or disable the signal as
// required for -buildmode=c-archive.
func setProcessCPUProfilerTimer(hz int32) {
	if hz != 0 {
		// Enable the Go signal handler if not enabled.
		if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
			h := getsig(_SIGPROF)
			// If no signal handler was installed before, then we record
			// _SIG_IGN here. When we turn off profiling (below) we'll start
			// ignoring SIGPROF signals. We do this, rather than change
			// to SIG_DFL, because there may be a pending SIGPROF
			// signal that has not yet been delivered to some other thread.
			// If we change to SIG_DFL when turning off profiling, the
			// program will crash when that SIGPROF is delivered. We assume
			// that programs that use profiling don't want to crash on a
			// stray SIGPROF. See issue 19320.
			// We do the change here instead of when turning off profiling,
			// because there we may race with a signal handler running
			// concurrently, in particular, sigfwdgo may observe _SIG_DFL and
			// die. See issue 43828.
			if h == _SIG_DFL {
				h = _SIG_IGN
			}
			atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
			setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
		}

		var it itimerval
		it.it_interval.tv_sec = 0
		it.it_interval.set_usec(1000000 / hz)
		it.it_value = it.it_interval
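		// For example, at the default profiling rate of hz = 100 this arms a
		// 10000µs (10ms) interval timer, i.e. roughly 100 SIGPROF deliveries
		// per second for the process.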
		setitimer(_ITIMER_PROF, &it, nil)
	} else {
		setitimer(_ITIMER_PROF, &itimerval{}, nil)

		// If the Go signal handler should be disabled by default,
		// switch back to the signal handler that was installed
		// when we enabled profiling. We don't try to handle the case
		// of a program that changes the SIGPROF handler while Go
		// profiling is enabled.
		if !sigInstallGoHandler(_SIGPROF) {
			if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
				h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
				setsig(_SIGPROF, h)
			}
		}
	}
}

// setThreadCPUProfilerHz makes any thread-specific changes required to
// implement profiling at a rate of hz.
// No changes required on Unix systems when using setitimer.
func setThreadCPUProfilerHz(hz int32) {
	getg().m.profilehz = hz
}

func sigpipe() {
	if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
		return
	}
	dieFromSignal(_SIGPIPE)
}

// doSigPreempt handles a preemption signal on gp.
func doSigPreempt(gp *g, ctxt *sigctxt) {
	// Check if this G wants to be preempted and is safe to
	// preempt.
	if wantAsyncPreempt(gp) {
		if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
			// Adjust the PC and inject a call to asyncPreempt.
			ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
		}
	}

	// Acknowledge the preemption.
	gp.m.preemptGen.Add(1)
	gp.m.signalPending.Store(0)

	if GOOS == "darwin" || GOOS == "ios" {
		pendingPreemptSignals.Add(-1)
	}
}

const preemptMSupported = true

// preemptM sends a preemption request to mp. This request may be
// handled asynchronously and may be coalesced with other requests to
// the M. When the request is received, if the running G or P are
// marked for preemption and the goroutine is at an asynchronous
// safe-point, it will preempt the goroutine. It always atomically
// increments mp.preemptGen after handling a preemption request.
func preemptM(mp *m) {
	// On Darwin, don't try to preempt threads during exec.
	// Issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		execLock.rlock()
	}

	if mp.signalPending.CompareAndSwap(0, 1) {
		if GOOS == "darwin" || GOOS == "ios" {
			pendingPreemptSignals.Add(1)
		}

		// If multiple threads are preempting the same M, they may send many
		// signals to the same M such that it hardly makes progress, causing a
		// live-lock problem. Apparently this could happen on darwin. See
		// issue #37741.
		// Only send a signal if there isn't already one pending.
		signalM(mp, sigPreempt)
	}

	if GOOS == "darwin" || GOOS == "ios" {
		execLock.runlock()
	}
}

// sigFetchG fetches the value of G safely when running in a signal handler.
// On some architectures, the g value may be clobbered when running in a VDSO.
// See issue #32912.
//
//go:nosplit
func sigFetchG(c *sigctxt) *g {
	switch GOARCH {
	case "arm", "arm64", "loong64", "ppc64", "ppc64le", "riscv64", "s390x":
		if !iscgo && inVDSOPage(c.sigpc()) {
			// When using cgo, we save the g on TLS and load it from there
			// in sigtramp. Just use that.
			// Otherwise, before making a VDSO call we save the g to the
			// bottom of the signal stack. Fetch from there.
			// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
			// work.
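			// The handler itself runs on that same signal stack, so the
			// caller's SP locates the stack's span and the saved g sits at
			// the span's base.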
			sp := sys.GetCallerSP()
			s := spanOf(sp)
			if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
				gp := *(**g)(unsafe.Pointer(s.base()))
				return gp
			}
			return nil
		}
	}
	return getg()
}

// sigtrampgo is called from the signal handler function, sigtramp,
// written in assembly code.
// This is called by the signal handler, and the world may be stopped.
//
// It must be nosplit because getg() is still the G that was running
// (if any) when the signal was delivered, but it's (usually) called
// on the gsignal stack. Until this switches the G to gsignal, the
// stack bounds check won't work.
//
//go:nosplit
//go:nowritebarrierrec
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if sigfwdgo(sig, info, ctx) {
		return
	}
	c := &sigctxt{info, ctx}
	gp := sigFetchG(c)
	setg(gp)
	if gp == nil || (gp.m != nil && gp.m.isExtraInC) {
		if sig == _SIGPROF {
			// Some platforms (Linux) have per-thread timers, which we use in
			// combination with the process-wide timer. Avoid double-counting.
			if validSIGPROF(nil, c) {
				sigprofNonGoPC(c.sigpc())
			}
			return
		}
		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
			// This is probably a signal from preemptM sent
			// while executing Go code but received while
			// executing non-Go code.
			// We got past sigfwdgo, so we know that there is
			// no non-Go signal handler for sigPreempt.
			// The default behavior for sigPreempt is to ignore
			// the signal, so badsignal will be a no-op anyway.
			if GOOS == "darwin" || GOOS == "ios" {
				pendingPreemptSignals.Add(-1)
			}
			return
		}
		c.fixsigcode(sig)
		// Set g to nil here so that badsignal will use g0 via needm.
		// TODO: reuse the current m here by using the gsignal and adjustSignalStack,
		// since the current g may be a normal goroutine actually running on the
		// signal stack, and it could hit a stack split that is not expected here.
		if gp != nil {
			setg(nil)
		}
		badsignal(uintptr(sig), c)
		// Restore g
		if gp != nil {
			setg(gp)
		}
		return
	}

	setg(gp.m.gsignal)

	// If some non-Go code called sigaltstack, adjust.
	var gsignalStack gsignalStack
	setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
	if setStack {
		gp.m.gsignal.stktopsp = sys.GetCallerSP()
	}

	if gp.stackguard0 == stackFork {
		signalDuringFork(sig)
	}

	c.fixsigcode(sig)
	sighandler(sig, info, ctx, gp)
	setg(gp)
	if setStack {
		restoreGsignalStack(&gsignalStack)
	}
}

// If the signal handler receives a SIGPROF signal on a non-Go thread,
// it tries to collect a traceback into sigprofCallers.
// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
var sigprofCallers cgoCallers
var sigprofCallersUse uint32

// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
// and the signal handler collected a stack trace in sigprofCallers.
// When this is called, sigprofCallersUse will be non-zero.
// g is nil, and what we can do is very limited.
//
// It is called from the signal handling functions written in assembly code that
// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
// not verified that the SIGPROF delivery corresponds to the best available
// profiling source for this thread.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if prof.hz.Load() != 0 {
		c := &sigctxt{info, ctx}
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if validSIGPROF(nil, c) {
			n := 0
			for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
				n++
			}
			cpuprof.addNonGo(sigprofCallers[:n])
		}
	}

	atomic.Store(&sigprofCallersUse, 0)
}

// sigprofNonGoPC is called when a profiling signal arrived on a
// non-Go thread and we have a single PC value, not a stack trace.
// g is nil, and what we can do is very limited.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGoPC(pc uintptr) {
	if prof.hz.Load() != 0 {
		stk := []uintptr{
			pc,
			abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
		}
		cpuprof.addNonGo(stk)
	}
}

// adjustSignalStack adjusts the current stack guard based on the
// stack pointer that is actually in use while handling a signal.
// We do this in case some non-Go code called sigaltstack.
// This reports whether the stack was adjusted, and if so stores the old
// signal stack in *gsigstack.
//
//go:nosplit
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
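	// The address of the incoming sig argument is a cheap approximation of
	// the stack pointer the handler is currently running on; classify it
	// against the known stacks below.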
	sp := uintptr(unsafe.Pointer(&sig))
	if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
		return false
	}

	var st stackt
	sigaltstack(nil, &st)
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
		setGsignalStack(&st, gsigStack)
		return true
	}

	if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
		// The signal was delivered on the g0 stack.
		// This can happen when linked with C code
		// using the thread sanitizer, which collects
		// signals then delivers them itself by calling
		// the signal handler directly when C code,
		// including C code called via cgo, calls a
		// TSAN-intercepted function such as malloc.
		//
		// We check this condition last as g0.stack.lo
		// may not be very accurate (see mstart).
		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
		setSignalstackSP(&st, mp.g0.stack.lo)
		setGsignalStack(&st, gsigStack)
		return true
	}

	// sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
	// Call indirectly to avoid nosplit stack overflow on OpenBSD.
	adjustSignalStack2Indirect(sig, sp, mp, st.ss_flags&_SS_DISABLE != 0)
	return false
}

var adjustSignalStack2Indirect = adjustSignalStack2

//go:nosplit
func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool) {
	setg(nil)
	needm(true)
	if ssDisable {
		noSignalStack(sig)
	} else {
		sigNotOnStack(sig, sp, mp)
	}
	dropm()
}

// crashing is the number of m's we have waited for when implementing
// GOTRACEBACK=crash when a signal is received.
var crashing atomic.Int32

// testSigtrap and testSigusr1 are used by the runtime tests. If
// non-nil, they are called on SIGTRAP and SIGUSR1 respectively. If a
// handler returns true, the normal behavior on that signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool

// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
var sigsysIgnored uint32

//go:linkname ignoreSIGSYS os.ignoreSIGSYS
func ignoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 1)
}

//go:linkname restoreSIGSYS os.restoreSIGSYS
func restoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 0)
}

// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
// signal stack. The parameter gp will be the value of the global g
// when the signal occurred. The sig, info, and ctxt parameters are
// from the system signal handler: they are the parameters the kernel
// passes to the handler registered with the sigaction system call.
//
// The garbage collector may have stopped the world, so write barriers
// are not allowed.
//
//go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
	// The g executing the signal handler. This is almost always
	// mp.gsignal. See delayedSignal for an exception.
	gsignal := getg()
	mp := gsignal.m
	c := &sigctxt{info, ctxt}

	// Cgo TSAN (not the Go race detector) intercepts signals and calls the
	// signal handler at a later time. When the signal handler is called, the
	// memory may have changed, but the signal context remains old. The
	// unmatched signal context and memory make it unsafe to unwind or inspect
	// the stack. So we ignore delayed non-fatal signals that will cause a stack
	// inspection (profiling signal and preemption signal).
	// cgo_yield is only non-nil for TSAN, and is specifically used to trigger
	// signal delivery. We use that as an indicator of delayed signals.
	// For delayed signals, the handler is called on the g0 stack (see
	// adjustSignalStack).
	delayedSignal := *cgo_yield != nil && mp != nil && gsignal.stack == mp.g0.stack

	if sig == _SIGPROF {
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if !delayedSignal && validSIGPROF(mp, c) {
			sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
		}
		return
	}

	if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
		return
	}

	if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
		return
	}

	if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries. Since this signal is not _SigNotify,
		// there is nothing more to do once we run the syscall.
		runPerThreadSyscall()
		return
	}

	if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
		// Might be a preemption signal.
		doSigPreempt(gp, c)
		// Even if this was definitely a preemption signal, it
		// may have been coalesced with another signal, so we
		// still let it through to the application.
	}

	flags := int32(_SigThrow)
	if sig < uint32(len(sigtable)) {
		flags = sigtable[sig].flags
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 && (gp.throwsplit || gp != mp.curg) {
		// We can't safely sigpanic because it may grow the
		// stack. Abort in the signal handler instead.
		//
		// Also don't inject a sigpanic if we are not on a
		// user G stack. Either we're in the runtime, or we're
		// running C code. Either way we cannot recover.
		flags = _SigThrow
	}
	if isAbortPC(c.sigpc()) {
		// On many architectures, the abort function just
		// causes a memory fault. Don't turn that into a panic.
		flags = _SigThrow
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 {
		// The signal is going to cause a panic.
		// Arrange the stack so that it looks like the point
		// where the signal occurred made a call to the
		// function sigpanic. Then set the PC to sigpanic.

		// Have to pass arguments out of band since
		// augmenting the stack frame would break
		// the unwinding code.
		gp.sig = sig
		gp.sigcode0 = uintptr(c.sigcode())
		gp.sigcode1 = c.fault()
		gp.sigpc = c.sigpc()

		c.preparePanic(sig, gp)
		return
	}

	if c.sigFromUser() || flags&_SigNotify != 0 {
		if sigsend(sig) {
			return
		}
	}

	if c.sigFromUser() && signal_ignored(sig) {
		return
	}

	if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
		return
	}

	if flags&_SigKill != 0 {
		dieFromSignal(sig)
	}

	// _SigThrow means that we should exit now.
	// If we get here with _SigPanic, it means that the signal
	// was sent to us by a program (c.sigFromUser() is true);
	// in that case, if we didn't handle it in sigsend, we exit now.
	if flags&(_SigThrow|_SigPanic) == 0 {
		return
	}

	mp.throwing = throwTypeRuntime
	mp.caughtsig.set(gp)

	if crashing.Load() == 0 {
		startpanic_m()
	}

	gp = fatalsignal(sig, c, gp, mp)

	level, _, docrash := gotraceback()
	if level > 0 {
		goroutineheader(gp)
		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
			// tracebackothers on original m skipped this one; trace it now.
			goroutineheader(mp.curg)
			traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
		} else if crashing.Load() == 0 {
			tracebackothers(gp)
			print("\n")
		}
		dumpregs(c)
	}

	if docrash {
		var crashSleepMicros uint32 = 5000
		var watchdogTimeoutMicros uint32 = 2000 * crashSleepMicros
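		// 2000 * 5000µs = 10,000,000µs, i.e. the 10-second watchdog referred
		// to in the relay comment below.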

		isCrashThread := false
		if crashing.CompareAndSwap(0, 1) {
			isCrashThread = true
		} else {
			crashing.Add(1)
		}
		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
			// There are other m's that need to dump their stacks.
			// Relay SIGQUIT to the next m by sending it to the current process.
			// All m's that have already received SIGQUIT have signal masks blocking
			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
			// The first m will wait until all ms have received the SIGQUIT, then crash/exit.
			// Just in case the relaying gets botched, each m involved in
			// the relay sleeps for 5 seconds and then does the crash/exit itself.
			// The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277):
			// in expected operation, the first m will wait until the last m has received the SIGQUIT,
			// and then run crash/exit and the process is gone.
			// However, if it takes more than 10 seconds to send SIGQUIT to all ms,
			// any of the ms may crash/exit the process after waiting for 10 seconds.
			print("\n-----\n\n")
			raiseproc(_SIGQUIT)
		}
		if isCrashThread {
			// Sleep for short intervals so that we can crash quickly after all ms have received SIGQUIT.
			// Reset the timer whenever we see more ms have received SIGQUIT,
			// to give them enough time to crash (see issue #64752).
			timeout := watchdogTimeoutMicros
			maxCrashing := crashing.Load()
			for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
				usleep(crashSleepMicros)
				timeout -= crashSleepMicros

				if c := crashing.Load(); c > maxCrashing {
					// We're making progress, so reset the watchdog timeout.
					maxCrashing = c
					timeout = watchdogTimeoutMicros
				}
			}
		} else {
			maxCrashing := int32(0)
			c := crashing.Load()
			for c > maxCrashing {
				maxCrashing = c
				usleep(watchdogTimeoutMicros)
				c = crashing.Load()
			}
		}
		printDebugLog()
		crash()
	}

	printDebugLog()

	exit(2)
}

func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
	if sig < uint32(len(sigtable)) {
		print(sigtable[sig].name, "\n")
	} else {
		print("Signal ", sig, "\n")
	}

	if isSecureMode() {
		exit(2)
	}

	print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode())
	if sig == _SIGSEGV || sig == _SIGBUS {
		print(" addr=", hex(c.fault()))
	}
	print("\n")
	if mp.incgo && gp == mp.g0 && mp.curg != nil {
		print("signal arrived during cgo execution\n")
		// Switch to curg so that we get a traceback of the Go code
		// leading up to the cgocall, which switched from curg to g0.
		gp = mp.curg
	}
	if sig == _SIGILL || sig == _SIGFPE {
		// It would be nice to know how long the instruction is.
		// Unfortunately, that's complicated to do in general (mostly for x86
		// and s390x, but other archs have non-standard instruction lengths also).
		// Opt to print 16 bytes, which covers most instructions.
		const maxN = 16
		n := uintptr(maxN)
		// We have to be careful, though. If we're near the end of
		// a page and the following page isn't mapped, we could
		// segfault. So make sure we don't straddle a page (even though
		// that could lead to printing an incomplete instruction).
		// We're assuming here we can read at least the page containing the PC.
		// I suppose it is possible that the page is mapped executable but not readable?
		pc := c.sigpc()
		if n > physPageSize-pc%physPageSize {
			n = physPageSize - pc%physPageSize
		}
		print("instruction bytes:")
		b := (*[maxN]byte)(unsafe.Pointer(pc))
		for i := uintptr(0); i < n; i++ {
			print(" ", hex(b[i]))
		}
		println()
	}
	print("\n")
	return gp
}

// sigpanic turns a synchronous signal into a run-time panic.
// If the signal handler sees a synchronous panic, it arranges the
// stack to look like the function where the signal occurred called
// sigpanic, sets the signal's PC value to sigpanic, and returns from
// the signal handler. The effect is that the program will act as
// though the function that got the signal simply called sigpanic
// instead.
//
// This must NOT be nosplit because the linker doesn't know where
// sigpanic calls can be injected.
//
// The signal handler must not inject a call to sigpanic if
// getg().throwsplit, since sigpanic may need to grow the stack.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:linkname sigpanic
func sigpanic() {
	gp := getg()
	if !canpanic() {
		throw("unexpected signal during runtime execution")
	}

	switch gp.sig {
	case _SIGBUS:
		if gp.sigcode0 == _BUS_ADRERR && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		print("unexpected fault address ", hex(gp.sigcode1), "\n")
		throw("fault")
	case _SIGSEGV:
		if (gp.sigcode0 == 0 || gp.sigcode0 == _SEGV_MAPERR || gp.sigcode0 == _SEGV_ACCERR) && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		if inUserArenaChunk(gp.sigcode1) {
			// We could check that the arena chunk is explicitly set to fault,
			// but the fact that we faulted on accessing it is enough to prove
			// that it is.
			print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
		} else {
			print("unexpected fault address ", hex(gp.sigcode1), "\n")
		}
		throw("fault")
	case _SIGFPE:
		switch gp.sigcode0 {
		case _FPE_INTDIV:
			panicdivide()
		case _FPE_INTOVF:
			panicoverflow()
		}
		panicfloat()
	}

	if gp.sig >= uint32(len(sigtable)) {
		// can't happen: we looked up gp.sig in sigtable to decide to call sigpanic
		throw("unexpected signal value")
	}
	panic(errorString(sigtable[gp.sig].name))
}
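
// For illustration (user-side sketch, not runtime code): a fault that reaches
// sigpanic and panicmem surfaces as a recoverable runtime.Error, e.g.
//
//	defer func() {
//		if r := recover(); r != nil {
//			// r is a runtime.Error: "invalid memory address or nil pointer dereference"
//		}
//	}()
//	var p *int
//	_ = *p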

// dieFromSignal kills the program with a signal.
// This provides the expected exit status for the shell.
// This is only called with fatal signals expected to kill the process.
//
//go:nosplit
//go:nowritebarrierrec
func dieFromSignal(sig uint32) {
	unblocksig(sig)
	// Mark the signal as unhandled to ensure it is forwarded.
	atomic.Store(&handlingSig[sig], 0)
	raise(sig)

	// That should have killed us. On some systems, though, raise
	// sends the signal to the whole process rather than to just
	// the current thread, which means that the signal may not yet
	// have been delivered. Give other threads a chance to run and
	// pick up the signal.
	osyield()
	osyield()
	osyield()

	// If that didn't work, try _SIG_DFL.
	setsig(sig, _SIG_DFL)
	raise(sig)

	osyield()
	osyield()
	osyield()

	// If we are still somehow running, just exit with the wrong status.
	exit(2)
}

// raisebadsignal is called when a signal is received on a non-Go
// thread, and the Go program does not want to handle it (that is, the
// program has not called os/signal.Notify for the signal).
func raisebadsignal(sig uint32, c *sigctxt) {
	if sig == _SIGPROF {
		// Ignore profiling signals that arrive on non-Go threads.
		return
	}

	var handler uintptr
	var flags int32
	if sig >= _NSIG {
		handler = _SIG_DFL
	} else {
		handler = atomic.Loaduintptr(&fwdSig[sig])
		flags = sigtable[sig].flags
	}

	// If the signal is ignored, raising the signal is a no-op.
	if handler == _SIG_IGN || (handler == _SIG_DFL && flags&_SigIgn != 0) {
		return
	}

	// Reset the signal handler and raise the signal.
	// We are currently running inside a signal handler, so the
	// signal is blocked. We need to unblock it before raising the
	// signal, or the signal we raise will be ignored until we return
	// from the signal handler. We know that the signal was unblocked
	// before entering the handler, or else we would not have received
	// it. That means that we don't have to worry about blocking it
	// again.
	unblocksig(sig)
	setsig(sig, handler)

	// If we're linked into a non-Go program we want to try to
	// avoid modifying the original context in which the signal
	// was raised. If the handler is the default, we know it
	// is non-recoverable, so we don't have to worry about
	// re-installing sighandler. At this point we can just
	// return and the signal will be re-raised and caught by
	// the default handler with the correct context.
	//
	// On FreeBSD, the libthr sigaction code prevents
	// this from working so we fall through to raise.
	if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && !c.sigFromUser() {
		return
	}

	raise(sig)

	// Give the signal a chance to be delivered.
	// In almost all real cases the program is about to crash,
	// so sleeping here is not a waste of time.
	usleep(1000)

	// If the signal didn't cause the program to exit, restore the
	// Go signal handler and carry on.
	//
	// We may receive another instance of the signal before we
	// restore the Go handler, but that is not so bad: we know
	// that the Go program has been ignoring the signal.
	setsig(sig, abi.FuncPCABIInternal(sighandler))
}

//go:nosplit
func crash() {
	dieFromSignal(_SIGABRT)
}
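
// Note: crash relies on SIGABRT's default disposition. On Unix systems that
// typically terminates the process and, when core dumps are enabled (e.g. via
// ulimit -c), produces a core dump, which is what GOTRACEBACK=crash is for.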

// ensureSigM starts one global, sleeping thread to make sure at least one thread
// is available to catch signals enabled for os/signal.
func ensureSigM() {
	if maskUpdatedChan != nil {
		return
	}
	maskUpdatedChan = make(chan struct{})
	disableSigChan = make(chan uint32)
	enableSigChan = make(chan uint32)
	go func() {
		// Signal masks are per-thread, so make sure this goroutine stays on one
		// thread.
		LockOSThread()
		defer UnlockOSThread()
		// The sigBlocked mask contains the signals not active for os/signal,
		// initially all signals except the essential ones. When signal.Notify()/Stop is called,
		// sigenable/sigdisable in turn notify this thread to update its signal
		// mask accordingly.
		sigBlocked := sigset_all
		for i := range sigtable {
			if !blockableSig(uint32(i)) {
				sigdelset(&sigBlocked, i)
			}
		}
		sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
		for {
			select {
			case sig := <-enableSigChan:
				if sig > 0 {
					sigdelset(&sigBlocked, int(sig))
				}
			case sig := <-disableSigChan:
				if sig > 0 && blockableSig(sig) {
					sigaddset(&sigBlocked, int(sig))
				}
			}
			sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
			maskUpdatedChan <- struct{}{}
		}
	}()
}

// This is called when we receive a signal when there is no signal stack.
// This can only happen if non-Go code calls sigaltstack to disable the
// signal stack.
func noSignalStack(sig uint32) {
	println("signal", sig, "received on thread with no signal stack")
	throw("non-Go code disabled sigaltstack")
}

// This is called if we receive a signal when there is a signal stack
// but we are not on it. This can only happen if non-Go code called
// sigaction without setting the SA_ONSTACK flag.
func sigNotOnStack(sig uint32, sp uintptr, mp *m) {
	println("signal", sig, "received but handler not on signal stack")
	print("mp.gsignal stack [", hex(mp.gsignal.stack.lo), " ", hex(mp.gsignal.stack.hi), "], ")
	print("mp.g0 stack [", hex(mp.g0.stack.lo), " ", hex(mp.g0.stack.hi), "], sp=", hex(sp), "\n")
	throw("non-Go code set up signal handler without SA_ONSTACK flag")
}

// signalDuringFork is called if we receive a signal while doing a fork.
// We do not want signals at that time, as a signal sent to the process
// group may be delivered to the child process, causing confusion.
// This should never be called, because we block signals across the fork;
// this function is just a safety check. See issue 18600 for background.
func signalDuringFork(sig uint32) {
	println("signal", sig, "received during fork")
	throw("signal received during fork")
}

// This runs on a foreign stack, without an m or a g. No stack split.
//
//go:nosplit
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
	if !iscgo && !cgoHasExtraM {
		// There is no extra M. needm will not be able to grab
		// an M. Instead of hanging, just crash.
		// Cannot call split-stack function as there is no G.
		writeErrStr("fatal: bad g in signal handler\n")
		exit(2)
		*(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
	}
	needm(true)
	if !sigsend(uint32(sig)) {
		// A foreign thread received the signal sig, and the
		// Go code does not want to handle it.
		raisebadsignal(uint32(sig), c)
	}
	dropm()
}

//go:noescape
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)

// Determines if the signal should be handled by Go and if not, forwards the
// signal to the handler that was installed before Go's. Returns whether the
// signal was forwarded.
// This is called by the signal handler, and the world may be stopped.
//
//go:nosplit
//go:nowritebarrierrec
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
	if sig >= uint32(len(sigtable)) {
		return false
	}
	fwdFn := atomic.Loaduintptr(&fwdSig[sig])
	flags := sigtable[sig].flags

	// If we aren't handling the signal, forward it.
	if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
		// If the signal is ignored, doing nothing is the same as forwarding.
		if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
			return true
		}
		// We are not handling the signal and there is no other handler to forward to.
		// Crash with the default behavior.
		if fwdFn == _SIG_DFL {
			setsig(sig, _SIG_DFL)
			dieFromSignal(sig)
			return false
		}

		sigfwd(fwdFn, sig, info, ctx)
		return true
	}

	// This function and its caller sigtrampgo assume SIGPIPE is delivered on the
	// originating thread. This property does not hold on macOS (golang.org/issue/33384),
	// so we have no choice but to ignore SIGPIPE.
	if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
		return true
	}

	// If there is no handler to forward to, no need to forward.
	if fwdFn == _SIG_DFL {
		return false
	}

	c := &sigctxt{info, ctx}
	// Only forward synchronous signals and SIGPIPE.
	// Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
	// is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
	// or pipe.
	if (c.sigFromUser() || flags&_SigPanic == 0) && sig != _SIGPIPE {
		return false
	}
	// Determine if the signal occurred inside Go code. We test that:
	// (1) we weren't in the VDSO page,
	// (2) we were in a goroutine (i.e., m.curg != nil),
	// (3) we weren't in CGO, and
	// (4) we weren't in a dropped extra m.
	gp := sigFetchG(c)
	if gp != nil && gp.m != nil && gp.m.curg != nil && !gp.m.isExtraInC && !gp.m.incgo {
		return false
	}

	// Signal not handled by Go, forward it.
	if fwdFn != _SIG_IGN {
		sigfwd(fwdFn, sig, info, ctx)
	}

	return true
}

// sigsave saves the current thread's signal mask into *p.
// This is used to preserve the non-Go signal mask when a non-Go
// thread calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func sigsave(p *sigset) {
	sigprocmask(_SIG_SETMASK, nil, p)
}

// msigrestore sets the current thread's signal mask to sigmask.
// This is used to restore the non-Go signal mask when a non-Go thread
// calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by dropm
// after g has been cleared.
//
//go:nosplit
//go:nowritebarrierrec
func msigrestore(sigmask sigset) {
	sigprocmask(_SIG_SETMASK, &sigmask, nil)
}

// sigsetAllExiting is used by sigblock(true) when a thread is
// exiting.
var sigsetAllExiting = func() sigset {
	res := sigset_all

	// Apply GOOS-specific overrides here, rather than in osinit,
	// because osinit may be called before sigsetAllExiting is
	// initialized (#51913).
	if GOOS == "linux" && iscgo {
		// #42494 glibc and musl reserve some signals for
		// internal use and require they not be blocked by
		// the rest of a normal C runtime. When the Go runtime
		// temporarily blocks and unblocks signals, the blocked
		// interval of time is generally very short. As such,
		// these expectations of *libc code are mostly met by
		// the combined go+cgo system of threads. However,
		// when go causes a thread to exit, via a return from
		// mstart(), the combined runtime can deadlock if
		// these signals are blocked. Thus, don't block these
		// signals when exiting threads.
		// - glibc: SIGCANCEL (32), SIGSETXID (33)
		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
		sigdelset(&res, 32)
		sigdelset(&res, 33)
		sigdelset(&res, 34)
	}

	return res
}()

// sigblock blocks signals in the current thread's signal mask.
// This is used to block signals while setting up and tearing down g
// when a non-Go thread calls a Go function. When a thread is exiting
// we use the sigsetAllExiting value, otherwise the OS specific
// definition of sigset_all is used.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func sigblock(exiting bool) {
	if exiting {
		sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
		return
	}
	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
}

// unblocksig removes sig from the current thread's signal mask.
// This is nosplit and nowritebarrierrec because it is called from
// dieFromSignal, which can be called by sigfwdgo while running in the
// signal handler, on the signal stack, with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func unblocksig(sig uint32) {
	var set sigset
	sigaddset(&set, int(sig))
	sigprocmask(_SIG_UNBLOCK, &set, nil)
}

// minitSignals is called when initializing a new m to set the
// thread's alternate signal stack and signal mask.
func minitSignals() {
	minitSignalStack()
	minitSignalMask()
}

// minitSignalStack is called when initializing a new m to set the
// alternate signal stack. If the alternate signal stack is not set
// for the thread (the normal case) then set the alternate signal
// stack to the gsignal stack. If the alternate signal stack is set
// for the thread (the case when a non-Go thread sets the alternate
// signal stack and then calls a Go function) then set the gsignal
// stack to the alternate signal stack. We also set the alternate
// signal stack to the gsignal stack if cgo is not used (regardless
// of whether it is already set). Record which choice was made in
// newSigstack, so that it can be undone in unminit.
func minitSignalStack() {
	mp := getg().m
	var st stackt
	sigaltstack(nil, &st)
	if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
		signalstack(&mp.gsignal.stack)
		mp.newSigstack = true
	} else {
		setGsignalStack(&st, &mp.goSigStack)
		mp.newSigstack = false
	}
}

// minitSignalMask is called when initializing a new m to set the
// thread's signal mask. When this is called all signals have been
// blocked for the thread. This starts with m.sigmask, which was set
// either from initSigmask for a newly created thread or by calling
// sigsave if this is a non-Go thread calling a Go function. It
// removes all essential signals from the mask, thus causing those
// signals to not be blocked. Then it sets the thread's signal mask.
// After this is called the thread can receive signals.
func minitSignalMask() {
	nmask := getg().m.sigmask
	for i := range sigtable {
		if !blockableSig(uint32(i)) {
			sigdelset(&nmask, i)
		}
	}
	sigprocmask(_SIG_SETMASK, &nmask, nil)
}

// unminitSignals is called from dropm, via unminit, to undo the
// effect of calling minit on a non-Go thread.
//
//go:nosplit
func unminitSignals() {
	if getg().m.newSigstack {
		st := stackt{ss_flags: _SS_DISABLE}
		sigaltstack(&st, nil)
	} else {
		// We got the signal stack from someone else. Restore
		// the Go-allocated stack in case this M gets reused
		// for another thread (e.g., it's an extram). Also, on
		// Android, libc allocates a signal stack for all
		// threads, so it's important to restore the Go stack
		// even on Go-created threads so we can free it.
		restoreGsignalStack(&getg().m.goSigStack)
	}
}

// blockableSig reports whether sig may be blocked by the signal mask.
// We never want to block the signals marked _SigUnblock;
// these are the synchronous signals that turn into a Go panic.
// We never want to block the preemption signal if it is being used.
// In a Go program--not a c-archive/c-shared--we never want to block
// the signals marked _SigKill or _SigThrow, as otherwise it's possible
// for all running threads to block them and delay their delivery until
// we start a new thread. When linked into a C program we let the C code
// decide on the disposition of those signals.
func blockableSig(sig uint32) bool {
	flags := sigtable[sig].flags
	if flags&_SigUnblock != 0 {
		return false
	}
	if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
		return false
	}
	if isarchive || islibrary {
		return true
	}
	return flags&(_SigKill|_SigThrow) == 0
}

// gsignalStack saves the fields of the gsignal stack changed by
// setGsignalStack.
type gsignalStack struct {
	stack       stack
	stackguard0 uintptr
	stackguard1 uintptr
	stktopsp    uintptr
}

// setGsignalStack sets the gsignal stack of the current m to an
// alternate signal stack returned from the sigaltstack system call.
// It saves the old values in *old for use by restoreGsignalStack.
// This is used when handling a signal if non-Go code has set the
// alternate signal stack.
//
//go:nosplit
//go:nowritebarrierrec
func setGsignalStack(st *stackt, old *gsignalStack) {
	gp := getg()
	if old != nil {
		old.stack = gp.m.gsignal.stack
		old.stackguard0 = gp.m.gsignal.stackguard0
		old.stackguard1 = gp.m.gsignal.stackguard1
		old.stktopsp = gp.m.gsignal.stktopsp
	}
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	gp.m.gsignal.stack.lo = stsp
	gp.m.gsignal.stack.hi = stsp + st.ss_size
	gp.m.gsignal.stackguard0 = stsp + stackGuard
	gp.m.gsignal.stackguard1 = stsp + stackGuard
}

// restoreGsignalStack restores the gsignal stack to the value it had
// before entering the signal handler.
//
//go:nosplit
//go:nowritebarrierrec
func restoreGsignalStack(st *gsignalStack) {
	gp := getg().m.gsignal
	gp.stack = st.stack
	gp.stackguard0 = st.stackguard0
	gp.stackguard1 = st.stackguard1
	gp.stktopsp = st.stktopsp
}

// signalstack sets the current thread's alternate signal stack to s.
//
//go:nosplit
func signalstack(s *stack) {
	st := stackt{ss_size: s.hi - s.lo}
	setSignalstackSP(&st, s.lo)
	sigaltstack(&st, nil)
}

// setsigsegv is used on darwin/arm64 to fake a segmentation fault.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:nosplit
//go:linkname setsigsegv
func setsigsegv(pc uintptr) {
	gp := getg()
	gp.sig = _SIGSEGV
	gp.sigpc = pc
	gp.sigcode0 = _SEGV_MAPERR
	gp.sigcode1 = 0 // TODO: emulate si_addr
}