Source file src/runtime/mfinal.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: finalizers and block profiling.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// finblock is an array of finalizers to be executed. finblocks are
// arranged in a linked list for the finalizer queue.
//
// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. GC currently assumes that the finalizer
// queue does not grow during marking (but it can shrink).
type finblock struct {
	_       sys.NotInHeap
	alllink *finblock
	next    *finblock
	cnt     uint32
	_       int32
	fin     [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}

var fingStatus atomic.Uint32

// finalizer goroutine status.
const (
	fingUninitialized uint32 = iota
	fingCreated       uint32 = 1 << (iota - 1)
	fingRunningFinalizer
	fingWait
	fingWake
)

var finlock mutex    // protects the following variables
var fing *g          // goroutine that runs finalizers
var finq *finblock   // list of finalizers that are to be executed
var finc *finblock   // cache of free blocks
var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte

var allfin *finblock // list of all blocks

// NOTE: Layout known to queuefinalizer.
type finalizer struct {
	fn   *funcval       // function to call (may be a heap pointer)
	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
	nret uintptr        // bytes of return values from fn
	fint *_type         // type of first argument of fn
	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
}

var finalizer1 = [...]byte{
	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
	// Each byte describes 8 words.
	// Need 8 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	// aka
	//
	//	ptr ptr INT ptr ptr ptr ptr INT
	//	ptr ptr ptr ptr INT ptr ptr ptr
	//	ptr INT ptr ptr ptr ptr INT ptr
	//	ptr ptr ptr INT ptr ptr ptr ptr
	//	INT ptr ptr ptr ptr INT ptr ptr
	//
	// Assumptions about Finalizer layout checked below.
	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
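
// The five bytes above follow from simple arithmetic: a finalizer is 5
// words and a mask byte covers 8 words, so the pattern repeats every
// lcm(5, 8) = 40 words, i.e. every 8 finalizers and 5 mask bytes. An
// illustrative sketch of the bookkeeping (not part of the runtime):
//
//	const wordsPerFinalizer = 5 // fn, arg, nret, fint, ot
//	const wordsPerMaskByte = 8  // one bit per word
//	// 8 finalizers * 5 words = 40 words = 5 mask bytes, so
//	// queuefinalizer fills finptrmask with finalizer1[i%5].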

// lockRankMayQueueFinalizer records the lock ranking effects of a
// function that may call queuefinalizer.
func lockRankMayQueueFinalizer() {
	lockWithRankMayAcquire(&finlock, getLockRank(&finlock))
}

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
	if gcphase != _GCoff {
		// Currently we assume that the finalizer queue won't
		// grow during marking so we don't have to rescan it
		// during mark termination. If we ever need to lift
		// this assumption, we can do it by adding the
		// necessary barriers to queuefinalizer (which it may
		// have automatically).
		throw("queuefinalizer during GC")
	}

	lock(&finlock)
	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		if finc == nil {
			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
			finc.alllink = allfin
			allfin = finc
			if finptrmask[0] == 0 {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.fn) != 0 ||
					unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
					unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
					throw("finalizer out of sync")
				}
				for i := range finptrmask {
					finptrmask[i] = finalizer1[i%len(finalizer1)]
				}
			}
		}
		block := finc
		finc = block.next
		block.next = finq
		finq = block
	}
	f := &finq.fin[finq.cnt]
	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	f.fn = fn
	f.nret = nret
	f.fint = fint
	f.ot = ot
	f.arg = p
	unlock(&finlock)
	fingStatus.Or(fingWake)
}

//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := uint32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
		}
	}
}

func wakefing() *g {
	if ok := fingStatus.CompareAndSwap(fingCreated|fingWait|fingWake, fingCreated); ok {
		return fing
	}
	return nil
}

func createfing() {
	// start the finalizer goroutine exactly once
	if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) {
		go runfinq()
	}
}

func finalizercommit(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	// fingStatus should be modified after fing is put into a waiting state
	// to avoid waking fing in running state, even if it is about to be parked.
	fingStatus.Or(fingWait)
	return true
}
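
// Informally, the fingStatus bits form a small state machine (a sketch
// derived from the functions above; these transitions are internal, not
// an API):
//
//	fingUninitialized --createfing-------> fingCreated
//	fingCreated       --finalizercommit--> fingCreated|fingWait (fing parks)
//	...               --queuefinalizer---> ...|fingWake         (work arrives)
//	fingCreated|fingWait|fingWake --wakefing--> fingCreated     (fing resumes)
//
// wakefing succeeds only when created, wait, and wake are all set, so a
// finalizer goroutine that is already running is never woken redundantly.
// (runfinq additionally sets fingRunningFinalizer around each finalizer call.)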

// This is the goroutine that runs all of the finalizers.
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
		argRegs  int
	)

	gp := getg()
	lock(&finlock)
	fing = gp
	unlock(&finlock)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gopark(finalizercommit, unsafe.Pointer(&finlock), waitReasonFinalizerWait, traceBlockSystemGoroutine, 1)
			continue
		}
		argRegs = intArgRegs
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := fb.cnt; i > 0; i-- {
				f := &fb.fin[i-1]

				var regs abi.RegArgs
				// The args may be passed in registers or on stack. Even for
				// the register case, we still need the spill slots.
				// TODO: revisit if we remove spill slots.
				//
				// Unfortunately because we can have an arbitrary
				// amount of returns and it would be complex to try and
				// figure out how many of those can get passed in registers,
				// just conservatively assume none of them do.
				framesz := unsafe.Sizeof((any)(nil)) + f.nret
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC,
					// all not yet finalized objects are stored in finq.
					// If we do not mark it as FlagNoScan,
					// the last finalized object is not collected.
					frame = mallocgc(framesz, nil, true)
					framecap = framesz
				}

				if f.fint == nil {
					throw("missing type in runfinq")
				}
				r := frame
				if argRegs > 0 {
					r = unsafe.Pointer(&regs.Ints)
				} else {
					// frame is effectively uninitialized
					// memory. That means we have to clear
					// it before writing to it to avoid
					// confusing the write barrier.
					*(*[2]uintptr)(frame) = [2]uintptr{}
				}
				switch f.fint.Kind_ & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(r) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(r)._type = &f.ot.Type
					(*eface)(r).data = f.arg
					if len(ityp.Methods) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						(*iface)(r).tab = assertE2I(ityp, (*eface)(r)._type)
					}
				default:
					throw("bad kind in runfinq")
				}
				fingStatus.Or(fingRunningFinalizer)
				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
				fingStatus.And(^fingRunningFinalizer)

				// Drop finalizer queue heap references
				// before hiding them from markroot.
				// This also ensures these will be
				// clear if we reuse the finalizer.
				f.fn = nil
				f.arg = nil
				f.ot = nil
				atomic.Store(&fb.cnt, i-1)
			}
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}

func isGoPointerWithoutSpan(p unsafe.Pointer) bool {
	// 0-length objects are okay.
	if p == unsafe.Pointer(&zerobase) {
		return true
	}

	// Global initializers might be linker-allocated.
	//
	//	var Foo = &Object{}
	//
	//	func main() {
	//		runtime.SetFinalizer(Foo, nil)
	//	}
	//
	// The relevant segments are: noptrdata, data, bss, noptrbss.
	// We cannot assume they are in any order or even contiguous,
	// due to external linking.
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.noptrdata <= uintptr(p) && uintptr(p) < datap.enoptrdata ||
			datap.data <= uintptr(p) && uintptr(p) < datap.edata ||
			datap.bss <= uintptr(p) && uintptr(p) < datap.ebss ||
			datap.noptrbss <= uintptr(p) && uintptr(p) < datap.enoptrbss {
			return true
		}
	}
	return false
}
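
// isGoPointerWithoutSpan is what lets SetFinalizer (below) silently accept
// pointers into linker-allocated data instead of throwing. A sketch of the
// situation, echoing the example in the comment above (illustrative only;
// Object and Foo are hypothetical):
//
//	type Object struct{ x int }
//
//	var Foo = &Object{}
//
//	func init() {
//		// Compiles and runs, but if Foo was linker-allocated rather
//		// than heap-allocated, the call returns without recording a
//		// finalizer, so the function below may never run.
//		runtime.SetFinalizer(Foo, func(*Object) {})
//	}
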
// SetFinalizer sets the finalizer associated with obj to the provided
// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// finalizer(obj) in a separate goroutine. This makes obj reachable again,
// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that obj is unreachable, it will free obj.
//
// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
// The argument obj must be a pointer to an object allocated by calling
// new, by taking the address of a composite literal, or by taking the
// address of a local variable.
// The argument finalizer must be a function that takes a single argument
// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer may abort the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer is scheduled to run at some arbitrary time after the
// program can no longer reach the object to which obj points.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes, because it may share the same address with other zero-size
// objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because finalizers may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The finalizer for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A finalizer may run as soon as an object becomes unreachable.
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. For other objects,
// pass the object to a call of the KeepAlive function to mark the
// last point in the function where the object must be reachable.
//
// For example, if p points to a struct, such as os.File, that contains
// a file descriptor d, and p has a finalizer that closes that file
// descriptor, and if the last use of p in a function is a call to
// syscall.Write(p.d, buf, size), then p may be unreachable as soon as
// the program enters syscall.Write. The finalizer may run at that moment,
// closing p.d, causing syscall.Write to fail because it is writing to
// a closed file descriptor (or, worse, to an entirely different
// file descriptor opened by a different goroutine). To avoid this problem,
// call KeepAlive(p) after the call to syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
//
// In the terminology of the Go memory model, a call
// SetFinalizer(x, f) “synchronizes before” the finalization call f(x).
// However, there is no guarantee that KeepAlive(x) or any other use of x
// “synchronizes before” f(x), so in general a finalizer should use a mutex
// or other synchronization mechanism if it needs to access mutable state in x.
// For example, consider a finalizer that inspects a mutable field in x
// that is modified from time to time in the main program before x
// becomes unreachable and the finalizer is invoked.
// The modifications in the main program and the inspection in the finalizer
// need to use appropriate synchronization, such as mutexes or atomic updates,
// to avoid read-write races.
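//
// A minimal sketch of that pattern (illustrative only; the type and field
// names are hypothetical):
//
//	type T struct {
//		mu    sync.Mutex
//		state int
//	}
//
//	x := &T{}
//	runtime.SetFinalizer(x, func(x *T) {
//		x.mu.Lock()
//		defer x.mu.Unlock()
//		_ = x.state // synchronized with writers in the main program
//	})
//	x.mu.Lock()
//	x.state++
//	x.mu.Unlock()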
func SetFinalizer(obj any, finalizer any) {
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no finalizers run
		// (and we don't have the data structures to record them).
		return
	}
	e := efaceOf(&obj)
	etyp := e._type
	if etyp == nil {
		throw("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.Kind_&kindMask != kindPtr {
		throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.Elem == nil {
		throw("nil elem type!")
	}

	if inUserArenaChunk(uintptr(e.data)) {
		// Arena-allocated objects are not eligible for finalizers.
		throw("runtime.SetFinalizer: first argument was allocated into an arena")
	}

	// find the containing object
	base, _, _ := findObject(uintptr(e.data), 0, 0)

	if base == 0 {
		if isGoPointerWithoutSpan(e.data) {
			return
		}
		throw("runtime.SetFinalizer: pointer not in allocated block")
	}

	if uintptr(e.data) != base {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from tiny alloc (see mallocgc for details).
		if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize {
			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := efaceOf(&finalizer)
	ftyp := f._type
	if ftyp == nil {
		// switch to system stack and remove finalizer
		systemstack(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.Kind_&kindMask != kindFunc {
		throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	if ft.IsVariadic() {
		throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string() + " because dotdotdot")
	}
	if ft.InCount != 1 {
		throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string())
	}
	fint := ft.InSlice()[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.Kind_&kindMask == kindPtr:
		if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.Kind_&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.Methods) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if iface := assertE2I2(ityp, *efaceOf(&obj)); iface.tab != nil {
			goto okarg
		}
	}
	throw("runtime.SetFinalizer: cannot pass " + toRType(etyp).string() + " to finalizer " + toRType(ftyp).string())
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range ft.OutSlice() {
		nret = alignUp(nret, uintptr(t.Align_)) + uintptr(t.Size_)
	}
	nret = alignUp(nret, goarch.PtrSize)

	// make sure we have a finalizer goroutine
	createfing()

	systemstack(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			throw("runtime.SetFinalizer: finalizer already set")
		}
	})
}
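
// The argument checks above mirror ordinary Go assignability. A sketch of
// the accepted and rejected finalizer shapes (illustrative; T is a
// hypothetical named type):
//
//	type T struct{ d int }
//
//	runtime.SetFinalizer(&T{}, func(*T) {})        // exact type: ok
//	runtime.SetFinalizer(&T{}, func(any) {})       // empty interface: ok
//	runtime.SetFinalizer(&T{}, func(io.Closer) {}) // ok only if *T implements io.Closer
//	runtime.SetFinalizer(&T{}, func(*T) error { return nil }) // return values are ignored
//	// runtime.SetFinalizer(&T{}, func(...any) {}) // throws: variadic ("dotdotdot")
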
// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
//
//go:noinline

// KeepAlive marks its argument as currently reachable.
// This ensures that the object is not freed, and its finalizer is not run,
// before the point in the program where KeepAlive is called.
//
// A very simplified example showing where KeepAlive is required:
//
//	type File struct { d int }
//	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
//	// ... do something if err != nil ...
//	p := &File{d}
//	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
//	var buf [10]byte
//	n, err := syscall.Read(p.d, buf[:])
//	// Ensure p is not finalized until Read returns.
//	runtime.KeepAlive(p)
//	// No more uses of p after this point.
//
// Without the KeepAlive call, the finalizer could run at the start of
// syscall.Read, closing the file descriptor before syscall.Read makes
// the actual system call.
//
// Note: KeepAlive should only be used to prevent finalizers from
// running prematurely. In particular, when used with unsafe.Pointer,
// the rules for valid uses of unsafe.Pointer still apply.
func KeepAlive(x any) {
	// Introduce a use of x that the compiler can't eliminate.
	// This makes sure x is alive on entry. We need x to be alive
	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.
	if cgoAlwaysFalse {
		println(x)
	}
}
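
// An end-to-end sketch of the machinery in this file, as it would appear in
// client code (illustrative only; finalization timing is not guaranteed,
// which is why the sketch polls):
//
//	type resource struct{ buf [64]byte } // big enough to avoid tiny-alloc batching
//
//	func demo() bool {
//		done := make(chan struct{})
//		r := &resource{}
//		runtime.SetFinalizer(r, func(*resource) { close(done) })
//		r = nil // drop the last reference
//		for i := 0; i < 10; i++ {
//			runtime.GC() // a cycle queues the finalizer once r is unreachable
//			select {
//			case <-done:
//				return true // finalizer ran
//			case <-time.After(10 * time.Millisecond):
//			}
//		}
//		return false
//	}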