Source file src/runtime/mcleanup.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"unsafe"
)

// AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
// reachable, the runtime will call cleanup(arg) in a separate goroutine.
//
// A typical use is that ptr is an object wrapping an underlying resource (e.g.,
// a File object wrapping an OS file descriptor), arg is the underlying resource
// (e.g., the OS file descriptor), and the cleanup function releases the underlying
// resource (e.g., by calling the close system call).
//
// There are few constraints on ptr. In particular, multiple cleanups may be
// attached to the same pointer, or to different pointers within the same
// allocation.
//
// If ptr is reachable from cleanup or arg, ptr will never be collected
// and the cleanup will never run. As a protection against simple cases of this,
// AddCleanup panics if arg is equal to ptr.
//
// There is no specified order in which cleanups will run.
// In particular, if several objects point to each other and all become
// unreachable at the same time, their cleanups all become eligible to run
// and can run in any order. This is true even if the objects form a cycle.
//
// Cleanups run concurrently with any user-created goroutines.
// Cleanups may also run concurrently with one another (unlike finalizers).
// If a cleanup function must run for a long time, it should create a new goroutine
// to avoid blocking the execution of other cleanups.
//
// If ptr has both a cleanup and a finalizer, the cleanup will only run once
// it has been finalized and becomes unreachable without an associated finalizer.
//
// The cleanup(arg) call is not always guaranteed to run; in particular it is not
// guaranteed to run before program exit.
//
// Cleanups are not guaranteed to run if the size of T is zero bytes, because
// it may share the same address with other zero-size objects in memory. See
// https://go.dev/ref/spec#Size_and_alignment_guarantees.
//
// It is not guaranteed that a cleanup will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// Note that because cleanups may execute arbitrarily far into the future
// after an object is no longer referenced, the runtime is allowed to perform
// a space-saving optimization that batches objects together in a single
// allocation slot. The cleanup for an unreferenced object in such an
// allocation may never run if it always exists in the same batch as a
// referenced object. Typically, this batching only happens for tiny
// (on the order of 16 bytes or less) and pointer-free objects.
//
// A cleanup may run as soon as an object becomes unreachable.
// In order to use cleanups correctly, the program must ensure that
// the object is reachable until it is safe to run its cleanup.
// Objects stored in global variables, or that can be found by tracing
// pointers from a global variable, are reachable. A function argument or
// receiver may become unreachable at the last point where the function
// mentions it. To ensure a cleanup does not get called prematurely,
// pass the object to the [KeepAlive] function after the last point
// where the object must remain reachable.
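//
// As a rough sketch of the typical resource-wrapper pattern (File, Open, and
// the syscall details are illustrative stand-ins, not part of this package):
//
//	type File struct{ fd int }
//
//	func Open(name string) (*File, error) {
//		fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &File{fd: fd}
//		// arg is the descriptor itself, not f, so f remains collectable.
//		runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
//		return f, nil
//	}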
func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
	// Explicitly force ptr to escape to the heap.
	ptr = abi.Escape(ptr)

	// The pointer to the object must be valid.
	if ptr == nil {
		panic("runtime.AddCleanup: ptr is nil")
	}
	usptr := uintptr(unsafe.Pointer(ptr))

	// Check that arg is not equal to ptr.
	if kind := abi.TypeOf(arg).Kind(); kind == abi.Pointer || kind == abi.UnsafePointer {
		if unsafe.Pointer(ptr) == *((*unsafe.Pointer)(unsafe.Pointer(&arg))) {
			panic("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
		}
	}
	if inUserArenaChunk(usptr) {
		// Arena-allocated objects are not eligible for cleanup.
		panic("runtime.AddCleanup: ptr is arena-allocated")
	}
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no cleanup will ever run
		// (and we don't have the data structures to record them).
		// Return a noop cleanup.
		return Cleanup{}
	}

	fn := func() {
		cleanup(arg)
	}
	// Closure must escape.
	fv := *(**funcval)(unsafe.Pointer(&fn))
	fv = abi.Escape(fv)

	// Find the containing object.
	base, _, _ := findObject(usptr, 0, 0)
	if base == 0 {
		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
			// Cleanup is a noop.
			return Cleanup{}
		}
		panic("runtime.AddCleanup: ptr not in allocated block")
	}

	// Ensure we have a finalizer processing goroutine running.
	createfing()

	id := addCleanup(unsafe.Pointer(ptr), fv)
	return Cleanup{
		id:  id,
		ptr: usptr,
	}
}

// Cleanup is a handle to a cleanup call for a specific object.
type Cleanup struct {
	// id is the unique identifier for the cleanup within the arena.
	id uint64
	// ptr contains the pointer to the object.
	ptr uintptr
}

// Stop cancels the cleanup call. Stop will have no effect if the cleanup call
// has already been queued for execution (because ptr became unreachable).
// To guarantee that Stop removes the cleanup function, the caller must ensure
// that the pointer that was passed to AddCleanup is reachable across the call to Stop.
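//
// A minimal sketch of stopping a cleanup on explicit close (File, Close, and
// the syscall call are illustrative stand-ins; it assumes the handle returned
// by AddCleanup was stored in f.cleanup):
//
//	func (f *File) Close() error {
//		// The descriptor is being released explicitly, so the cleanup
//		// must not run later and close it a second time.
//		f.cleanup.Stop()
//		err := syscall.Close(f.fd)
//		// Keep f reachable at least until Stop has returned.
//		runtime.KeepAlive(f)
//		return err
//	}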
func (c Cleanup) Stop() {
	if c.id == 0 {
		// id is set to zero when the cleanup is a noop.
		return
	}

	// The following block removes the Special record of type cleanup for the object c.ptr.
	span := spanOfHeap(uintptr(unsafe.Pointer(c.ptr)))
	if span == nil {
		return
	}
	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(unsafe.Pointer(c.ptr)) - span.base()

	var found *special
	lock(&span.speciallock)

	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
	if exists {
		for {
			s := *iter
			if s == nil {
				// Reached the end of the linked list. Stop searching at this point.
				break
			}
			if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
				// The special is a cleanup and contains a matching cleanup id.
				*iter = s.next
				found = s
				break
			}
			if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
				// The special is outside the region specified for that kind of
				// special. The specials are sorted by kind.
				break
			}
			// Try the next special.
			iter = &s.next
		}
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)

	if found == nil {
		return
	}
	lock(&mheap_.speciallock)
	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
	unlock(&mheap_.speciallock)
}
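For illustration, a small self-contained program (not part of the runtime; the
wrapper type, file path, and syscalls are assumptions for this sketch, valid on
Unix-like systems) shows the KeepAlive pattern the AddCleanup documentation
calls for:

package main

import (
	"fmt"
	"runtime"
	"syscall"
)

// wrapped is an illustrative resource wrapper, not part of the runtime.
type wrapped struct{ fd int }

func open(name string) (*wrapped, error) {
	fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	w := &wrapped{fd: fd}
	// The cleanup captures only the descriptor, never w itself.
	runtime.AddCleanup(w, func(fd int) { syscall.Close(fd) }, fd)
	return w, nil
}

func read(w *wrapped, buf []byte) (int, error) {
	fd := w.fd
	// After this line w is no longer mentioned. Without the KeepAlive
	// below, w could become unreachable before Read returns, and its
	// cleanup could close fd while the read is still in flight.
	n, err := syscall.Read(fd, buf)
	runtime.KeepAlive(w)
	return n, err
}

func main() {
	w, err := open("/etc/hosts") // illustrative path
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	buf := make([]byte, 64)
	n, _ := read(w, buf)
	fmt.Printf("read %d bytes\n", n)
}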