Source file src/runtime/trace2map.go

     1  // Copyright 2023 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.exectracer2
     6  
     7  // Simple hash table for tracing. Provides a mapping
     8  // between variable-length data and a unique ID. Subsequent
     9  // puts of the same data will return the same ID.
    10  //
    11  // Uses a region-based allocation scheme and assumes that the
    12  // table doesn't ever grow very big.
    13  //
    14  // This is definitely not a general-purpose hash table! It avoids
    15  // doing any high-level Go operations so it's safe to use even in
    16  // sensitive contexts.
    17  
    18  package runtime
    19  
    20  import (
    21  	"runtime/internal/atomic"
    22  	"runtime/internal/sys"
    23  	"unsafe"
    24  )
    25  
// traceMap is an append-only hash table mapping variable-length byte
// data to small unique IDs. Lookups are lock-free; insertions take lock
// on the system stack. Entries live until reset() drops the region.
type traceMap struct {
	lock mutex // Must be acquired on the system stack
	seq  atomic.Uint64 // source of IDs; incremented on insert, so 0 is never a valid ID
	mem  traceRegionAlloc // region allocator backing every node and its data bytes
	tab  [1 << 13]atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
}
    32  
// traceMapNode is one entry in a traceMap bucket chain. Nodes are
// region-allocated (not in the Go heap) and immutable once published
// to the table, so readers need no synchronization beyond the atomic
// bucket/link loads.
type traceMapNode struct {
	_    sys.NotInHeap
	link atomic.UnsafePointer // *traceMapNode (can't use generics because it's notinheap)
	hash uintptr // memhash of data; cached to make bucket-chain comparisons cheap
	id   uint64  // unique ID assigned at insertion time
	data []byte  // copy of the inserted bytes, backed by region memory
}
    40  
    41  // next is a type-safe wrapper around link.
    42  func (n *traceMapNode) next() *traceMapNode {
    43  	return (*traceMapNode)(n.link.Load())
    44  }
    45  
    46  // stealID steals an ID from the table, ensuring that it will not
    47  // appear in the table anymore.
    48  func (tab *traceMap) stealID() uint64 {
    49  	return tab.seq.Add(1)
    50  }
    51  
// put inserts the data into the table.
//
// It's always safe to noescape data because its bytes are always copied.
//
// Returns a unique ID for the data and whether this is the first time
// the data has been added to the map.
func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool) {
	// Empty data maps to the reserved ID 0 and is never stored.
	if size == 0 {
		return 0, false
	}
	hash := memhash(data, 0, size)
	// First, search the hashtable w/o the mutex.
	// find returns 0 for "absent"; real IDs start at 1 (seq.Add below).
	if id := tab.find(data, size, hash); id != 0 {
		return id, false
	}
	// Now, double check under the mutex.
	// Switch to the system stack so we can acquire tab.lock
	var id uint64
	var added bool
	systemstack(func() {
		lock(&tab.lock)
		// Re-check: another goroutine may have inserted the same data
		// between the lock-free probe above and acquiring the lock.
		if id = tab.find(data, size, hash); id != 0 {
			unlock(&tab.lock)
			return
		}
		// Create new record.
		id = tab.seq.Add(1)
		vd := tab.newTraceMapNode(data, size, hash, id)

		// Insert it into the table.
		//
		// Update the link first, since the node isn't published yet.
		// Then, store the node in the table as the new first node
		// for the bucket.
		//
		// StoreNoWB skips the write barrier; the stored pointers are
		// notinheap (see tab.tab's comment), and per reset()'s comment
		// a write barrier could re-enter the tracer and take this lock.
		part := int(hash % uintptr(len(tab.tab)))
		vd.link.StoreNoWB(tab.tab[part].Load())
		tab.tab[part].StoreNoWB(unsafe.Pointer(vd))
		unlock(&tab.lock)

		added = true
	})
	return id, added
}
    95  
    96  // find looks up data in the table, assuming hash is a hash of data.
    97  //
    98  // Returns 0 if the data is not found, and the unique ID for it if it is.
    99  func (tab *traceMap) find(data unsafe.Pointer, size, hash uintptr) uint64 {
   100  	part := int(hash % uintptr(len(tab.tab)))
   101  	for vd := tab.bucket(part); vd != nil; vd = vd.next() {
   102  		// Synchronization not necessary. Once published to the table, these
   103  		// values are immutable.
   104  		if vd.hash == hash && uintptr(len(vd.data)) == size {
   105  			if memequal(unsafe.Pointer(&vd.data[0]), data, size) {
   106  				return vd.id
   107  			}
   108  		}
   109  	}
   110  	return 0
   111  }
   112  
   113  // bucket is a type-safe wrapper for looking up a value in tab.tab.
   114  func (tab *traceMap) bucket(part int) *traceMapNode {
   115  	return (*traceMapNode)(tab.tab[part].Load())
   116  }
   117  
// newTraceMapNode allocates a node from the table's region allocator,
// copies data into region memory, and fills in the metadata. The
// returned node is not yet linked into the table; the caller publishes
// it (see put).
func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size, hash uintptr, id uint64) *traceMapNode {
	// Create data array.
	// The bytes are copied into region memory so the caller's buffer
	// need not outlive this call.
	sl := notInHeapSlice{
		array: tab.mem.alloc(size),
		len:   int(size),
		cap:   int(size),
	}
	memmove(unsafe.Pointer(sl.array), data, size)

	// Create metadata structure.
	meta := (*traceMapNode)(unsafe.Pointer(tab.mem.alloc(unsafe.Sizeof(traceMapNode{}))))
	// Install the slice header through a notInHeapSlice overlay rather
	// than a direct []byte assignment — presumably to avoid a write
	// barrier on this notinheap memory (consistent with the StoreNoWB
	// usage in put and the comment in reset).
	*(*notInHeapSlice)(unsafe.Pointer(&meta.data)) = sl
	meta.id = id
	meta.hash = hash
	return meta
}
   134  
// reset drops all allocated memory from the table and resets it.
//
// tab.lock must be held. Must run on the system stack because of this.
//
//go:systemstack
func (tab *traceMap) reset() {
	assertLockHeld(&tab.lock)
	// Release the region backing every node and its data; all
	// previously returned node pointers are invalid after this.
	tab.mem.drop()
	// Restart ID assignment so the next put hands out 1 again.
	tab.seq.Store(0)
	// Clear table without write barriers. The table consists entirely
	// of notinheap pointers, so this is fine.
	//
	// Write barriers may theoretically call into the tracer and acquire
	// the lock again, and this lock ordering is expressed in the static
	// lock ranking checker.
	memclrNoHeapPointers(unsafe.Pointer(&tab.tab), unsafe.Sizeof(tab.tab))
}
   152  

View as plain text