Source file src/runtime/trace2buf.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.exectracer2

// Trace buffer management.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Maximum number of bytes required to encode uint64 in base-128.
const traceBytesPerNumber = 10

// traceWriter is the interface for writing all trace data.
//
// This type is passed around as a value, and all of its methods return
// a new traceWriter. This allows for chaining together calls in a fluent-style
// API. This is partly stylistic, and very slightly for performance, since
// the compiler can destructure this value and pass it between calls as
// just regular arguments. However, this style is not load-bearing, and
// we can change it if it's deemed too error-prone.
type traceWriter struct {
	traceLocker
	*traceBuf
}
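
// For illustration only (not part of the runtime API): a rough sketch of the
// fluent style described above, assuming a traceLocker tl obtained elsewhere
// (e.g. via traceAcquire) and a hypothetical event ev with two arguments.
//
//	w := tl.writer()
//	w, _ = w.ensure(1 + 2*traceBytesPerNumber)
//	w.byte(byte(ev)) // ev is a hypothetical event type
//	w.varint(arg0)
//	w.varint(arg1)
//	w.end()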

// writer returns a traceWriter that writes into the current M's stream.
func (tl traceLocker) writer() traceWriter {
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2]}
}

// unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
//
// It should only be used in contexts where either:
// - Another traceLocker is held.
// - trace.gen is prevented from advancing.
//
// buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
	return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}
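
// For illustration only: a sketch of using unsafeTraceWriter in a context
// where gen is known not to advance. Because the resulting writer has a nil
// mp, end() is a no-op, so the buffer is stored back explicitly. The mp and
// event details here are assumptions, not lifted from the tracer itself.
//
//	w := unsafeTraceWriter(gen, mp.trace.buf[gen%2])
//	w, _ = w.ensure(1 + traceBytesPerNumber)
//	w.byte(byte(ev)) // hypothetical event type
//	w.varint(arg)
//	mp.trace.buf[gen%2] = w.traceBuf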

// end writes the buffer back into the m.
func (w traceWriter) end() {
	if w.mp == nil {
		// Tolerate a nil mp. It makes code that creates traceWriters directly
		// less error-prone.
		return
	}
	w.mp.trace.buf[w.gen%2] = w.traceBuf
}

// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
	refill := w.traceBuf == nil || !w.available(maxSize)
	if refill {
		w = w.refill()
	}
	return w, refill
}
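
// For illustration only: callers typically size maxSize conservatively as one
// byte for the event type plus traceBytesPerNumber bytes per argument, since
// each varint occupies at most traceBytesPerNumber bytes. For a hypothetical
// event with three arguments:
//
//	w, _ = w.ensure(1 + 3*traceBytesPerNumber)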

// flush puts w.traceBuf on the queue of full buffers.
func (w traceWriter) flush() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		unlock(&trace.lock)
	})
	w.traceBuf = nil
	return w
}

// refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
func (w traceWriter) refill() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		if trace.empty != nil {
			w.traceBuf = trace.empty
			trace.empty = w.traceBuf.link
			unlock(&trace.lock)
		} else {
			unlock(&trace.lock)
			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
			if w.traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})
	// Initialize the buffer.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	w.traceBuf.pos = 0

	// Tolerate a nil mp.
	mID := ^uint64(0)
	if w.mp != nil {
		mID = uint64(w.mp.procid)
	}

	// Write the buffer's header.
	w.byte(byte(traceEvEventBatch))
	w.varint(uint64(w.gen))
	w.varint(uint64(mID))
	w.varint(uint64(ts))
	w.traceBuf.lenPos = w.varintReserve()
	return w
}
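
// For illustration, the batch header written by refill has this layout (each
// field after the first is a varint of at most traceBytesPerNumber bytes):
//
//	byte   traceEvEventBatch
//	varint generation (gen)
//	varint M (thread) ID (mID)
//	varint base timestamp (ts)
//	varint batch length, reserved here and filled in later by traceBufFlush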

// traceBufQueue is a FIFO of traceBufs.
type traceBufQueue struct {
	head, tail *traceBuf
}

// push queues buf into the queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf) {
	buf.link = nil
	if q.head == nil {
		q.head = buf
	} else {
		q.tail.link = buf
	}
	q.tail = buf
}

// pop dequeues from the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf {
	buf := q.head
	if buf == nil {
		return nil
	}
	q.head = buf.link
	if q.head == nil {
		q.tail = nil
	}
	buf.link = nil
	return buf
}

// empty reports whether q has no buffers queued.
func (q *traceBufQueue) empty() bool {
	return q.head == nil
}
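
// For illustration only: the queue is FIFO, so buffers reach the trace reader
// in the order they were flushed. For two buffers a and b:
//
//	var q traceBufQueue
//	q.push(a)
//	q.push(b)
//	q.pop() // returns a
//	q.pop() // returns b
//	q.pop() // returns nil; q.empty() reports true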

// traceBufHeader is the header of a per-M tracing buffer.
type traceBufHeader struct {
	link     *traceBuf // in trace.empty/full
	lastTime traceTime // when we wrote the last event
	pos      int       // next write offset in arr
	lenPos   int       // position of batch length value
}

// traceBuf is a per-M tracing buffer.
//
// TODO(mknyszek): Rename traceBuf to traceBatch, since they map 1:1 with event batches.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace event data
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	arr := buf.arr[pos : pos+traceBytesPerNumber]
	for i := range arr {
		if v < 0x80 {
			pos += i + 1
			arr[i] = byte(v)
			break
		}
		arr[i] = 0x80 | byte(v)
		v >>= 7
	}
	buf.pos = pos
}
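
// For illustration: values below 0x80 encode in a single byte; larger values
// take one byte per 7 bits, least-significant group first, with the high bit
// marking continuation. For example:
//
//	buf.varint(1)   // appends 0x01
//	buf.varint(300) // appends 0xac 0x02 (300 = 0b1_0010_1100)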

// varintReserve reserves enough space in buf to hold any varint.
//
// Space reserved this way can be filled in with the varintAt method.
func (buf *traceBuf) varintReserve() int {
	p := buf.pos
	buf.pos += traceBytesPerNumber
	return p
}
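
// For illustration only: the typical reserve-then-fill sequence, mirroring how
// refill and traceBufFlush handle the batch length.
//
//	lenPos := buf.varintReserve()
//	// ... write the rest of the batch ...
//	buf.varintAt(lenPos, uint64(buf.pos-(lenPos+traceBytesPerNumber)))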

// stringData appends s's data directly to buf.
func (buf *traceBuf) stringData(s string) {
	buf.pos += copy(buf.arr[buf.pos:], s)
}
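
// For illustration only: string data is typically preceded by its length so a
// reader knows how many bytes to consume.
//
//	buf.varint(uint64(len(s)))
//	buf.stringData(s)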

// available reports whether at least size bytes can be written to buf without
// flushing.
func (buf *traceBuf) available(size int) bool {
	return len(buf.arr)-buf.pos >= size
}

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the caller
// needs to reserve space for a varint but can't populate it until later.
// Use varintReserve to reserve this space.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
	if v != 0 {
		throw("v could not fit in traceBytesPerNumber")
	}
}
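
// For illustration: unlike varint, varintAt always emits exactly
// traceBytesPerNumber bytes, padding with continuation bytes whose payload
// bits are zero. For example, varintAt(pos, 5) writes
//
//	0x85 0x80 0x80 0x80 0x80 0x80 0x80 0x80 0x80 0x00
//
// which decodes to the same value as the single byte 0x05.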

// traceBufFlush flushes a trace buffer.
//
// Must run on the system stack because trace.lock must be held.
//
//go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Write out the non-header length of the batch in the header.
	//
	// Note: the length of the header is not included to make it easier
	// to calculate this value when deserializing and reserializing the
	// trace. Varints can have additional padding of zero bits that is
	// quite difficult to preserve, and if we include the header we
	// force serializers to do more work. Nothing else actually needs
	// padding.
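	//
	// For example (illustrative numbers): if the reserved length field starts
	// at lenPos = 14 and the batch ends at pos = 200, the value written is
	// 200 - (14 + traceBytesPerNumber) = 176, the number of bytes that follow
	// the reserved length field.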
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Notify the scheduler that there's work available and that the trace
	// reader should be scheduled.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}
