Source file src/runtime/mpagecache.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
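// Since cache is a uint64, this works out to 8 bits per byte times 8 bytes,
// i.e. 64 pages per pageCache.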

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
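
// For example, a cache value with only bits 0 and 2 set (binary 101) means
// that the pages at base and base+2*pageSize are free; every other page in
// the 64-page chunk is unavailable to allocate from this cache.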

// empty reports whether the page cache has no free pages.
func (c *pageCache) empty() bool {
	return c.cache == 0
}

// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
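	// The single-page case is handled inline: take the lowest free page,
	// i.e. the lowest set bit. For example, if only bit 3 of cache is set,
	// TrailingZeros64 returns 3 and the page at base+3*pageSize is returned.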
	if npages == 1 {
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // clear the free bit to mark the page in-use
		c.scav &^= 1 << i  // clear bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}

// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		return 0, 0
	}
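	// Build a mask covering the npages free pages found at bit i. For
	// example, with npages == 3 and i == 5, mask is 0b111<<5, covering
	// pages 5 through 7 of the chunk.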
	mask := ((uint64(1) << npages) - 1) << i
	scav := sys.OnesCount64(c.scav & mask)
	c.cache &^= mask // clear free bits to mark pages in-use
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}

// flush empties out unallocated free pages in the given cache
// into p. Then, it clears the cache, such that empty returns
// true.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
	assertLockHeld(p.mheapLock)

	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			p.chunkOf(ci).free1(pi + i)

			// Update density statistics.
			p.scav.index.free(ci, pi+i, 1)
		}
		if c.scav&(1<<i) != 0 {
			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}

	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
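	// Report the whole cached region back to the allocator's summaries. The
	// final two arguments mean "not contiguous" and "not an allocation",
	// i.e. a free, mirroring the (false, true) allocation update made in
	// allocToCache below.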
	p.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}

// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(p.searchAddr.addr()) // chunk index
	var chunk *pallocData
	if p.summary[len(p.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr address.
		chunk = p.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
		if j == ^uint(0) {
			throw("bad summary data")
		}
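		// chunk.pages64 reports the 64-page-aligned block containing page j
		// with set bits meaning "allocated", so its complement is the free
		// bitmap this cache expects (1 means free).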
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := p.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			p.searchAddr = maxSearchAddr()
			return pageCache{}
		}
		ci = chunkIndex(addr)
		chunk = p.chunkOf(ci)
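		// The cache owns the whole 64-page-aligned block containing addr,
		// so align the base down to a 64-page boundary.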
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the page bits as allocated and clear the scavenged bits, but
	// be careful to only set and clear the relevant bits.
	cpi := chunkPageIndex(c.base)
	chunk.allocPages64(cpi, c.cache)
	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)

	// Update as an allocation, but note that it's not contiguous.
	p.update(c.base, pageCachePages, false, true)

	// Update density statistics.
	p.scav.index.alloc(ci, uint(sys.OnesCount64(c.cache)))

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, p.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
}
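
// Taken together: allocToCache fills a per-P pageCache under p.mheapLock,
// alloc then hands out pages from that cache without taking any locks, and
// flush returns whatever free pages remain to the page allocator, again
// under p.mheapLock.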