// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.

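// As an illustration of step 1 above (added for clarity; not part of the
// original design notes), rounding a small request to its size class uses
// the lookup tables generated in sizeclasses.go, roughly:
//
//	// sizeClassOf is a hypothetical helper mirroring the lookup in mallocgc.
//	func sizeClassOf(size uintptr) uint8 {
//		if size <= smallSizeMax-8 {
//			return size_to_class8[divRoundUp(size, smallSizeDiv)]
//		}
//		return size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
//	}
//
// class_to_size[sizeClassOf(size)] then gives the rounded-up object size
// that the mspan's slots were carved into.
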
// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures, the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.

package runtime

import (
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9

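	// Illustrative check of the formula above (added for clarity): on
	// linux/amd64, goarch.PtrSize/4*goos.IsWindows and goos.IsPlan9 are both
	// zero, giving 4; on windows/amd64, 4 - 8/4*1 = 2; on windows/386,
	// 4 - 4/4*1 = 3; and on plan9, 4 - 0 - 1 = 3. These match the table.
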
	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	//   Architecture   Name               Maximum Value (exclusive)
	//   ---------------------------------------------------------------------
	//   amd64          TASK_SIZE_MAX      0x007ffffffff000 (47 bit addresses)
	//   arm64          TASK_SIZE_64       0x01000000000000 (48 bit addresses)
	//   ppc64{,le}     TASK_SIZE_USER64   0x00400000000000 (46 bit addresses)
	//   mips64{,le}    TASK_SIZE64        0x00010000000000 (40 bit addresses)
	//   s390x          TASK_SIZE          1<<64 (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits in iOS <14.
	// Furthermore, only the top 4 GiB of the address space are
	// actually available to the application. In iOS >=14, more
	// of the address space is available, and the OS can now
	// provide addresses outside of those 33 bits. Pick 40 bits
	// as a reasonable balance between address space usage by the
	// page allocator, and flexibility for what mmap'd regions
	// we'll accept for the heap. We can't just move to the full
	// 48 bits because this uses too much address space for older
	// iOS versions.
	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
	// to a 48-bit address space like every other arm64 platform.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//   (1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//         Platform  Addr bits  Arena size  L1 entries   L2 entries
	//   --------------  ---------  ----------  ----------  -----------
	//         */64-bit         48       64MB            1    4M (32MB)
	//   windows/64-bit         48        4MB           64     1M (8MB)
	//        ios/arm64         33        4MB            1   2048 (8KB)
	//         */32-bit         32        4MB            1   1024 (4KB)
	//       */mips(le)         31        4MB            1    512 (2KB)

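	// Illustrative check of the relation above (added for clarity): for
	// */64-bit, a 64MB arena covers 26 address bits, the single-entry L1
	// map covers 0 bits, and the 4M-entry L2 map covers 22 bits, so
	// 26 + 0 + 22 = 48. For windows/64-bit, 4MB arenas (22 bits) with a
	// 64-entry L1 (6 bits) and a 1M-entry L2 (20 bits) likewise give 48.
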
	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows. We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	heapArenaWords = heapArenaBytes / goarch.PtrSize

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64

	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * goos.IsWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise, it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0afffffffffffff).
	// But in that case, the memory reserved in (s *pageAlloc).init
	// for chunks causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

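	// To illustrate how the constants above fit together (a sketch only;
	// the real arenaIndex function and the arenaIdx l1/l2 methods live in
	// mheap.go), looking up the metadata for a pointer p is roughly:
	//
	//	ai := arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
	//	l1 := ai >> arenaL1Shift        // index into mheap_.arenas
	//	l2 := ai & (1<<arenaL2Bits - 1) // index into mheap_.arenas[l1]
	//	ha := mheap_.arenas[l1][l2]     // *heapArena, or nil if unmapped
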
	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096

	// minHeapForMetadataHugePages sets a threshold on when certain kinds of
	// heap metadata, currently the arenas map L2 entries and page alloc bitmap
	// mappings, are allowed to be backed by huge pages. If the heap goal ever
	// exceeds this threshold, then huge pages are enabled.
	//
	// These numbers are chosen with the assumption that huge pages are on the
	// order of a few MiB in size.
	//
	// The kinds of metadata this applies to have a very low overhead compared
	// to address space used, but their constant overheads for small heaps would
	// be very high if they were backed by huge pages (e.g. a few MiB makes
	// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
	// heap). The benefit of huge pages is also not worth it for small heaps,
	// because only a very, very small part of the metadata is used for small heaps.
	//
	// N.B. If the heap goal exceeds the threshold and then shrinks to a very small
	// size again, huge pages will still be enabled for this mapping. The reason is
	// that there's no point in disabling them unless we're also returning the
	// physical memory for these metadata mappings back to the OS. That would be
	// quite complex to do in general, as the heap is likely fragmented after a
	// reduction in heap size.
	minHeapForMetadataHugePages = 1 << 30
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page, whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapWords not a power of 2")
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
388 throw("failed to get system page size") 389 } 390 if physPageSize > maxPhysPageSize { 391 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 392 throw("bad system page size") 393 } 394 if physPageSize < minPhysPageSize { 395 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 396 throw("bad system page size") 397 } 398 if physPageSize&(physPageSize-1) != 0 { 399 print("system page size (", physPageSize, ") must be a power of 2\n") 400 throw("bad system page size") 401 } 402 if physHugePageSize&(physHugePageSize-1) != 0 { 403 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 404 throw("bad system huge page size") 405 } 406 if physHugePageSize > maxPhysHugePageSize { 407 // physHugePageSize is greater than the maximum supported huge page size. 408 // Don't throw here, like in the other cases, since a system configured 409 // in this way isn't wrong, we just don't have the code to support them. 410 // Instead, silently set the huge page size to zero. 411 physHugePageSize = 0 412 } 413 if physHugePageSize != 0 { 414 // Since physHugePageSize is a power of 2, it suffices to increase 415 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 416 for 1<<physHugePageShift != physHugePageSize { 417 physHugePageShift++ 418 } 419 } 420 if pagesPerArena%pagesPerSpanRoot != 0 { 421 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 422 throw("bad pagesPerSpanRoot") 423 } 424 if pagesPerArena%pagesPerReclaimerChunk != 0 { 425 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 426 throw("bad pagesPerReclaimerChunk") 427 } 428 if goexperiment.AllocHeaders { 429 // Check that the minimum size (exclusive) for a malloc header is also 430 // a size class boundary. This is important to making sure checks align 431 // across different parts of the runtime. 432 minSizeForMallocHeaderIsSizeClass := false 433 for i := 0; i < len(class_to_size); i++ { 434 if minSizeForMallocHeader == uintptr(class_to_size[i]) { 435 minSizeForMallocHeaderIsSizeClass = true 436 break 437 } 438 } 439 if !minSizeForMallocHeaderIsSizeClass { 440 throw("min size of malloc header is not a size class boundary") 441 } 442 // Check that the pointer bitmap for all small sizes without a malloc header 443 // fits in a word. 444 if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize { 445 throw("max pointer/scan bitmap size for headerless objects is too large") 446 } 447 } 448 449 if minTagBits > taggedPointerBits { 450 throw("taggedPointerbits too small") 451 } 452 453 // Initialize the heap. 454 mheap_.init() 455 mcache0 = allocmcache() 456 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas) 457 lockInit(&profInsertLock, lockRankProfInsert) 458 lockInit(&profBlockLock, lockRankProfBlock) 459 lockInit(&profMemActiveLock, lockRankProfMemActive) 460 for i := range profMemFutureLock { 461 lockInit(&profMemFutureLock[i], lockRankProfMemFuture) 462 } 463 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc) 464 465 // Create initial arena growth hints. 466 if goarch.PtrSize == 8 { 467 // On a 64-bit machine, we pick the following hints 468 // because: 469 // 470 // 1. Starting from the middle of the address space 471 // makes it easier to grow out a contiguous range 472 // without running in to some other mapping. 473 // 474 // 2. 
		//    recognizable when debugging.
		//
		// 3. Stack scanning in gccgo is still conservative,
		//    so it's important that addresses be distinguishable
		//    from other data.
		//
		// Starting at 0x00c0 means that the valid memory addresses
		// will begin with 0x00c0, 0x00c1, ...
		// In little-endian, that's c0 00, c1 00, ... None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices reduce the odds of a conservative garbage collector
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On ios/arm64, the address space is even smaller.
		//
		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
		// processes.
		//
		// Space mapped for user arenas comes immediately after the range
		// originally reserved for the regular heap when race mode is not
		// enabled because user arena chunks can never be used for regular heap
		// allocations and we want to avoid fragmenting the address space.
		//
		// In race mode we have no choice but to just use the same hints because
		// the race detector requires that the heap be mapped contiguously.
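		//
		// Illustrative note (added for clarity): in the default case below,
		// hint i produces address i<<40 | 0x00c0<<32, and because hints are
		// prepended as i counts down, the regular heap hints are tried in
		// the order 0x00c000000000, 0x01c000000000, 0x02c000000000, ...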
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			// Switch to generating hints for user arenas if we've gone
			// through about half the hints. In race mode, take only about
			// a quarter; we don't have very much space to work with.
			hintList := &mheap_.arenaHints
			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
				hintList = &mheap_.userArena.arenaHints
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, *hintList = *hintList, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		//    they don't get interleaved with the heap. They're
		//    ~258MB, so this isn't too bad. (We could reserve a
		//    smaller amount of space up front if this is a
		//    problem.)
		//
		// 2. We hint the heap to start right above the end of
		//    the binary so we have the best chance of keeping it
		//    contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		//    heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint

		// Place the hint for user arenas just after the large reservation.
		//
		// While this potentially competes with the hint above, in practice we probably
		// aren't going to be getting this far anyway on 32-bit platforms.
		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		userArenaHint.addr = p
		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
	}
	// Initialize the memory limit here because the allocator is going to look at it
	// but we haven't called gcinit yet and we're definitely going to allocate memory before then.
	gcController.memoryLimit.Store(maxInt64)
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// hintList is a list of hint addresses for where to allocate new
// heap arenas. It must be non-nil.
//
// register indicates whether the heap arena should be registered
// in allArenas.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	if hintList == &h.arenaHints {
		// First, try the arena pre-reservation.
		// Newly-used mappings are considered released.
		//
		// Only do this if we're using the regular heap arena hints.
		// This behavior is only for the heap.
		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
		if v != nil {
			size = n
			goto mapped
		}
	}

	// Try to grow the heap at a hint address.
	for *hintList != nil {
		hint := *hintList
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFreeOS(v, n)
		}
		*hintList = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			//
			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
			// statistic we can comfortably account for this space in. With this structure,
			// we rely on demand paging to avoid large overheads, but tracking which memory
			// is paged in is too expensive. Trying to account for the whole region means
			// that it will appear like an enormous memory overhead in statistics, even though
			// it is not.
			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			if h.arenasHugePages {
				sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			} else {
				sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Register the arena in allArenas if requested.
		if register {
			if len(h.allArenas) == cap(h.allArenas) {
				size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
				if size == 0 {
					size = physPageSize
				}
				newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
				if newArray == nil {
					throw("out of memory allocating allArenas")
				}
				oldSlice := h.allArenas
				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
				copy(h.allArenas, oldSlice)
				// Do not free the old backing array because
				// there may be concurrent readers. Since we
				// double the array each time, this can lead
				// to at most 2x waste.
			}
			h.allArenas = h.allArenas[:len(h.allArenas)+1]
			h.allArenas[len(h.allArenas)-1] = ri
		}

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either size or size+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFreeOS(unsafe.Pointer(p), size+align)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFreeOS(p2, size)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFreeOS(unsafe.Pointer(p), pAligned-p)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFreeOS(unsafe.Pointer(end), endLen)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// enableMetadataHugePages enables huge pages for various sources of heap metadata.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// This function is idempotent.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (h *mheap) enableMetadataHugePages() {
	// Enable huge pages for page structure.
	h.pages.enableChunkHugePages()

	// Grab the lock and set arenasHugePages if it isn't already set.
	//
	// Once arenasHugePages is set, all new L2 entries will be eligible for
	// huge pages. We'll set all the old entries after we release the lock.
	lock(&h.lock)
	if h.arenasHugePages {
		unlock(&h.lock)
		return
	}
	h.arenasHugePages = true
	unlock(&h.lock)

	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
	// just iterate over the whole thing.
	for i := range h.arenas {
		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
		if l2 == nil {
			continue
		}
		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uint16(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(uintptr(result)*s.elemsize + s.base())
		}
	}
	return 0
}

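// Illustrative example of the allocCache fast path above (added for
// clarity): allocCache is a 64-bit window of the span's free/allocated
// bitmap starting at freeindex, with 1 meaning free. If s.freeindex is 64
// and s.allocCache is ...11111000 (lowest three bits clear), then
// TrailingZeros64 returns 3, the object at index 67 is handed out,
// freeindex becomes 68, and the cache is shifted right by 4 bits.
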
// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if s.allocCount != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(uintptr(freeIndex)*s.elemsize + s.base())
	s.allocCount++
	if s.allocCount > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	// It's possible for any malloc to trigger sweeping, which may in
	// turn queue finalizers. Record this dynamic lock edge.
	lockRankMayQueueFinalizer()

	userSize := size
	if asanenabled {
		// In the ASAN runtime library, malloc() allocates extra memory (the
		// redzone) around the user-requested memory region, and the redzone
		// is marked as unaddressable. We perform the same operations in Go to
		// detect overflows or underflows.
		size += computeRZlog(size)
	}

	if debug.malloc {
		if debug.sbrk != 0 {
			align := uintptr(16)
			if typ != nil {
				// TODO(austin): This should be just
				//	align = uintptr(typ.align)
				// but that's only 4 on 32-bit platforms,
				// even if there's a uint64 field in typ (see #599).
				// This causes 64-bit atomic accesses to panic.
				// Hence, we use stricter alignment that matches
				// the normal allocator better.
				if size&7 == 0 {
					align = 8
				} else if size&3 == 0 {
					align = 4
				} else if size&1 == 0 {
					align = 2
				} else {
					align = 1
				}
			}
			return persistentalloc(size, align, &memstats.other_sys)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.allocs += 1
		}
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	assistG := deductAssistCredit(size)

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := userSize
	c := getMCache(mp)
	if c == nil {
		throw("mallocgc called without a P or outside bootstrapping")
	}
	var span *mspan
	var header **_type
	var x unsafe.Pointer
	noscan := typ == nil || typ.PtrBytes == 0
	// In some cases block zeroing can profitably (for latency reduction purposes)
	// be delayed till preemption is possible; delayedZeroing tracks that state.
	delayedZeroing := false
	// Determine if it's a 'small' object that goes into a size-classed span.
	//
	// Note: This comparison looks a little strange, but it exists to smooth out
	// the crossover between the largest size class and large objects that have
	// their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
	// and maxSmallSize will be considered large, even though they might fit in
	// a size class. In practice this is completely fine, since the largest small
	// size class has a single object in it already, precisely to make the transition
	// to large objects smooth.
	if size <= maxSmallSize-mallocHeaderSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// The tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (maxTinySize)
			// is tunable. The current setting is 16 bytes, which relates to
			// 2x worst-case memory wastage (when all but one of the
			// subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed
			// explicitly, so when an object will be freed explicitly, we
			// ensure that its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows finalizers to
			// be set for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces heap size by ~20%.
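			//
			// Illustrative example (added for clarity): with 16-byte blocks,
			// two unrelated 8-byte noscan allocations share one block at
			// offsets 0 and 8, and three 5-byte allocations fit at offsets
			// 0, 5, and 10; the block is only freed once every object packed
			// into it is unreachable.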
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if goarch.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that objects whose first field is a 64-bit
				// value are aligned to 8 bytes and do not cause a fault on
				// atomic access. See issue 37262.
				// TODO(mknyszek): Remove this workaround if/when issue 36606
				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on the amount of remaining free space.
			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
				// Note: disabled when race detector is on, see comment near end of this function.
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			hasHeader := !noscan && !heapBitsInSpan(size)
			if goexperiment.AllocHeaders && hasHeader {
				size += mallocHeaderSize
			}
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
			} else {
				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(x, size)
			}
			if goexperiment.AllocHeaders && hasHeader {
				header = (**_type)(x)
				x = add(x, mallocHeaderSize)
				size -= mallocHeaderSize
			}
		}
	} else {
		shouldhelpgc = true
		// For large allocations, keep track of zeroed state so that
		// bulk zeroing can happen later in a preemptible context.
		span = c.allocLarge(size, noscan)
		span.freeindex = 1
		span.allocCount = 1
		size = span.elemsize
		x = unsafe.Pointer(span.base())
		if needzero && span.needzero != 0 {
			if noscan {
				delayedZeroing = true
			} else {
				memclrNoHeapPointers(x, size)
			}
		}
		if goexperiment.AllocHeaders && !noscan {
			header = &span.largeType
		}
	}
	if !noscan {
		if goexperiment.AllocHeaders {
			c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span)
		} else {
			var scanSize uintptr
			heapBitsSetType(uintptr(x), size, dataSize, typ)
			if dataSize > typ.Size_ {
				// Array allocation. If there are any
				// pointers, GC has to scan to the last
				// element.
				if typ.PtrBytes != 0 {
					scanSize = dataSize - typ.Size_ + typ.PtrBytes
				}
			} else {
				scanSize = typ.PtrBytes
			}
			c.scanAlloc += scanSize
		}
	}
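	// Illustrative example of the scanSize computation above (added for
	// clarity): for an array allocation of 4 elements of a 24-byte type
	// whose only pointer occupies its first 8 bytes (PtrBytes = 8),
	// dataSize = 96 and scanSize = 96 - 24 + 8 = 80: the GC must scan
	// through the last element's pointer word but can skip the trailing
	// pointer-free bytes.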

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()
	// As x and the heap bits are initialized, update
	// freeIndexForScan now so x is seen by the GC
	// (including conservative scan) as an allocated object.
	// While this pointer can't escape into user code as a
	// _live_ pointer until we return, conservative scanning
	// may find a dead pointer that happens to point into this
	// object. Delaying this update until now ensures that
	// conservative scanning considers this pointer dead until
	// this point.
	span.freeIndexForScan = span.freeindex

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x))
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	if asanenabled {
		// We should only read/write the memory with the size asked by the user.
		// The rest of the allocated memory should be poisoned, so that we can report
		// errors when accessing poisoned memory.
		// The allocated memory is larger than the requested userSize; it also
		// includes the redzone and some other padding bytes.
		rzBeg := unsafe.Add(x, userSize)
		asanpoison(rzBeg, size-userSize)
		asanunpoison(x, userSize)
	}

	// If !goexperiment.AllocHeaders, "size" doesn't include the
	// allocation header, so use span.elemsize as the "full" size
	// for various computations below.
	//
	// TODO(mknyszek): We should really count the header as part
	// of gc_sys or something, but it's risky to change the
	// accounting so much right now. Just pretend it's internal
	// fragmentation and match the GC's accounting by using the
	// whole allocation slot.
	fullSize := size
	if goexperiment.AllocHeaders {
		fullSize = span.elemsize
	}
	if rate := MemProfileRate; rate > 0 {
		// Note cache c only valid while m acquired; see #47302
		//
		// N.B. Use the full size because that matches how the GC
		// will update the mem profile on the "free" side.
		if rate != 1 && fullSize < c.nextSample {
			c.nextSample -= fullSize
		} else {
			profilealloc(mp, x, fullSize)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Pointer-free data can be zeroed late in a context where preemption can occur.
	// x will keep the memory alive.
	if delayedZeroing {
		if !noscan {
			throw("delayed zeroing on data that may contain pointers")
		}
		if goexperiment.AllocHeaders && header != nil {
			throw("unexpected malloc header in delayed zeroing of large object")
		}
		// N.B. size == fullSize always in this case.
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(x, size, typ)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(fullSize)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		//
		// N.B. Use the full size because that's how the rest
		// of the GC accounts for bytes marked.
		assistG.gcAssistBytes -= int64(fullSize - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled && noscan && dataSize < maxTinySize {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, size-dataSize)
	}

	return x
}

// deductAssistCredit reduces the current G's assist credit
// by size bytes, and assists the GC if necessary.
//
// Caller must be preemptible.
//
// Returns the G for which the assist credit was accounted.
func deductAssistCredit(size uintptr) *g {
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}
	return assistG
}

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block clear that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// got this from benchmarking. 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// may hold locks, e.g., profiling
			goschedguarded()
		}
		// clear min(avail, lump) bytes
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}

// implementation of the new builtin.
// The compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.Size_, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

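// Illustrative note (added for clarity): user code reaches these entry
// points indirectly. A heap-allocated
//
//	p := new(T)
//
// is lowered by the compiler to a call to newobject with T's *_type, and
// reflect.New arrives here via reflect_unsafe_New; both end up calling
// mallocgc with needzero = true.
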
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (with mean MemProfileRate), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.nextSample, but nextSample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if gp := getg(); gp == gp.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution with the given
	// mean. The CDF of that distribution is p = 1 - exp(-x/mean), so
	//	q = 1 - p == exp(-x/mean)
	//	log_e(q) = -x/mean
	//	x = -log_e(q) * mean
	//	x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := cheaprandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(cheaprandn(uint32(2 * rate)))
	}
	return 0
}

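// Illustrative note (added for clarity): with the default MemProfileRate of
// 512 KiB, nextSample returns an exponentially distributed byte count whose
// mean is 512 KiB, so on average one allocation per 512 KiB of allocated
// memory is sampled into the heap profile.
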
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64 KB on Windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentalloc.
//
// In general, it's better to use real types which embed
// runtime/internal/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentalloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// computeRZlog computes the size of the redzone for a given allocation size.
// Refer to the implementation in compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
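// For illustration, a few hand-worked cases of the redzone mapping above
// (the example sizes are arbitrary):
//
//	computeRZlog(40)    == 16   // 40 <= 64-16
//	computeRZlog(96)    == 32   // 96 <= 128-32
//	computeRZlog(1000)  == 128  // 1000 <= 4096-128
//	computeRZlog(1<<20) == 2048 // above (1<<16)-1024, so the default case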