Source file
src/runtime/stack.go

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// stackSystem is a number of additional bytes to add to each stack
	// below the usual guard area, for OS-specific purposes such as
	// signal handling. It is nonzero only on Windows, Plan 9, and
	// iOS/arm64, which run such code on the goroutine stack.
	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// fixedStack is the minimum stack size to allocate:
	// stackMin+stackSystem rounded up to the next power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of
	// NOSPLIT functions can use.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the bottom of
	// the stack: enough room for a stackNosplit chain of NOSPLIT calls,
	// one StackSmall frame, and stackSystem bytes for the OS.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
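
// As a worked example of the rounding above: on a platform where stackSystem
// is 0, fixedStack0 is 2048, already a power of two, so the OR/shift cascade
// leaves fixedStack at 2048. On windows/amd64, fixedStack0 is
// 2048 + 512*8 = 6144; subtracting one and smearing the top bit downward
// yields 8191, so fixedStack becomes 8192.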

const (
	// stackDebug selects how chatty the stack code is:
	// 0: silent; 1: per-stack operations; 2: per-frame operations;
	// 3: per-word updates; 4: per-word reads.
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// debugCheckBP checks saved frame pointers while adjusting stacks.
	debugCheckBP = false
)

var (
	// stackPoisonCopy, when nonzero, makes copystack fill the new stack
	// with 0xfd before copying and the old stack with 0xfc afterwards,
	// to flush out missed pointer adjustments.
	stackPoisonCopy = 0
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored in g.stackguard0 to force the next
	// stack check in a function prologue to fail. They are all larger
	// than any real stack pointer.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)
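
// On a 64-bit platform these sentinels evaluate to values such as
// stackPreempt == 0xfffffffffffffade. Because they are far above any real
// stack bound, the SP-versus-stackguard0 comparison emitted in a function
// prologue always fails, diverting the goroutine into morestack and from
// there into newstack, which inspects stackguard0 to tell a genuine stack
// overflow from a preemption or fork request.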

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size:
//
//	order = log_2(size/fixedStack)
//
// There is a free list of spans for each order. The padding keeps each
// entry on its own cache line.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans, with free lists indexed by
// log_2(s.npages).
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// stackpoolalloc allocates a stack from the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// No free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// All stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// stackpoolfree adds stack x to the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If the GC is active, delay the free until GC is done:
		// the GC may have scanned a sudog whose elem pointer still
		// refers into a stack in this span, and marking that pointer
		// would fail if the span had already been returned to the
		// heap (it would look like a pointer into a free span).
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill and stackcacherelease implement a global pool of
// stack segments. The pool bounds how large the per-P stack caches can
// grow.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global pool.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

// stackcacherelease returns stacks from the per-P cache to the global
// pool until the cache is back down to half of its capacity.
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

// stackcache_clear releases all cached stacks back to the global pool.
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackalloc(n uint32) stack {
	// Stackalloc must be called on the scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch the stack cache during GC as it's
			// flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
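
// For illustration: with fixedStack == 2048, a request for an 8 KiB stack
// computes order 2 (2048<<2 == 8192) and is served from the per-P cache or
// the order-2 stackpool free list, while a request at or above
// fixedStack<<_NumStackOrders (or _StackCacheSize) skips the pools entirely
// and is backed by a dedicated span from stackLarge or the heap.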

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n)
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span to
			// the heap safely (see the analogous case in
			// stackpoolfree). Add it to the large stack cache instead;
			// freeStackSpans returns it to the heap after GC.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout, as the adjust* functions below see it: the caller's
// arguments sit at frame.argp and above; between frame.argp and frame.varp
// lie the return address (on x86) and, when frame pointers are in use, the
// saved frame pointer; locals and space for outgoing arguments occupy
// [frame.sp, frame.varp). varp > sp means the function has a frame;
// varp == sp means a frameless function.

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// bitvector records, one bit per word, which words of a region hold
// pointers. The data comes from the compiler's stack maps.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// adjustpointers adjusts every pointer in the region starting at scanp
// that points into the old stack, using the pointer bitmap bv.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)

	// If this frame might contain channel receive slots (it lies below
	// adjinfo.sghi, the highest sudog.elem on the stack), a channel
	// operation may concurrently write a received value into one of
	// those slots. Use CAS there so an adjustment never overwrites a
	// concurrent write.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// adjustframe adjusts, in place, every pointer in the frame described by
// frame that points into the old stack.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher
			// frame on the Go stack (or be unset).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// The word below varp holds the saved frame pointer, which may
		// point into the old stack; adjust it.
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or
	// not).
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp
			if off >= 0 {
				base = frame.argp
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// The object's pointer data is stored as a GC program;
				// expand it into a temporary bitmap.
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the saved frame pointer sits one word below the
		// saved SP, outside every frame that the unwinder adjusts, so
		// copy and adjust that word explicitly here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this manually since we are not doing a full GC.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// The data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// findsghi returns the highest address (element end) of any sudog.elem
// on gp's waiting list that points into stk.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's stack
// they refer to while synchronizing with concurrent channel operations.
// It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other sort of lock.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks, to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// Account for the change in total scannable stack.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks;
	// these must be adjusted before walking the new stack below.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
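
// For illustration, with hypothetical bounds: growing a stack whose old
// bounds are [0xc000100000, 0xc000102000) into a new allocation at
// [0xc000200000, 0xc000204000) gives delta = new.hi - old.hi = 0x102000.
// Every pointer the adjust* helpers find pointing into the old range is
// shifted up by exactly that delta, so the relative layout of the copied
// frames is preserved.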

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// newstack is called from runtime·morestack when more stack is needed.
// It allocates a larger stack and relocates the goroutine to it.
// Stack growth is multiplicative, for constant amortized cost.
//
// g.atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case:
	// that status change is itself a small preemption, and if the GC were
	// in some way dependent on this goroutine (for example, it needed a
	// lock held by the goroutine), it would turn into a deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp.preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so do the
			// pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing
	// the copy since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
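
// Because newstack doubles the stack on each growth (subject to the
// funcMaxSPDelta check), a goroutine that keeps recursing sees its stack
// grow geometrically, e.g. 8 KiB -> 16 KiB -> 32 KiB -> ..., which keeps
// the total copying cost proportional to the final stack size. Growth ends
// with a "stack overflow" throw once newsize exceeds maxstacksize (settable
// via runtime/debug.SetMaxStack) or maxstackceiling.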

func nilfunc() {
	*(*uint8)(nil) = 0
}

// gostartcallfn adjusts Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in use
// by a running goroutine only if that goroutine is getg().
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user stack and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
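
// For example, a goroutine holding a 64 KiB stack is shrunk to 32 KiB only
// when the portion in use (from gp.sched.sp up to stack.hi, plus the
// stackNosplit reserve) is below a quarter of 64 KiB; otherwise the stack
// is left alone until a later shrink attempt finds it underused.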

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object
// in a stack frame. It describes where in the frame the object lives and
// where its pointer data can be found.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC program is used
	gcdataoff uint32 // offset to gc data from moduledata.rodata
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

// gcdata returns the pointer map or GC program describing the object.
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}

func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack

// gcComputeStartingStackSize recomputes startingStackSize from the per-P
// stack-scan statistics gathered during the last GC cycle, so new
// goroutines start near the recently observed average stack size.
func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for the next GC cycle.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	// Adding stackGuard means a goroutine that uses the average amount
	// of stack will not immediately need to grow.
	avg := scannedStackSize/scannedStacks + stackGuard
	// Clamp the result to [fixedStack, maxstacksize].
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	startingStackSize = uint32(round2(int32(avg)))
}
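
// For example, if the previous cycle scanned 10 MiB of stack across 2000
// goroutines, the average is roughly 5 KiB; adding stackGuard and rounding
// up with round2 makes startingStackSize 8 KiB for goroutines created after
// this point (assuming GODEBUG=adaptivestackstart is enabled, since the
// function returns early otherwise).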