Source file: src/runtime/proc.go (package runtime)
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "runtime/internal/atomic"
14 "runtime/internal/sys"
15 "unsafe"
16 )
17
18
19 var modinfo string
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
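// Goroutine scheduler (summary):
// The scheduler multiplexes ready-to-run goroutines (G's) onto worker
// threads (M's). An M must hold a P (processor) to execute Go code, and
// there are at most GOMAXPROCS P's. Each P has a local run queue, and a
// global run queue is shared under sched.lock. Idle M's park in mPark and
// are woken via wakep/startm; "spinning" M's keep looking for work
// (stealing from other P's, polling the network) before parking, which
// keeps wakeup latency low without burning CPU when there is no work.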
115 var (
116 m0 m
117 g0 g
118 mcache0 *mcache
119 raceprocctx0 uintptr
120 raceFiniLock mutex
121 )
122
123
124
125 var runtime_inittasks []*initTask
126
127
128
129
130
131 var main_init_done chan bool
132
133
134 func main_main()
135
136
137 var mainStarted bool
138
139
140 var runtimeInitTime int64
141
142
143 var initSigmask sigset
144
145
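// main is the function run by the main goroutine created during bootstrap.
// It sets the maximum stack size, starts the sysmon thread, runs runtime
// and package init tasks, then calls the user's main.main and exits.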
146 func main() {
147 mp := getg().m
148
149
150
151 mp.g0.racectx = 0
152
153
154
155
156 if goarch.PtrSize == 8 {
157 maxstacksize = 1000000000
158 } else {
159 maxstacksize = 250000000
160 }
161
162
163
164
165 maxstackceiling = 2 * maxstacksize
166
167
168 mainStarted = true
169
170 if GOARCH != "wasm" {
171 systemstack(func() {
172 newm(sysmon, nil, -1)
173 })
174 }
175
176
177
178
179
180
181
182 lockOSThread()
183
184 if mp != &m0 {
185 throw("runtime.main not on m0")
186 }
187
188
189
190 runtimeInitTime = nanotime()
191 if runtimeInitTime == 0 {
192 throw("nanotime returning zero")
193 }
194
195 if debug.inittrace != 0 {
196 inittrace.id = getg().goid
197 inittrace.active = true
198 }
199
200 doInit(runtime_inittasks)
201
202
203 needUnlock := true
204 defer func() {
205 if needUnlock {
206 unlockOSThread()
207 }
208 }()
209
210 gcenable()
211
212 main_init_done = make(chan bool)
213 if iscgo {
214 if _cgo_pthread_key_created == nil {
215 throw("_cgo_pthread_key_created missing")
216 }
217
218 if _cgo_thread_start == nil {
219 throw("_cgo_thread_start missing")
220 }
221 if GOOS != "windows" {
222 if _cgo_setenv == nil {
223 throw("_cgo_setenv missing")
224 }
225 if _cgo_unsetenv == nil {
226 throw("_cgo_unsetenv missing")
227 }
228 }
229 if _cgo_notify_runtime_init_done == nil {
230 throw("_cgo_notify_runtime_init_done missing")
231 }
232
233
234 if set_crosscall2 == nil {
235 throw("set_crosscall2 missing")
236 }
237 set_crosscall2()
238
239
240
241 startTemplateThread()
242 cgocall(_cgo_notify_runtime_init_done, nil)
243 }
244
245
246
247
248
249
250
251
252 for m := &firstmoduledata; m != nil; m = m.next {
253 doInit(m.inittasks)
254 }
255
256
257
258 inittrace.active = false
259
260 close(main_init_done)
261
262 needUnlock = false
263 unlockOSThread()
264
265 if isarchive || islibrary {
266
267
268 return
269 }
270 fn := main_main
271 fn()
272 if raceenabled {
273 runExitHooks(0)
274 racefini()
275 }
276
277
278
279
280
281 if runningPanicDefers.Load() != 0 {
282
283 for c := 0; c < 1000; c++ {
284 if runningPanicDefers.Load() == 0 {
285 break
286 }
287 Gosched()
288 }
289 }
290 if panicking.Load() != 0 {
291 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
292 }
293 runExitHooks(0)
294
295 exit(0)
296 for {
297 var x *int32
298 *x = 0
299 }
300 }
301
302
303
304
305 func os_beforeExit(exitCode int) {
306 runExitHooks(exitCode)
307 if exitCode == 0 && raceenabled {
308 racefini()
309 }
310 }
311
312
313 func init() {
314 go forcegchelper()
315 }
316
317 func forcegchelper() {
318 forcegc.g = getg()
319 lockInit(&forcegc.lock, lockRankForcegc)
320 for {
321 lock(&forcegc.lock)
322 if forcegc.idle.Load() {
323 throw("forcegc: phase error")
324 }
325 forcegc.idle.Store(true)
326 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
327
328 if debug.gctrace > 0 {
329 println("GC forced")
330 }
331
332 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
333 }
334 }
335
336
337
338
339
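// Gosched yields the processor, allowing other goroutines to run. It does
// not suspend the current goroutine, so execution resumes automatically.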
340 func Gosched() {
341 checkTimeouts()
342 mcall(gosched_m)
343 }
344
345
346
347
348
349 func goschedguarded() {
350 mcall(goschedguarded_m)
351 }
352
353
354
355
356
357
358 func goschedIfBusy() {
359 gp := getg()
360
361
362 if !gp.preempt && sched.npidle.Load() > 0 {
363 return
364 }
365 mcall(gosched_m)
366 }
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
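// gopark puts the current goroutine into a waiting state with the given
// reason and calls unlockf(gp, lock) on the system stack. If unlockf
// returns false, the goroutine is resumed immediately; otherwise it stays
// parked until made runnable again with goready(gp).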
385 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
386 if reason != waitReasonSleep {
387 checkTimeouts()
388 }
389 mp := acquirem()
390 gp := mp.curg
391 status := readgstatus(gp)
392 if status != _Grunning && status != _Gscanrunning {
393 throw("gopark: bad g status")
394 }
395 mp.waitlock = lock
396 mp.waitunlockf = unlockf
397 gp.waitreason = reason
398 mp.waitTraceBlockReason = traceReason
399 mp.waitTraceSkip = traceskip
400 releasem(mp)
401
402 mcall(park_m)
403 }
404
405
406
407 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
408 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
409 }
410
411 func goready(gp *g, traceskip int) {
412 systemstack(func() {
413 ready(gp, traceskip, true)
414 })
415 }
416
417
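// acquireSudog returns a sudog for use by the caller. It takes one from
// the per-P cache, refilling that cache from the central list
// (sched.sudogcache) or allocating a new sudog if both are empty.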
418 func acquireSudog() *sudog {
419
420
421
422
423
424
425
426
427 mp := acquirem()
428 pp := mp.p.ptr()
429 if len(pp.sudogcache) == 0 {
430 lock(&sched.sudoglock)
431
432 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
433 s := sched.sudogcache
434 sched.sudogcache = s.next
435 s.next = nil
436 pp.sudogcache = append(pp.sudogcache, s)
437 }
438 unlock(&sched.sudoglock)
439
440 if len(pp.sudogcache) == 0 {
441 pp.sudogcache = append(pp.sudogcache, new(sudog))
442 }
443 }
444 n := len(pp.sudogcache)
445 s := pp.sudogcache[n-1]
446 pp.sudogcache[n-1] = nil
447 pp.sudogcache = pp.sudogcache[:n-1]
448 if s.elem != nil {
449 throw("acquireSudog: found s.elem != nil in cache")
450 }
451 releasem(mp)
452 return s
453 }
454
455
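// releaseSudog returns s to the per-P cache after verifying it holds no
// stale references. If the local cache is full, half of it is transferred
// to the central list under sched.sudoglock.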
456 func releaseSudog(s *sudog) {
457 if s.elem != nil {
458 throw("runtime: sudog with non-nil elem")
459 }
460 if s.isSelect {
461 throw("runtime: sudog with non-false isSelect")
462 }
463 if s.next != nil {
464 throw("runtime: sudog with non-nil next")
465 }
466 if s.prev != nil {
467 throw("runtime: sudog with non-nil prev")
468 }
469 if s.waitlink != nil {
470 throw("runtime: sudog with non-nil waitlink")
471 }
472 if s.c != nil {
473 throw("runtime: sudog with non-nil c")
474 }
475 gp := getg()
476 if gp.param != nil {
477 throw("runtime: releaseSudog with non-nil gp.param")
478 }
479 mp := acquirem()
480 pp := mp.p.ptr()
481 if len(pp.sudogcache) == cap(pp.sudogcache) {
482
483 var first, last *sudog
484 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
485 n := len(pp.sudogcache)
486 p := pp.sudogcache[n-1]
487 pp.sudogcache[n-1] = nil
488 pp.sudogcache = pp.sudogcache[:n-1]
489 if first == nil {
490 first = p
491 } else {
492 last.next = p
493 }
494 last = p
495 }
496 lock(&sched.sudoglock)
497 last.next = sched.sudogcache
498 sched.sudogcache = first
499 unlock(&sched.sudoglock)
500 }
501 pp.sudogcache = append(pp.sudogcache, s)
502 releasem(mp)
503 }
504
505
506 func badmcall(fn func(*g)) {
507 throw("runtime: mcall called on m->g0 stack")
508 }
509
510 func badmcall2(fn func(*g)) {
511 throw("runtime: mcall function returned")
512 }
513
514 func badreflectcall() {
515 panic(plainError("arg size to reflect.call more than 1GB"))
516 }
517
518
519
520 func badmorestackg0() {
521 if !crashStackImplemented {
522 writeErrStr("fatal: morestack on g0\n")
523 return
524 }
525
526 g := getg()
527 switchToCrashStack(func() {
528 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
529 g.m.traceback = 2
530 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
531 print("\n")
532
533 throw("morestack on g0")
534 })
535 }
536
537
538
539 func badmorestackgsignal() {
540 writeErrStr("fatal: morestack on gsignal\n")
541 }
542
543
544 func badctxt() {
545 throw("ctxt != 0")
546 }
547
548
549
550 var gcrash g
551
552 var crashingG atomic.Pointer[g]
553
554
555
556
557
558
559
560
561
562 func switchToCrashStack(fn func()) {
563 me := getg()
564 if crashingG.CompareAndSwapNoWB(nil, me) {
565 switchToCrashStack0(fn)
566 abort()
567 }
568 if crashingG.Load() == me {
569
570 writeErrStr("fatal: recursive switchToCrashStack\n")
571 abort()
572 }
573
574 usleep_no_g(100)
575 writeErrStr("fatal: concurrent switchToCrashStack\n")
576 abort()
577 }
578
579
580
581
582 const crashStackImplemented = (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "wasm") && GOOS != "windows"
583
584
585 func switchToCrashStack0(fn func())
586
587 func lockedOSThread() bool {
588 gp := getg()
589 return gp.lockedm != 0 && gp.m.lockedg != 0
590 }
591
592 var (
593
594
595
596
597
598
599 allglock mutex
600 allgs []*g
601
602
603
604
605
606
607
608
609
610
611
612
613
614 allglen uintptr
615 allgptr **g
616 )
617
618 func allgadd(gp *g) {
619 if readgstatus(gp) == _Gidle {
620 throw("allgadd: bad status Gidle")
621 }
622
623 lock(&allglock)
624 allgs = append(allgs, gp)
625 if &allgs[0] != allgptr {
626 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
627 }
628 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
629 unlock(&allglock)
630 }
631
632
633
634
635 func allGsSnapshot() []*g {
636 assertWorldStoppedOrLockHeld(&allglock)
637
638
639
640
641
642
643 return allgs[:len(allgs):len(allgs)]
644 }
645
646
647 func atomicAllG() (**g, uintptr) {
648 length := atomic.Loaduintptr(&allglen)
649 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
650 return ptr, length
651 }
652
653
654 func atomicAllGIndex(ptr **g, i uintptr) *g {
655 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
656 }
657
658
659
660
661 func forEachG(fn func(gp *g)) {
662 lock(&allglock)
663 for _, gp := range allgs {
664 fn(gp)
665 }
666 unlock(&allglock)
667 }
668
669
670
671
672
673 func forEachGRace(fn func(gp *g)) {
674 ptr, length := atomicAllG()
675 for i := uintptr(0); i < length; i++ {
676 gp := atomicAllGIndex(ptr, i)
677 fn(gp)
678 }
679 return
680 }
681
682 const (
683
684
685 _GoidCacheBatch = 16
686 )
687
688
689
690 func cpuinit(env string) {
691 switch GOOS {
692 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
693 cpu.DebugOptions = true
694 }
695 cpu.Initialize(env)
696
697
698
699 switch GOARCH {
700 case "386", "amd64":
701 x86HasPOPCNT = cpu.X86.HasPOPCNT
702 x86HasSSE41 = cpu.X86.HasSSE41
703 x86HasFMA = cpu.X86.HasFMA
704
705 case "arm":
706 armHasVFPv4 = cpu.ARM.HasVFPv4
707
708 case "arm64":
709 arm64HasATOMICS = cpu.ARM64.HasATOMICS
710 }
711 }
712
713
714
715
716 func getGodebugEarly() string {
717 const prefix = "GODEBUG="
718 var env string
719 switch GOOS {
720 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
721
722
723
724 n := int32(0)
725 for argv_index(argv, argc+1+n) != nil {
726 n++
727 }
728
729 for i := int32(0); i < n; i++ {
730 p := argv_index(argv, argc+1+i)
731 s := unsafe.String(p, findnull(p))
732
733 if hasPrefix(s, prefix) {
734 env = gostring(p)[len(prefix):]
735 break
736 }
737 }
738 }
739 return env
740 }
741
742
743
744
745
746
747
748
749
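// schedinit bootstraps the scheduler: it initializes lock ranking, the
// stack and memory allocators, the random source, GC, and argument and
// environment handling, then sizes the P set from GOMAXPROCS via
// procresize before the world is started.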
750 func schedinit() {
751 lockInit(&sched.lock, lockRankSched)
752 lockInit(&sched.sysmonlock, lockRankSysmon)
753 lockInit(&sched.deferlock, lockRankDefer)
754 lockInit(&sched.sudoglock, lockRankSudog)
755 lockInit(&deadlock, lockRankDeadlock)
756 lockInit(&paniclk, lockRankPanic)
757 lockInit(&allglock, lockRankAllg)
758 lockInit(&allpLock, lockRankAllp)
759 lockInit(&reflectOffs.lock, lockRankReflectOffs)
760 lockInit(&finlock, lockRankFin)
761 lockInit(&cpuprof.lock, lockRankCpuprof)
762 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
763 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
764 traceLockInit()
765
766
767
768 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
769
770
771
772 gp := getg()
773 if raceenabled {
774 gp.racectx, raceprocctx0 = raceinit()
775 }
776
777 sched.maxmcount = 10000
778
779
780 worldStopped()
781
782 ticks.init()
783 moduledataverify()
784 stackinit()
785 mallocinit()
786 godebug := getGodebugEarly()
787 initPageTrace(godebug)
788 cpuinit(godebug)
789 randinit()
790 alginit()
791 mcommoninit(gp.m, -1)
792 modulesinit()
793 typelinksinit()
794 itabsinit()
795 stkobjinit()
796
797 sigsave(&gp.m.sigmask)
798 initSigmask = gp.m.sigmask
799
800 goargs()
801 goenvs()
802 secure()
803 checkfds()
804 parsedebugvars()
805 gcinit()
806
807
808
809 gcrash.stack = stackalloc(16384)
810 gcrash.stackguard0 = gcrash.stack.lo + 1000
811 gcrash.stackguard1 = gcrash.stack.lo + 1000
812
813
814
815
816
817 if disableMemoryProfiling {
818 MemProfileRate = 0
819 }
820
821 lock(&sched.lock)
822 sched.lastpoll.Store(nanotime())
823 procs := ncpu
824 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
825 procs = n
826 }
827 if procresize(procs) != nil {
828 throw("unknown runnable goroutine during bootstrap")
829 }
830 unlock(&sched.lock)
831
832
833 worldStarted()
834
835 if buildVersion == "" {
836
837
838 buildVersion = "unknown"
839 }
840 if len(modinfo) == 1 {
841
842
843 modinfo = ""
844 }
845 }
846
847 func dumpgstatus(gp *g) {
848 thisg := getg()
849 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
850 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
851 }
852
853
854 func checkmcount() {
855 assertLockHeld(&sched.lock)
856
857
858
859
860
861
862
863
864
865 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
866 if count > sched.maxmcount {
867 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
868 throw("thread exhaustion")
869 }
870 }
871
872
873
874
875
876 func mReserveID() int64 {
877 assertLockHeld(&sched.lock)
878
879 if sched.mnext+1 < sched.mnext {
880 throw("runtime: thread ID overflow")
881 }
882 id := sched.mnext
883 sched.mnext++
884 checkmcount()
885 return id
886 }
887
888
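// mcommoninit performs initialization common to all M's: it assigns an ID,
// seeds the M's random state, runs OS-specific pre-initialization
// (mpreinit), and links the M into the allm list.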
889 func mcommoninit(mp *m, id int64) {
890 gp := getg()
891
892
893 if gp != gp.m.g0 {
894 callers(1, mp.createstack[:])
895 }
896
897 lock(&sched.lock)
898
899 if id >= 0 {
900 mp.id = id
901 } else {
902 mp.id = mReserveID()
903 }
904
905 mrandinit(mp)
906
907 mpreinit(mp)
908 if mp.gsignal != nil {
909 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
910 }
911
912
913
914 mp.alllink = allm
915
916
917
918 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
919 unlock(&sched.lock)
920
921
922 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
923 mp.cgoCallers = new(cgoCallers)
924 }
925 }
926
927 func (mp *m) becomeSpinning() {
928 mp.spinning = true
929 sched.nmspinning.Add(1)
930 sched.needspinning.Store(0)
931 }
932
933 func (mp *m) hasCgoOnStack() bool {
934 return mp.ncgo > 0 || mp.isextra
935 }
936
937 const (
938
939
940 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
941
942
943
944 osHasLowResClockInt = goos.IsWindows
945
946
947
948 osHasLowResClock = osHasLowResClockInt > 0
949 )
950
951
952 func ready(gp *g, traceskip int, next bool) {
953 status := readgstatus(gp)
954
955
956 mp := acquirem()
957 if status&^_Gscan != _Gwaiting {
958 dumpgstatus(gp)
959 throw("bad g->status in ready")
960 }
961
962
963 trace := traceAcquire()
964 casgstatus(gp, _Gwaiting, _Grunnable)
965 if trace.ok() {
966 trace.GoUnpark(gp, traceskip)
967 traceRelease(trace)
968 }
969 runqput(mp.p.ptr(), gp, next)
970 wakep()
971 releasem(mp)
972 }
973
974
975
976 const freezeStopWait = 0x7fffffff
977
978
979
980 var freezing atomic.Bool
981
982
983
984
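// freezetheworld is a best-effort attempt to stop all running goroutines
// during a fatal crash so that tracebacks see a mostly consistent state.
// Unlike stopTheWorld it has no reverse operation and must make progress
// even if the scheduler itself is broken.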
985 func freezetheworld() {
986 freezing.Store(true)
987 if debug.dontfreezetheworld > 0 {
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012 usleep(1000)
1013 return
1014 }
1015
1016
1017
1018
1019 for i := 0; i < 5; i++ {
1020
1021 sched.stopwait = freezeStopWait
1022 sched.gcwaiting.Store(true)
1023
1024 if !preemptall() {
1025 break
1026 }
1027 usleep(1000)
1028 }
1029
1030 usleep(1000)
1031 preemptall()
1032 usleep(1000)
1033 }
1034
1035
1036
1037
1038
1039 func readgstatus(gp *g) uint32 {
1040 return gp.atomicstatus.Load()
1041 }
1042
1043
1044
1045
1046
1047 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1048 success := false
1049
1050
1051 switch oldval {
1052 default:
1053 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1054 dumpgstatus(gp)
1055 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1056 case _Gscanrunnable,
1057 _Gscanwaiting,
1058 _Gscanrunning,
1059 _Gscansyscall,
1060 _Gscanpreempted:
1061 if newval == oldval&^_Gscan {
1062 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1063 }
1064 }
1065 if !success {
1066 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1067 dumpgstatus(gp)
1068 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1069 }
1070 releaseLockRank(lockRankGscan)
1071 }
1072
1073
1074
1075 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1076 switch oldval {
1077 case _Grunnable,
1078 _Grunning,
1079 _Gwaiting,
1080 _Gsyscall:
1081 if newval == oldval|_Gscan {
1082 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1083 if r {
1084 acquireLockRank(lockRankGscan)
1085 }
1086 return r
1087
1088 }
1089 }
1090 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1091 throw("castogscanstatus")
1092 panic("not reached")
1093 }
1094
1095
1096
1097 var casgstatusAlwaysTrack = false
1098
1099
1100
1101
1102
1103
1104
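// casgstatus transitions gp's status from oldval to newval, spinning (with
// yields) while the status is temporarily held in a _Gscan state by the
// garbage collector. It also samples scheduling-latency and mutex-wait
// statistics for tracked goroutines.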
1105 func casgstatus(gp *g, oldval, newval uint32) {
1106 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1107 systemstack(func() {
1108 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1109 throw("casgstatus: bad incoming values")
1110 })
1111 }
1112
1113 acquireLockRank(lockRankGscan)
1114 releaseLockRank(lockRankGscan)
1115
1116
1117 const yieldDelay = 5 * 1000
1118 var nextYield int64
1119
1120
1121
1122 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1123 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1124 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1125 }
1126 if i == 0 {
1127 nextYield = nanotime() + yieldDelay
1128 }
1129 if nanotime() < nextYield {
1130 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1131 procyield(1)
1132 }
1133 } else {
1134 osyield()
1135 nextYield = nanotime() + yieldDelay/2
1136 }
1137 }
1138
1139 if oldval == _Grunning {
1140
1141 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1142 gp.tracking = true
1143 }
1144 gp.trackingSeq++
1145 }
1146 if !gp.tracking {
1147 return
1148 }
1149
1150
1151
1152
1153
1154
1155 switch oldval {
1156 case _Grunnable:
1157
1158
1159
1160 now := nanotime()
1161 gp.runnableTime += now - gp.trackingStamp
1162 gp.trackingStamp = 0
1163 case _Gwaiting:
1164 if !gp.waitreason.isMutexWait() {
1165
1166 break
1167 }
1168
1169
1170
1171
1172
1173 now := nanotime()
1174 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1175 gp.trackingStamp = 0
1176 }
1177 switch newval {
1178 case _Gwaiting:
1179 if !gp.waitreason.isMutexWait() {
1180
1181 break
1182 }
1183
1184 now := nanotime()
1185 gp.trackingStamp = now
1186 case _Grunnable:
1187
1188
1189 now := nanotime()
1190 gp.trackingStamp = now
1191 case _Grunning:
1192
1193
1194
1195 gp.tracking = false
1196 sched.timeToRun.record(gp.runnableTime)
1197 gp.runnableTime = 0
1198 }
1199 }
1200
1201
1202
1203
1204 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1205
1206 gp.waitreason = reason
1207 casgstatus(gp, old, _Gwaiting)
1208 }
1209
1210
1211
1212
1213
1214
1215
1216
1217 func casgcopystack(gp *g) uint32 {
1218 for {
1219 oldstatus := readgstatus(gp) &^ _Gscan
1220 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1221 throw("copystack: bad status, not Gwaiting or Grunnable")
1222 }
1223 if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
1224 return oldstatus
1225 }
1226 }
1227 }
1228
1229
1230
1231
1232
1233 func casGToPreemptScan(gp *g, old, new uint32) {
1234 if old != _Grunning || new != _Gscan|_Gpreempted {
1235 throw("bad g transition")
1236 }
1237 acquireLockRank(lockRankGscan)
1238 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1239 }
1240 }
1241
1242
1243
1244
1245 func casGFromPreempted(gp *g, old, new uint32) bool {
1246 if old != _Gpreempted || new != _Gwaiting {
1247 throw("bad g transition")
1248 }
1249 gp.waitreason = waitReasonPreempted
1250 return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
1251 }
1252
1253
1254 type stwReason uint8
1255
1256
1257
1258
1259 const (
1260 stwUnknown stwReason = iota
1261 stwGCMarkTerm
1262 stwGCSweepTerm
1263 stwWriteHeapDump
1264 stwGoroutineProfile
1265 stwGoroutineProfileCleanup
1266 stwAllGoroutinesStack
1267 stwReadMemStats
1268 stwAllThreadsSyscall
1269 stwGOMAXPROCS
1270 stwStartTrace
1271 stwStopTrace
1272 stwForTestCountPagesInUse
1273 stwForTestReadMetricsSlow
1274 stwForTestReadMemStatsSlow
1275 stwForTestPageCachePagesLeaked
1276 stwForTestResetDebugLog
1277 )
1278
1279 func (r stwReason) String() string {
1280 return stwReasonStrings[r]
1281 }
1282
1283 func (r stwReason) isGC() bool {
1284 return r == stwGCMarkTerm || r == stwGCSweepTerm
1285 }
1286
1287
1288
1289
1290 var stwReasonStrings = [...]string{
1291 stwUnknown: "unknown",
1292 stwGCMarkTerm: "GC mark termination",
1293 stwGCSweepTerm: "GC sweep termination",
1294 stwWriteHeapDump: "write heap dump",
1295 stwGoroutineProfile: "goroutine profile",
1296 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1297 stwAllGoroutinesStack: "all goroutines stack trace",
1298 stwReadMemStats: "read mem stats",
1299 stwAllThreadsSyscall: "AllThreadsSyscall",
1300 stwGOMAXPROCS: "GOMAXPROCS",
1301 stwStartTrace: "start trace",
1302 stwStopTrace: "stop trace",
1303 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1304 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1305 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1306 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1307 stwForTestResetDebugLog: "ResetDebugLog (test)",
1308 }
1309
1310
1311
1312 type worldStop struct {
1313 reason stwReason
1314 start int64
1315 }
1316
1317
1318
1319
1320 var stopTheWorldContext worldStop
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
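// stopTheWorld stops all P's from executing goroutines and returns once
// the world is stopped. It acquires worldsema; the caller must call
// startTheWorld with the returned worldStop to resume execution and
// release the semaphore.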
1339 func stopTheWorld(reason stwReason) worldStop {
1340 semacquire(&worldsema)
1341 gp := getg()
1342 gp.m.preemptoff = reason.String()
1343 systemstack(func() {
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358 casGToWaiting(gp, _Grunning, waitReasonStoppingTheWorld)
1359 stopTheWorldContext = stopTheWorldWithSema(reason)
1360 casgstatus(gp, _Gwaiting, _Grunning)
1361 })
1362 return stopTheWorldContext
1363 }
1364
1365
1366
1367
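// startTheWorld undoes the effects of stopTheWorld: it restarts the P's
// and releases worldsema.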
1368 func startTheWorld(w worldStop) {
1369 systemstack(func() { startTheWorldWithSema(0, w) })
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386 mp := acquirem()
1387 mp.preemptoff = ""
1388 semrelease1(&worldsema, true, 0)
1389 releasem(mp)
1390 }
1391
1392
1393
1394
1395 func stopTheWorldGC(reason stwReason) worldStop {
1396 semacquire(&gcsema)
1397 return stopTheWorld(reason)
1398 }
1399
1400
1401
1402
1403 func startTheWorldGC(w worldStop) {
1404 startTheWorld(w)
1405 semrelease(&gcsema)
1406 }
1407
1408
1409 var worldsema uint32 = 1
1410
1411
1412
1413
1414
1415
1416
1417 var gcsema uint32 = 1
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
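// stopTheWorldWithSema is the core implementation of stopTheWorld. The
// caller must hold worldsema and must not hold other locks. It sets
// gcwaiting, preempts running goroutines, retakes P's that are idle or in
// syscalls, and waits until every P has reached _Pgcstop.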
1449 func stopTheWorldWithSema(reason stwReason) worldStop {
1450 trace := traceAcquire()
1451 if trace.ok() {
1452 trace.STWStart(reason)
1453 traceRelease(trace)
1454 }
1455 gp := getg()
1456
1457
1458
1459 if gp.m.locks > 0 {
1460 throw("stopTheWorld: holding locks")
1461 }
1462
1463 lock(&sched.lock)
1464 start := nanotime()
1465 sched.stopwait = gomaxprocs
1466 sched.gcwaiting.Store(true)
1467 preemptall()
1468
1469 gp.m.p.ptr().status = _Pgcstop
1470 sched.stopwait--
1471
1472 trace = traceAcquire()
1473 for _, pp := range allp {
1474 s := pp.status
1475 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1476 if trace.ok() {
1477 trace.GoSysBlock(pp)
1478 trace.ProcSteal(pp, false)
1479 }
1480 pp.syscalltick++
1481 sched.stopwait--
1482 }
1483 }
1484 if trace.ok() {
1485 traceRelease(trace)
1486 }
1487
1488
1489 now := nanotime()
1490 for {
1491 pp, _ := pidleget(now)
1492 if pp == nil {
1493 break
1494 }
1495 pp.status = _Pgcstop
1496 sched.stopwait--
1497 }
1498 wait := sched.stopwait > 0
1499 unlock(&sched.lock)
1500
1501
1502 if wait {
1503 for {
1504
1505 if notetsleep(&sched.stopnote, 100*1000) {
1506 noteclear(&sched.stopnote)
1507 break
1508 }
1509 preemptall()
1510 }
1511 }
1512
1513 startTime := nanotime() - start
1514 if reason.isGC() {
1515 sched.stwStoppingTimeGC.record(startTime)
1516 } else {
1517 sched.stwStoppingTimeOther.record(startTime)
1518 }
1519
1520
1521 bad := ""
1522 if sched.stopwait != 0 {
1523 bad = "stopTheWorld: not stopped (stopwait != 0)"
1524 } else {
1525 for _, pp := range allp {
1526 if pp.status != _Pgcstop {
1527 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1528 }
1529 }
1530 }
1531 if freezing.Load() {
1532
1533
1534
1535
1536 lock(&deadlock)
1537 lock(&deadlock)
1538 }
1539 if bad != "" {
1540 throw(bad)
1541 }
1542
1543 worldStopped()
1544
1545 return worldStop{reason: reason, start: start}
1546 }
1547
1548
1549
1550
1551
1552
1553
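// startTheWorldWithSema is the core implementation of startTheWorld. It
// resizes the P set if GOMAXPROCS changed, reattaches runnable P's to
// parked M's (creating new M's where needed), and records the total length
// of the stop-the-world pause.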
1554 func startTheWorldWithSema(now int64, w worldStop) int64 {
1555 assertWorldStopped()
1556
1557 mp := acquirem()
1558 if netpollinited() {
1559 list, delta := netpoll(0)
1560 injectglist(&list)
1561 netpollAdjustWaiters(delta)
1562 }
1563 lock(&sched.lock)
1564
1565 procs := gomaxprocs
1566 if newprocs != 0 {
1567 procs = newprocs
1568 newprocs = 0
1569 }
1570 p1 := procresize(procs)
1571 sched.gcwaiting.Store(false)
1572 if sched.sysmonwait.Load() {
1573 sched.sysmonwait.Store(false)
1574 notewakeup(&sched.sysmonnote)
1575 }
1576 unlock(&sched.lock)
1577
1578 worldStarted()
1579
1580 for p1 != nil {
1581 p := p1
1582 p1 = p1.link.ptr()
1583 if p.m != 0 {
1584 mp := p.m.ptr()
1585 p.m = 0
1586 if mp.nextp != 0 {
1587 throw("startTheWorld: inconsistent mp->nextp")
1588 }
1589 mp.nextp.set(p)
1590 notewakeup(&mp.park)
1591 } else {
1592
1593 newm(nil, p, -1)
1594 }
1595 }
1596
1597
1598 if now == 0 {
1599 now = nanotime()
1600 }
1601 totalTime := now - w.start
1602 if w.reason.isGC() {
1603 sched.stwTotalTimeGC.record(totalTime)
1604 } else {
1605 sched.stwTotalTimeOther.record(totalTime)
1606 }
1607 trace := traceAcquire()
1608 if trace.ok() {
1609 trace.STWDone()
1610 traceRelease(trace)
1611 }
1612
1613
1614
1615
1616 wakep()
1617
1618 releasem(mp)
1619
1620 return now
1621 }
1622
1623
1624
1625 func usesLibcall() bool {
1626 switch GOOS {
1627 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1628 return true
1629 case "openbsd":
1630 return GOARCH != "mips64"
1631 }
1632 return false
1633 }
1634
1635
1636
1637 func mStackIsSystemAllocated() bool {
1638 switch GOOS {
1639 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1640 return true
1641 case "openbsd":
1642 return GOARCH != "mips64"
1643 }
1644 return false
1645 }
1646
1647
1648
1649 func mstart()
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
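// mstart0 is the Go entry point for new M's. For OS-allocated stacks it
// derives approximate stack bounds from the current stack pointer, sets
// the stack guards, and calls mstart1. It does not return normally; the
// thread exits through mexit.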
1660 func mstart0() {
1661 gp := getg()
1662
1663 osStack := gp.stack.lo == 0
1664 if osStack {
1665
1666
1667
1668
1669
1670
1671
1672
1673 size := gp.stack.hi
1674 if size == 0 {
1675 size = 16384 * sys.StackGuardMultiplier
1676 }
1677 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1678 gp.stack.lo = gp.stack.hi - size + 1024
1679 }
1680
1681
1682 gp.stackguard0 = gp.stack.lo + stackGuard
1683
1684
1685 gp.stackguard1 = gp.stackguard0
1686 mstart1()
1687
1688
1689 if mStackIsSystemAllocated() {
1690
1691
1692
1693 osStack = true
1694 }
1695 mexit(osStack)
1696 }
1697
1698
1699
1700
1701
1702 func mstart1() {
1703 gp := getg()
1704
1705 if gp != gp.m.g0 {
1706 throw("bad runtime·mstart")
1707 }
1708
1709
1710
1711
1712
1713
1714
1715 gp.sched.g = guintptr(unsafe.Pointer(gp))
1716 gp.sched.pc = getcallerpc()
1717 gp.sched.sp = getcallersp()
1718
1719 asminit()
1720 minit()
1721
1722
1723
1724 if gp.m == &m0 {
1725 mstartm0()
1726 }
1727
1728 if fn := gp.m.mstartfn; fn != nil {
1729 fn()
1730 }
1731
1732 if gp.m != &m0 {
1733 acquirep(gp.m.nextp.ptr())
1734 gp.m.nextp = 0
1735 }
1736 schedule()
1737 }
1738
1739
1740
1741
1742
1743
1744
1745 func mstartm0() {
1746
1747
1748
1749 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1750 cgoHasExtraM = true
1751 newextram()
1752 }
1753 initsig(false)
1754 }
1755
1756
1757
1758
1759 func mPark() {
1760 gp := getg()
1761 notesleep(&gp.m.park)
1762 noteclear(&gp.m.park)
1763 }
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
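// mexit tears down and exits the current thread. It hands off the M's P,
// removes the M from allm, and either returns (when the OS owns the stack)
// or calls exitThread to free the g0 stack. The main thread (m0) is never
// torn down; it hands off its P and parks forever.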
1775 func mexit(osStack bool) {
1776 mp := getg().m
1777
1778 if mp == &m0 {
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790 handoffp(releasep())
1791 lock(&sched.lock)
1792 sched.nmfreed++
1793 checkdead()
1794 unlock(&sched.lock)
1795 mPark()
1796 throw("locked m0 woke up")
1797 }
1798
1799 sigblock(true)
1800 unminit()
1801
1802
1803 if mp.gsignal != nil {
1804 stackfree(mp.gsignal.stack)
1805
1806
1807
1808
1809 mp.gsignal = nil
1810 }
1811
1812
1813 lock(&sched.lock)
1814 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1815 if *pprev == mp {
1816 *pprev = mp.alllink
1817 goto found
1818 }
1819 }
1820 throw("m not found in allm")
1821 found:
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836 mp.freeWait.Store(freeMWait)
1837 mp.freelink = sched.freem
1838 sched.freem = mp
1839 unlock(&sched.lock)
1840
1841 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1842 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1843
1844
1845 handoffp(releasep())
1846
1847
1848
1849
1850
1851 lock(&sched.lock)
1852 sched.nmfreed++
1853 checkdead()
1854 unlock(&sched.lock)
1855
1856 if GOOS == "darwin" || GOOS == "ios" {
1857
1858
1859 if mp.signalPending.Load() != 0 {
1860 pendingPreemptSignals.Add(-1)
1861 }
1862 }
1863
1864
1865
1866 mdestroy(mp)
1867
1868 if osStack {
1869
1870 mp.freeWait.Store(freeMRef)
1871
1872
1873
1874 return
1875 }
1876
1877
1878
1879
1880
1881 exitThread(&mp.freeWait)
1882 }
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
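// forEachP calls fn(p) for every P at a GC safe point, blocking until all
// P's have run it. The caller's goroutine waits in _Gwaiting for the
// duration.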
1894 func forEachP(reason waitReason, fn func(*p)) {
1895 systemstack(func() {
1896 gp := getg().m.curg
1897
1898
1899
1900
1901
1902
1903
1904
1905 casGToWaiting(gp, _Grunning, reason)
1906 forEachPInternal(fn)
1907 casgstatus(gp, _Gwaiting, _Grunning)
1908 })
1909 }
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920 func forEachPInternal(fn func(*p)) {
1921 mp := acquirem()
1922 pp := getg().m.p.ptr()
1923
1924 lock(&sched.lock)
1925 if sched.safePointWait != 0 {
1926 throw("forEachP: sched.safePointWait != 0")
1927 }
1928 sched.safePointWait = gomaxprocs - 1
1929 sched.safePointFn = fn
1930
1931
1932 for _, p2 := range allp {
1933 if p2 != pp {
1934 atomic.Store(&p2.runSafePointFn, 1)
1935 }
1936 }
1937 preemptall()
1938
1939
1940
1941
1942
1943
1944
1945 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1946 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1947 fn(p)
1948 sched.safePointWait--
1949 }
1950 }
1951
1952 wait := sched.safePointWait > 0
1953 unlock(&sched.lock)
1954
1955
1956 fn(pp)
1957
1958
1959
1960 for _, p2 := range allp {
1961 s := p2.status
1962
1963
1964
1965 trace := traceAcquire()
1966 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
1967 if trace.ok() {
1968
1969 trace.GoSysBlock(p2)
1970 trace.ProcSteal(p2, false)
1971 traceRelease(trace)
1972 }
1973 p2.syscalltick++
1974 handoffp(p2)
1975 } else if trace.ok() {
1976 traceRelease(trace)
1977 }
1978 }
1979
1980
1981 if wait {
1982 for {
1983
1984
1985
1986
1987 if notetsleep(&sched.safePointNote, 100*1000) {
1988 noteclear(&sched.safePointNote)
1989 break
1990 }
1991 preemptall()
1992 }
1993 }
1994 if sched.safePointWait != 0 {
1995 throw("forEachP: not done")
1996 }
1997 for _, p2 := range allp {
1998 if p2.runSafePointFn != 0 {
1999 throw("forEachP: P did not run fn")
2000 }
2001 }
2002
2003 lock(&sched.lock)
2004 sched.safePointFn = nil
2005 unlock(&sched.lock)
2006 releasem(mp)
2007 }
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020 func runSafePointFn() {
2021 p := getg().m.p.ptr()
2022
2023
2024
2025 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2026 return
2027 }
2028 sched.safePointFn(p)
2029 lock(&sched.lock)
2030 sched.safePointWait--
2031 if sched.safePointWait == 0 {
2032 notewakeup(&sched.safePointNote)
2033 }
2034 unlock(&sched.lock)
2035 }
2036
2037
2038
2039
2040 var cgoThreadStart unsafe.Pointer
2041
2042 type cgothreadstart struct {
2043 g guintptr
2044 tls *uint64
2045 fn unsafe.Pointer
2046 }
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
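// allocm allocates a new m that is not yet associated with an OS thread.
// It may temporarily borrow pp for allocation, frees any m's queued on
// sched.freem, and gives the new m a g0 stack (system-allocated when cgo
// or the platform requires it).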
2057 func allocm(pp *p, fn func(), id int64) *m {
2058 allocmLock.rlock()
2059
2060
2061
2062
2063 acquirem()
2064
2065 gp := getg()
2066 if gp.m.p == 0 {
2067 acquirep(pp)
2068 }
2069
2070
2071
2072 if sched.freem != nil {
2073 lock(&sched.lock)
2074 var newList *m
2075 for freem := sched.freem; freem != nil; {
2076
2077 wait := freem.freeWait.Load()
2078 if wait == freeMWait {
2079 next := freem.freelink
2080 freem.freelink = newList
2081 newList = freem
2082 freem = next
2083 continue
2084 }
2085
2086
2087
2088 if traceEnabled() || traceShuttingDown() {
2089 traceThreadDestroy(freem)
2090 }
2091
2092
2093
2094 if wait == freeMStack {
2095
2096
2097
2098 systemstack(func() {
2099 stackfree(freem.g0.stack)
2100 })
2101 }
2102 freem = freem.freelink
2103 }
2104 sched.freem = newList
2105 unlock(&sched.lock)
2106 }
2107
2108 mp := new(m)
2109 mp.mstartfn = fn
2110 mcommoninit(mp, id)
2111
2112
2113
2114 if iscgo || mStackIsSystemAllocated() {
2115 mp.g0 = malg(-1)
2116 } else {
2117 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2118 }
2119 mp.g0.m = mp
2120
2121 if pp == gp.m.p.ptr() {
2122 releasep()
2123 }
2124
2125 releasem(gp.m)
2126 allocmLock.runlock()
2127 return mp
2128 }
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
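// needm is called on a cgo callback from a thread that Go did not create
// and that therefore has no m. It takes an extra m from the extraM list,
// installs it as the current thread's m, and marks its curg as being in a
// syscall so the callback can run Go code.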
2169 func needm(signal bool) {
2170 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2171
2172
2173
2174
2175
2176
2177 writeErrStr("fatal error: cgo callback before cgo call\n")
2178 exit(1)
2179 }
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189 var sigmask sigset
2190 sigsave(&sigmask)
2191 sigblock(false)
2192
2193
2194
2195
2196 mp, last := getExtraM()
2197
2198
2199
2200
2201
2202
2203
2204
2205 mp.needextram = last
2206
2207
2208 mp.sigmask = sigmask
2209
2210
2211
2212 osSetupTLS(mp)
2213
2214
2215
2216 setg(mp.g0)
2217 sp := getcallersp()
2218 callbackUpdateSystemStack(mp, sp, signal)
2219
2220
2221
2222
2223 mp.isExtraInC = false
2224
2225
2226 asminit()
2227 minit()
2228
2229
2230
2231
2232
2233
2234 var trace traceLocker
2235 if goexperiment.ExecTracer2 && !signal {
2236 trace = traceAcquire()
2237 }
2238
2239
2240 casgstatus(mp.curg, _Gdead, _Gsyscall)
2241 sched.ngsys.Add(-1)
2242
2243 if goexperiment.ExecTracer2 && !signal {
2244 if trace.ok() {
2245 trace.GoCreateSyscall(mp.curg)
2246 traceRelease(trace)
2247 }
2248 }
2249 mp.isExtraInSig = signal
2250 }
2251
2252
2253
2254
2255 func needAndBindM() {
2256 needm(false)
2257
2258 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2259 cgoBindM()
2260 }
2261 }
2262
2263
2264
2265
2266 func newextram() {
2267 c := extraMWaiters.Swap(0)
2268 if c > 0 {
2269 for i := uint32(0); i < c; i++ {
2270 oneNewExtraM()
2271 }
2272 } else if extraMLength.Load() == 0 {
2273
2274 oneNewExtraM()
2275 }
2276 }
2277
2278
2279 func oneNewExtraM() {
2280
2281
2282
2283
2284
2285 mp := allocm(nil, nil, -1)
2286 gp := malg(4096)
2287 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2288 gp.sched.sp = gp.stack.hi
2289 gp.sched.sp -= 4 * goarch.PtrSize
2290 gp.sched.lr = 0
2291 gp.sched.g = guintptr(unsafe.Pointer(gp))
2292 gp.syscallpc = gp.sched.pc
2293 gp.syscallsp = gp.sched.sp
2294 gp.stktopsp = gp.sched.sp
2295
2296
2297
2298
2299 casgstatus(gp, _Gidle, _Gdead)
2300 gp.m = mp
2301 mp.curg = gp
2302 mp.isextra = true
2303
2304 mp.isExtraInC = true
2305 mp.lockedInt++
2306 mp.lockedg.set(gp)
2307 gp.lockedm.set(mp)
2308 gp.goid = sched.goidgen.Add(1)
2309 if raceenabled {
2310 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2311 }
2312 trace := traceAcquire()
2313 if trace.ok() {
2314 trace.OneNewExtraM(gp)
2315 traceRelease(trace)
2316 }
2317
2318 allgadd(gp)
2319
2320
2321
2322
2323
2324 sched.ngsys.Add(1)
2325
2326
2327 addExtraM(mp)
2328 }
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
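// dropm undoes needm after a cgo callback returns: it puts the current m
// back on the extra m list and clears the thread's scheduler state so the
// non-Go thread can continue without an m.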
2363 func dropm() {
2364
2365
2366
2367 mp := getg().m
2368
2369
2370
2371
2372
2373
2374 var trace traceLocker
2375 if goexperiment.ExecTracer2 && !mp.isExtraInSig {
2376 trace = traceAcquire()
2377 }
2378
2379
2380 casgstatus(mp.curg, _Gsyscall, _Gdead)
2381 mp.curg.preemptStop = false
2382 sched.ngsys.Add(1)
2383
2384 if goexperiment.ExecTracer2 && !mp.isExtraInSig {
2385 if trace.ok() {
2386 trace.GoDestroySyscall()
2387 traceRelease(trace)
2388 }
2389 }
2390
2391 if goexperiment.ExecTracer2 {
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405 mp.syscalltick--
2406 }
2407
2408
2409
2410 mp.curg.trace.reset()
2411
2412
2413
2414
2415
2416
2417
2418 if goexperiment.ExecTracer2 && (traceEnabled() || traceShuttingDown()) {
2419
2420
2421
2422
2423
2424
2425
2426 lock(&sched.lock)
2427 traceThreadDestroy(mp)
2428 unlock(&sched.lock)
2429 }
2430 mp.isExtraInSig = false
2431
2432
2433
2434
2435
2436 sigmask := mp.sigmask
2437 sigblock(false)
2438 unminit()
2439
2440 setg(nil)
2441
2442
2443
2444 g0 := mp.g0
2445 g0.stack.hi = 0
2446 g0.stack.lo = 0
2447 g0.stackguard0 = 0
2448 g0.stackguard1 = 0
2449
2450 putExtraM(mp)
2451
2452 msigrestore(sigmask)
2453 }
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475 func cgoBindM() {
2476 if GOOS == "windows" || GOOS == "plan9" {
2477 fatal("bindm in unexpected GOOS")
2478 }
2479 g := getg()
2480 if g.m.g0 != g {
2481 fatal("the current g is not g0")
2482 }
2483 if _cgo_bindm != nil {
2484 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2485 }
2486 }
2487
2488
2489 func getm() uintptr {
2490 return uintptr(unsafe.Pointer(getg().m))
2491 }
2492
2493 var (
2494
2495
2496
2497
2498
2499
2500 extraM atomic.Uintptr
2501
2502 extraMLength atomic.Uint32
2503
2504 extraMWaiters atomic.Uint32
2505
2506
2507 extraMInUse atomic.Uint32
2508 )
2509
2510
2511
2512
2513
2514
2515
2516
2517 func lockextra(nilokay bool) *m {
2518 const locked = 1
2519
2520 incr := false
2521 for {
2522 old := extraM.Load()
2523 if old == locked {
2524 osyield_no_g()
2525 continue
2526 }
2527 if old == 0 && !nilokay {
2528 if !incr {
2529
2530
2531
2532 extraMWaiters.Add(1)
2533 incr = true
2534 }
2535 usleep_no_g(1)
2536 continue
2537 }
2538 if extraM.CompareAndSwap(old, locked) {
2539 return (*m)(unsafe.Pointer(old))
2540 }
2541 osyield_no_g()
2542 continue
2543 }
2544 }
2545
2546
2547 func unlockextra(mp *m, delta int32) {
2548 extraMLength.Add(delta)
2549 extraM.Store(uintptr(unsafe.Pointer(mp)))
2550 }
2551
2552
2553
2554
2555
2556
2557
2558
2559 func getExtraM() (mp *m, last bool) {
2560 mp = lockextra(false)
2561 extraMInUse.Add(1)
2562 unlockextra(mp.schedlink.ptr(), -1)
2563 return mp, mp.schedlink.ptr() == nil
2564 }
2565
2566
2567
2568
2569
2570 func putExtraM(mp *m) {
2571 extraMInUse.Add(-1)
2572 addExtraM(mp)
2573 }
2574
2575
2576
2577
2578 func addExtraM(mp *m) {
2579 mnext := lockextra(true)
2580 mp.schedlink.set(mnext)
2581 unlockextra(mp, 1)
2582 }
2583
2584 var (
2585
2586
2587
2588 allocmLock rwmutex
2589
2590
2591
2592
2593 execLock rwmutex
2594 )
2595
2596
2597
2598 const (
2599 failthreadcreate = "runtime: failed to create new OS thread\n"
2600 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2601 )
2602
2603
2604
2605
2606 var newmHandoff struct {
2607 lock mutex
2608
2609
2610
2611 newm muintptr
2612
2613
2614
2615 waiting bool
2616 wake note
2617
2618
2619
2620
2621 haveTemplateThread uint32
2622 }
2623
2624
2625
2626
2627
2628
2629
2630
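// newm creates a new m that starts by running fn (or the scheduler if fn
// is nil) with pp as its first P. If the calling thread is locked to a
// goroutine or running inside cgo, creation is handed off to the template
// thread so the new OS thread inherits a clean state.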
2631 func newm(fn func(), pp *p, id int64) {
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642 acquirem()
2643
2644 mp := allocm(pp, fn, id)
2645 mp.nextp.set(pp)
2646 mp.sigmask = initSigmask
2647 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659 lock(&newmHandoff.lock)
2660 if newmHandoff.haveTemplateThread == 0 {
2661 throw("on a locked thread with no template thread")
2662 }
2663 mp.schedlink = newmHandoff.newm
2664 newmHandoff.newm.set(mp)
2665 if newmHandoff.waiting {
2666 newmHandoff.waiting = false
2667 notewakeup(&newmHandoff.wake)
2668 }
2669 unlock(&newmHandoff.lock)
2670
2671
2672
2673 releasem(getg().m)
2674 return
2675 }
2676 newm1(mp)
2677 releasem(getg().m)
2678 }
2679
2680 func newm1(mp *m) {
2681 if iscgo {
2682 var ts cgothreadstart
2683 if _cgo_thread_start == nil {
2684 throw("_cgo_thread_start missing")
2685 }
2686 ts.g.set(mp.g0)
2687 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2688 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2689 if msanenabled {
2690 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2691 }
2692 if asanenabled {
2693 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2694 }
2695 execLock.rlock()
2696 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2697 execLock.runlock()
2698 return
2699 }
2700 execLock.rlock()
2701 newosproc(mp)
2702 execLock.runlock()
2703 }
2704
2705
2706
2707
2708
2709 func startTemplateThread() {
2710 if GOARCH == "wasm" {
2711 return
2712 }
2713
2714
2715
2716 mp := acquirem()
2717 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2718 releasem(mp)
2719 return
2720 }
2721 newm(templateThread, nil, -1)
2722 releasem(mp)
2723 }
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737 func templateThread() {
2738 lock(&sched.lock)
2739 sched.nmsys++
2740 checkdead()
2741 unlock(&sched.lock)
2742
2743 for {
2744 lock(&newmHandoff.lock)
2745 for newmHandoff.newm != 0 {
2746 newm := newmHandoff.newm.ptr()
2747 newmHandoff.newm = 0
2748 unlock(&newmHandoff.lock)
2749 for newm != nil {
2750 next := newm.schedlink.ptr()
2751 newm.schedlink = 0
2752 newm1(newm)
2753 newm = next
2754 }
2755 lock(&newmHandoff.lock)
2756 }
2757 newmHandoff.waiting = true
2758 noteclear(&newmHandoff.wake)
2759 unlock(&newmHandoff.lock)
2760 notesleep(&newmHandoff.wake)
2761 }
2762 }
2763
2764
2765
2766 func stopm() {
2767 gp := getg()
2768
2769 if gp.m.locks != 0 {
2770 throw("stopm holding locks")
2771 }
2772 if gp.m.p != 0 {
2773 throw("stopm holding p")
2774 }
2775 if gp.m.spinning {
2776 throw("stopm spinning")
2777 }
2778
2779 lock(&sched.lock)
2780 mput(gp.m)
2781 unlock(&sched.lock)
2782 mPark()
2783 acquirep(gp.m.nextp.ptr())
2784 gp.m.nextp = 0
2785 }
2786
2787 func mspinning() {
2788
2789 getg().m.spinning = true
2790 }
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
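// startm schedules some M to run pp, creating an M if necessary. If pp is
// nil it tries to take an idle P and does nothing if none is available.
// lockheld indicates whether the caller already holds sched.lock.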
2809 func startm(pp *p, spinning, lockheld bool) {
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826 mp := acquirem()
2827 if !lockheld {
2828 lock(&sched.lock)
2829 }
2830 if pp == nil {
2831 if spinning {
2832
2833
2834
2835 throw("startm: P required for spinning=true")
2836 }
2837 pp, _ = pidleget(0)
2838 if pp == nil {
2839 if !lockheld {
2840 unlock(&sched.lock)
2841 }
2842 releasem(mp)
2843 return
2844 }
2845 }
2846 nmp := mget()
2847 if nmp == nil {
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862 id := mReserveID()
2863 unlock(&sched.lock)
2864
2865 var fn func()
2866 if spinning {
2867
2868 fn = mspinning
2869 }
2870 newm(fn, pp, id)
2871
2872 if lockheld {
2873 lock(&sched.lock)
2874 }
2875
2876
2877 releasem(mp)
2878 return
2879 }
2880 if !lockheld {
2881 unlock(&sched.lock)
2882 }
2883 if nmp.spinning {
2884 throw("startm: m is spinning")
2885 }
2886 if nmp.nextp != 0 {
2887 throw("startm: m has p")
2888 }
2889 if spinning && !runqempty(pp) {
2890 throw("startm: p has runnable gs")
2891 }
2892
2893 nmp.spinning = spinning
2894 nmp.nextp.set(pp)
2895 notewakeup(&nmp.park)
2896
2897
2898 releasem(mp)
2899 }
2900
2901
2902
2903
2904
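// handoffp hands off pp from a syscall or locked M. It starts an M to run
// pp if pp has local or global work, a pending trace reader, GC mark work,
// or if every other P is already idle; otherwise pp is placed on the idle
// list (waking the netpoller if pp has pending timers).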
2905 func handoffp(pp *p) {
2906
2907
2908
2909
2910 if !runqempty(pp) || sched.runqsize != 0 {
2911 startm(pp, false, false)
2912 return
2913 }
2914
2915 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
2916 startm(pp, false, false)
2917 return
2918 }
2919
2920 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
2921 startm(pp, false, false)
2922 return
2923 }
2924
2925
2926 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
2927 sched.needspinning.Store(0)
2928 startm(pp, true, false)
2929 return
2930 }
2931 lock(&sched.lock)
2932 if sched.gcwaiting.Load() {
2933 pp.status = _Pgcstop
2934 sched.stopwait--
2935 if sched.stopwait == 0 {
2936 notewakeup(&sched.stopnote)
2937 }
2938 unlock(&sched.lock)
2939 return
2940 }
2941 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
2942 sched.safePointFn(pp)
2943 sched.safePointWait--
2944 if sched.safePointWait == 0 {
2945 notewakeup(&sched.safePointNote)
2946 }
2947 }
2948 if sched.runqsize != 0 {
2949 unlock(&sched.lock)
2950 startm(pp, false, false)
2951 return
2952 }
2953
2954
2955 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
2956 unlock(&sched.lock)
2957 startm(pp, false, false)
2958 return
2959 }
2960
2961
2962
2963 when := nobarrierWakeTime(pp)
2964 pidleput(pp, 0)
2965 unlock(&sched.lock)
2966
2967 if when != 0 {
2968 wakeNetPoller(when)
2969 }
2970 }
2971
2972
2973
2974
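// wakep adds a spinning M to run pending work, but only if no M is already
// spinning and an idle P is available. Called when new work may have been
// made runnable.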
2975 func wakep() {
2976
2977
2978 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
2979 return
2980 }
2981
2982
2983
2984
2985
2986
2987 mp := acquirem()
2988
2989 var pp *p
2990 lock(&sched.lock)
2991 pp, _ = pidlegetSpinning(0)
2992 if pp == nil {
2993 if sched.nmspinning.Add(-1) < 0 {
2994 throw("wakep: negative nmspinning")
2995 }
2996 unlock(&sched.lock)
2997 releasem(mp)
2998 return
2999 }
3000
3001
3002
3003
3004 unlock(&sched.lock)
3005
3006 startm(pp, true, false)
3007
3008 releasem(mp)
3009 }
3010
3011
3012
3013 func stoplockedm() {
3014 gp := getg()
3015
3016 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3017 throw("stoplockedm: inconsistent locking")
3018 }
3019 if gp.m.p != 0 {
3020
3021 pp := releasep()
3022 handoffp(pp)
3023 }
3024 incidlelocked(1)
3025
3026 mPark()
3027 status := readgstatus(gp.m.lockedg.ptr())
3028 if status&^_Gscan != _Grunnable {
3029 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3030 dumpgstatus(gp.m.lockedg.ptr())
3031 throw("stoplockedm: not runnable")
3032 }
3033 acquirep(gp.m.nextp.ptr())
3034 gp.m.nextp = 0
3035 }
3036
3037
3038
3039
3040
3041 func startlockedm(gp *g) {
3042 mp := gp.lockedm.ptr()
3043 if mp == getg().m {
3044 throw("startlockedm: locked to me")
3045 }
3046 if mp.nextp != 0 {
3047 throw("startlockedm: m has p")
3048 }
3049
3050 incidlelocked(-1)
3051 pp := releasep()
3052 mp.nextp.set(pp)
3053 notewakeup(&mp.park)
3054 stopm()
3055 }
3056
3057
3058
3059 func gcstopm() {
3060 gp := getg()
3061
3062 if !sched.gcwaiting.Load() {
3063 throw("gcstopm: not waiting for gc")
3064 }
3065 if gp.m.spinning {
3066 gp.m.spinning = false
3067
3068
3069 if sched.nmspinning.Add(-1) < 0 {
3070 throw("gcstopm: negative nmspinning")
3071 }
3072 }
3073 pp := releasep()
3074 lock(&sched.lock)
3075 pp.status = _Pgcstop
3076 sched.stopwait--
3077 if sched.stopwait == 0 {
3078 notewakeup(&sched.stopnote)
3079 }
3080 unlock(&sched.lock)
3081 stopm()
3082 }
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
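// execute schedules gp to run on the current M, transitioning it to
// _Grunning, and never returns. If inheritTime is true, gp keeps the
// remaining time in the current slice (schedtick is not incremented).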
3093 func execute(gp *g, inheritTime bool) {
3094 mp := getg().m
3095
3096 if goroutineProfile.active {
3097
3098
3099
3100 tryRecordGoroutineProfile(gp, osyield)
3101 }
3102
3103
3104
3105 mp.curg = gp
3106 gp.m = mp
3107 casgstatus(gp, _Grunnable, _Grunning)
3108 gp.waitsince = 0
3109 gp.preempt = false
3110 gp.stackguard0 = gp.stack.lo + stackGuard
3111 if !inheritTime {
3112 mp.p.ptr().schedtick++
3113 }
3114
3115
3116 hz := sched.profilehz
3117 if mp.profilehz != hz {
3118 setThreadCPUProfiler(hz)
3119 }
3120
3121 trace := traceAcquire()
3122 if trace.ok() {
3123
3124
3125 if !goexperiment.ExecTracer2 && gp.syscallsp != 0 {
3126 trace.GoSysExit(true)
3127 }
3128 trace.GoStart()
3129 traceRelease(trace)
3130 }
3131
3132 gogo(&gp.sched)
3133 }
3134
3135
3136
3137
3138
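// findRunnable blocks until it finds a runnable goroutine. It tries, in
// order: the trace reader, GC mark workers, the global queue (every 61st
// schedule, for fairness), the local run queue, the global queue, a
// non-blocking netpoll, and stealing work or timers from other P's; only
// then does it release its P, recheck for work, and park the M.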
3139 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3140 mp := getg().m
3141
3142
3143
3144
3145
3146 top:
3147 pp := mp.p.ptr()
3148 if sched.gcwaiting.Load() {
3149 gcstopm()
3150 goto top
3151 }
3152 if pp.runSafePointFn != 0 {
3153 runSafePointFn()
3154 }
3155
3156
3157
3158
3159
3160 now, pollUntil, _ := checkTimers(pp, 0)
3161
3162
3163 if traceEnabled() || traceShuttingDown() {
3164 gp := traceReader()
3165 if gp != nil {
3166 trace := traceAcquire()
3167 casgstatus(gp, _Gwaiting, _Grunnable)
3168 if trace.ok() {
3169 trace.GoUnpark(gp, 0)
3170 traceRelease(trace)
3171 }
3172 return gp, false, true
3173 }
3174 }
3175
3176
3177 if gcBlackenEnabled != 0 {
3178 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3179 if gp != nil {
3180 return gp, false, true
3181 }
3182 now = tnow
3183 }
3184
3185
3186
3187
3188 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3189 lock(&sched.lock)
3190 gp := globrunqget(pp, 1)
3191 unlock(&sched.lock)
3192 if gp != nil {
3193 return gp, false, false
3194 }
3195 }
3196
3197
3198 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3199 if gp := wakefing(); gp != nil {
3200 ready(gp, 0, true)
3201 }
3202 }
3203 if *cgo_yield != nil {
3204 asmcgocall(*cgo_yield, nil)
3205 }
3206
3207
3208 if gp, inheritTime := runqget(pp); gp != nil {
3209 return gp, inheritTime, false
3210 }
3211
3212
3213 if sched.runqsize != 0 {
3214 lock(&sched.lock)
3215 gp := globrunqget(pp, 0)
3216 unlock(&sched.lock)
3217 if gp != nil {
3218 return gp, false, false
3219 }
3220 }
3221
3222
3223
3224
3225
3226
3227
3228
3229 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3230 if list, delta := netpoll(0); !list.empty() {
3231 gp := list.pop()
3232 injectglist(&list)
3233 netpollAdjustWaiters(delta)
3234 trace := traceAcquire()
3235 casgstatus(gp, _Gwaiting, _Grunnable)
3236 if trace.ok() {
3237 trace.GoUnpark(gp, 0)
3238 traceRelease(trace)
3239 }
3240 return gp, false, false
3241 }
3242 }
3243
3244
3245
3246
3247
3248
3249 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3250 if !mp.spinning {
3251 mp.becomeSpinning()
3252 }
3253
3254 gp, inheritTime, tnow, w, newWork := stealWork(now)
3255 if gp != nil {
3256
3257 return gp, inheritTime, false
3258 }
3259 if newWork {
3260
3261
3262 goto top
3263 }
3264
3265 now = tnow
3266 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3267
3268 pollUntil = w
3269 }
3270 }
3271
3272
3273
3274
3275
3276 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3277 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3278 if node != nil {
3279 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3280 gp := node.gp.ptr()
3281
3282 trace := traceAcquire()
3283 casgstatus(gp, _Gwaiting, _Grunnable)
3284 if trace.ok() {
3285 trace.GoUnpark(gp, 0)
3286 traceRelease(trace)
3287 }
3288 return gp, false, false
3289 }
3290 gcController.removeIdleMarkWorker()
3291 }
3292
3293
3294
3295
3296
3297 gp, otherReady := beforeIdle(now, pollUntil)
3298 if gp != nil {
3299 trace := traceAcquire()
3300 casgstatus(gp, _Gwaiting, _Grunnable)
3301 if trace.ok() {
3302 trace.GoUnpark(gp, 0)
3303 traceRelease(trace)
3304 }
3305 return gp, false, false
3306 }
3307 if otherReady {
3308 goto top
3309 }
3310
3311
3312
3313
3314
3315 allpSnapshot := allp
3316
3317
3318 idlepMaskSnapshot := idlepMask
3319 timerpMaskSnapshot := timerpMask
3320
3321
3322 lock(&sched.lock)
3323 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3324 unlock(&sched.lock)
3325 goto top
3326 }
3327 if sched.runqsize != 0 {
3328 gp := globrunqget(pp, 0)
3329 unlock(&sched.lock)
3330 return gp, false, false
3331 }
3332 if !mp.spinning && sched.needspinning.Load() == 1 {
3333
3334 mp.becomeSpinning()
3335 unlock(&sched.lock)
3336 goto top
3337 }
3338 if releasep() != pp {
3339 throw("findrunnable: wrong p")
3340 }
3341 now = pidleput(pp, now)
3342 unlock(&sched.lock)
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380 wasSpinning := mp.spinning
3381 if mp.spinning {
3382 mp.spinning = false
3383 if sched.nmspinning.Add(-1) < 0 {
3384 throw("findrunnable: negative nmspinning")
3385 }
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398 lock(&sched.lock)
3399 if sched.runqsize != 0 {
3400 pp, _ := pidlegetSpinning(0)
3401 if pp != nil {
3402 gp := globrunqget(pp, 0)
3403 if gp == nil {
3404 throw("global runq empty with non-zero runqsize")
3405 }
3406 unlock(&sched.lock)
3407 acquirep(pp)
3408 mp.becomeSpinning()
3409 return gp, false, false
3410 }
3411 }
3412 unlock(&sched.lock)
3413
3414 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3415 if pp != nil {
3416 acquirep(pp)
3417 mp.becomeSpinning()
3418 goto top
3419 }
3420
3421
3422 pp, gp := checkIdleGCNoP()
3423 if pp != nil {
3424 acquirep(pp)
3425 mp.becomeSpinning()
3426
3427
3428 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3429 trace := traceAcquire()
3430 casgstatus(gp, _Gwaiting, _Grunnable)
3431 if trace.ok() {
3432 trace.GoUnpark(gp, 0)
3433 traceRelease(trace)
3434 }
3435 return gp, false, false
3436 }
3437
3438
3439
3440
3441
3442
3443
3444 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3445 }
3446
3447
3448 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3449 sched.pollUntil.Store(pollUntil)
3450 if mp.p != 0 {
3451 throw("findrunnable: netpoll with p")
3452 }
3453 if mp.spinning {
3454 throw("findrunnable: netpoll with spinning")
3455 }
3456 delay := int64(-1)
3457 if pollUntil != 0 {
3458 if now == 0 {
3459 now = nanotime()
3460 }
3461 delay = pollUntil - now
3462 if delay < 0 {
3463 delay = 0
3464 }
3465 }
3466 if faketime != 0 {
3467
3468 delay = 0
3469 }
3470 list, delta := netpoll(delay)
3471
3472 now = nanotime()
3473 sched.pollUntil.Store(0)
3474 sched.lastpoll.Store(now)
3475 if faketime != 0 && list.empty() {
3476
3477
3478 stopm()
3479 goto top
3480 }
3481 lock(&sched.lock)
3482 pp, _ := pidleget(now)
3483 unlock(&sched.lock)
3484 if pp == nil {
3485 injectglist(&list)
3486 netpollAdjustWaiters(delta)
3487 } else {
3488 acquirep(pp)
3489 if !list.empty() {
3490 gp := list.pop()
3491 injectglist(&list)
3492 netpollAdjustWaiters(delta)
3493 trace := traceAcquire()
3494 casgstatus(gp, _Gwaiting, _Grunnable)
3495 if trace.ok() {
3496 trace.GoUnpark(gp, 0)
3497 traceRelease(trace)
3498 }
3499 return gp, false, false
3500 }
3501 if wasSpinning {
3502 mp.becomeSpinning()
3503 }
3504 goto top
3505 }
3506 } else if pollUntil != 0 && netpollinited() {
3507 pollerPollUntil := sched.pollUntil.Load()
3508 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3509 netpollBreak()
3510 }
3511 }
3512 stopm()
3513 goto top
3514 }
3515
3516
3517
3518
3519
3520 func pollWork() bool {
3521 if sched.runqsize != 0 {
3522 return true
3523 }
3524 p := getg().m.p.ptr()
3525 if !runqempty(p) {
3526 return true
3527 }
3528 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3529 if list, delta := netpoll(0); !list.empty() {
3530 injectglist(&list)
3531 netpollAdjustWaiters(delta)
3532 return true
3533 }
3534 }
3535 return false
3536 }
3537
3538
3539
3540
3541
3542
3543
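// stealWork attempts to steal a runnable goroutine or an expired timer
// from any other P. It makes up to stealTries passes over the P's in
// random order; only on the final pass does it also run timers and take a
// victim's runnext. newWork reports whether running timers may have
// readied new work.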
3544 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3545 pp := getg().m.p.ptr()
3546
3547 ranTimer := false
3548
3549 const stealTries = 4
3550 for i := 0; i < stealTries; i++ {
3551 stealTimersOrRunNextG := i == stealTries-1
3552
3553 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3554 if sched.gcwaiting.Load() {
3555 // GC work may be available; report newWork so the caller rechecks.
3556 return nil, false, now, pollUntil, true
3557 }
3558 p2 := allp[enum.position()]
3559 if pp == p2 {
3560 continue
3561 }
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3577 tnow, w, ran := checkTimers(p2, now)
3578 now = tnow
3579 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3580 pollUntil = w
3581 }
3582 if ran {
3583
3584
3585
3586
3587
3588
3589
3590
3591 if gp, inheritTime := runqget(pp); gp != nil {
3592 return gp, inheritTime, now, pollUntil, ranTimer
3593 }
3594 ranTimer = true
3595 }
3596 }
3597
3598
3599 if !idlepMask.read(enum.position()) {
3600 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3601 return gp, false, now, pollUntil, ranTimer
3602 }
3603 }
3604 }
3605 }
3606
3607
3608
3609
3610 return nil, false, now, pollUntil, ranTimer
3611 }
3612
3613
3614
3615
3616
3617
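// checkRunqsNoP rechecks the per-P run queues, using snapshots taken
// while this M still owned a P. It runs without a P. If some non-idle P
// appears to have work, it tries to take an idle P to run that work on
// and returns it; the caller must acquire the returned P and retry
// findRunnable from the top.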
3618 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3619 for id, p2 := range allpSnapshot {
3620 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3621 lock(&sched.lock)
3622 pp, _ := pidlegetSpinning(0)
3623 if pp == nil {
3624
3625 unlock(&sched.lock)
3626 return nil
3627 }
3628 unlock(&sched.lock)
3629 return pp
3630 }
3631 }
3632
3633
3634 return nil
3635 }
3636
3637
3638
3639
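// checkTimersNoP folds the earliest timer of every P with timers (per
// the snapshots) into pollUntil, so that a subsequent netpoll sleep
// wakes up in time to run it. It runs without a P and therefore cannot
// run any timers itself.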
3640 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3641 for id, p2 := range allpSnapshot {
3642 if timerpMaskSnapshot.read(uint32(id)) {
3643 w := nobarrierWakeTime(p2)
3644 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3645 pollUntil = w
3646 }
3647 }
3648 }
3649
3650 return pollUntil
3651 }
3652
3653
3654
3655
3656
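// checkIdleGCNoP checks whether an idle GC mark worker is needed and,
// if so, tries to claim both an idle P and a background mark worker G
// to run on it. It runs without a P. Both results are nil when no
// worker is needed or no P is available.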
3657 func checkIdleGCNoP() (*p, *g) {
3658
3659
3660
3661
3662
3663
3664 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3665 return nil, nil
3666 }
3667 if !gcMarkWorkAvailable(nil) {
3668 return nil, nil
3669 }
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688 lock(&sched.lock)
3689 pp, now := pidlegetSpinning(0)
3690 if pp == nil {
3691 unlock(&sched.lock)
3692 return nil, nil
3693 }
3694
3695
3696 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3697 pidleput(pp, now)
3698 unlock(&sched.lock)
3699 return nil, nil
3700 }
3701
3702 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3703 if node == nil {
3704 pidleput(pp, now)
3705 unlock(&sched.lock)
3706 gcController.removeIdleMarkWorker()
3707 return nil, nil
3708 }
3709
3710 unlock(&sched.lock)
3711
3712 return pp, node.gp.ptr()
3713 }
3714
3715
3716
3717
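// wakeNetPoller ensures the next network poll happens no later than
// when. If an M is currently blocked in netpoll and would sleep past
// when, it is interrupted with netpollBreak; if nothing is polling, an
// M is woken (except on plan9) so that one will start.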
3718 func wakeNetPoller(when int64) {
3719 if sched.lastpoll.Load() == 0 {
3720
3721
3722
3723
3724 pollerPollUntil := sched.pollUntil.Load()
3725 if pollerPollUntil == 0 || pollerPollUntil > when {
3726 netpollBreak()
3727 }
3728 } else {
3729
3730
3731 if GOOS != "plan9" {
3732 wakep()
3733 }
3734 }
3735 }
3736
3737 func resetspinning() {
3738 gp := getg()
3739 if !gp.m.spinning {
3740 throw("resetspinning: not a spinning m")
3741 }
3742 gp.m.spinning = false
3743 nmspinning := sched.nmspinning.Add(-1)
3744 if nmspinning < 0 {
3745 throw("findrunnable: negative nmspinning")
3746 }
3747
3748
3749
3750 wakep()
3751 }
3752
3753
3754
3755
3756
3757
3758
3759
3760
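// injectglist marks every goroutine on glist runnable and queues them
// for execution, emptying glist. If the current M has no P, everything
// goes on the global run queue; otherwise up to npidle goroutines go on
// the global queue (starting idle Ps to run them) and the rest go on
// this P's local run queue. Must be called without sched.lock held.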
3761 func injectglist(glist *gList) {
3762 if glist.empty() {
3763 return
3764 }
3765 trace := traceAcquire()
3766 if trace.ok() {
3767 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3768 trace.GoUnpark(gp, 0)
3769 }
3770 traceRelease(trace)
3771 }
3772
3773
3774
3775 head := glist.head.ptr()
3776 var tail *g
3777 qsize := 0
3778 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3779 tail = gp
3780 qsize++
3781 casgstatus(gp, _Gwaiting, _Grunnable)
3782 }
3783
3784
3785 var q gQueue
3786 q.head.set(head)
3787 q.tail.set(tail)
3788 *glist = gList{}
3789
3790 startIdle := func(n int) {
3791 for i := 0; i < n; i++ {
3792 mp := acquirem()
3793 lock(&sched.lock)
3794
3795 pp, _ := pidlegetSpinning(0)
3796 if pp == nil {
3797 unlock(&sched.lock)
3798 releasem(mp)
3799 break
3800 }
3801
3802 startm(pp, false, true)
3803 unlock(&sched.lock)
3804 releasem(mp)
3805 }
3806 }
3807
3808 pp := getg().m.p.ptr()
3809 if pp == nil {
3810 lock(&sched.lock)
3811 globrunqputbatch(&q, int32(qsize))
3812 unlock(&sched.lock)
3813 startIdle(qsize)
3814 return
3815 }
3816
3817 npidle := int(sched.npidle.Load())
3818 var globq gQueue
3819 var n int
3820 for n = 0; n < npidle && !q.empty(); n++ {
3821 g := q.pop()
3822 globq.pushBack(g)
3823 }
3824 if n > 0 {
3825 lock(&sched.lock)
3826 globrunqputbatch(&globq, int32(n))
3827 unlock(&sched.lock)
3828 startIdle(n)
3829 qsize -= n
3830 }
3831
3832 if !q.empty() {
3833 runqputbatch(pp, &q, qsize)
3834 }
3835 }
3836
3837
3838
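// schedule performs one round of scheduling on the current M: find a
// runnable goroutine and execute it. It never returns to its caller;
// execute jumps directly into the chosen goroutine.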
3839 func schedule() {
3840 mp := getg().m
3841
3842 if mp.locks != 0 {
3843 throw("schedule: holding locks")
3844 }
3845
3846 if mp.lockedg != 0 {
3847 stoplockedm()
3848 execute(mp.lockedg.ptr(), false)
3849 }
3850
3851
3852
3853 if mp.incgo {
3854 throw("schedule: in cgo")
3855 }
3856
3857 top:
3858 pp := mp.p.ptr()
3859 pp.preempt = false
3860
3861
3862
3863
3864 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3865 throw("schedule: spinning with local work")
3866 }
3867
3868 gp, inheritTime, tryWakeP := findRunnable()
3869
3870 if debug.dontfreezetheworld > 0 && freezing.Load() {
3871
3872
3873
3874
3875
3876
3877
3878 lock(&deadlock)
3879 lock(&deadlock)
3880 }
3881
3882
3883
3884
3885 if mp.spinning {
3886 resetspinning()
3887 }
3888
3889 if sched.disable.user && !schedEnabled(gp) {
3890
3891
3892
3893 lock(&sched.lock)
3894 if schedEnabled(gp) {
3895
3896
3897 unlock(&sched.lock)
3898 } else {
3899 sched.disable.runnable.pushBack(gp)
3900 sched.disable.n++
3901 unlock(&sched.lock)
3902 goto top
3903 }
3904 }
3905
3906
3907
3908 if tryWakeP {
3909 wakep()
3910 }
3911 if gp.lockedm != 0 {
3912
3913
3914 startlockedm(gp)
3915 goto top
3916 }
3917
3918 execute(gp, inheritTime)
3919 }
3920
3921
3922
3923
3924
3925
3926
3927
3928 func dropg() {
3929 gp := getg()
3930
3931 setMNoWB(&gp.m.curg.m, nil)
3932 setGNoWB(&gp.m.curg, nil)
3933 }
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
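// checkTimers runs any timers on pp that are ready. It returns the
// current time, the time when the next timer should fire (or 0 if there
// is none), and whether it ran any timer. It takes pp.timersLock only
// when there may actually be something to do.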
3945 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3946
3947
3948 next := pp.timer0When.Load()
3949 nextAdj := pp.timerModifiedEarliest.Load()
3950 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3951 next = nextAdj
3952 }
3953
3954 if next == 0 {
3955
3956 return now, 0, false
3957 }
3958
3959 if now == 0 {
3960 now = nanotime()
3961 }
3962 if now < next {
3963
3964
3965
3966
3967 if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
3968 return now, next, false
3969 }
3970 }
3971
3972 lock(&pp.timersLock)
3973
3974 if len(pp.timers) > 0 {
3975 adjusttimers(pp, now)
3976 for len(pp.timers) > 0 {
3977
3978
3979 if tw := runtimer(pp, now); tw != 0 {
3980 if tw > 0 {
3981 pollUntil = tw
3982 }
3983 break
3984 }
3985 ran = true
3986 }
3987 }
3988
3989
3990
3991
3992 if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 {
3993 clearDeletedTimers(pp)
3994 }
3995
3996 unlock(&pp.timersLock)
3997
3998 return now, pollUntil, ran
3999 }
4000
4001 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4002 unlock((*mutex)(lock))
4003 return true
4004 }
4005
4006
4007 func park_m(gp *g) {
4008 mp := getg().m
4009
4010 trace := traceAcquire()
4011
4012
4013
4014 casgstatus(gp, _Grunning, _Gwaiting)
4015 if trace.ok() {
4016 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4017 traceRelease(trace)
4018 }
4019
4020 dropg()
4021
4022 if fn := mp.waitunlockf; fn != nil {
4023 ok := fn(gp, mp.waitlock)
4024 mp.waitunlockf = nil
4025 mp.waitlock = nil
4026 if !ok {
4027 trace := traceAcquire()
4028 casgstatus(gp, _Gwaiting, _Grunnable)
4029 if trace.ok() {
4030 trace.GoUnpark(gp, 2)
4031 traceRelease(trace)
4032 }
4033 execute(gp, true)
4034 }
4035 }
4036 schedule()
4037 }
4038
4039 func goschedImpl(gp *g, preempted bool) {
4040 trace := traceAcquire()
4041 status := readgstatus(gp)
4042 if status&^_Gscan != _Grunning {
4043 dumpgstatus(gp)
4044 throw("bad g status")
4045 }
4046 casgstatus(gp, _Grunning, _Grunnable)
4047 if trace.ok() {
4048 if preempted {
4049 trace.GoPreempt()
4050 } else {
4051 trace.GoSched()
4052 }
4053 traceRelease(trace)
4054 }
4055
4056 dropg()
4057 lock(&sched.lock)
4058 globrunqput(gp)
4059 unlock(&sched.lock)
4060
4061 if mainStarted {
4062 wakep()
4063 }
4064
4065 schedule()
4066 }
4067
4068
4069 func gosched_m(gp *g) {
4070 goschedImpl(gp, false)
4071 }
4072
4073
4074 func goschedguarded_m(gp *g) {
4075 if !canPreemptM(gp.m) {
4076 gogo(&gp.sched)
4077 }
4078 goschedImpl(gp, false)
4079 }
4080
4081 func gopreempt_m(gp *g) {
4082 goschedImpl(gp, true)
4083 }
4084
4085
4086
4087
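// preemptPark parks gp at an asynchronous preemption point and moves it
// to _Gpreempted, passing through the combined _Gscan|_Gpreempted state
// so that concurrent observers of the _Gscan bit see one consistent
// transition. It then schedules other work on this M.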
4088 func preemptPark(gp *g) {
4089 status := readgstatus(gp)
4090 if status&^_Gscan != _Grunning {
4091 dumpgstatus(gp)
4092 throw("bad g status")
4093 }
4094
4095 if gp.asyncSafePoint {
4096
4097
4098
4099 f := findfunc(gp.sched.pc)
4100 if !f.valid() {
4101 throw("preempt at unknown pc")
4102 }
4103 if f.flag&abi.FuncFlagSPWrite != 0 {
4104 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4105 throw("preempt SPWRITE")
4106 }
4107 }
4108
4109
4110
4111
4112
4113
4114
4115 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4116 dropg()
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133 trace := traceAcquire()
4134 if trace.ok() {
4135 trace.GoPark(traceBlockPreempted, 0)
4136 }
4137 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4138 if trace.ok() {
4139 traceRelease(trace)
4140 }
4141 schedule()
4142 }
4143
4144
4145
4146
4147 func goyield() {
4148 checkTimeouts()
4149 mcall(goyield_m)
4150 }
4151
4152 func goyield_m(gp *g) {
4153 trace := traceAcquire()
4154 pp := gp.m.p.ptr()
4155 casgstatus(gp, _Grunning, _Grunnable)
4156 if trace.ok() {
4157 trace.GoPreempt()
4158 traceRelease(trace)
4159 }
4160 dropg()
4161 runqput(pp, gp, false)
4162 schedule()
4163 }
4164
4165
4166 func goexit1() {
4167 if raceenabled {
4168 racegoend()
4169 }
4170 trace := traceAcquire()
4171 if trace.ok() {
4172 trace.GoEnd()
4173 traceRelease(trace)
4174 }
4175 mcall(goexit0)
4176 }
4177
4178
4179 func goexit0(gp *g) {
4180 gdestroy(gp)
4181 schedule()
4182 }
4183
4184 func gdestroy(gp *g) {
4185 mp := getg().m
4186 pp := mp.p.ptr()
4187
4188 casgstatus(gp, _Grunning, _Gdead)
4189 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4190 if isSystemGoroutine(gp, false) {
4191 sched.ngsys.Add(-1)
4192 }
4193 gp.m = nil
4194 locked := gp.lockedm != 0
4195 gp.lockedm = 0
4196 mp.lockedg = 0
4197 gp.preemptStop = false
4198 gp.paniconfault = false
4199 gp._defer = nil
4200 gp._panic = nil
4201 gp.writebuf = nil
4202 gp.waitreason = waitReasonZero
4203 gp.param = nil
4204 gp.labels = nil
4205 gp.timer = nil
4206
4207 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4208
4209
4210
4211 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4212 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4213 gcController.bgScanCredit.Add(scanCredit)
4214 gp.gcAssistBytes = 0
4215 }
4216
4217 dropg()
4218
4219 if GOARCH == "wasm" {
4220 gfput(pp, gp)
4221 return
4222 }
4223
4224 if mp.lockedInt != 0 {
4225 print("invalid m->lockedInt = ", mp.lockedInt, "\n")
4226 throw("internal lockOSThread error")
4227 }
4228 gfput(pp, gp)
4229 if locked {
4230
4231
4232
4233
4234
4235
4236 if GOOS != "plan9" {
4237 gogo(&mp.g0.sched)
4238 } else {
4239
4240
4241 mp.lockedExt = 0
4242 }
4243 }
4244 }
4245
4246
4247
4248
4249
4250
4251
4252
4253
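// save records pc and sp in gp.sched so the goroutine can later be
// resumed via gogo. It must not be used on g0 or gsignal, it clears
// sched.lr and sched.ret, and it requires sched.ctxt to already be nil.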
4254 func save(pc, sp uintptr) {
4255 gp := getg()
4256
4257 if gp == gp.m.g0 || gp == gp.m.gsignal {
4258
4259
4260
4261
4262
4263 throw("save on system g not allowed")
4264 }
4265
4266 gp.sched.pc = pc
4267 gp.sched.sp = sp
4268 gp.sched.lr = 0
4269 gp.sched.ret = 0
4270
4271
4272
4273 if gp.sched.ctxt != nil {
4274 badctxt()
4275 }
4276 }
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
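// reentersyscall is the common syscall entry path. It records the
// caller's pc/sp, moves the goroutine to _Gsyscall, and parks the
// current P in m.oldp with status _Psyscall so that sysmon or the GC
// can retake it while the syscall blocks. Anything that might grow the
// stack or clobber gp.sched must be followed by another save(pc, sp).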
4315 func reentersyscall(pc, sp uintptr) {
4316 trace := traceAcquire()
4317 gp := getg()
4318
4319
4320
4321 gp.m.locks++
4322
4323
4324
4325
4326
4327 gp.stackguard0 = stackPreempt
4328 gp.throwsplit = true
4329
4330
4331 save(pc, sp)
4332 gp.syscallsp = sp
4333 gp.syscallpc = pc
4334 casgstatus(gp, _Grunning, _Gsyscall)
4335 if staticLockRanking {
4336
4337
4338 save(pc, sp)
4339 }
4340 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4341 systemstack(func() {
4342 print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4343 throw("entersyscall")
4344 })
4345 }
4346
4347 if trace.ok() {
4348 systemstack(func() {
4349 trace.GoSysCall()
4350 traceRelease(trace)
4351 })
4352
4353
4354
4355 save(pc, sp)
4356 }
4357
4358 if sched.sysmonwait.Load() {
4359 systemstack(entersyscall_sysmon)
4360 save(pc, sp)
4361 }
4362
4363 if gp.m.p.ptr().runSafePointFn != 0 {
4364
4365 systemstack(runSafePointFn)
4366 save(pc, sp)
4367 }
4368
4369 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4370 pp := gp.m.p.ptr()
4371 pp.m = 0
4372 gp.m.oldp.set(pp)
4373 gp.m.p = 0
4374 atomic.Store(&pp.status, _Psyscall)
4375 if sched.gcwaiting.Load() {
4376 systemstack(entersyscall_gcwait)
4377 save(pc, sp)
4378 }
4379
4380 gp.m.locks--
4381 }
4382
4383
4384
4385
4386
4387
4388
4389 func entersyscall() {
4390 reentersyscall(getcallerpc(), getcallersp())
4391 }
4392
4393 func entersyscall_sysmon() {
4394 lock(&sched.lock)
4395 if sched.sysmonwait.Load() {
4396 sched.sysmonwait.Store(false)
4397 notewakeup(&sched.sysmonnote)
4398 }
4399 unlock(&sched.lock)
4400 }
4401
4402 func entersyscall_gcwait() {
4403 gp := getg()
4404 pp := gp.m.oldp.ptr()
4405
4406 lock(&sched.lock)
4407 trace := traceAcquire()
4408 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4409 if trace.ok() {
4410 if goexperiment.ExecTracer2 {
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420 trace.ProcSteal(pp, true)
4421 } else {
4422 trace.GoSysBlock(pp)
4423 trace.ProcStop(pp)
4424 }
4425 traceRelease(trace)
4426 }
4427 pp.syscalltick++
4428 if sched.stopwait--; sched.stopwait == 0 {
4429 notewakeup(&sched.stopnote)
4430 }
4431 } else if trace.ok() {
4432 traceRelease(trace)
4433 }
4434 unlock(&sched.lock)
4435 }
4436
4437
4438
4439
4440 func entersyscallblock() {
4441 gp := getg()
4442
4443 gp.m.locks++
4444 gp.throwsplit = true
4445 gp.stackguard0 = stackPreempt
4446 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4447 gp.m.p.ptr().syscalltick++
4448
4449
4450 pc := getcallerpc()
4451 sp := getcallersp()
4452 save(pc, sp)
4453 gp.syscallsp = gp.sched.sp
4454 gp.syscallpc = gp.sched.pc
4455 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4456 sp1 := sp
4457 sp2 := gp.sched.sp
4458 sp3 := gp.syscallsp
4459 systemstack(func() {
4460 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4461 throw("entersyscallblock")
4462 })
4463 }
4464 casgstatus(gp, _Grunning, _Gsyscall)
4465 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4466 systemstack(func() {
4467 print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4468 throw("entersyscallblock")
4469 })
4470 }
4471
4472 systemstack(entersyscallblock_handoff)
4473
4474
4475 save(getcallerpc(), getcallersp())
4476
4477 gp.m.locks--
4478 }
4479
4480 func entersyscallblock_handoff() {
4481 trace := traceAcquire()
4482 if trace.ok() {
4483 trace.GoSysCall()
4484 trace.GoSysBlock(getg().m.p.ptr())
4485 traceRelease(trace)
4486 }
4487 handoffp(releasep())
4488 }
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
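// exitsyscall is called when a goroutine returns from a system call.
// The fast path tries to reattach the P it left in m.oldp, or to grab
// some idle P, so the goroutine keeps running on this M without a full
// reschedule; otherwise exitsyscall0 is invoked via mcall to reschedule
// the goroutine elsewhere.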
4502 func exitsyscall() {
4503 gp := getg()
4504
4505 gp.m.locks++
4506 if getcallersp() > gp.syscallsp {
4507 throw("exitsyscall: syscall frame is no longer valid")
4508 }
4509
4510 gp.waitsince = 0
4511 oldp := gp.m.oldp.ptr()
4512 gp.m.oldp = 0
4513 if exitsyscallfast(oldp) {
4514
4515
4516 if goroutineProfile.active {
4517
4518
4519
4520 systemstack(func() {
4521 tryRecordGoroutineProfileWB(gp)
4522 })
4523 }
4524 trace := traceAcquire()
4525 if trace.ok() {
4526 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4527 systemstack(func() {
4528 if goexperiment.ExecTracer2 {
4529
4530
4531
4532
4533 trace.GoSysExit(lostP)
4534 }
4535 if lostP {
4536
4537
4538
4539
4540 trace.GoStart()
4541 }
4542 })
4543 }
4544
4545 gp.m.p.ptr().syscalltick++
4546
4547 casgstatus(gp, _Gsyscall, _Grunning)
4548 if trace.ok() {
4549 traceRelease(trace)
4550 }
4551
4552
4553
4554 gp.syscallsp = 0
4555 gp.m.locks--
4556 if gp.preempt {
4557
4558 gp.stackguard0 = stackPreempt
4559 } else {
4560
4561 gp.stackguard0 = gp.stack.lo + stackGuard
4562 }
4563 gp.throwsplit = false
4564
4565 if sched.disable.user && !schedEnabled(gp) {
4566
4567 Gosched()
4568 }
4569
4570 return
4571 }
4572
4573 if !goexperiment.ExecTracer2 {
4574
4575
4576
4577 trace := traceAcquire()
4578 if trace.ok() {
4579 trace.RecordSyscallExitedTime(gp, oldp)
4580 traceRelease(trace)
4581 }
4582 }
4583
4584 gp.m.locks--
4585
4586
4587 mcall(exitsyscall0)
4588
4589
4590
4591
4592
4593
4594
4595 gp.syscallsp = 0
4596 gp.m.p.ptr().syscalltick++
4597 gp.throwsplit = false
4598 }
4599
4600
4601 func exitsyscallfast(oldp *p) bool {
4602 gp := getg()
4603
4604
4605 if sched.stopwait == freezeStopWait {
4606 return false
4607 }
4608
4609
4610 trace := traceAcquire()
4611 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4612
4613 wirep(oldp)
4614 exitsyscallfast_reacquired(trace)
4615 if trace.ok() {
4616 traceRelease(trace)
4617 }
4618 return true
4619 }
4620 if trace.ok() {
4621 traceRelease(trace)
4622 }
4623
4624
4625 if sched.pidle != 0 {
4626 var ok bool
4627 systemstack(func() {
4628 ok = exitsyscallfast_pidle()
4629 if ok && !goexperiment.ExecTracer2 {
4630 trace := traceAcquire()
4631 if trace.ok() {
4632 if oldp != nil {
4633
4634
4635 for oldp.syscalltick == gp.m.syscalltick {
4636 osyield()
4637 }
4638 }
4639
4640
4641 trace.GoSysExit(true)
4642 traceRelease(trace)
4643 }
4644 }
4645 })
4646 if ok {
4647 return true
4648 }
4649 }
4650 return false
4651 }
4652
4653
4654
4655
4656
4657
4658 func exitsyscallfast_reacquired(trace traceLocker) {
4659 gp := getg()
4660 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4661 if trace.ok() {
4662
4663
4664
4665 systemstack(func() {
4666 if goexperiment.ExecTracer2 {
4667
4668
4669 trace.ProcSteal(gp.m.p.ptr(), true)
4670 trace.ProcStart()
4671 } else {
4672
4673 trace.GoSysBlock(gp.m.p.ptr())
4674
4675 trace.GoSysExit(true)
4676 }
4677 })
4678 }
4679 gp.m.p.ptr().syscalltick++
4680 }
4681 }
4682
4683 func exitsyscallfast_pidle() bool {
4684 lock(&sched.lock)
4685 pp, _ := pidleget(0)
4686 if pp != nil && sched.sysmonwait.Load() {
4687 sched.sysmonwait.Store(false)
4688 notewakeup(&sched.sysmonnote)
4689 }
4690 unlock(&sched.lock)
4691 if pp != nil {
4692 acquirep(pp)
4693 return true
4694 }
4695 return false
4696 }
4697
4698
4699
4700
4701
4702
4703
4704 func exitsyscall0(gp *g) {
4705 var trace traceLocker
4706 if goexperiment.ExecTracer2 {
4707 traceExitingSyscall()
4708 trace = traceAcquire()
4709 }
4710 casgstatus(gp, _Gsyscall, _Grunnable)
4711 if goexperiment.ExecTracer2 {
4712 traceExitedSyscall()
4713 if trace.ok() {
4714
4715
4716
4717
4718 trace.GoSysExit(true)
4719 traceRelease(trace)
4720 }
4721 }
4722 dropg()
4723 lock(&sched.lock)
4724 var pp *p
4725 if schedEnabled(gp) {
4726 pp, _ = pidleget(0)
4727 }
4728 var locked bool
4729 if pp == nil {
4730 globrunqput(gp)
4731
4732
4733
4734
4735
4736
4737 locked = gp.lockedm != 0
4738 } else if sched.sysmonwait.Load() {
4739 sched.sysmonwait.Store(false)
4740 notewakeup(&sched.sysmonnote)
4741 }
4742 unlock(&sched.lock)
4743 if pp != nil {
4744 acquirep(pp)
4745 execute(gp, false)
4746 }
4747 if locked {
4748
4749
4750
4751
4752 stoplockedm()
4753 execute(gp, false)
4754 }
4755 stopm()
4756 schedule()
4757 }
4758
4759
4760
4761
4762
4763 func syscall_runtime_BeforeFork() {
4764 gp := getg().m.curg
4765
4766
4767
4768
4769 gp.m.locks++
4770 sigsave(&gp.m.sigmask)
4771 sigblock(false)
4772
4773
4774
4775
4776
4777 gp.stackguard0 = stackFork
4778 }
4779
4780
4781
4782
4783
4784 func syscall_runtime_AfterFork() {
4785 gp := getg().m.curg
4786
4787
4788 gp.stackguard0 = gp.stack.lo + stackGuard
4789
4790 msigrestore(gp.m.sigmask)
4791
4792 gp.m.locks--
4793 }
4794
4795
4796
4797 var inForkedChild bool
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810 func syscall_runtime_AfterForkInChild() {
4811
4812
4813
4814
4815 inForkedChild = true
4816
4817 clearSignalHandlers()
4818
4819
4820
4821 msigrestore(getg().m.sigmask)
4822
4823 inForkedChild = false
4824 }
4825
4826
4827
4828
4829 var pendingPreemptSignals atomic.Int32
4830
4831
4832
4833
4834 func syscall_runtime_BeforeExec() {
4835
4836 execLock.lock()
4837
4838
4839
4840 if GOOS == "darwin" || GOOS == "ios" {
4841 for pendingPreemptSignals.Load() > 0 {
4842 osyield()
4843 }
4844 }
4845 }
4846
4847
4848
4849
4850 func syscall_runtime_AfterExec() {
4851 execLock.unlock()
4852 }
4853
4854
4855 func malg(stacksize int32) *g {
4856 newg := new(g)
4857 if stacksize >= 0 {
4858 stacksize = round2(stackSystem + stacksize)
4859 systemstack(func() {
4860 newg.stack = stackalloc(uint32(stacksize))
4861 })
4862 newg.stackguard0 = newg.stack.lo + stackGuard
4863 newg.stackguard1 = ^uintptr(0)
4864
4865
4866 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4867 }
4868 return newg
4869 }
4870
4871
4872
4873
4874 func newproc(fn *funcval) {
4875 gp := getg()
4876 pc := getcallerpc()
4877 systemstack(func() {
4878 newg := newproc1(fn, gp, pc)
4879
4880 pp := getg().m.p.ptr()
4881 runqput(pp, newg, true)
4882
4883 if mainStarted {
4884 wakep()
4885 }
4886 })
4887 }
4888
4889
4890
4891
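// newproc1 allocates (or recycles from the free list) a goroutine in
// state _Grunnable that is set up to run fn. callergp and callerpc
// identify the go statement for ancestry tracking, profiling labels,
// and tracing. The caller is responsible for putting the returned g on
// a run queue.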
4892 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
4893 if fn == nil {
4894 fatal("go of nil func value")
4895 }
4896
4897 mp := acquirem()
4898 pp := mp.p.ptr()
4899 newg := gfget(pp)
4900 if newg == nil {
4901 newg = malg(stackMin)
4902 casgstatus(newg, _Gidle, _Gdead)
4903 allgadd(newg)
4904 }
4905 if newg.stack.hi == 0 {
4906 throw("newproc1: newg missing stack")
4907 }
4908
4909 if readgstatus(newg) != _Gdead {
4910 throw("newproc1: new g is not Gdead")
4911 }
4912
4913 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
4914 totalSize = alignUp(totalSize, sys.StackAlign)
4915 sp := newg.stack.hi - totalSize
4916 if usesLR {
4917 // Caller's LR slot.
4918 *(*uintptr)(unsafe.Pointer(sp)) = 0
4919 prepGoExitFrame(sp)
4920 }
4921 if GOARCH == "arm64" {
4922 // Caller's frame pointer slot.
4923 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
4924 }
4925
4926 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4927 newg.sched.sp = sp
4928 newg.stktopsp = sp
4929 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4930 newg.sched.g = guintptr(unsafe.Pointer(newg))
4931 gostartcallfn(&newg.sched, fn)
4932 newg.parentGoid = callergp.goid
4933 newg.gopc = callerpc
4934 newg.ancestors = saveAncestors(callergp)
4935 newg.startpc = fn.fn
4936 if isSystemGoroutine(newg, false) {
4937 sched.ngsys.Add(1)
4938 } else {
4939
4940 if mp.curg != nil {
4941 newg.labels = mp.curg.labels
4942 }
4943 if goroutineProfile.active {
4944
4945
4946
4947
4948
4949 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
4950 }
4951 }
4952
4953 newg.trackingSeq = uint8(cheaprand())
4954 if newg.trackingSeq%gTrackingPeriod == 0 {
4955 newg.tracking = true
4956 }
4957 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
4958
4959
4960 trace := traceAcquire()
4961 casgstatus(newg, _Gdead, _Grunnable)
4962 if pp.goidcache == pp.goidcacheend {
4963
4964
4965
4966 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
4967 pp.goidcache -= _GoidCacheBatch - 1
4968 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
4969 }
4970 newg.goid = pp.goidcache
4971 pp.goidcache++
4972 newg.trace.reset()
4973 if trace.ok() {
4974 trace.GoCreate(newg, newg.startpc)
4975 traceRelease(trace)
4976 }
4977
4978
4979 if raceenabled {
4980 newg.racectx = racegostart(callerpc)
4981 newg.raceignore = 0
4982 if newg.labels != nil {
4983
4984
4985 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
4986 }
4987 }
4988 releasem(mp)
4989
4990 return newg
4991 }
4992
4993
4994
4995
4996 func saveAncestors(callergp *g) *[]ancestorInfo {
4997
4998 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4999 return nil
5000 }
5001 var callerAncestors []ancestorInfo
5002 if callergp.ancestors != nil {
5003 callerAncestors = *callergp.ancestors
5004 }
5005 n := int32(len(callerAncestors)) + 1
5006 if n > debug.tracebackancestors {
5007 n = debug.tracebackancestors
5008 }
5009 ancestors := make([]ancestorInfo, n)
5010 copy(ancestors[1:], callerAncestors)
5011
5012 var pcs [tracebackInnerFrames]uintptr
5013 npcs := gcallers(callergp, 0, pcs[:])
5014 ipcs := make([]uintptr, npcs)
5015 copy(ipcs, pcs[:])
5016 ancestors[0] = ancestorInfo{
5017 pcs: ipcs,
5018 goid: callergp.goid,
5019 gopc: callergp.gopc,
5020 }
5021
5022 ancestorsp := new([]ancestorInfo)
5023 *ancestorsp = ancestors
5024 return ancestorsp
5025 }
5026
5027
5028
5029 func gfput(pp *p, gp *g) {
5030 if readgstatus(gp) != _Gdead {
5031 throw("gfput: bad status (not Gdead)")
5032 }
5033
5034 stksize := gp.stack.hi - gp.stack.lo
5035
5036 if stksize != uintptr(startingStackSize) {
5037 // Non-standard stack size - free it.
5038 stackfree(gp.stack)
5039 gp.stack.lo = 0
5040 gp.stack.hi = 0
5041 gp.stackguard0 = 0
5042 }
5043
5044 pp.gFree.push(gp)
5045 pp.gFree.n++
5046 if pp.gFree.n >= 64 {
5047 var (
5048 inc int32
5049 stackQ gQueue
5050 noStackQ gQueue
5051 )
5052 for pp.gFree.n >= 32 {
5053 gp := pp.gFree.pop()
5054 pp.gFree.n--
5055 if gp.stack.lo == 0 {
5056 noStackQ.push(gp)
5057 } else {
5058 stackQ.push(gp)
5059 }
5060 inc++
5061 }
5062 lock(&sched.gFree.lock)
5063 sched.gFree.noStack.pushAll(noStackQ)
5064 sched.gFree.stack.pushAll(stackQ)
5065 sched.gFree.n += inc
5066 unlock(&sched.gFree.lock)
5067 }
5068 }
5069
5070
5071
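// gfget takes a dead goroutine off pp's free list, refilling from the
// global free list when the local one is empty. It frees or allocates
// the stack as needed so the returned g always has a stack of
// startingStackSize. Returns nil if no free g is available.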
5072 func gfget(pp *p) *g {
5073 retry:
5074 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5075 lock(&sched.gFree.lock)
5076
5077 for pp.gFree.n < 32 {
5078
5079 gp := sched.gFree.stack.pop()
5080 if gp == nil {
5081 gp = sched.gFree.noStack.pop()
5082 if gp == nil {
5083 break
5084 }
5085 }
5086 sched.gFree.n--
5087 pp.gFree.push(gp)
5088 pp.gFree.n++
5089 }
5090 unlock(&sched.gFree.lock)
5091 goto retry
5092 }
5093 gp := pp.gFree.pop()
5094 if gp == nil {
5095 return nil
5096 }
5097 pp.gFree.n--
5098 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5099
5100
5101
5102 systemstack(func() {
5103 stackfree(gp.stack)
5104 gp.stack.lo = 0
5105 gp.stack.hi = 0
5106 gp.stackguard0 = 0
5107 })
5108 }
5109 if gp.stack.lo == 0 {
5110
5111 systemstack(func() {
5112 gp.stack = stackalloc(startingStackSize)
5113 })
5114 gp.stackguard0 = gp.stack.lo + stackGuard
5115 } else {
5116 if raceenabled {
5117 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5118 }
5119 if msanenabled {
5120 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5121 }
5122 if asanenabled {
5123 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5124 }
5125 }
5126 return gp
5127 }
5128
5129
5130 func gfpurge(pp *p) {
5131 var (
5132 inc int32
5133 stackQ gQueue
5134 noStackQ gQueue
5135 )
5136 for !pp.gFree.empty() {
5137 gp := pp.gFree.pop()
5138 pp.gFree.n--
5139 if gp.stack.lo == 0 {
5140 noStackQ.push(gp)
5141 } else {
5142 stackQ.push(gp)
5143 }
5144 inc++
5145 }
5146 lock(&sched.gFree.lock)
5147 sched.gFree.noStack.pushAll(noStackQ)
5148 sched.gFree.stack.pushAll(stackQ)
5149 sched.gFree.n += inc
5150 unlock(&sched.gFree.lock)
5151 }
5152
5153
5154 func Breakpoint() {
5155 breakpoint()
5156 }
5157
5158
5159
5160
5161
5162
5163 func dolockOSThread() {
5164 if GOARCH == "wasm" {
5165 return
5166 }
5167 gp := getg()
5168 gp.m.lockedg.set(gp)
5169 gp.lockedm.set(gp.m)
5170 }
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188 func LockOSThread() {
5189 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5190
5191
5192
5193 startTemplateThread()
5194 }
5195 gp := getg()
5196 gp.m.lockedExt++
5197 if gp.m.lockedExt == 0 {
5198 gp.m.lockedExt--
5199 panic("LockOSThread nesting overflow")
5200 }
5201 dolockOSThread()
5202 }
5203
5204
5205 func lockOSThread() {
5206 getg().m.lockedInt++
5207 dolockOSThread()
5208 }
5209
5210
5211
5212
5213
5214
5215 func dounlockOSThread() {
5216 if GOARCH == "wasm" {
5217 return
5218 }
5219 gp := getg()
5220 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5221 return
5222 }
5223 gp.m.lockedg = 0
5224 gp.lockedm = 0
5225 }
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241 func UnlockOSThread() {
5242 gp := getg()
5243 if gp.m.lockedExt == 0 {
5244 return
5245 }
5246 gp.m.lockedExt--
5247 dounlockOSThread()
5248 }
5249
5250
5251 func unlockOSThread() {
5252 gp := getg()
5253 if gp.m.lockedInt == 0 {
5254 systemstack(badunlockosthread)
5255 }
5256 gp.m.lockedInt--
5257 dounlockOSThread()
5258 }
5259
5260 func badunlockosthread() {
5261 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5262 }
5263
5264 func gcount() int32 {
5265 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5266 for _, pp := range allp {
5267 n -= pp.gFree.n
5268 }
5269
5270
5271
5272 if n < 1 {
5273 n = 1
5274 }
5275 return n
5276 }
5277
5278 func mcount() int32 {
5279 return int32(sched.mnext - sched.nmfreed)
5280 }
5281
5282 var prof struct {
5283 signalLock atomic.Uint32
5284
5285
5286
5287 hz atomic.Int32
5288 }
5289
5290 func _System() { _System() }
5291 func _ExternalCode() { _ExternalCode() }
5292 func _LostExternalCode() { _LostExternalCode() }
5293 func _GC() { _GC() }
5294 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5295 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5296 func _VDSO() { _VDSO() }
5297
5298
5299
5300
5301
5302 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5303 if prof.hz.Load() == 0 {
5304 return
5305 }
5306
5307
5308
5309
5310 if mp != nil && mp.profilehz == 0 {
5311 return
5312 }
5313
5314
5315
5316
5317
5318
5319
5320 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5321 if f := findfunc(pc); f.valid() {
5322 if hasPrefix(funcname(f), "runtime/internal/atomic") {
5323 cpuprof.lostAtomic++
5324 return
5325 }
5326 }
5327 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5328
5329
5330
5331 cpuprof.lostAtomic++
5332 return
5333 }
5334 }
5335
5336
5337
5338
5339
5340
5341
5342 getg().m.mallocing++
5343
5344 var u unwinder
5345 var stk [maxCPUProfStack]uintptr
5346 n := 0
5347 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5348 cgoOff := 0
5349
5350
5351
5352
5353
5354 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5355 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5356 cgoOff++
5357 }
5358 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5359 mp.cgoCallers[0] = 0
5360 }
5361
5362
5363 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5364 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5365
5366
5367 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5368 } else if mp != nil && mp.vdsoSP != 0 {
5369
5370
5371 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5372 } else {
5373 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5374 }
5375 n += tracebackPCs(&u, 0, stk[n:])
5376
5377 if n <= 0 {
5378
5379
5380 n = 2
5381 if inVDSOPage(pc) {
5382 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5383 } else if pc > firstmoduledata.etext {
5384
5385 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5386 }
5387 stk[0] = pc
5388 if mp.preemptoff != "" {
5389 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5390 } else {
5391 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5392 }
5393 }
5394
5395 if prof.hz.Load() != 0 {
5396
5397
5398
5399 var tagPtr *unsafe.Pointer
5400 if gp != nil && gp.m != nil && gp.m.curg != nil {
5401 tagPtr = &gp.m.curg.labels
5402 }
5403 cpuprof.add(tagPtr, stk[:n])
5404
5405 gprof := gp
5406 var mp *m
5407 var pp *p
5408 if gp != nil && gp.m != nil {
5409 if gp.m.curg != nil {
5410 gprof = gp.m.curg
5411 }
5412 mp = gp.m
5413 pp = gp.m.p.ptr()
5414 }
5415 traceCPUSample(gprof, mp, pp, stk[:n])
5416 }
5417 getg().m.mallocing--
5418 }
5419
5420
5421
5422 func setcpuprofilerate(hz int32) {
5423
5424 if hz < 0 {
5425 hz = 0
5426 }
5427
5428
5429
5430 gp := getg()
5431 gp.m.locks++
5432
5433
5434
5435
5436 setThreadCPUProfiler(0)
5437
5438 for !prof.signalLock.CompareAndSwap(0, 1) {
5439 osyield()
5440 }
5441 if prof.hz.Load() != hz {
5442 setProcessCPUProfiler(hz)
5443 prof.hz.Store(hz)
5444 }
5445 prof.signalLock.Store(0)
5446
5447 lock(&sched.lock)
5448 sched.profilehz = hz
5449 unlock(&sched.lock)
5450
5451 if hz != 0 {
5452 setThreadCPUProfiler(hz)
5453 }
5454
5455 gp.m.locks--
5456 }
5457
5458
5459
5460 func (pp *p) init(id int32) {
5461 pp.id = id
5462 pp.status = _Pgcstop
5463 pp.sudogcache = pp.sudogbuf[:0]
5464 pp.deferpool = pp.deferpoolbuf[:0]
5465 pp.wbBuf.reset()
5466 if pp.mcache == nil {
5467 if id == 0 {
5468 if mcache0 == nil {
5469 throw("missing mcache?")
5470 }
5471
5472
5473 pp.mcache = mcache0
5474 } else {
5475 pp.mcache = allocmcache()
5476 }
5477 }
5478 if raceenabled && pp.raceprocctx == 0 {
5479 if id == 0 {
5480 pp.raceprocctx = raceprocctx0
5481 raceprocctx0 = 0
5482 } else {
5483 pp.raceprocctx = raceproccreate()
5484 }
5485 }
5486 lockInit(&pp.timersLock, lockRankTimers)
5487
5488
5489
5490 timerpMask.set(id)
5491
5492
5493 idlepMask.clear(id)
5494 }
5495
5496
5497
5498
5499
5500 func (pp *p) destroy() {
5501 assertLockHeld(&sched.lock)
5502 assertWorldStopped()
5503
5504
5505 for pp.runqhead != pp.runqtail {
5506
5507 pp.runqtail--
5508 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5509
5510 globrunqputhead(gp)
5511 }
5512 if pp.runnext != 0 {
5513 globrunqputhead(pp.runnext.ptr())
5514 pp.runnext = 0
5515 }
5516 if len(pp.timers) > 0 {
5517 plocal := getg().m.p.ptr()
5518
5519
5520
5521
5522 lock(&plocal.timersLock)
5523 lock(&pp.timersLock)
5524 moveTimers(plocal, pp.timers)
5525 pp.timers = nil
5526 pp.numTimers.Store(0)
5527 pp.deletedTimers.Store(0)
5528 pp.timer0When.Store(0)
5529 unlock(&pp.timersLock)
5530 unlock(&plocal.timersLock)
5531 }
5532
5533 if gcphase != _GCoff {
5534 wbBufFlush1(pp)
5535 pp.gcw.dispose()
5536 }
5537 for i := range pp.sudogbuf {
5538 pp.sudogbuf[i] = nil
5539 }
5540 pp.sudogcache = pp.sudogbuf[:0]
5541 pp.pinnerCache = nil
5542 for j := range pp.deferpoolbuf {
5543 pp.deferpoolbuf[j] = nil
5544 }
5545 pp.deferpool = pp.deferpoolbuf[:0]
5546 systemstack(func() {
5547 for i := 0; i < pp.mspancache.len; i++ {
5548
5549 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5550 }
5551 pp.mspancache.len = 0
5552 lock(&mheap_.lock)
5553 pp.pcache.flush(&mheap_.pages)
5554 unlock(&mheap_.lock)
5555 })
5556 freemcache(pp.mcache)
5557 pp.mcache = nil
5558 gfpurge(pp)
5559 traceProcFree(pp)
5560 if raceenabled {
5561 if pp.timerRaceCtx != 0 {
5562
5563
5564
5565
5566
5567 mp := getg().m
5568 phold := mp.p.ptr()
5569 mp.p.set(pp)
5570
5571 racectxend(pp.timerRaceCtx)
5572 pp.timerRaceCtx = 0
5573
5574 mp.p.set(phold)
5575 }
5576 raceprocdestroy(pp.raceprocctx)
5577 pp.raceprocctx = 0
5578 }
5579 pp.gcAssistTime = 0
5580 pp.status = _Pdead
5581 }
5582
5583
5584
5585
5586
5587
5588
5589
5590
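// procresize changes the number of Ps to nprocs, growing or shrinking
// allp and the idle/timer masks as required. sched.lock must be held
// and the world must be stopped. It returns the list of Ps that still
// have local work and therefore must be started by the caller; every
// other P is placed on the idle list.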
5591 func procresize(nprocs int32) *p {
5592 assertLockHeld(&sched.lock)
5593 assertWorldStopped()
5594
5595 old := gomaxprocs
5596 if old < 0 || nprocs <= 0 {
5597 throw("procresize: invalid arg")
5598 }
5599 trace := traceAcquire()
5600 if trace.ok() {
5601 trace.Gomaxprocs(nprocs)
5602 traceRelease(trace)
5603 }
5604
5605
5606 now := nanotime()
5607 if sched.procresizetime != 0 {
5608 sched.totaltime += int64(old) * (now - sched.procresizetime)
5609 }
5610 sched.procresizetime = now
5611
5612 maskWords := (nprocs + 31) / 32
5613
5614
5615 if nprocs > int32(len(allp)) {
5616
5617
5618 lock(&allpLock)
5619 if nprocs <= int32(cap(allp)) {
5620 allp = allp[:nprocs]
5621 } else {
5622 nallp := make([]*p, nprocs)
5623
5624
5625 copy(nallp, allp[:cap(allp)])
5626 allp = nallp
5627 }
5628
5629 if maskWords <= int32(cap(idlepMask)) {
5630 idlepMask = idlepMask[:maskWords]
5631 timerpMask = timerpMask[:maskWords]
5632 } else {
5633 nidlepMask := make([]uint32, maskWords)
5634
5635 copy(nidlepMask, idlepMask)
5636 idlepMask = nidlepMask
5637
5638 ntimerpMask := make([]uint32, maskWords)
5639 copy(ntimerpMask, timerpMask)
5640 timerpMask = ntimerpMask
5641 }
5642 unlock(&allpLock)
5643 }
5644
5645
5646 for i := old; i < nprocs; i++ {
5647 pp := allp[i]
5648 if pp == nil {
5649 pp = new(p)
5650 }
5651 pp.init(i)
5652 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5653 }
5654
5655 gp := getg()
5656 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5657
5658 gp.m.p.ptr().status = _Prunning
5659 gp.m.p.ptr().mcache.prepareForSweep()
5660 } else {
5661
5662
5663
5664
5665
5666 if gp.m.p != 0 {
5667 trace := traceAcquire()
5668 if trace.ok() {
5669
5670
5671
5672 trace.GoSched()
5673 trace.ProcStop(gp.m.p.ptr())
5674 traceRelease(trace)
5675 }
5676 gp.m.p.ptr().m = 0
5677 }
5678 gp.m.p = 0
5679 pp := allp[0]
5680 pp.m = 0
5681 pp.status = _Pidle
5682 acquirep(pp)
5683 trace := traceAcquire()
5684 if trace.ok() {
5685 trace.GoStart()
5686 traceRelease(trace)
5687 }
5688 }
5689
5690
5691 mcache0 = nil
5692
5693
5694 for i := nprocs; i < old; i++ {
5695 pp := allp[i]
5696 pp.destroy()
5697
5698 }
5699
5700
5701 if int32(len(allp)) != nprocs {
5702 lock(&allpLock)
5703 allp = allp[:nprocs]
5704 idlepMask = idlepMask[:maskWords]
5705 timerpMask = timerpMask[:maskWords]
5706 unlock(&allpLock)
5707 }
5708
5709 var runnablePs *p
5710 for i := nprocs - 1; i >= 0; i-- {
5711 pp := allp[i]
5712 if gp.m.p.ptr() == pp {
5713 continue
5714 }
5715 pp.status = _Pidle
5716 if runqempty(pp) {
5717 pidleput(pp, now)
5718 } else {
5719 pp.m.set(mget())
5720 pp.link.set(runnablePs)
5721 runnablePs = pp
5722 }
5723 }
5724 stealOrder.reset(uint32(nprocs))
5725 var int32p *int32 = &gomaxprocs
5726 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5727 if old != nprocs {
5728
5729 gcCPULimiter.resetCapacity(now, nprocs)
5730 }
5731 return runnablePs
5732 }
5733
5734
5735
5736
5737
5738
5739
5740 func acquirep(pp *p) {
5741
5742 wirep(pp)
5743
5744
5745
5746
5747
5748 pp.mcache.prepareForSweep()
5749
5750 trace := traceAcquire()
5751 if trace.ok() {
5752 trace.ProcStart()
5753 traceRelease(trace)
5754 }
5755 }
5756
5757
5758
5759
5760
5761
5762
5763 func wirep(pp *p) {
5764 gp := getg()
5765
5766 if gp.m.p != 0 {
5767
5768
5769 systemstack(func() {
5770 throw("wirep: already in go")
5771 })
5772 }
5773 if pp.m != 0 || pp.status != _Pidle {
5774
5775
5776 systemstack(func() {
5777 id := int64(0)
5778 if pp.m != 0 {
5779 id = pp.m.ptr().id
5780 }
5781 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5782 throw("wirep: invalid p state")
5783 })
5784 }
5785 gp.m.p.set(pp)
5786 pp.m.set(gp.m)
5787 pp.status = _Prunning
5788 }
5789
5790
5791 func releasep() *p {
5792 trace := traceAcquire()
5793 if trace.ok() {
5794 trace.ProcStop(getg().m.p.ptr())
5795 traceRelease(trace)
5796 }
5797 return releasepNoTrace()
5798 }
5799
5800
5801 func releasepNoTrace() *p {
5802 gp := getg()
5803
5804 if gp.m.p == 0 {
5805 throw("releasep: invalid arg")
5806 }
5807 pp := gp.m.p.ptr()
5808 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5809 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5810 throw("releasep: invalid p state")
5811 }
5812 gp.m.p = 0
5813 pp.m = 0
5814 pp.status = _Pidle
5815 return pp
5816 }
5817
5818 func incidlelocked(v int32) {
5819 lock(&sched.lock)
5820 sched.nmidlelocked += v
5821 if v > 0 {
5822 checkdead()
5823 }
5824 unlock(&sched.lock)
5825 }
5826
5827
5828
5829
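// checkdead reports a fatal deadlock if no goroutine can ever run
// again: no running threads beyond those permitted, no runnable or
// running goroutines, and no timers that could create new work.
// sched.lock must be held.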
5830 func checkdead() {
5831 assertLockHeld(&sched.lock)
5832
5833
5834
5835
5836 if islibrary || isarchive {
5837 return
5838 }
5839
5840
5841
5842
5843
5844 if panicking.Load() > 0 {
5845 return
5846 }
5847
5848
5849
5850
5851
5852 var run0 int32
5853 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5854 run0 = 1
5855 }
5856
5857 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5858 if run > run0 {
5859 return
5860 }
5861 if run < 0 {
5862 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5863 unlock(&sched.lock)
5864 throw("checkdead: inconsistent counts")
5865 }
5866
5867 grunning := 0
5868 forEachG(func(gp *g) {
5869 if isSystemGoroutine(gp, false) {
5870 return
5871 }
5872 s := readgstatus(gp)
5873 switch s &^ _Gscan {
5874 case _Gwaiting,
5875 _Gpreempted:
5876 grunning++
5877 case _Grunnable,
5878 _Grunning,
5879 _Gsyscall:
5880 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5881 unlock(&sched.lock)
5882 throw("checkdead: runnable g")
5883 }
5884 })
5885 if grunning == 0 {
5886 unlock(&sched.lock)
5887 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5888 }
5889
5890
5891 if faketime != 0 {
5892 if when := timeSleepUntil(); when < maxWhen {
5893 faketime = when
5894
5895
5896 pp, _ := pidleget(faketime)
5897 if pp == nil {
5898
5899
5900 unlock(&sched.lock)
5901 throw("checkdead: no p for timer")
5902 }
5903 mp := mget()
5904 if mp == nil {
5905
5906
5907 unlock(&sched.lock)
5908 throw("checkdead: no m for timer")
5909 }
5910
5911
5912
5913 sched.nmspinning.Add(1)
5914 mp.spinning = true
5915 mp.nextp.set(pp)
5916 notewakeup(&mp.park)
5917 return
5918 }
5919 }
5920
5921
5922 for _, pp := range allp {
5923 if len(pp.timers) > 0 {
5924 return
5925 }
5926 }
5927
5928 unlock(&sched.lock)
5929 fatal("all goroutines are asleep - deadlock!")
5930 }
5931
5932
5933
5934
5935
5936
5937 var forcegcperiod int64 = 2 * 60 * 1e9
5938
5939
5940
5941 var needSysmonWorkaround bool = false
5942
5943
5944
5945
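// sysmon is the system monitor. It runs on a dedicated M without a P,
// in a loop whose sleep interval backs off from 20us up to 10ms while
// the system stays idle. Each iteration it polls the network if nobody
// has done so recently, retakes Ps that are blocked in syscalls or have
// run a goroutine for too long, wakes the scavenger when asked, and
// triggers a forced GC when one is due.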
5946 func sysmon() {
5947 lock(&sched.lock)
5948 sched.nmsys++
5949 checkdead()
5950 unlock(&sched.lock)
5951
5952 lasttrace := int64(0)
5953 idle := 0
5954 delay := uint32(0)
5955
5956 for {
5957 if idle == 0 {
5958 delay = 20
5959 } else if idle > 50 {
5960 delay *= 2
5961 }
5962 if delay > 10*1000 {
5963 delay = 10 * 1000
5964 }
5965 usleep(delay)
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982 now := nanotime()
5983 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
5984 lock(&sched.lock)
5985 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
5986 syscallWake := false
5987 next := timeSleepUntil()
5988 if next > now {
5989 sched.sysmonwait.Store(true)
5990 unlock(&sched.lock)
5991
5992
5993 sleep := forcegcperiod / 2
5994 if next-now < sleep {
5995 sleep = next - now
5996 }
5997 shouldRelax := sleep >= osRelaxMinNS
5998 if shouldRelax {
5999 osRelax(true)
6000 }
6001 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6002 if shouldRelax {
6003 osRelax(false)
6004 }
6005 lock(&sched.lock)
6006 sched.sysmonwait.Store(false)
6007 noteclear(&sched.sysmonnote)
6008 }
6009 if syscallWake {
6010 idle = 0
6011 delay = 20
6012 }
6013 }
6014 unlock(&sched.lock)
6015 }
6016
6017 lock(&sched.sysmonlock)
6018
6019
6020 now = nanotime()
6021
6022
6023 if *cgo_yield != nil {
6024 asmcgocall(*cgo_yield, nil)
6025 }
6026
6027 lastpoll := sched.lastpoll.Load()
6028 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6029 sched.lastpoll.CompareAndSwap(lastpoll, now)
6030 list, delta := netpoll(0)
6031 if !list.empty() {
6032
6033
6034
6035
6036
6037
6038
6039 incidlelocked(-1)
6040 injectglist(&list)
6041 incidlelocked(1)
6042 netpollAdjustWaiters(delta)
6043 }
6044 }
6045 if GOOS == "netbsd" && needSysmonWorkaround {
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
6057
6058
6059
6060
6061 if next := timeSleepUntil(); next < now {
6062 startm(nil, false, false)
6063 }
6064 }
6065 if scavenger.sysmonWake.Load() != 0 {
6066
6067 scavenger.wake()
6068 }
6069
6070
6071 if retake(now) != 0 {
6072 idle = 0
6073 } else {
6074 idle++
6075 }
6076
6077 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6078 lock(&forcegc.lock)
6079 forcegc.idle.Store(false)
6080 var list gList
6081 list.push(forcegc.g)
6082 injectglist(&list)
6083 unlock(&forcegc.lock)
6084 }
6085 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6086 lasttrace = now
6087 schedtrace(debug.scheddetail > 0)
6088 }
6089 unlock(&sched.sysmonlock)
6090 }
6091 }
6092
6093 type sysmontick struct {
6094 schedtick uint32
6095 schedwhen int64
6096 syscalltick uint32
6097 syscallwhen int64
6098 }
6099
6100
6101
6102 const forcePreemptNS = 10 * 1000 * 1000
6103
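// retake is called by sysmon. It preempts Ps whose current goroutine
// has been running for longer than forcePreemptNS, and it hands off Ps
// that have been stuck in a syscall long enough that their queued work
// should run elsewhere. It returns the number of Ps taken back from
// syscalls.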
6104 func retake(now int64) uint32 {
6105 n := 0
6106
6107
6108 lock(&allpLock)
6109
6110
6111
6112 for i := 0; i < len(allp); i++ {
6113 pp := allp[i]
6114 if pp == nil {
6115
6116
6117 continue
6118 }
6119 pd := &pp.sysmontick
6120 s := pp.status
6121 sysretake := false
6122 if s == _Prunning || s == _Psyscall {
6123
6124 t := int64(pp.schedtick)
6125 if int64(pd.schedtick) != t {
6126 pd.schedtick = uint32(t)
6127 pd.schedwhen = now
6128 } else if pd.schedwhen+forcePreemptNS <= now {
6129 preemptone(pp)
6130
6131
6132 sysretake = true
6133 }
6134 }
6135 if s == _Psyscall {
6136
6137 t := int64(pp.syscalltick)
6138 if !sysretake && int64(pd.syscalltick) != t {
6139 pd.syscalltick = uint32(t)
6140 pd.syscallwhen = now
6141 continue
6142 }
6143
6144
6145
6146 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6147 continue
6148 }
6149
6150 unlock(&allpLock)
6151
6152
6153
6154
6155 incidlelocked(-1)
6156 trace := traceAcquire()
6157 if atomic.Cas(&pp.status, s, _Pidle) {
6158 if trace.ok() {
6159 trace.GoSysBlock(pp)
6160 trace.ProcSteal(pp, false)
6161 traceRelease(trace)
6162 }
6163 n++
6164 pp.syscalltick++
6165 handoffp(pp)
6166 } else if trace.ok() {
6167 traceRelease(trace)
6168 }
6169 incidlelocked(1)
6170 lock(&allpLock)
6171 }
6172 }
6173 unlock(&allpLock)
6174 return uint32(n)
6175 }
6176
6177
6178
6179
6180
6181
6182 func preemptall() bool {
6183 res := false
6184 for _, pp := range allp {
6185 if pp.status != _Prunning {
6186 continue
6187 }
6188 if preemptone(pp) {
6189 res = true
6190 }
6191 }
6192 return res
6193 }
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205 func preemptone(pp *p) bool {
6206 mp := pp.m.ptr()
6207 if mp == nil || mp == getg().m {
6208 return false
6209 }
6210 gp := mp.curg
6211 if gp == nil || gp == mp.g0 {
6212 return false
6213 }
6214
6215 gp.preempt = true
6216
6217
6218
6219
6220
6221 gp.stackguard0 = stackPreempt
6222
6223
6224 if preemptMSupported && debug.asyncpreemptoff == 0 {
6225 pp.preempt = true
6226 preemptM(mp)
6227 }
6228
6229 return true
6230 }
6231
6232 var starttime int64
6233
6234 func schedtrace(detailed bool) {
6235 now := nanotime()
6236 if starttime == 0 {
6237 starttime = now
6238 }
6239
6240 lock(&sched.lock)
6241 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6242 if detailed {
6243 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6244 }
6245
6246
6247
6248 for i, pp := range allp {
6249 mp := pp.m.ptr()
6250 h := atomic.Load(&pp.runqhead)
6251 t := atomic.Load(&pp.runqtail)
6252 if detailed {
6253 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6254 if mp != nil {
6255 print(mp.id)
6256 } else {
6257 print("nil")
6258 }
6259 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
6260 } else {
6261
6262
6263 print(" ")
6264 if i == 0 {
6265 print("[")
6266 }
6267 print(t - h)
6268 if i == len(allp)-1 {
6269 print("]\n")
6270 }
6271 }
6272 }
6273
6274 if !detailed {
6275 unlock(&sched.lock)
6276 return
6277 }
6278
6279 for mp := allm; mp != nil; mp = mp.alllink {
6280 pp := mp.p.ptr()
6281 print(" M", mp.id, ": p=")
6282 if pp != nil {
6283 print(pp.id)
6284 } else {
6285 print("nil")
6286 }
6287 print(" curg=")
6288 if mp.curg != nil {
6289 print(mp.curg.goid)
6290 } else {
6291 print("nil")
6292 }
6293 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6294 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6295 print(lockedg.goid)
6296 } else {
6297 print("nil")
6298 }
6299 print("\n")
6300 }
6301
6302 forEachG(func(gp *g) {
6303 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6304 if gp.m != nil {
6305 print(gp.m.id)
6306 } else {
6307 print("nil")
6308 }
6309 print(" lockedm=")
6310 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6311 print(lockedm.id)
6312 } else {
6313 print("nil")
6314 }
6315 print("\n")
6316 })
6317 unlock(&sched.lock)
6318 }
6319
6320
6321
6322
6323
6324
6325 func schedEnableUser(enable bool) {
6326 lock(&sched.lock)
6327 if sched.disable.user == !enable {
6328 unlock(&sched.lock)
6329 return
6330 }
6331 sched.disable.user = !enable
6332 if enable {
6333 n := sched.disable.n
6334 sched.disable.n = 0
6335 globrunqputbatch(&sched.disable.runnable, n)
6336 unlock(&sched.lock)
6337 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6338 startm(nil, false, false)
6339 }
6340 } else {
6341 unlock(&sched.lock)
6342 }
6343 }
6344
6345
6346
6347
6348
6349 func schedEnabled(gp *g) bool {
6350 assertLockHeld(&sched.lock)
6351
6352 if sched.disable.user {
6353 return isSystemGoroutine(gp, true)
6354 }
6355 return true
6356 }
6357
6358
6359
6360
6361
6362
6363 func mput(mp *m) {
6364 assertLockHeld(&sched.lock)
6365
6366 mp.schedlink = sched.midle
6367 sched.midle.set(mp)
6368 sched.nmidle++
6369 checkdead()
6370 }
6371
6372
6373
6374
6375
6376
6377 func mget() *m {
6378 assertLockHeld(&sched.lock)
6379
6380 mp := sched.midle.ptr()
6381 if mp != nil {
6382 sched.midle = mp.schedlink
6383 sched.nmidle--
6384 }
6385 return mp
6386 }
6387
6388
6389
6390
6391
6392
6393 func globrunqput(gp *g) {
6394 assertLockHeld(&sched.lock)
6395
6396 sched.runq.pushBack(gp)
6397 sched.runqsize++
6398 }
6399
6400
6401
6402
6403
6404
6405 func globrunqputhead(gp *g) {
6406 assertLockHeld(&sched.lock)
6407
6408 sched.runq.push(gp)
6409 sched.runqsize++
6410 }
6411
6412
6413
6414
6415
6416
6417
6418 func globrunqputbatch(batch *gQueue, n int32) {
6419 assertLockHeld(&sched.lock)
6420
6421 sched.runq.pushBackAll(*batch)
6422 sched.runqsize += n
6423 *batch = gQueue{}
6424 }
6425
6426
6427
6428 func globrunqget(pp *p, max int32) *g {
6429 assertLockHeld(&sched.lock)
6430
6431 if sched.runqsize == 0 {
6432 return nil
6433 }
6434
6435 n := sched.runqsize/gomaxprocs + 1
6436 if n > sched.runqsize {
6437 n = sched.runqsize
6438 }
6439 if max > 0 && n > max {
6440 n = max
6441 }
6442 if n > int32(len(pp.runq))/2 {
6443 n = int32(len(pp.runq)) / 2
6444 }
6445
6446 sched.runqsize -= n
6447
6448 gp := sched.runq.pop()
6449 n--
6450 for ; n > 0; n-- {
6451 gp1 := sched.runq.pop()
6452 runqput(pp, gp1, false)
6453 }
6454 return gp
6455 }
6456
6457
6458 type pMask []uint32
6459
6460
6461 func (p pMask) read(id uint32) bool {
6462 word := id / 32
6463 mask := uint32(1) << (id % 32)
6464 return (atomic.Load(&p[word]) & mask) != 0
6465 }
6466
6467
6468 func (p pMask) set(id int32) {
6469 word := id / 32
6470 mask := uint32(1) << (id % 32)
6471 atomic.Or(&p[word], mask)
6472 }
6473
6474
6475 func (p pMask) clear(id int32) {
6476 word := id / 32
6477 mask := uint32(1) << (id % 32)
6478 atomic.And(&p[word], ^mask)
6479 }
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
6505
6506 func updateTimerPMask(pp *p) {
6507 if pp.numTimers.Load() > 0 {
6508 return
6509 }
6510
6511
6512
6513
6514 lock(&pp.timersLock)
6515 if pp.numTimers.Load() == 0 {
6516 timerpMask.clear(pp.id)
6517 }
6518 unlock(&pp.timersLock)
6519 }
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
6531
6532 func pidleput(pp *p, now int64) int64 {
6533 assertLockHeld(&sched.lock)
6534
6535 if !runqempty(pp) {
6536 throw("pidleput: P has non-empty run queue")
6537 }
6538 if now == 0 {
6539 now = nanotime()
6540 }
6541 updateTimerPMask(pp)
6542 idlepMask.set(pp.id)
6543 pp.link = sched.pidle
6544 sched.pidle.set(pp)
6545 sched.npidle.Add(1)
6546 if !pp.limiterEvent.start(limiterEventIdle, now) {
6547 throw("must be able to track idle limiter event")
6548 }
6549 return now
6550 }
6551
6552
6553
6554
6555
6556
6557
6558
6559 func pidleget(now int64) (*p, int64) {
6560 assertLockHeld(&sched.lock)
6561
6562 pp := sched.pidle.ptr()
6563 if pp != nil {
6564
6565 if now == 0 {
6566 now = nanotime()
6567 }
6568 timerpMask.set(pp.id)
6569 idlepMask.clear(pp.id)
6570 sched.pidle = pp.link
6571 sched.npidle.Add(-1)
6572 pp.limiterEvent.stop(limiterEventIdle, now)
6573 }
6574 return pp, now
6575 }
6576
6577
6578
6579
6580
6581
6582
6583
6584
6585
6586
6587 func pidlegetSpinning(now int64) (*p, int64) {
6588 assertLockHeld(&sched.lock)
6589
6590 pp, now := pidleget(now)
6591 if pp == nil {
6592
6593
6594
6595 sched.needspinning.Store(1)
6596 return nil, now
6597 }
6598
6599 return pp, now
6600 }
6601
6602
6603
6604 func runqempty(pp *p) bool {
6605
6606
6607
6608
6609 for {
6610 head := atomic.Load(&pp.runqhead)
6611 tail := atomic.Load(&pp.runqtail)
6612 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6613 if tail == atomic.Load(&pp.runqtail) {
6614 return head == tail && runnext == 0
6615 }
6616 }
6617 }
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628 const randomizeScheduler = raceenabled
6629
6630
6631
6632
6633
6634
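// runqput tries to put gp on pp's local runnable queue. If next is true
// it puts gp in the runnext slot, kicking any previous runnext g onto
// the queue. If the local queue is full, runqputslow moves half of it
// (plus gp) to the global run queue. Executed only by the owner P.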
6635 func runqput(pp *p, gp *g, next bool) {
6636 if randomizeScheduler && next && randn(2) == 0 {
6637 next = false
6638 }
6639
6640 if next {
6641 retryNext:
6642 oldnext := pp.runnext
6643 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6644 goto retryNext
6645 }
6646 if oldnext == 0 {
6647 return
6648 }
6649
6650 gp = oldnext.ptr()
6651 }
6652
6653 retry:
6654 h := atomic.LoadAcq(&pp.runqhead)
6655 t := pp.runqtail
6656 if t-h < uint32(len(pp.runq)) {
6657 pp.runq[t%uint32(len(pp.runq))].set(gp)
6658 atomic.StoreRel(&pp.runqtail, t+1)
6659 return
6660 }
6661 if runqputslow(pp, gp, h, t) {
6662 return
6663 }
6664
6665 goto retry
6666 }
6667
6668
6669
6670 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6671 var batch [len(pp.runq)/2 + 1]*g
6672
6673
6674 n := t - h
6675 n = n / 2
6676 if n != uint32(len(pp.runq)/2) {
6677 throw("runqputslow: queue is not full")
6678 }
6679 for i := uint32(0); i < n; i++ {
6680 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6681 }
6682 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6683 return false
6684 }
6685 batch[n] = gp
6686
6687 if randomizeScheduler {
6688 for i := uint32(1); i <= n; i++ {
6689 j := cheaprandn(i + 1)
6690 batch[i], batch[j] = batch[j], batch[i]
6691 }
6692 }
6693
6694
6695 for i := uint32(0); i < n; i++ {
6696 batch[i].schedlink.set(batch[i+1])
6697 }
6698 var q gQueue
6699 q.head.set(batch[0])
6700 q.tail.set(batch[n])
6701
6702
6703 lock(&sched.lock)
6704 globrunqputbatch(&q, int32(n+1))
6705 unlock(&sched.lock)
6706 return true
6707 }
6708
6709
6710
6711
6712
6713 func runqputbatch(pp *p, q *gQueue, qsize int) {
6714 h := atomic.LoadAcq(&pp.runqhead)
6715 t := pp.runqtail
6716 n := uint32(0)
6717 for !q.empty() && t-h < uint32(len(pp.runq)) {
6718 gp := q.pop()
6719 pp.runq[t%uint32(len(pp.runq))].set(gp)
6720 t++
6721 n++
6722 }
6723 qsize -= int(n)
6724
6725 if randomizeScheduler {
6726 off := func(o uint32) uint32 {
6727 return (pp.runqtail + o) % uint32(len(pp.runq))
6728 }
6729 for i := uint32(1); i < n; i++ {
6730 j := cheaprandn(i + 1)
6731 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6732 }
6733 }
6734
6735 atomic.StoreRel(&pp.runqtail, t)
6736 if !q.empty() {
6737 lock(&sched.lock)
6738 globrunqputbatch(q, int32(qsize))
6739 unlock(&sched.lock)
6740 }
6741 }
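// For example, assuming a 256-slot local run queue with 250 slots occupied and
// q holding 10 goroutines: the loop above places 6 of them locally and the
// remaining 4 are handed to the global queue under sched.lock.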
6742
6743 // runqget gets a g from the local runnable queue.
6744 // If inheritTime is true, gp should inherit the remaining time in the
6745 // current time slice; otherwise, it should start a new time slice.
6746 // Executed only by the owner P.
6747 func runqget(pp *p) (gp *g, inheritTime bool) {
6748 // If there's a runnext, it's the next G to run.
6749 next := pp.runnext
6750 // If runnext is non-zero and the CAS fails, it can only have been stolen by
6751 // another P, because other Ps only ever replace it with zero; in that case
6752 // fall through to the ring buffer below.
6753 if next != 0 && pp.runnext.cas(next, 0) {
6754 return next.ptr(), true
6755 }
6756
6757 for {
6758 h := atomic.LoadAcq(&pp.runqhead)
6759 t := pp.runqtail
6760 if t == h {
6761 return nil, false
6762 }
6763 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6764 if atomic.CasRel(&pp.runqhead, h, h+1) {
6765 return gp, false
6766 }
6767 }
6768 }
6769
6770 // runqdrain drains the local runnable queue of pp and returns all goroutines
6771 // in it. Executed only by the owner P.
6772 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6773 oldNext := pp.runnext
6774 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6775 drainQ.pushBack(oldNext.ptr())
6776 n++
6777 }
6778
6779 retry:
6780 h := atomic.LoadAcq(&pp.runqhead)
6781 t := pp.runqtail
6782 qn := t - h
6783 if qn == 0 {
6784 return
6785 }
6786 if qn > uint32(len(pp.runq)) {
6787 goto retry
6788 }
6789
6790 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6791 goto retry
6792 }
6793
6794
6795
6796
6797 // The CAS above moved runqhead past [h, h+qn), so concurrent stealers will
6798 // not touch those slots, and new Gs are appended only at the tail by the
6799 // owner P, which is the P executing this call. The claimed slots can
6800 // therefore be copied into drainQ without further synchronization.
6801 for i := uint32(0); i < qn; i++ {
6802 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6803 drainQ.pushBack(gp)
6804 n++
6805 }
6806 return
6807 }
6808
6809 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
6810 // batch is a ring buffer starting at batchHead.
6811 // Returns the number of grabbed goroutines.
6812 // Can be executed by any P.
6813 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6814 for {
6815 h := atomic.LoadAcq(&pp.runqhead)
6816 t := atomic.LoadAcq(&pp.runqtail)
6817 n := t - h
6818 n = n - n/2
6819 if n == 0 {
6820 if stealRunNextG {
6821 // Try to steal from pp.runnext.
6822 if next := pp.runnext; next != 0 {
6823 if pp.status == _Prunning {
6824 // Sleep to ensure that pp isn't about to run the g
6825 // we are about to steal.
6826 // The important case is when the g running on pp
6827 // ready()s another g and then, almost immediately,
6828 // blocks. Instead of stealing runnext in this
6829 // window, back off to give pp a chance to schedule
6830 // runnext itself. This avoids thrashing gs between
6831 // different Ps.
6832 // A sync chan send/recv takes on the order of 50ns,
6833 // so sleeping for 3us gives ample margin.
6834 if !osHasLowResTimer {
6835 usleep(3)
6836 } else {
6837 // On some platforms the system timer granularity is
6838 // 1-15ms, which is far too coarse for this
6839 // optimization, so just yield instead.
6840 osyield()
6841 }
6842 }
6843 if !pp.runnext.cas(next, 0) {
6844 continue
6845 }
6846 batch[batchHead%uint32(len(batch))] = next
6847 return 1
6848 }
6849 }
6850 return 0
6851 }
6852 if n > uint32(len(pp.runq)/2) {
6853 continue
6854 }
6855 for i := uint32(0); i < n; i++ {
6856 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6857 batch[(batchHead+i)%uint32(len(batch))] = g
6858 }
6859 if atomic.CasRel(&pp.runqhead, h, h+n) {
6860 return n
6861 }
6862 }
6863 }
6864
6865 // runqsteal steals half of the elements from the local runnable queue of p2
6866 // and puts them onto the local runnable queue of pp.
6867 // Returns one of the stolen elements (or nil if it failed).
6868 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6869 t := pp.runqtail
6870 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6871 if n == 0 {
6872 return nil
6873 }
6874 n--
6875 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6876 if n == 0 {
6877 return gp
6878 }
6879 h := atomic.LoadAcq(&pp.runqhead)
6880 if t-h+n >= uint32(len(pp.runq)) {
6881 throw("runqsteal: runq overflow")
6882 }
6883 atomic.StoreRel(&pp.runqtail, t+n)
6884 return gp
6885 }
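// Worked example: if p2's local queue holds 5 goroutines, runqgrab takes
// n == 5 - 5/2 == 3 of them (the steal-half policy rounds up). runqsteal then
// returns the last grabbed goroutine directly and publishes the other two by
// advancing pp.runqtail, so the thief can start running work immediately while
// keeping a small local buffer.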
6886
6887 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only be
6888 // on one gQueue or gList at a time.
6889 type gQueue struct {
6890 head guintptr
6891 tail guintptr
6892 }
6893
6894 // empty reports whether q is empty.
6895 func (q *gQueue) empty() bool {
6896 return q.head == 0
6897 }
6898
6899 // push adds gp to the head of q.
6900 func (q *gQueue) push(gp *g) {
6901 gp.schedlink = q.head
6902 q.head.set(gp)
6903 if q.tail == 0 {
6904 q.tail.set(gp)
6905 }
6906 }
6907
6908 // pushBack adds gp to the tail of q.
6909 func (q *gQueue) pushBack(gp *g) {
6910 gp.schedlink = 0
6911 if q.tail != 0 {
6912 q.tail.ptr().schedlink.set(gp)
6913 } else {
6914 q.head.set(gp)
6915 }
6916 q.tail.set(gp)
6917 }
6918
6919 // pushBackAll adds all Gs in q2 to the tail of q.
6920 // After this q2 must not be used.
6921 func (q *gQueue) pushBackAll(q2 gQueue) {
6922 if q2.tail == 0 {
6923 return
6924 }
6925 q2.tail.ptr().schedlink = 0
6926 if q.tail != 0 {
6927 q.tail.ptr().schedlink = q2.head
6928 } else {
6929 q.head = q2.head
6930 }
6931 q.tail = q2.tail
6932 }
6933
6934 // pop removes and returns the head of queue q.
6935 // It returns nil if q is empty.
6936 func (q *gQueue) pop() *g {
6937 gp := q.head.ptr()
6938 if gp != nil {
6939 q.head = gp.schedlink
6940 if q.head == 0 {
6941 q.tail = 0
6942 }
6943 }
6944 return gp
6945 }
6946
6947 // popList takes all Gs in q and returns them as a gList.
6948 func (q *gQueue) popList() gList {
6949 stack := gList{q.head}
6950 *q = gQueue{}
6951 return stack
6952 }
6953
6954 // A gList is a list of Gs linked through g.schedlink. A G can only be
6955 // on one gQueue or gList at a time.
6956 type gList struct {
6957 head guintptr
6958 }
6959
6960 // empty reports whether l is empty.
6961 func (l *gList) empty() bool {
6962 return l.head == 0
6963 }
6964
6965 // push adds gp to the head of l.
6966 func (l *gList) push(gp *g) {
6967 gp.schedlink = l.head
6968 l.head.set(gp)
6969 }
6970
6971 // pushAll prepends all Gs in q to l.
6972 func (l *gList) pushAll(q gQueue) {
6973 if !q.empty() {
6974 q.tail.ptr().schedlink = l.head
6975 l.head = q.head
6976 }
6977 }
6978
6979 // pop removes and returns the head of l. If l is empty, it returns nil.
6980 func (l *gList) pop() *g {
6981 gp := l.head.ptr()
6982 if gp != nil {
6983 l.head = gp.schedlink
6984 }
6985 return gp
6986 }
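// Taken together: a gQueue used with pushBack/pop behaves as a FIFO queue,
// while a gList is a simple LIFO stack. Both thread Gs through g.schedlink,
// which is why a G may sit on at most one gQueue or gList at a time.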
6987 // setMaxThreads updates the limit on the number of OS threads
6988 // (sched.maxmcount) and returns the previous limit.
6989 func setMaxThreads(in int) (out int) {
6990 lock(&sched.lock)
6991 out = int(sched.maxmcount)
6992 if in > 0x7fffffff {
6993 sched.maxmcount = 0x7fffffff
6994 } else {
6995 sched.maxmcount = int32(in)
6996 }
6997 checkmcount()
6998 unlock(&sched.lock)
6999 return
7000 }
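// setMaxThreads is the runtime side of runtime/debug.SetMaxThreads. An
// illustrative use from user code:
//
//	prev := debug.SetMaxThreads(20000) // raise the OS thread limit
//	defer debug.SetMaxThreads(prev)    // restore the previous limit
//
// where debug is "runtime/debug".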
7001
7002 // procPin pins the calling goroutine to its current P by incrementing m.locks and returns the P's id.
7003 func procPin() int {
7004 gp := getg()
7005 mp := gp.m
7006
7007 mp.locks++
7008 return int(mp.p.ptr().id)
7009 }
7010
7011 // procUnpin undoes a previous procPin by decrementing m.locks.
7012 func procUnpin() {
7013 gp := getg()
7014 gp.m.locks--
7015 }
7016
7017 // sync_runtime_procPin is exposed to package sync as runtime_procPin;
7018 // see procPin.
7019 func sync_runtime_procPin() int {
7020 return procPin()
7021 }
7022
7023 // sync_runtime_procUnpin is exposed to package sync as runtime_procUnpin;
7024 // see procUnpin.
7025 func sync_runtime_procUnpin() {
7026 procUnpin()
7027 }
7028
7029 // sync_atomic_runtime_procPin is exposed to package sync/atomic as
7030 // runtime_procPin; see procPin.
7031 func sync_atomic_runtime_procPin() int {
7032 return procPin()
7033 }
7034
7035 // sync_atomic_runtime_procUnpin is exposed to package sync/atomic as
7036 // runtime_procUnpin; see procUnpin.
7037 func sync_atomic_runtime_procUnpin() {
7038 procUnpin()
7039 }
7040
7041
7042
7043 // sync_runtime_canSpin reports whether spinning is worthwhile at this point;
7044 // it is exposed to package sync for the mutex slow paths.
7045 func sync_runtime_canSpin(i int) bool {
7046 // sync.Mutex is cooperative, so we are conservative with spinning.
7047 // Spin only a few times and only if running on a multicore machine,
7048 // GOMAXPROCS > 1, there is at least one other running P, and the local
7049 // run queue is empty. Unlike runtime mutexes we do not do passive spinning
7050 // here, since there can be work on the global run queue or on other Ps.
7051 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7052 return false
7053 }
7054 if p := getg().m.p.ptr(); !runqempty(p) {
7055 return false
7056 }
7057 return true
7058 }
7059
7060 // sync_runtime_doSpin performs one round of active spinning; it is exposed
7061 // to package sync.
7062 func sync_runtime_doSpin() {
7063 procyield(active_spin_cnt)
7064 }
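// Roughly, the sync.Mutex slow path uses the two functions above in a loop of
// the following shape (a sketch, not the actual sync implementation):
//
//	iter := 0
//	for stillLocked() && runtime_canSpin(iter) { // stillLocked is a stand-in for sync's own state check
//		runtime_doSpin() // a short burst of busy-waiting via procyield
//		iter++
//	}
//	// give up and park on a semaphore instead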
7065
7066 var stealOrder randomOrder
7067
7068 // randomOrder/randomEnum are helper types for randomized work stealing.
7069 // They allow enumerating all Ps in different pseudo-random orders without
7070 // repetitions: starting at an arbitrary position and repeatedly stepping by
7071 // an increment that is coprime with the count visits every index exactly once.
7072 type randomOrder struct {
7073 count uint32
7074 coprimes []uint32
7075 }
7076
7077 type randomEnum struct {
7078 i uint32
7079 count uint32
7080 pos uint32
7081 inc uint32
7082 }
7083
7084 func (ord *randomOrder) reset(count uint32) {
7085 ord.count = count
7086 ord.coprimes = ord.coprimes[:0]
7087 for i := uint32(1); i <= count; i++ {
7088 if gcd(i, count) == 1 {
7089 ord.coprimes = append(ord.coprimes, i)
7090 }
7091 }
7092 }
7093
7094 func (ord *randomOrder) start(i uint32) randomEnum {
7095 return randomEnum{
7096 count: ord.count,
7097 pos: i % ord.count,
7098 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7099 }
7100 }
7101
7102 func (enum *randomEnum) done() bool {
7103 return enum.i == enum.count
7104 }
7105
7106 func (enum *randomEnum) next() {
7107 enum.i++
7108 enum.pos = (enum.pos + enum.inc) % enum.count
7109 }
7110
7111 func (enum *randomEnum) position() uint32 {
7112 return enum.pos
7113 }
7114
7115 func gcd(a, b uint32) uint32 {
7116 for b != 0 {
7117 a, b = b, a%b
7118 }
7119 return a
7120 }
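// A worked example of the enumeration: with count == 4, reset collects the
// coprimes {1, 3}. start(5) then yields pos == 5%4 == 1 and inc == coprimes[1] == 3,
// and successive next calls visit positions 1, 0, 3, 2: every P exactly once, in
// an order that varies with the value passed to start.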
7121
7122 // An initTask represents the set of initializations that need to be done
7123 // for a package. Its layout is produced by the compiler.
7124 type initTask struct {
7125 state uint32
7126 nfns uint32
7127 // followed by nfns function PCs, uintptr-sized, one per init function to run (see doInit1)
7128 }
7129
7130 // inittrace collects statistics about init functions when init tracing is
7131 // enabled via GODEBUG; the counters are updated while active is true.
7132 var inittrace tracestat
7133
7134 type tracestat struct {
7135 active bool // init tracing activation status
7136 id uint64 // init goroutine id
7137 allocs uint64 // cumulative heap allocations
7138 bytes uint64 // cumulative heap bytes allocated
7139 }
7140
7141 func doInit(ts []*initTask) {
7142 for _, t := range ts {
7143 doInit1(t)
7144 }
7145 }
7146
7147 func doInit1(t *initTask) {
7148 switch t.state {
7149 case 2: // fully initialized
7150 return
7151 case 1: // initialization in progress
7152 throw("recursive call during initialization - linker skew")
7153 default: // not yet initialized
7154 t.state = 1
7155
7156 var (
7157 start int64
7158 before tracestat
7159 )
7160
7161 if inittrace.active {
7162 start = nanotime()
7163 // Load stats non-atomically; they are only updated by the init goroutine.
7164 before = inittrace
7165 }
7166
7167 if t.nfns == 0 {
7168 // Tasks with no functions should have been pruned by the linker.
7169 throw("inittask with no functions")
7170 }
7171
7172 firstFunc := add(unsafe.Pointer(t), 8)
7173 for i := uint32(0); i < t.nfns; i++ {
7174 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7175 f := *(*func())(unsafe.Pointer(&p))
7176 f()
7177 }
7178
7179 if inittrace.active {
7180 end := nanotime()
7181 // Load stats again, non-atomically, as above.
7182 after := inittrace
7183 // Use the PC of the first init function to find the package path for the trace line.
7184 f := *(*func())(unsafe.Pointer(&firstFunc))
7185 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7186
7187 var sbuf [24]byte
7188 print("init ", pkg, " @")
7189 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7190 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7191 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7192 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7193 print("\n")
7194 }
7195
7196 t.state = 2
7197 }
7198 }
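// The pointer arithmetic in doInit1 relies on the initTask layout: an 8-byte
// header (state and nfns, two uint32s) followed by nfns uintptr-sized code
// pointers. For a package with two init functions the task is, in effect,
//
//	{ state uint32; nfns uint32; pc0 uintptr; pc1 uintptr }
//
// and the loop reads pc0 at offset 8 and pc1 at offset 8+PtrSize. Taking the
// address of each pc slot and reinterpreting it as a func() works because a Go
// func value is a pointer to a funcval whose first word is the code pointer.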
7199