Source file: src/runtime/mklockrank.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph, written in the DSL parsed by
// internal/dag. "a < b" means a must be acquired before b if both are
// held (equivalently, a may already be held when b is acquired). Both
// sides of < may be comma-separated lists, chains such as
// "NONE < sysmon < scavenge" are allowed, each declaration ends with
// ";", NONE on the left marks ranks with no prerequisites, and "#"
// begins a comment.
//
// Names written in all caps (such as SCHED, MALLOC, or WB) are
// pseudo-ranks: they group and document layers of the graph but do
// not correspond to real runtime locks, and no lockRank constants are
// generated for them (see isPseudo below).
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  sweep;

# Test only
NONE < testR, testW;

# Scheduler, timers, netpoll
NONE <
  allocmW,
  execW,
  cpuprof,
  pollDesc,
  wakeableSleep;
assistQueue,
  cpuprof,
  forcegc,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR
< sched;
sched < allg, allp;
allp, wakeableSleep < timers;
timers < netpollInit;

# Channels
scavenge, sweep, testR, wakeableSleep < hchan;
NONE < notifyList;
hchan, notifyList < sudog;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timers,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  sudog
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event.
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
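
// As an illustration of how a declaration above becomes generated
// output (a sketch; the real lists come from the transitive closure
// computed below): "hchan, notifyList < sudog" says both hchan and
// notifyList may already be held when a sudog lock is acquired, so
// the generated lockPartialOrder contains an entry along the lines of
//
//	lockRankSudog: {..., lockRankHchan, lockRankNotifyList, ...},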

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks by other means.
var cyclicRanks = map[string]bool{
	// Multiple timers locks can be held at once, e.g. when timers
	// are moved between Ps.
	"timers": true,
	// Multiple hchan locks can be held, e.g. select acquires its
	// channels' locks in address order.
	"hchan": true,
	// hchanLeaf follows the same rule as hchan, but can be
	// acquired while holding gscan (see the ranks graph above).
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock, so it is
	// acquired twice on purpose.
	"deadlock": true,
}
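// Self-cycles surface in both outputs below: generateDot draws them
// as explicit self-edges, and generateGo lists such a rank in its own
// lockPartialOrder entry, e.g. (a sketch):
//
//	lockRankDeadlock: {..., lockRankDeadlock},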

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintf(os.Stderr, "too many arguments\n")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Transpose the graph so that edges in the rendered output
		// point in acquisition order: from a lock to the locks that
		// may be acquired after it.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}
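
// Typical invocations (a sketch, not taken from this file; only the
// -o and -dot flags are defined above, and the second pipeline
// assumes the Graphviz dot tool is installed):
//
//	go run mklockrank.go -o lockrank.go
//	go run mklockrank.go -dot | dot -Tsvg -o lockrank.svg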

// generateGo emits the Go source of lockrank.go for graph g.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Compute the topological order and reverse it so that the
	// generated constants increase in rank order: a lock with a
	// lower constant must be acquired before one with a higher
	// constant when both are held.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	// Pseudo-ranks are emitted as comments so the generated constant
	// block still shows the layering of the graph.
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)
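	// Locks that never appear in the ranks graph above are leaf
	// locks: they all share the sentinel rank just emitted instead
	// of getting entries in lockNames or lockPartialOrder.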

	// Emit the name table used by lockRank.String.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Emit the partial-order table checked by lock ranking.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the generated Go constant name for the given rank label:
// "lockRank" plus the label with its first letter upper-cased.
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

// isPseudo reports whether label names a pseudo-rank: an all-caps layer
// marker such as MALLOC that does not correspond to a real lock.
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}
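
// For example (illustrative only):
//
//	cname("sysmon")    // "lockRankSysmon"
//	isPseudo("MALLOC") // true
//	isPseudo("sysmon") // false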

// generateDot emits graph g in Graphviz dot format.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Declare all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Emit all edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
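
// The emitted dot output has the shape (a sketch, not verbatim):
//
//	digraph g {
//	"sysmon";
//	"scavenge";
//	...
//	"sysmon" -> "scavenge";
//	"deadlock" -> "deadlock";
//	...
//	}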