Documentation: runtime
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. The load
// factor used below (an average of 6.5 key/elem pairs per
// 8-slot bucket) was chosen as a reasonable trade-off between
// overflow-bucket rate, bytes per entry, and probe lengths.

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits
	bucketCnt     = abi.MapBucketCount

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full).
	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * bucketCnt * 13 / 16

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big elems - the cutoff size for the
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
	maxKeySize  = abi.MapMaxKeyBytes
	maxElemSize = abi.MapMaxElemBytes

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an
// empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
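
The packed layout is easiest to see on a concrete instantiation. A minimal sketch, not part of map.go: the struct below mirrors the compiler-generated bucket for a map[int64]int8, with hypothetical field names, and shows why grouping all keys and then all elems avoids per-pair padding.

type bucketInt64Int8Sketch struct {
	tophash  [8]uint8       // bucketCnt entries, as in bmap above
	keys     [8]int64       // all keys packed together...
	elems    [8]int8        // ...then all elems, so no padding between pairs
	overflow unsafe.Pointer // pointer to the next overflow bucket, if any
}

// unsafe.Sizeof(bucketInt64Int8Sketch{}) is 88 bytes on 64-bit platforms; an
// interleaved key/elem layout would need 16 bytes per pair (144 bytes total).
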

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
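
A minimal sketch, not part of map.go, of how a lookup uses the three helpers above; the function name and the choice of B == 5 are illustrative assumptions.

func hashSplitSketch(hash uintptr) (bucket uintptr, top uint8) {
	const B = 5                   // a table with 1<<5 == 32 buckets
	bucket = hash & bucketMask(B) // low-order B bits select the bucket
	top = tophash(hash)           // top 8 bits, bumped past the reserved marker values
	return bucket, top
}
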
203
204 func evacuated(b *bmap) bool {
205 h := b.tophash[0]
206 return h > emptyOne && h < minTopHash
207 }
208
209 func (b *bmap) overflow(t *maptype) *bmap {
210 return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
211 }
212
213 func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
214 *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
215 }
216
217 func (b *bmap) keys() unsafe.Pointer {
218 return add(unsafe.Pointer(b), dataOffset)
219 }

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}
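
A worked sketch of the approximate counting above, not part of map.go; the helper name is hypothetical. It returns the denominator of the increment probability for a given B.

func noverflowIncrementOddsSketch(B uint8) uint32 {
	if B < 16 {
		return 1 // small tables: every overflow bucket is counted exactly
	}
	// e.g. B == 18: mask == 7, so the increment happens with probability 1/8
	return uint32(1) << (B - 15)
}
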
246
247 func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
248 var ovf *bmap
249 if h.extra != nil && h.extra.nextOverflow != nil {
// We have preallocated overflow buckets available.
// See makeBucketArray for more details.
252 ovf = h.extra.nextOverflow
253 if ovf.overflow(t) == nil {
// We're not at the end of the preallocated overflow buckets. Bump the pointer.
255 h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
256 } else {
// This is the last preallocated overflow bucket.
// Reset the overflow pointer on this bucket,
// which was set to a non-nil sentinel value.
260 ovf.setoverflow(t, nil)
261 h.extra.nextOverflow = nil
262 }
263 } else {
264 ovf = (*bmap)(newobject(t.Bucket))
265 }
266 h.incrnoverflow()
267 if t.Bucket.PtrBytes == 0 {
268 h.createOverflow()
269 *h.extra.overflow = append(*h.extra.overflow, ovf)
270 }
271 b.setoverflow(t, ovf)
272 return ovf
273 }
274
275 func (h *hmap) createOverflow() {
276 if h.extra == nil {
277 h.extra = new(mapextra)
278 }
279 if h.extra.overflow == nil {
280 h.extra.overflow = new([]*bmap)
281 }
282 }
283
284 func makemap64(t *maptype, hint int64, h *hmap) *hmap {
285 if int64(int(hint)) != hint {
286 hint = 0
287 }
288 return makemap(t, int(hint), h)
289 }

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
294 func makemap_small() *hmap {
295 h := new(hmap)
296 h.hash0 = uint32(rand())
297 return h
298 }
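
At user level this path is reached by make calls with no (or a small, compile-time-known) capacity hint. A minimal sketch, not part of map.go, with a hypothetical function name:

func smallMapSketch() map[string]int {
	m := make(map[string]int) // header only: h.buckets stays nil for now
	m["first"] = 1            // mapassign sees h.buckets == nil and allocates bucket 0 lazily
	return m
}
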

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
305 func makemap(t *maptype, hint int, h *hmap) *hmap {
306 mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
307 if overflow || mem > maxAlloc {
308 hint = 0
309 }
310
311
312 if h == nil {
313 h = new(hmap)
314 }
315 h.hash0 = uint32(rand())

// Find the size parameter B which will hold the requested # of elements.
// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
319 B := uint8(0)
320 for overLoadFactor(hint, B) {
321 B++
322 }
323 h.B = B
324

// allocate initial hash table
// if B == 0, the buckets field is allocated lazily later (in mapassign)
328 if h.B != 0 {
329 var nextOverflow *bmap
330 h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
331 if nextOverflow != nil {
332 h.extra = new(mapextra)
333 h.extra.nextOverflow = nextOverflow
334 }
335 }
336
337 return h
338 }
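
A worked example of the B-selection loop above, not part of map.go; the helper name is hypothetical. For a hint of 100 it returns 4, since 100 entries exceed the load-factor bound of 52 for 8 buckets but fit under the bound of 104 for 16 buckets.

func pickBSketch(hint int) uint8 {
	B := uint8(0)
	for overLoadFactor(hint, B) { // same loop as makemap
		B++
	}
	return B // pickBSketch(100) == 4, i.e. 1<<4 == 16 initial buckets
}
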
339
// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated and
// otherwise dirtyalloc will be cleared and reused as backing array.
346 func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
347 base := bucketShift(b)
348 nbuckets := base
// For small b, overflow buckets are unlikely.
// Avoid the overhead of the calculation.
351 if b >= 4 {
// Add on the estimated number of overflow buckets
// required to insert the median number of elements
// used with this value of b.
355 nbuckets += bucketShift(b - 4)
356 sz := t.Bucket.Size_ * nbuckets
357 up := roundupsize(sz, t.Bucket.PtrBytes == 0)
358 if up != sz {
359 nbuckets = up / t.Bucket.Size_
360 }
361 }
362
363 if dirtyalloc == nil {
364 buckets = newarray(t.Bucket, int(nbuckets))
365 } else {
// dirtyalloc was previously generated by
// the above newarray(t.Bucket, int(nbuckets))
// but may not be empty.
369 buckets = dirtyalloc
370 size := t.Bucket.Size_ * nbuckets
371 if t.Bucket.PtrBytes != 0 {
372 memclrHasPointers(buckets, size)
373 } else {
374 memclrNoHeapPointers(buckets, size)
375 }
376 }
377
378 if base != nbuckets {
// We preallocated some overflow buckets.
// To keep the overhead of tracking these overflow buckets to a minimum,
// we use the convention that if a preallocated overflow bucket's overflow
// pointer is nil, then there are more available by bumping the pointer.
// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
384 nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
385 last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
386 last.setoverflow(t, (*bmap)(buckets))
387 }
388 return buckets, nextOverflow
389 }
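
A minimal sketch of the over-allocation estimate above, not part of map.go, with a hypothetical name; it ignores the later size-class rounding.

func estimatedBucketsSketch(b uint8) uintptr {
	nbuckets := bucketShift(b)
	if b >= 4 {
		nbuckets += bucketShift(b - 4) // e.g. b == 5: 32 base + 2 preallocated overflow buckets
	}
	return nbuckets
}
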
390
// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
396 func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
397 if raceenabled && h != nil {
398 callerpc := getcallerpc()
399 pc := abi.FuncPCABIInternal(mapaccess1)
400 racereadpc(unsafe.Pointer(h), callerpc, pc)
401 raceReadObjectPC(t.Key, key, callerpc, pc)
402 }
403 if msanenabled && h != nil {
404 msanread(key, t.Key.Size_)
405 }
406 if asanenabled && h != nil {
407 asanread(key, t.Key.Size_)
408 }
409 if h == nil || h.count == 0 {
410 if err := mapKeyError(t, key); err != nil {
411 panic(err)
412 }
413 return unsafe.Pointer(&zeroVal[0])
414 }
415 if h.flags&hashWriting != 0 {
416 fatal("concurrent map read and map write")
417 }
418 hash := t.Hasher(key, uintptr(h.hash0))
419 m := bucketMask(h.B)
420 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
421 if c := h.oldbuckets; c != nil {
422 if !h.sameSizeGrow() {
423
424 m >>= 1
425 }
426 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
427 if !evacuated(oldb) {
428 b = oldb
429 }
430 }
431 top := tophash(hash)
432 bucketloop:
433 for ; b != nil; b = b.overflow(t) {
434 for i := uintptr(0); i < bucketCnt; i++ {
435 if b.tophash[i] != top {
436 if b.tophash[i] == emptyRest {
437 break bucketloop
438 }
439 continue
440 }
441 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
442 if t.IndirectKey() {
443 k = *((*unsafe.Pointer)(k))
444 }
445 if t.Key.Equal(key, k) {
446 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
447 if t.IndirectElem() {
448 e = *((*unsafe.Pointer)(e))
449 }
450 return e
451 }
452 }
453 }
454 return unsafe.Pointer(&zeroVal[0])
455 }
456
457 func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
458 if raceenabled && h != nil {
459 callerpc := getcallerpc()
460 pc := abi.FuncPCABIInternal(mapaccess2)
461 racereadpc(unsafe.Pointer(h), callerpc, pc)
462 raceReadObjectPC(t.Key, key, callerpc, pc)
463 }
464 if msanenabled && h != nil {
465 msanread(key, t.Key.Size_)
466 }
467 if asanenabled && h != nil {
468 asanread(key, t.Key.Size_)
469 }
470 if h == nil || h.count == 0 {
471 if err := mapKeyError(t, key); err != nil {
472 panic(err)
473 }
474 return unsafe.Pointer(&zeroVal[0]), false
475 }
476 if h.flags&hashWriting != 0 {
477 fatal("concurrent map read and map write")
478 }
479 hash := t.Hasher(key, uintptr(h.hash0))
480 m := bucketMask(h.B)
481 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
482 if c := h.oldbuckets; c != nil {
483 if !h.sameSizeGrow() {
484
485 m >>= 1
486 }
487 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
488 if !evacuated(oldb) {
489 b = oldb
490 }
491 }
492 top := tophash(hash)
493 bucketloop:
494 for ; b != nil; b = b.overflow(t) {
495 for i := uintptr(0); i < bucketCnt; i++ {
496 if b.tophash[i] != top {
497 if b.tophash[i] == emptyRest {
498 break bucketloop
499 }
500 continue
501 }
502 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
503 if t.IndirectKey() {
504 k = *((*unsafe.Pointer)(k))
505 }
506 if t.Key.Equal(key, k) {
507 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
508 if t.IndirectElem() {
509 e = *((*unsafe.Pointer)(e))
510 }
511 return e, true
512 }
513 }
514 }
515 return unsafe.Pointer(&zeroVal[0]), false
516 }
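
At user level, the comma-ok form of a map index expression is what the compiler lowers to mapaccess2 (or a type-specialized fast variant, such as the string-key version); the single-result form goes through mapaccess1. A minimal sketch, not part of map.go, with a hypothetical function name:

func commaOkLookupSketch(m map[string]int, k string) (int, bool) {
	v, ok := m[k] // a missing key yields the zero value and ok == false; it never panics
	return v, ok
}
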
517
// returns both key and elem. Used by map iterator.
519 func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
520 if h == nil || h.count == 0 {
521 return nil, nil
522 }
523 hash := t.Hasher(key, uintptr(h.hash0))
524 m := bucketMask(h.B)
525 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
526 if c := h.oldbuckets; c != nil {
527 if !h.sameSizeGrow() {
528
529 m >>= 1
530 }
531 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
532 if !evacuated(oldb) {
533 b = oldb
534 }
535 }
536 top := tophash(hash)
537 bucketloop:
538 for ; b != nil; b = b.overflow(t) {
539 for i := uintptr(0); i < bucketCnt; i++ {
540 if b.tophash[i] != top {
541 if b.tophash[i] == emptyRest {
542 break bucketloop
543 }
544 continue
545 }
546 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
547 if t.IndirectKey() {
548 k = *((*unsafe.Pointer)(k))
549 }
550 if t.Key.Equal(key, k) {
551 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
552 if t.IndirectElem() {
553 e = *((*unsafe.Pointer)(e))
554 }
555 return k, e
556 }
557 }
558 }
559 return nil, nil
560 }
561
562 func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
563 e := mapaccess1(t, h, key)
564 if e == unsafe.Pointer(&zeroVal[0]) {
565 return zero
566 }
567 return e
568 }
569
570 func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
571 e := mapaccess1(t, h, key)
572 if e == unsafe.Pointer(&zeroVal[0]) {
573 return zero, false
574 }
575 return e, true
576 }
577
// Like mapaccess, but allocates a slot for the key if it is not present in the map.
579 func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
580 if h == nil {
581 panic(plainError("assignment to entry in nil map"))
582 }
583 if raceenabled {
584 callerpc := getcallerpc()
585 pc := abi.FuncPCABIInternal(mapassign)
586 racewritepc(unsafe.Pointer(h), callerpc, pc)
587 raceReadObjectPC(t.Key, key, callerpc, pc)
588 }
589 if msanenabled {
590 msanread(key, t.Key.Size_)
591 }
592 if asanenabled {
593 asanread(key, t.Key.Size_)
594 }
595 if h.flags&hashWriting != 0 {
596 fatal("concurrent map writes")
597 }
598 hash := t.Hasher(key, uintptr(h.hash0))

// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
602 h.flags ^= hashWriting
603
604 if h.buckets == nil {
605 h.buckets = newobject(t.Bucket)
606 }
607
608 again:
609 bucket := hash & bucketMask(h.B)
610 if h.growing() {
611 growWork(t, h, bucket)
612 }
613 b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
614 top := tophash(hash)
615
616 var inserti *uint8
617 var insertk unsafe.Pointer
618 var elem unsafe.Pointer
619 bucketloop:
620 for {
621 for i := uintptr(0); i < bucketCnt; i++ {
622 if b.tophash[i] != top {
623 if isEmpty(b.tophash[i]) && inserti == nil {
624 inserti = &b.tophash[i]
625 insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
626 elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
627 }
628 if b.tophash[i] == emptyRest {
629 break bucketloop
630 }
631 continue
632 }
633 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
634 if t.IndirectKey() {
635 k = *((*unsafe.Pointer)(k))
636 }
637 if !t.Key.Equal(key, k) {
638 continue
639 }
// already have a mapping for key. Update it.
641 if t.NeedKeyUpdate() {
642 typedmemmove(t.Key, k, key)
643 }
644 elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
645 goto done
646 }
647 ovf := b.overflow(t)
648 if ovf == nil {
649 break
650 }
651 b = ovf
652 }

// Did not find mapping for key. Allocate new cell & add entry.

// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
658 if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
659 hashGrow(t, h)
660 goto again
661 }
662
663 if inserti == nil {
// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
665 newb := h.newoverflow(t, b)
666 inserti = &newb.tophash[0]
667 insertk = add(unsafe.Pointer(newb), dataOffset)
668 elem = add(insertk, bucketCnt*uintptr(t.KeySize))
669 }

// store new key/elem at insert position
672 if t.IndirectKey() {
673 kmem := newobject(t.Key)
674 *(*unsafe.Pointer)(insertk) = kmem
675 insertk = kmem
676 }
677 if t.IndirectElem() {
678 vmem := newobject(t.Elem)
679 *(*unsafe.Pointer)(elem) = vmem
680 }
681 typedmemmove(t.Key, insertk, key)
682 *inserti = top
683 h.count++
684
685 done:
686 if h.flags&hashWriting == 0 {
687 fatal("concurrent map writes")
688 }
689 h.flags &^= hashWriting
690 if t.IndirectElem() {
691 elem = *((*unsafe.Pointer)(elem))
692 }
693 return elem
694 }
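
mapassign returns a pointer to the elem slot and the compiler emits the store through it, so a plain map assignment is lowered to "find or create the slot, then write". A minimal user-level sketch, not part of map.go, with a hypothetical function name:

func assignSketch(m map[string]int, k string, v int) {
	m[k] = v // lowered to a mapassign (or fast-variant) call followed by a store to the returned slot
}
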
695
696 func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
697 if raceenabled && h != nil {
698 callerpc := getcallerpc()
699 pc := abi.FuncPCABIInternal(mapdelete)
700 racewritepc(unsafe.Pointer(h), callerpc, pc)
701 raceReadObjectPC(t.Key, key, callerpc, pc)
702 }
703 if msanenabled && h != nil {
704 msanread(key, t.Key.Size_)
705 }
706 if asanenabled && h != nil {
707 asanread(key, t.Key.Size_)
708 }
709 if h == nil || h.count == 0 {
710 if err := mapKeyError(t, key); err != nil {
711 panic(err)
712 }
713 return
714 }
715 if h.flags&hashWriting != 0 {
716 fatal("concurrent map writes")
717 }
718
719 hash := t.Hasher(key, uintptr(h.hash0))

// Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
723 h.flags ^= hashWriting
724
725 bucket := hash & bucketMask(h.B)
726 if h.growing() {
727 growWork(t, h, bucket)
728 }
729 b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
730 bOrig := b
731 top := tophash(hash)
732 search:
733 for ; b != nil; b = b.overflow(t) {
734 for i := uintptr(0); i < bucketCnt; i++ {
735 if b.tophash[i] != top {
736 if b.tophash[i] == emptyRest {
737 break search
738 }
739 continue
740 }
741 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
742 k2 := k
743 if t.IndirectKey() {
744 k2 = *((*unsafe.Pointer)(k2))
745 }
746 if !t.Key.Equal(key, k2) {
747 continue
748 }
// Only clear key if there are pointers in it.
750 if t.IndirectKey() {
751 *(*unsafe.Pointer)(k) = nil
752 } else if t.Key.PtrBytes != 0 {
753 memclrHasPointers(k, t.Key.Size_)
754 }
755 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
756 if t.IndirectElem() {
757 *(*unsafe.Pointer)(e) = nil
758 } else if t.Elem.PtrBytes != 0 {
759 memclrHasPointers(e, t.Elem.Size_)
760 } else {
761 memclrNoHeapPointers(e, t.Elem.Size_)
762 }
763 b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
// It would be nice to make this a separate function, but
// for loops are not currently inlineable.
768 if i == bucketCnt-1 {
769 if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
770 goto notLast
771 }
772 } else {
773 if b.tophash[i+1] != emptyRest {
774 goto notLast
775 }
776 }
777 for {
778 b.tophash[i] = emptyRest
779 if i == 0 {
780 if b == bOrig {
781 break
782 }
783
784 c := b
785 for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
786 }
787 i = bucketCnt - 1
788 } else {
789 i--
790 }
791 if b.tophash[i] != emptyOne {
792 break
793 }
794 }
795 notLast:
796 h.count--
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
799 if h.count == 0 {
800 h.hash0 = uint32(rand())
801 }
802 break search
803 }
804 }
805
806 if h.flags&hashWriting == 0 {
807 fatal("concurrent map writes")
808 }
809 h.flags &^= hashWriting
810 }
811
// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
816 func mapiterinit(t *maptype, h *hmap, it *hiter) {
817 if raceenabled && h != nil {
818 callerpc := getcallerpc()
819 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
820 }
821
822 it.t = t
823 if h == nil || h.count == 0 {
824 return
825 }
826
827 if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
828 throw("hash_iter size incorrect")
829 }
830 it.h = h
831
832
833 it.B = h.B
834 it.buckets = h.buckets
835 if t.Bucket.PtrBytes == 0 {
836
837
838
839
840 h.createOverflow()
841 it.overflow = h.extra.overflow
842 it.oldoverflow = h.extra.oldoverflow
843 }
844
845
846 r := uintptr(rand())
847 it.startBucket = r & bucketMask(h.B)
848 it.offset = uint8(r >> h.B & (bucketCnt - 1))
849
850
851 it.bucket = it.startBucket
852
853
854
855 if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
856 atomic.Or8(&h.flags, iterator|oldIterator)
857 }
858
859 mapiternext(it)
860 }
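
Because the iterator starts at a random bucket and a random offset within each bucket, two ranges over the same unmodified map may visit keys in different orders. A minimal sketch, not part of map.go, with a hypothetical function name:

func iterationOrderSketch(m map[string]int) (first, second []string) {
	for k := range m { // each range statement initializes a fresh hiter with a new random start
		first = append(first, k)
	}
	for k := range m {
		second = append(second, k)
	}
	return first, second // the two orders frequently differ for maps with several keys
}
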
861
862 func mapiternext(it *hiter) {
863 h := it.h
864 if raceenabled {
865 callerpc := getcallerpc()
866 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
867 }
868 if h.flags&hashWriting != 0 {
869 fatal("concurrent map iteration and map write")
870 }
871 t := it.t
872 bucket := it.bucket
873 b := it.bptr
874 i := it.i
875 checkBucket := it.checkBucket
876
877 next:
878 if b == nil {
879 if bucket == it.startBucket && it.wrapped {
880
881 it.key = nil
882 it.elem = nil
883 return
884 }
885 if h.growing() && it.B == h.B {
886
887
888
889
890 oldbucket := bucket & it.h.oldbucketmask()
891 b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
892 if !evacuated(b) {
893 checkBucket = bucket
894 } else {
895 b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
896 checkBucket = noCheck
897 }
898 } else {
899 b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
900 checkBucket = noCheck
901 }
902 bucket++
903 if bucket == bucketShift(it.B) {
904 bucket = 0
905 it.wrapped = true
906 }
907 i = 0
908 }
909 for ; i < bucketCnt; i++ {
910 offi := (i + it.offset) & (bucketCnt - 1)
911 if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
912
913
914 continue
915 }
916 k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
917 if t.IndirectKey() {
918 k = *((*unsafe.Pointer)(k))
919 }
920 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
921 if checkBucket != noCheck && !h.sameSizeGrow() {
922
923
924
925
926
927
928
929 if t.ReflexiveKey() || t.Key.Equal(k, k) {
930
931
932 hash := t.Hasher(k, uintptr(h.hash0))
933 if hash&bucketMask(it.B) != checkBucket {
934 continue
935 }
936 } else {
937
938
939
940
941
942
943
944 if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
945 continue
946 }
947 }
948 }
949 if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
950 !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
951
952
953
954
955 it.key = k
956 if t.IndirectElem() {
957 e = *((*unsafe.Pointer)(e))
958 }
959 it.elem = e
960 } else {
961
962
963
964
965
966
967
968 rk, re := mapaccessK(t, h, k)
969 if rk == nil {
970 continue
971 }
972 it.key = rk
973 it.elem = re
974 }
975 it.bucket = bucket
976 if it.bptr != b {
977 it.bptr = b
978 }
979 it.i = i + 1
980 it.checkBucket = checkBucket
981 return
982 }
983 b = b.overflow(t)
984 i = 0
985 goto next
986 }

// mapclear deletes all keys from a map.
989 func mapclear(t *maptype, h *hmap) {
990 if raceenabled && h != nil {
991 callerpc := getcallerpc()
992 pc := abi.FuncPCABIInternal(mapclear)
993 racewritepc(unsafe.Pointer(h), callerpc, pc)
994 }
995
996 if h == nil || h.count == 0 {
997 return
998 }
999
1000 if h.flags&hashWriting != 0 {
1001 fatal("concurrent map writes")
1002 }
1003
1004 h.flags ^= hashWriting
1005
1006
1007 markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
1008 for i := uintptr(0); i <= mask; i++ {
1009 b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
1010 for ; b != nil; b = b.overflow(t) {
1011 for i := uintptr(0); i < bucketCnt; i++ {
1012 b.tophash[i] = emptyRest
1013 }
1014 }
1015 }
1016 }
1017 markBucketsEmpty(h.buckets, bucketMask(h.B))
1018 if oldBuckets := h.oldbuckets; oldBuckets != nil {
1019 markBucketsEmpty(oldBuckets, h.oldbucketmask())
1020 }
1021
1022 h.flags &^= sameSizeGrow
1023 h.oldbuckets = nil
1024 h.nevacuate = 0
1025 h.noverflow = 0
1026 h.count = 0

// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
1030 h.hash0 = uint32(rand())
1031
1032
1033 if h.extra != nil {
1034 *h.extra = mapextra{}
1035 }

// makeBucketArray clears the memory pointed to by h.buckets
// and recovers any overflow buckets by generating them
// as if h.buckets was newly alloced.
1040 _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
1041 if nextOverflow != nil {
1042
1043
1044 h.extra.nextOverflow = nextOverflow
1045 }
1046
1047 if h.flags&hashWriting == 0 {
1048 fatal("concurrent map writes")
1049 }
1050 h.flags &^= hashWriting
1051 }
1052
1053 func hashGrow(t *maptype, h *hmap) {
// If we've hit the load factor, get bigger.
// Otherwise, there are too many overflow buckets,
// so keep the same number of buckets and "grow" laterally.
1057 bigger := uint8(1)
1058 if !overLoadFactor(h.count+1, h.B) {
1059 bigger = 0
1060 h.flags |= sameSizeGrow
1061 }
1062 oldbuckets := h.buckets
1063 newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
1064
1065 flags := h.flags &^ (iterator | oldIterator)
1066 if h.flags&iterator != 0 {
1067 flags |= oldIterator
1068 }
// commit the grow (atomic wrt gc)
1070 h.B += bigger
1071 h.flags = flags
1072 h.oldbuckets = oldbuckets
1073 h.buckets = newbuckets
1074 h.nevacuate = 0
1075 h.noverflow = 0
1076
1077 if h.extra != nil && h.extra.overflow != nil {
// Promote current overflow buckets to the old generation.
1079 if h.extra.oldoverflow != nil {
1080 throw("oldoverflow is not nil")
1081 }
1082 h.extra.oldoverflow = h.extra.overflow
1083 h.extra.overflow = nil
1084 }
1085 if nextOverflow != nil {
1086 if h.extra == nil {
1087 h.extra = new(mapextra)
1088 }
1089 h.extra.nextOverflow = nextOverflow
1090 }

// the actual copying of the hash table data is done incrementally
// by growWork() and evacuate().
1094 }
1095
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
1097 func overLoadFactor(count int, B uint8) bool {
1098 return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
1099 }
1100
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
1104 func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
// If the threshold is too low, we do extraneous work.
// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
// "too many" means (approximately) as many overflow buckets as regular buckets.
// See incrnoverflow for more details.
1109 if B > 15 {
1110 B = 15
1111 }
// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
1113 return noverflow >= uint16(1)<<(B&15)
1114 }
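
A worked sketch of the threshold above, not part of map.go, with a hypothetical name: same-size growth starts once there are roughly as many overflow buckets as regular buckets, with the count capped at 1<<15.

func overflowThresholdSketch(B uint8) uint16 {
	if B > 15 {
		B = 15 // noverflow itself is only an approximate count beyond this point
	}
	return uint16(1) << B // e.g. B == 5: 32 overflow buckets trigger sameSizeGrow
}
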
1115
// growing reports whether h is growing. The growth may be to the same size or bigger.
1117 func (h *hmap) growing() bool {
1118 return h.oldbuckets != nil
1119 }
1120
// sameSizeGrow reports whether the current growth is to a map of the same size.
1122 func (h *hmap) sameSizeGrow() bool {
1123 return h.flags&sameSizeGrow != 0
1124 }
1125
// noldbuckets calculates the number of buckets prior to the current map growth.
1127 func (h *hmap) noldbuckets() uintptr {
1128 oldB := h.B
1129 if !h.sameSizeGrow() {
1130 oldB--
1131 }
1132 return bucketShift(oldB)
1133 }
1134
// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
1136 func (h *hmap) oldbucketmask() uintptr {
1137 return h.noldbuckets() - 1
1138 }
1139
1140 func growWork(t *maptype, h *hmap, bucket uintptr) {
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
1143 evacuate(t, h, bucket&h.oldbucketmask())

// evacuate one more oldbucket to make progress on growing
1146 if h.growing() {
1147 evacuate(t, h, h.nevacuate)
1148 }
1149 }
1150
1151 func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
1152 b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
1153 return evacuated(b)
1154 }

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}
1163
1164 func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
1165 b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
1166 newbit := h.noldbuckets()
1167 if !evacuated(b) {
1168
1169
1170
1171
1172 var xy [2]evacDst
1173 x := &xy[0]
1174 x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
1175 x.k = add(unsafe.Pointer(x.b), dataOffset)
1176 x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
1177
1178 if !h.sameSizeGrow() {
1179
1180
1181 y := &xy[1]
1182 y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
1183 y.k = add(unsafe.Pointer(y.b), dataOffset)
1184 y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
1185 }
1186
1187 for ; b != nil; b = b.overflow(t) {
1188 k := add(unsafe.Pointer(b), dataOffset)
1189 e := add(k, bucketCnt*uintptr(t.KeySize))
1190 for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
1191 top := b.tophash[i]
1192 if isEmpty(top) {
1193 b.tophash[i] = evacuatedEmpty
1194 continue
1195 }
1196 if top < minTopHash {
1197 throw("bad map state")
1198 }
1199 k2 := k
1200 if t.IndirectKey() {
1201 k2 = *((*unsafe.Pointer)(k2))
1202 }
1203 var useY uint8
1204 if !h.sameSizeGrow() {
1205
1206
1207 hash := t.Hasher(k2, uintptr(h.hash0))
1208 if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220 useY = top & 1
1221 top = tophash(hash)
1222 } else {
1223 if hash&newbit != 0 {
1224 useY = 1
1225 }
1226 }
1227 }
1228
1229 if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
1230 throw("bad evacuatedN")
1231 }
1232
1233 b.tophash[i] = evacuatedX + useY
1234 dst := &xy[useY]
1235
1236 if dst.i == bucketCnt {
1237 dst.b = h.newoverflow(t, dst.b)
1238 dst.i = 0
1239 dst.k = add(unsafe.Pointer(dst.b), dataOffset)
1240 dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
1241 }
1242 dst.b.tophash[dst.i&(bucketCnt-1)] = top
1243 if t.IndirectKey() {
1244 *(*unsafe.Pointer)(dst.k) = k2
1245 } else {
1246 typedmemmove(t.Key, dst.k, k)
1247 }
1248 if t.IndirectElem() {
1249 *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
1250 } else {
1251 typedmemmove(t.Elem, dst.e, e)
1252 }
1253 dst.i++
1254
1255
1256
1257
1258 dst.k = add(dst.k, uintptr(t.KeySize))
1259 dst.e = add(dst.e, uintptr(t.ValueSize))
1260 }
1261 }
1262
1263 if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
1264 b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
1265
1266
1267 ptr := add(b, dataOffset)
1268 n := uintptr(t.BucketSize) - dataOffset
1269 memclrHasPointers(ptr, n)
1270 }
1271 }
1272
1273 if oldbucket == h.nevacuate {
1274 advanceEvacuationMark(h, t, newbit)
1275 }
1276 }
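
During a size-doubling grow, every entry of old bucket i lands either in new bucket i (destination X) or new bucket i+newbit (destination Y), decided by the one hash bit that becomes significant. A minimal sketch, not part of map.go, with a hypothetical function name:

func evacuationTargetSketch(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // Y: upper half of the doubled bucket array
	}
	return oldbucket // X: same index as before the grow
}
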
1277
1278 func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
1279 h.nevacuate++
1280
1281
1282 stop := h.nevacuate + 1024
1283 if stop > newbit {
1284 stop = newbit
1285 }
1286 for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
1287 h.nevacuate++
1288 }
1289 if h.nevacuate == newbit {
1290
1291 h.oldbuckets = nil
1292
1293
1294
1295 if h.extra != nil {
1296 h.extra.oldoverflow = nil
1297 }
1298 h.flags &^= sameSizeGrow
1299 }
1300 }
1301
1302
1303
1304
1305 func reflect_makemap(t *maptype, cap int) *hmap {
1306
1307 if t.Key.Equal == nil {
1308 throw("runtime.reflect_makemap: unsupported map key type")
1309 }
1310 if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
1311 t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
1312 throw("key size wrong")
1313 }
1314 if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
1315 t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
1316 throw("elem size wrong")
1317 }
1318 if t.Key.Align_ > bucketCnt {
1319 throw("key align too big")
1320 }
1321 if t.Elem.Align_ > bucketCnt {
1322 throw("elem align too big")
1323 }
1324 if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
1325 throw("key size not a multiple of key align")
1326 }
1327 if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
1328 throw("elem size not a multiple of elem align")
1329 }
1330 if bucketCnt < 8 {
1331 throw("bucketsize too small for proper alignment")
1332 }
1333 if dataOffset%uintptr(t.Key.Align_) != 0 {
1334 throw("need padding in bucket (key)")
1335 }
1336 if dataOffset%uintptr(t.Elem.Align_) != 0 {
1337 throw("need padding in bucket (elem)")
1338 }
1339
1340 return makemap(t, cap, nil)
1341 }
1342
1343
1344 func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
1345 elem, ok := mapaccess2(t, h, key)
1346 if !ok {
1347
1348 elem = nil
1349 }
1350 return elem
1351 }
1352
1353
1354 func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
1355 elem, ok := mapaccess2_faststr(t, h, key)
1356 if !ok {
1357
1358 elem = nil
1359 }
1360 return elem
1361 }
1362
1363
1364 func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
1365 p := mapassign(t, h, key)
1366 typedmemmove(t.Elem, p, elem)
1367 }
1368
1369
1370 func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
1371 p := mapassign_faststr(t, h, key)
1372 typedmemmove(t.Elem, p, elem)
1373 }
1374
1375
1376 func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
1377 mapdelete(t, h, key)
1378 }
1379
1380
1381 func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
1382 mapdelete_faststr(t, h, key)
1383 }
1384
1385
1386 func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
1387 mapiterinit(t, h, it)
1388 }
1389
1390
1391 func reflect_mapiternext(it *hiter) {
1392 mapiternext(it)
1393 }
1394
1395
1396 func reflect_mapiterkey(it *hiter) unsafe.Pointer {
1397 return it.key
1398 }
1399
1400
1401 func reflect_mapiterelem(it *hiter) unsafe.Pointer {
1402 return it.elem
1403 }
1404
1405
1406 func reflect_maplen(h *hmap) int {
1407 if h == nil {
1408 return 0
1409 }
1410 if raceenabled {
1411 callerpc := getcallerpc()
1412 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
1413 }
1414 return h.count
1415 }
1416
1417
1418 func reflect_mapclear(t *maptype, h *hmap) {
1419 mapclear(t, h)
1420 }
1421
1422
1423 func reflectlite_maplen(h *hmap) int {
1424 if h == nil {
1425 return 0
1426 }
1427 if raceenabled {
1428 callerpc := getcallerpc()
1429 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
1430 }
1431 return h.count
1432 }
1433
1434 var zeroVal [abi.ZeroValSize]byte
1435
// mapinitnoop is a no-op function known the Go linker; if a given global map
// (of the right size, i.e. uninitialized) is determined to be dead, the linker
// rewrites the relocation (from the package init func) from the outlined
// map init function to this symbol. Defined in assembly so as to avoid
// complications with instrumentation (coverage, etc).
1441 func mapinitnoop()
1442
// mapclone for implementing maps.Clone
//
//go:linkname mapclone maps.clone
1446 func mapclone(m any) any {
1447 e := efaceOf(&m)
1448 e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
1449 return m
1450 }
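
mapclone backs the standard library's maps.Clone (Go 1.21+), which copies a map in a single runtime call rather than a Go-level insert loop. A minimal user-level sketch, not part of map.go; it assumes the calling package imports "maps" and the function name is hypothetical.

func cloneSketch(src map[string]int) map[string]int {
	return maps.Clone(src) // shallow copy: keys and elems are copied, anything they reference is shared
}
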
1451
// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new
// destination bucket if it overflows, and the pos that the next key/elem will be written to.
1454 func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
1455 for i := 0; i < bucketCnt; i++ {
1456 if isEmpty(src.tophash[i]) {
1457 continue
1458 }
1459
1460 for ; pos < bucketCnt; pos++ {
1461 if isEmpty(dst.tophash[pos]) {
1462 break
1463 }
1464 }
1465
1466 if pos == bucketCnt {
1467 dst = h.newoverflow(t, dst)
1468 pos = 0
1469 }
1470
1471 srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
1472 srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
1473 dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
1474 dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
1475
1476 dst.tophash[pos] = src.tophash[i]
1477 if t.IndirectKey() {
1478 srcK = *(*unsafe.Pointer)(srcK)
1479 if t.NeedKeyUpdate() {
1480 kStore := newobject(t.Key)
1481 typedmemmove(t.Key, kStore, srcK)
1482 srcK = kStore
1483 }
1484
1485
1486
1487 *(*unsafe.Pointer)(dstK) = srcK
1488 } else {
1489 typedmemmove(t.Key, dstK, srcK)
1490 }
1491 if t.IndirectElem() {
1492 srcEle = *(*unsafe.Pointer)(srcEle)
1493 eStore := newobject(t.Elem)
1494 typedmemmove(t.Elem, eStore, srcEle)
1495 *(*unsafe.Pointer)(dstEle) = eStore
1496 } else {
1497 typedmemmove(t.Elem, dstEle, srcEle)
1498 }
1499 pos++
1500 h.count++
1501 }
1502 return dst, pos
1503 }
1504
1505 func mapclone2(t *maptype, src *hmap) *hmap {
1506 dst := makemap(t, src.count, nil)
1507 dst.hash0 = src.hash0
1508 dst.nevacuate = 0
1509
1510
1511 if src.count == 0 {
1512 return dst
1513 }
1514
1515 if src.flags&hashWriting != 0 {
1516 fatal("concurrent map clone and map write")
1517 }
1518
1519 if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
1520
1521 dst.buckets = newobject(t.Bucket)
1522 dst.count = src.count
1523 typedmemmove(t.Bucket, dst.buckets, src.buckets)
1524 return dst
1525 }
1526
1527 if dst.B == 0 {
1528 dst.buckets = newobject(t.Bucket)
1529 }
1530 dstArraySize := int(bucketShift(dst.B))
1531 srcArraySize := int(bucketShift(src.B))
1532 for i := 0; i < dstArraySize; i++ {
1533 dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
1534 pos := 0
1535 for j := 0; j < srcArraySize; j += dstArraySize {
1536 srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
1537 for srcBmap != nil {
1538 dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
1539 srcBmap = srcBmap.overflow(t)
1540 }
1541 }
1542 }
1543
1544 if src.oldbuckets == nil {
1545 return dst
1546 }
1547
1548 oldB := src.B
1549 srcOldbuckets := src.oldbuckets
1550 if !src.sameSizeGrow() {
1551 oldB--
1552 }
1553 oldSrcArraySize := int(bucketShift(oldB))
1554
1555 for i := 0; i < oldSrcArraySize; i++ {
1556 srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
1557 if evacuated(srcBmap) {
1558 continue
1559 }
1560
1561 if oldB >= dst.B {
1562 dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
1563 for dstBmap.overflow(t) != nil {
1564 dstBmap = dstBmap.overflow(t)
1565 }
1566 pos := 0
1567 for srcBmap != nil {
1568 dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
1569 srcBmap = srcBmap.overflow(t)
1570 }
1571 continue
1572 }
1573
1574
1575
1576 for srcBmap != nil {
1577
1578 for i := uintptr(0); i < bucketCnt; i++ {
1579 if isEmpty(srcBmap.tophash[i]) {
1580 continue
1581 }
1582
1583 if src.flags&hashWriting != 0 {
1584 fatal("concurrent map clone and map write")
1585 }
1586
1587 srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
1588 if t.IndirectKey() {
1589 srcK = *((*unsafe.Pointer)(srcK))
1590 }
1591
1592 srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
1593 if t.IndirectElem() {
1594 srcEle = *((*unsafe.Pointer)(srcEle))
1595 }
1596 dstEle := mapassign(t, dst, srcK)
1597 typedmemmove(t.Elem, dstEle, srcEle)
1598 }
1599 srcBmap = srcBmap.overflow(t)
1600 }
1601 }
1602 return dst
1603 }
1604
// keys copies every key in m into the slice pointed to by p, starting each
// bucket at a randomized offset. It is used by the maps package.
1608 func keys(m any, p unsafe.Pointer) {
1609 e := efaceOf(&m)
1610 t := (*maptype)(unsafe.Pointer(e._type))
1611 h := (*hmap)(e.data)
1612
1613 if h == nil || h.count == 0 {
1614 return
1615 }
1616 s := (*slice)(p)
1617 r := int(rand())
1618 offset := uint8(r >> h.B & (bucketCnt - 1))
1619 if h.B == 0 {
1620 copyKeys(t, h, (*bmap)(h.buckets), s, offset)
1621 return
1622 }
1623 arraySize := int(bucketShift(h.B))
1624 buckets := h.buckets
1625 for i := 0; i < arraySize; i++ {
1626 bucket := (i + r) & (arraySize - 1)
1627 b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
1628 copyKeys(t, h, b, s, offset)
1629 }
1630
1631 if h.growing() {
1632 oldArraySize := int(h.noldbuckets())
1633 for i := 0; i < oldArraySize; i++ {
1634 bucket := (i + r) & (oldArraySize - 1)
1635 b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
1636 if evacuated(b) {
1637 continue
1638 }
1639 copyKeys(t, h, b, s, offset)
1640 }
1641 }
1642 return
1643 }
1644
1645 func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
1646 for b != nil {
1647 for i := uintptr(0); i < bucketCnt; i++ {
1648 offi := (i + uintptr(offset)) & (bucketCnt - 1)
1649 if isEmpty(b.tophash[offi]) {
1650 continue
1651 }
1652 if h.flags&hashWriting != 0 {
1653 fatal("concurrent map read and map write")
1654 }
1655 k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
1656 if t.IndirectKey() {
1657 k = *((*unsafe.Pointer)(k))
1658 }
1659 if s.len >= s.cap {
1660 fatal("concurrent map read and map write")
1661 }
1662 typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
1663 s.len++
1664 }
1665 b = b.overflow(t)
1666 }
1667 }
1668
// values copies every elem in m into the slice pointed to by p, starting each
// bucket at a randomized offset. It is used by the maps package.
1672 func values(m any, p unsafe.Pointer) {
1673 e := efaceOf(&m)
1674 t := (*maptype)(unsafe.Pointer(e._type))
1675 h := (*hmap)(e.data)
1676 if h == nil || h.count == 0 {
1677 return
1678 }
1679 s := (*slice)(p)
1680 r := int(rand())
1681 offset := uint8(r >> h.B & (bucketCnt - 1))
1682 if h.B == 0 {
1683 copyValues(t, h, (*bmap)(h.buckets), s, offset)
1684 return
1685 }
1686 arraySize := int(bucketShift(h.B))
1687 buckets := h.buckets
1688 for i := 0; i < arraySize; i++ {
1689 bucket := (i + r) & (arraySize - 1)
1690 b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
1691 copyValues(t, h, b, s, offset)
1692 }
1693
1694 if h.growing() {
1695 oldArraySize := int(h.noldbuckets())
1696 for i := 0; i < oldArraySize; i++ {
1697 bucket := (i + r) & (oldArraySize - 1)
1698 b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
1699 if evacuated(b) {
1700 continue
1701 }
1702 copyValues(t, h, b, s, offset)
1703 }
1704 }
1705 return
1706 }
1707
1708 func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
1709 for b != nil {
1710 for i := uintptr(0); i < bucketCnt; i++ {
1711 offi := (i + uintptr(offset)) & (bucketCnt - 1)
1712 if isEmpty(b.tophash[offi]) {
1713 continue
1714 }
1715
1716 if h.flags&hashWriting != 0 {
1717 fatal("concurrent map read and map write")
1718 }
1719
1720 ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
1721 if t.IndirectElem() {
1722 ele = *((*unsafe.Pointer)(ele))
1723 }
1724 if s.len >= s.cap {
1725 fatal("concurrent map read and map write")
1726 }
1727 typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
1728 s.len++
1729 }
1730 b = b.overflow(t)
1731 }
1732 }