
Text file src/github.com/bytedance/sonic/native/utils.h

Documentation: github.com/bytedance/sonic/native

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <immintrin.h>
#include <string.h>
#include "native.h"

/* Returns true if an n-byte read starting at p would cross a 4 KiB page
 * boundary, i.e. touch a page beyond the one containing p. */
static always_inline bool vec_cross_page(const void * p, size_t n) {
#define PAGE_SIZE 4096
    return (((size_t)(p)) & (PAGE_SIZE - 1)) > (PAGE_SIZE - n);
#undef PAGE_SIZE
}
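
/* Example: for p at offset 4084 within its page and n = 16, the check reads
 * 4084 > 4080, so a 16-byte load at p would spill 4 bytes onto the next page. */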
/* Fixed-size copies: each moves exactly N bytes with one or two unaligned
 * loads and stores, so callers can pick a size at compile time. */
static always_inline void memcpy4(void *__restrict dp, const void *__restrict sp) {
    ((uint32_t *)dp)[0] = ((const uint32_t *)sp)[0];
}

static always_inline void memcpy8(void *__restrict dp, const void *__restrict sp) {
    ((uint64_t *)dp)[0] = ((const uint64_t *)sp)[0];
}

static always_inline void memcpy16(void *__restrict dp, const void *__restrict sp) {
    _mm_storeu_si128((__m128i *)dp, _mm_loadu_si128((const __m128i *)sp));
}

/* Copies exactly 32 bytes: a single AVX2 store when available, otherwise two
 * SSE stores. Pointer arithmetic on void * relies on the GCC/Clang extension
 * that treats sizeof(void) as 1. */
static always_inline void memcpy32(void *__restrict dp, const void *__restrict sp) {
#if USE_AVX2
    _mm256_storeu_si256((__m256i *)dp,     _mm256_loadu_si256((const __m256i *)sp));
#else
    _mm_storeu_si128((__m128i *)(dp),      _mm_loadu_si128((const __m128i *)(sp)));
    _mm_storeu_si128((__m128i *)(dp + 16), _mm_loadu_si128((const __m128i *)(sp + 16)));
#endif
}

/* Copies exactly 64 bytes as two 32-byte copies. */
static always_inline void memcpy64(void *__restrict dp, const void *__restrict sp) {
    memcpy32(dp, sp);
    memcpy32(dp + 32, sp + 32);
}

/* Partial copies: memcpy_pN copies nb bytes, nb < N, by peeling off the
 * largest power-of-two chunk and passing the remainder down the chain
 * (e.g. nb = 13 copies 8 + 4 + 1 bytes). */
static always_inline void memcpy_p4(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 2) { *(uint16_t *)dp = *(const uint16_t *)sp; sp += 2, dp += 2, nb -= 2; }
    if (nb >= 1) { *(uint8_t *)dp = *(const uint8_t *)sp; }
}

static always_inline void memcpy_p8(void *__restrict dp, const void *__restrict sp, ssize_t nb) {
    if (nb >= 4) { memcpy4(dp, sp); sp += 4, dp += 4, nb -= 4; }
    memcpy_p4(dp, sp, nb);
}

static always_inline void memcpy_p16(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 8) { memcpy8(dp, sp); sp += 8, dp += 8, nb -= 8; }
    memcpy_p8(dp, sp, nb);
}

static always_inline void memcpy_p32(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 16) { memcpy16(dp, sp); sp += 16, dp += 16, nb -= 16; }
    memcpy_p16(dp, sp, nb);
}

static always_inline void memcpy_p64(void *__restrict dp, const void *__restrict sp, size_t nb) {
    if (nb >= 32) { memcpy32(dp, sp); sp += 32, dp += 32, nb -= 32; }
    memcpy_p32(dp, sp, nb);
}
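
A minimal sketch of how these helpers compose (not part of utils.h; the name
safe_load16 and the remaining-bytes bookkeeping are illustrative assumptions):
when fewer than 16 valid bytes remain, an unaligned 16-byte load is still safe
as long as it stays within the current 4 KiB page; otherwise the valid tail is
staged through a stack buffer with memcpy_p16.

/* Sketch only: a guarded unaligned 16-byte load, assuming the includes above. */
static always_inline __m128i safe_load16(const void *p, size_t remaining) {
    if (remaining >= 16 || !vec_cross_page(p, 16)) {
        /* Either enough bytes remain, or the over-read stays on this page. */
        return _mm_loadu_si128((const __m128i *)p);
    }
    char buf[16] = {0};
    memcpy_p16(buf, p, remaining);  /* copy only the valid tail bytes */
    return _mm_loadu_si128((const __m128i *)buf);
}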
