Source code
#include <memory>
#include <vector>
#include <utility>
#include <limits>
#include <cstddef>      // size_t
#include <cstdint>      // uint8_t, uint16_t, uint32_t, uint64_t, int8_t, int64_t
#include <type_traits>  // std::is_same

# define PROTOBUF_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
# define PROTOBUF_PREDICT_FALSE(x) (__builtin_expect(false || (x), false))
# define PROTOBUF_MUSTTAIL [[clang::musttail]]

template <class T>
__attribute__((always_inline)) constexpr T RotateLeft(T x, int s) noexcept {
  return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
         static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
}

inline __attribute__((always_inline)) uint64_t RotRight7AndReplaceLowByte(
    uint64_t res, const char& byte) {
  // TODO(b/239808098): remove the inline assembly
#if defined(__x86_64__) && defined(__GNUC__)
  // This will only use one register for `res`.
  // `byte` comes as a reference to allow the compiler to generate code like:
  //
  //   rorq $7, %rcx
  //   movb 1(%rax), %cl
  //
  // which avoids loading the incoming bytes into a separate register first.
  asm("ror $7,%0\n\t"
      "movb %1,%b0"
      : "+r"(res)
      : "m"(byte));
#else
  res = RotateLeft(res, -7);
  res = res & ~0xFF;
  res |= 0xFF & byte;
#endif
  return res;
}

struct TcFieldData {
  constexpr TcFieldData() : data(0) {}

  // Fast table entry constructor:
  constexpr TcFieldData(uint16_t coded_tag, uint8_t hasbit_idx, uint8_t aux_idx,
                        uint16_t offset)
      : data(uint64_t{offset} << 48 |      //
             uint64_t{aux_idx} << 24 |     //
             uint64_t{hasbit_idx} << 16 |  //
             uint64_t{coded_tag}) {}

  // Fields used in fast table parsing:
  //
  //   Bit:
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  //   :   .   :   .   :   . 16|=======| [16] coded_tag()
  //   :   .   :   .   : 24|===|   .   : [ 8] hasbit_idx()
  //   :   .   :   . 32|===|   :   .   : [ 8] aux_idx()
  //   :   . 48:---.---:   .   :   .   : [16] (unused)
  //   |=======|   .   :   .   :   .   : [16] offset()
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  template <typename TagType = uint16_t>
  TagType coded_tag() const {
    return static_cast<TagType>(data);
  }
  uint8_t hasbit_idx() const { return static_cast<uint8_t>(data >> 16); }
  uint8_t aux_idx() const { return static_cast<uint8_t>(data >> 24); }
  uint16_t offset() const { return static_cast<uint16_t>(data >> 48); }

  // Constructor for special entries that do not represent a field.
  //  - End group: `nonfield_info` is the decoded tag.
  constexpr TcFieldData(uint16_t coded_tag, uint16_t nonfield_info)
      : data(uint64_t{nonfield_info} << 16 |  //
             uint64_t{coded_tag}) {}

  // Fields used in non-field entries
  //
  //   Bit:
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  //   :   .   :   .   :   . 16|=======| [16] coded_tag()
  //   :   .   :   . 32|=======|   .   : [16] decoded_tag()
  //   :---.---:---.---:   .   :   .   : [32] (unused)
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  uint16_t decoded_tag() const { return static_cast<uint16_t>(data >> 16); }

  // Fields used in mini table parsing:
  //
  //   Bit:
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  //   :   .   :   .   |===============| [32] tag() (decoded)
  //   |===============|   .   :   .   : [32] entry_offset()
  //   +-----------+-------------------+
  //   |63    ..     32|31     ..      0|
  //   +---------------+---------------+
  uint32_t tag() const { return static_cast<uint32_t>(data); }
  uint32_t entry_offset() const { return static_cast<uint32_t>(data >> 32); }

  uint64_t data;
};

template <int n>
inline __attribute__((always_inline)) uint64_t shift_left_fill_with_ones(
    uint64_t byte, uint64_t ones) {
  return (byte << (n * 7)) | (ones >> (64 - (n * 7)));
}

// Shift "byte" left by n * 7 bits, filling vacated bits with ones, and
// put the new value in res. Return whether the result was negative.
template <int n>
inline __attribute__((always_inline)) bool
shift_left_fill_with_ones_was_negative(uint64_t byte, uint64_t ones,
                                       int64_t& res) {
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
  // For the first two rounds (ptr[1] and ptr[2]), micro benchmarks show a
  // substantial improvement from capturing the sign from the condition code
  // register on x86-64.
  bool sign_bit;
  asm("shldq %3, %2, %1"
      : "=@ccs"(sign_bit), "+r"(byte)
      : "r"(ones), "i"(n * 7));
  res = byte;
  return sign_bit;
#else
  // Generic fallback:
  res = shift_left_fill_with_ones<n>(byte, ones);
  return static_cast<int64_t>(res) < 0;
#endif
}

inline __attribute__((always_inline)) std::pair<const char*, uint64_t>
Parse64FallbackPair(const char* p, int64_t res1) {
  auto ptr = reinterpret_cast<const int8_t*>(p);

  // The algorithm relies on sign extension for each byte to set all high bits
  // when the varint continues. It also relies on asserting all of the lower
  // bits for each successive byte read. This allows the result to be
  // aggregated using a bitwise AND. For example:
  //
  //          8       1          64     57 ... 24     17  16      9  8       1
  // ptr[0] = 1aaa aaaa ; res1 = 1111 1111 ... 1111 1111  1111 1111  1aaa aaaa
  // ptr[1] = 1bbb bbbb ; res2 = 1111 1111 ... 1111 1111  11bb bbbb  b111 1111
  // ptr[2] = 0ccc cccc ; res3 = 0000 0000 ... 000c cccc  cc11 1111  1111 1111
  //                             ---------------------------------------------
  //        res1 & res2 & res3 = 0000 0000 ... 000c cccc  ccbb bbbb  baaa aaaa
  //
  // On x86-64, a shld from a single register filled with enough 1s in the high
  // bits can accomplish all this in one instruction. It so happens that res1
  // has 57 high bits of ones, which is enough for the largest shift done.
  //
  // Just as importantly, by keeping results in res1, res2, and res3, we take
  // advantage of the superscalar abilities of the CPU.
  // ABSL_DCHECK_EQ(res1 >> 7, -1);
  uint64_t ones = res1;  // save the high 1 bits from res1 (input to SHLD)
  int64_t res2, res3;    // accumulated result chunks

  if (!shift_left_fill_with_ones_was_negative<1>(ptr[1], ones, res2))
    goto done2;
  if (!shift_left_fill_with_ones_was_negative<2>(ptr[2], ones, res3))
    goto done3;

  // For the remainder of the chunks, check the sign of the AND result.
  res1 &= shift_left_fill_with_ones<3>(ptr[3], ones);
  if (res1 >= 0) goto done4;
  res2 &= shift_left_fill_with_ones<4>(ptr[4], ones);
  if (res2 >= 0) goto done5;
  res3 &= shift_left_fill_with_ones<5>(ptr[5], ones);
  if (res3 >= 0) goto done6;
  res1 &= shift_left_fill_with_ones<6>(ptr[6], ones);
  if (res1 >= 0) goto done7;
  res2 &= shift_left_fill_with_ones<7>(ptr[7], ones);
  if (res2 >= 0) goto done8;
  res3 &= shift_left_fill_with_ones<8>(ptr[8], ones);
  if (res3 >= 0) goto done9;

  // For valid 64bit varints, the 10th byte/ptr[9] should be exactly 1. In this
  // case, the continuation bit of ptr[8] already set the top bit of res3
  // correctly, so all we have to do is check that the expected case is true.
  if (PROTOBUF_PREDICT_TRUE(ptr[9] == 1)) goto done10;

  if (PROTOBUF_PREDICT_FALSE(ptr[9] & 0x80)) {
    // If the continue bit is set, it is an unterminated varint.
    return {nullptr, 0};
  }

  // A zero value of the first bit of the 10th byte represents an
  // over-serialized varint. This case should not happen, but if it does (say,
  // due to a nonconforming serializer), deassert the continuation bit that
  // came from ptr[8].
  if ((ptr[9] & 1) == 0) {
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
    // Use a small instruction since this is an uncommon code path.
    asm("btcq $63,%0" : "+r"(res3));
#else
    res3 ^= static_cast<uint64_t>(1) << 63;
#endif
  }
  goto done10;

done2:
  return {p + 2, res1 & res2};
done3:
  return {p + 3, res1 & res2 & res3};
done4:
  return {p + 4, res1 & res2 & res3};
done5:
  return {p + 5, res1 & res2 & res3};
done6:
  return {p + 6, res1 & res2 & res3};
done7:
  return {p + 7, res1 & res2 & res3};
done8:
  return {p + 8, res1 & res2 & res3};
done9:
  return {p + 9, res1 & res2 & res3};
done10:
  return {p + 10, res1 & res2 & res3};
}

class ParseContext;

template <typename T>
static inline T& RefAt(void* x, size_t offset) {
  T* target = reinterpret_cast<T*>(static_cast<char*>(x) + offset);
  return *target;
}

template <typename T>
static inline const T& RefAt(const void* x, size_t offset) {
  const T* target =
      reinterpret_cast<const T*>(static_cast<const char*>(x) + offset);
  return *target;
}

class MessageLite;

#define PROTOBUF_TC_PARAM_DECL              \
  MessageLite *msg, const char *ptr,        \
      ParseContext *ctx,                    \
      TcFieldData data,                     \
      const void *table, uint64_t hasbits
// PROTOBUF_TC_PARAM_PASS passes values to match PROTOBUF_TC_PARAM_DECL.
#define PROTOBUF_TC_PARAM_PASS msg, ptr, ctx, data, table, hasbits

static const char* ToTagDispatch(PROTOBUF_TC_PARAM_DECL);
static const char* MiniParse(PROTOBUF_TC_PARAM_DECL);
static const char* Error(PROTOBUF_TC_PARAM_DECL);

template <class VarintType>
inline __attribute__((always_inline)) std::pair<const char*, VarintType>
ParseFallbackPair(const char* p, int64_t res1) {
  constexpr bool kIs64BitVarint = std::is_same<VarintType, uint64_t>::value ||
                                  std::is_same<VarintType, int64_t>::value;
  constexpr bool kIs32BitVarint = std::is_same<VarintType, uint32_t>::value ||
                                  std::is_same<VarintType, int32_t>::value;
  static_assert(kIs64BitVarint || kIs32BitVarint,
                "Only 32 or 64 bit varints are supported");
  auto ptr = reinterpret_cast<const int8_t*>(p);

  // The algorithm relies on sign extension for each byte to set all high bits
  // when the varint continues. It also relies on asserting all of the lower
  // bits for each successive byte read. This allows the result to be
  // aggregated using a bitwise AND. For example:
  //
  //          8       1          64     57 ... 24     17  16      9  8       1
  // ptr[0] = 1aaa aaaa ; res1 = 1111 1111 ... 1111 1111  1111 1111  1aaa aaaa
  // ptr[1] = 1bbb bbbb ; res2 = 1111 1111 ... 1111 1111  11bb bbbb  b111 1111
  // ptr[2] = 0ccc cccc ; res3 = 0000 0000 ... 000c cccc  cc11 1111  1111 1111
  //                             ---------------------------------------------
  //        res1 & res2 & res3 = 0000 0000 ... 000c cccc  ccbb bbbb  baaa aaaa
  //
  // On x86-64, a shld from a single register filled with enough 1s in the high
  // bits can accomplish all this in one instruction. It so happens that res1
  // has 57 high bits of ones, which is enough for the largest shift done.
  //
  // Just as importantly, by keeping results in res1, res2, and res3, we take
  // advantage of the superscalar abilities of the CPU.
  // ABSL_DCHECK_EQ(res1 >> 7, -1);
  uint64_t ones = res1;  // save the high 1 bits from res1 (input to SHLD)
  int64_t res2, res3;    // accumulated result chunks

  if (!shift_left_fill_with_ones_was_negative<1>(ptr[1], ones, res2))
    goto done2;
  if (!shift_left_fill_with_ones_was_negative<2>(ptr[2], ones, res3))
    goto done3;

  // For the remainder of the chunks, check the sign of the AND result.
  res2 &= shift_left_fill_with_ones<3>(ptr[3], ones);
  if (res2 >= 0) goto done4;
  res1 &= shift_left_fill_with_ones<4>(ptr[4], ones);
  if (res1 >= 0) goto done5;
  if (kIs64BitVarint) {
    res2 &= shift_left_fill_with_ones<5>(ptr[5], ones);
    if (res2 >= 0) goto done6;
    res3 &= shift_left_fill_with_ones<6>(ptr[6], ones);
    if (res3 >= 0) goto done7;
    res1 &= shift_left_fill_with_ones<7>(ptr[7], ones);
    if (res1 >= 0) goto done8;
    res3 &= shift_left_fill_with_ones<8>(ptr[8], ones);
    if (res3 >= 0) goto done9;
  } else if (kIs32BitVarint) {
    if (PROTOBUF_PREDICT_TRUE(!(ptr[5] & 0x80))) goto done6;
    if (PROTOBUF_PREDICT_TRUE(!(ptr[6] & 0x80))) goto done7;
    if (PROTOBUF_PREDICT_TRUE(!(ptr[7] & 0x80))) goto done8;
    if (PROTOBUF_PREDICT_TRUE(!(ptr[8] & 0x80))) goto done9;
  }

  // For valid 64bit varints, the 10th byte/ptr[9] should be exactly 1. In this
  // case, the continuation bit of ptr[8] already set the top bit of res3
  // correctly, so all we have to do is check that the expected case is true.
  if (PROTOBUF_PREDICT_TRUE(kIs64BitVarint && ptr[9] == 1)) goto done10;

  if (PROTOBUF_PREDICT_FALSE(ptr[9] & 0x80)) {
    // If the continue bit is set, it is an unterminated varint.
    return {nullptr, 0};
  }

  // A zero value of the first bit of the 10th byte represents an
  // over-serialized varint. This case should not happen, but if it does (say,
  // due to a nonconforming serializer), deassert the continuation bit that
  // came from ptr[8].
  if (kIs64BitVarint && ((ptr[9] & 1) == 0)) {
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
    // Use a small instruction since this is an uncommon code path.
    asm("btcq $63,%0" : "+r"(res3));
#else
    res3 ^= static_cast<uint64_t>(1) << 63;
#endif
  }
  goto done10;

done2:
  return {p + 2, res1 & res2};
done3:
  return {p + 3, res1 & res2 & res3};
done4:
  return {p + 4, res1 & res2 & res3};
done5:
  return {p + 5, res1 & res2 & res3};
done6:
  return {p + 6, res1 & res2 & res3};
done7:
  return {p + 7, res1 & res2 & res3};
done8:
  return {p + 8, res1 & res2 & res3};
done9:
  return {p + 9, res1 & res2 & res3};
done10:
  return {p + 10, res1 & res2 & res3};
}

const char* FastTV64S1(PROTOBUF_TC_PARAM_DECL) {
  constexpr int hasbit_idx = 0;
  constexpr int data_offset = 24;
  using FieldType = unsigned long;
  using TagType = uint8_t;

  // super-early success test...
  if (PROTOBUF_PREDICT_TRUE(((data.data) & 0x80FF) == 0)) {
    ptr += sizeof(TagType);  // Consume tag
    if (hasbit_idx < 32) {
      hasbits |= (uint64_t{1} << hasbit_idx);
    }
    uint8_t value = data.data >> 8;
    RefAt<FieldType>(msg, data_offset) = value;
    ptr += 1;
    [[clang::musttail]] return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
  }
  if (PROTOBUF_PREDICT_FALSE(data.coded_tag<TagType>() != 0)) {
    [[clang::musttail]] return MiniParse(PROTOBUF_TC_PARAM_PASS);
  }
  ptr += sizeof(TagType);  // Consume tag
  if (hasbit_idx < 32) {
    hasbits |= (uint64_t{1} << hasbit_idx);
  }
#ifdef OLD
  auto tmp = Parse64FallbackPair(ptr, static_cast<int8_t>(data.data >> 8));
#else
  auto tmp =
      ParseFallbackPair<uint64_t>(ptr, static_cast<int8_t>(data.data >> 8));
#endif
  data.data = 0;  // Indicate to the compiler that we don't need this anymore.
  ptr = tmp.first;
  if (PROTOBUF_PREDICT_FALSE(ptr == nullptr)) {
    return Error(PROTOBUF_TC_PARAM_PASS);
  }
  RefAt<FieldType>(msg, data_offset) = static_cast<FieldType>(tmp.second);
  [[clang::musttail]] return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
}

const char* FastTV32S1(PROTOBUF_TC_PARAM_DECL) {
  using TagType = uint8_t;
  constexpr int hasbit_idx = 0;
  constexpr int data_offset = 24;
  using FieldType = unsigned long;

  // super-early success test...
  if (PROTOBUF_PREDICT_TRUE(((data.data) & 0x80FF) == 0)) {
    ptr += sizeof(TagType);  // Consume tag
    if (hasbit_idx < 32) {
      hasbits |= (uint64_t{1} << hasbit_idx);
    }
    uint8_t value = data.data >> 8;
    RefAt<FieldType>(msg, data_offset) = value;
    ptr += 1;
    [[clang::musttail]] return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
  }
  if (PROTOBUF_PREDICT_FALSE(data.coded_tag<TagType>() != 0)) {
    [[clang::musttail]] return MiniParse(PROTOBUF_TC_PARAM_PASS);
  }
  ptr += sizeof(TagType);  // Consume tag
  if (hasbit_idx < 32) {
    hasbits |= (uint64_t{1} << hasbit_idx);
  }
#ifndef OLD
  auto tmp =
      ParseFallbackPair<uint32_t>(ptr, static_cast<int8_t>(data.data >> 8));
  data.data = 0;  // Indicate to the compiler that we don't need this anymore.
  ptr = tmp.first;
  if (PROTOBUF_PREDICT_FALSE(ptr == nullptr)) {
    return Error(PROTOBUF_TC_PARAM_PASS);
  }
  RefAt<FieldType>(msg, data_offset) = static_cast<FieldType>(tmp.second);
  [[clang::musttail]] return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
#else
  // Few registers
  auto* out = &RefAt<FieldType>(msg, data_offset);
  uint64_t res = 0xFF & (data.data >> 8);
  /* if (PROTOBUF_PREDICT_FALSE(res & 0x80)) */ {
    res = RotRight7AndReplaceLowByte(res, ptr[1]);
    if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
      res = RotRight7AndReplaceLowByte(res, ptr[2]);
      if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
        res = RotRight7AndReplaceLowByte(res, ptr[3]);
        if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
          res = RotRight7AndReplaceLowByte(res, ptr[4]);
          if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
            if (PROTOBUF_PREDICT_FALSE(ptr[5] & 0x80)) {
              if (PROTOBUF_PREDICT_FALSE(ptr[6] & 0x80)) {
                if (PROTOBUF_PREDICT_FALSE(ptr[7] & 0x80)) {
                  if (PROTOBUF_PREDICT_FALSE(ptr[8] & 0x80)) {
                    if (ptr[9] & 0x80) return Error(PROTOBUF_TC_PARAM_PASS);
                    *out = RotateLeft(res, 28);
                    ptr += 10;
                    PROTOBUF_MUSTTAIL return ToTagDispatch(
                        PROTOBUF_TC_PARAM_PASS);
                  }
                  *out = RotateLeft(res, 28);
                  ptr += 9;
                  PROTOBUF_MUSTTAIL return ToTagDispatch(
                      PROTOBUF_TC_PARAM_PASS);
                }
                *out = RotateLeft(res, 28);
                ptr += 8;
                PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
              }
              *out = RotateLeft(res, 28);
              ptr += 7;
              PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
            }
            *out = RotateLeft(res, 28);
            ptr += 6;
            PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
          }
          *out = RotateLeft(res, 28);
          ptr += 5;
          PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
        }
        *out = RotateLeft(res, 21);
        ptr += 4;
        PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
      }
      *out = RotateLeft(res, 14);
      ptr += 3;
      PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
    }
    *out = RotateLeft(res, 7);
    ptr += 2;
    PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
  }
  *out = res;
  ptr += 1;
  PROTOBUF_MUSTTAIL return ToTagDispatch(PROTOBUF_TC_PARAM_PASS);
#endif
}

uint64_t handPDEP_7F7F7F7F7F7F7F7F(uint64_t value) {
  // 1. Reshape value into two halves, each of which has 28 bits
#if defined(__aarch64__)
  uint64_t top28 = value & 0x00FFFFFFF0000000;
  uint64_t bot28 = value & 0x000000000FFFFFFF;
  value = (top28 << 4) + bot28;
#else
  value <<= 4;
  asm("" : "+r"(value));  // Improve clang
  value = (value & 0x0FFFFFFF00000000) | (static_cast<uint32_t>(value) >> 4);
#endif
  // 2. Reshape value into four quarters, each of which has 14 bits
  uint64_t top14s = value & 0xFFFFC000'0FFFC000;
  uint64_t bot14s = value - top14s;
  value = bot14s + (top14s << 2);
  // 3. Reshape value into eight bytes, each of which has 7 bits
  uint64_t top7s = value & 0x3F80'3F80'3F80'3F80;
  value += top7s;
  return value;
}
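
A minimal, hypothetical self-check (not part of the source above) that can be appended to it: it round-trips a fast-table TcFieldData entry and decodes the varint encoding of 300 (0xAC 0x02) through both Parse64FallbackPair and the templated ParseFallbackPair. The test values, the stub bodies, and main() are assumptions for illustration only; the stubs exist solely so the snippet links as a standalone clang build (when only compiling to assembly, as Compiler Explorer does, they are unnecessary and would change the codegen being studied). The parsers assume the usual protobuf precondition that the buffer has slop bytes past the varint, hence the zero-padded 10-byte buffer.

#include <cassert>

// Hypothetical stubs: only so this snippet links; the real table-driven
// parser supplies these dispatch hooks.
static const char* ToTagDispatch(PROTOBUF_TC_PARAM_DECL) { return ptr; }
static const char* MiniParse(PROTOBUF_TC_PARAM_DECL) { return ptr; }
static const char* Error(PROTOBUF_TC_PARAM_DECL) { return nullptr; }

int main() {
  // Fast-table layout: offset:48 | aux_idx:24 | hasbit_idx:16 | coded_tag:0.
  TcFieldData entry(/*coded_tag=*/0x08, /*hasbit_idx=*/3, /*aux_idx=*/1,
                    /*offset=*/24);  // arbitrary example values
  assert(entry.coded_tag<uint8_t>() == 0x08);
  assert(entry.hasbit_idx() == 3);
  assert(entry.aux_idx() == 1);
  assert(entry.offset() == 24);

  // Varint 300 = 0xAC 0x02: continuation bit set on the first byte only.
  // res1 is the sign-extended first byte, exactly as FastTV64S1 passes it.
  const char buf[10] = {'\xAC', '\x02'};  // trailing zeros act as slop
  auto r64 = Parse64FallbackPair(buf, static_cast<int8_t>(buf[0]));
  assert(r64.first == buf + 2);
  assert(r64.second == 300);

  // Same bytes through the templated 32-bit path.
  auto r32 = ParseFallbackPair<uint32_t>(buf, static_cast<int8_t>(buf[0]));
  assert(r32.first == buf + 2);
  assert(r32.second == 300);
  return 0;
}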
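
The hand-rolled bit spread at the end, handPDEP_7F7F7F7F7F7F7F7F, deposits the low 56 bits of its argument into the low 7 bits of each result byte. On an x86-64 target with BMI2 that is what PDEP with mask 0x7F7F7F7F7F7F7F7F computes, so a quick sanity check is to compare against _pdep_u64. This sketch is an assumption-laden illustration, not part of the original source: the function name is hypothetical, and it only compiles with -mbmi2 (or an -march that implies it).

#include <cassert>
#include <cstdint>
#include <immintrin.h>  // _pdep_u64, requires -mbmi2

// Hypothetical cross-check: the shift/mask version should match the BMI2
// instruction on arbitrary inputs (upper 8 bits of the source are ignored
// by both, since the mask has only 56 set bits).
void CheckHandPdepAgainstBmi2() {
  uint64_t v = 0x0123456789ABCDEF;  // arbitrary starting pattern
  for (int i = 0; i < 1000; ++i) {
    assert(handPDEP_7F7F7F7F7F7F7F7F(v) ==
           _pdep_u64(v, 0x7F7F7F7F7F7F7F7F));
    // Step a simple LCG to cover a spread of bit patterns.
    v = v * 6364136223846793005ULL + 1442695040888963407ULL;
  }
}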