Skip to content

Commit

Permalink
Add atomics to interpreter and x64 jit (vbpf#486)
Browse files Browse the repository at this point in the history
* Add atomics to interpreter

Signed-off-by: Alan Jowett <[email protected]>

* Implement atomic operations in the JIT.

Signed-off-by: Alan Jowett <[email protected]>

* Add bounds checking for atomics

Signed-off-by: Alan Jowett <[email protected]>

* Mark interlocked ops on arm64 JIT as expected failure

Signed-off-by: Alan Jowett <[email protected]>

* Mark interlocked operations as failing on macos

Signed-off-by: Alan Jowett <[email protected]>

* PR feedback

Signed-off-by: Alan Jowett <[email protected]>

* Update vm/ubpf_jit_x86_64.h

Co-authored-by: Will Hawkins <[email protected]>
Signed-off-by: Alan Jowett <[email protected]>

* PR feedback

Signed-off-by: Alan Jowett <[email protected]>

---------

Signed-off-by: Alan Jowett <[email protected]>
Signed-off-by: Alan Jowett <[email protected]>
Co-authored-by: Alan Jowett <[email protected]>
Co-authored-by: Will Hawkins <[email protected]>
  • Loading branch information
3 people authored Aug 28, 2024
1 parent 4826123 commit f1ecb7a
Show file tree
Hide file tree
Showing 6 changed files with 594 additions and 59 deletions.
34 changes: 29 additions & 5 deletions ubpf_plugin/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -66,9 +66,12 @@ endif()

# Add all names of tests that are expected to fail to the TESTS_EXPECTED_TO_FAIL list
list(APPEND TESTS_EXPECTED_TO_FAIL "duplicate_label")
# TODO: remove this once we have a proper implementation of interlocked operations
# and support for calling local functions.
list(APPEND TESTS_EXPECTED_TO_FAIL "lock")

# Interlocked (atomic) operations are not yet supported by the ARM64 JIT or on
# macOS, so every conformance test whose file name matches "lock" is expected
# to fail under the JIT on those platforms.
# Both sides of each comparison are quoted so that an unset CMAKE_SYSTEM_*
# variable cannot malform the if(), and so a stray variable named "aarch64" or
# "Darwin" is never dereferenced (CMP0054 behavior).
if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64" OR "${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin")
    # All files that have lock in the name are expected to fail (JIT only).
    list(APPEND TESTS_EXPECTED_TO_FAIL_JIT "lock")
endif()

foreach(file ${files})
unset(EXPECT_FAILURE)
Expand All @@ -80,12 +83,33 @@ foreach(file ${files})
endif()
endif()
endforeach()

# --- Per-mode expected-failure detection -------------------------------------
# A test may be expected to fail only under the JIT or only under the
# interpreter (EXPECT_FAILURE, computed above, covers both modes).  The first
# pattern that matches the test file name wins: the EXPECT_FAILURE_* variable
# then holds the matched text, which is truthy in if().  Note that each entry
# of the TESTS_EXPECTED_TO_FAIL_* lists is treated as a regular expression.
unset(EXPECT_FAILURE_JIT)
foreach(jit_fail_pattern ${TESTS_EXPECTED_TO_FAIL_JIT})
    string(REGEX MATCH "${jit_fail_pattern}" EXPECT_FAILURE_JIT "${file}")
    if(EXPECT_FAILURE_JIT)
        # Mention the mode so JIT and interpreter expectations are distinguishable in the log.
        message(STATUS "Expecting ${file} JIT test to fail.")
        break()
    endif()
endforeach()

unset(EXPECT_FAILURE_INTERPRET)
foreach(interpret_fail_pattern ${TESTS_EXPECTED_TO_FAIL_INTERPRET})
    string(REGEX MATCH "${interpret_fail_pattern}" EXPECT_FAILURE_INTERPRET "${file}")
    if(EXPECT_FAILURE_INTERPRET)
        message(STATUS "Expecting ${file} interpreter test to fail.")
        break()
    endif()
endforeach()

# Register the JIT flavor of this conformance test.
add_test(
NAME ${file}-JIT
COMMAND ${BPF_CONFORMANCE_RUNNER} --test_file_path ${file} ${PLUGIN_JIT}
)

if(EXPECT_FAILURE)
# Invert the result for tests expected to fail everywhere (EXPECT_FAILURE)
# or under the JIT specifically (EXPECT_FAILURE_JIT).
if(EXPECT_FAILURE OR EXPECT_FAILURE_JIT)
set_tests_properties(${file}-JIT PROPERTIES WILL_FAIL TRUE)
endif()

Expand All @@ -94,7 +118,7 @@ foreach(file ${files})
COMMAND ${BPF_CONFORMANCE_RUNNER} --test_file_path ${file} ${PLUGIN_INTERPRET}
)

if(EXPECT_FAILURE)
# Invert the result for tests expected to fail everywhere (EXPECT_FAILURE)
# or under the interpreter specifically (EXPECT_FAILURE_INTERPRET).
if(EXPECT_FAILURE OR EXPECT_FAILURE_INTERPRET)
set_tests_properties(${file}-Interpreter PROPERTIES WILL_FAIL TRUE)
endif()
endforeach()
133 changes: 80 additions & 53 deletions vm/ebpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,63 +70,87 @@ enum bpf_register
#define EBPF_SIZE_B 0x10
#define EBPF_SIZE_DW 0x18

/**
 * @brief Atomic modifier for EBPF_CLS_STX operation.
 *
 * When the mode bits of an EBPF_CLS_STX opcode are EBPF_MODE_ATOMIC, the
 * instruction's imm field selects the atomic operation: an EBPF_ALU_OP_*
 * value, optionally OR'ed with EBPF_ATOMIC_OP_FETCH, or one of the
 * exchange encodings below.
 */
#define EBPF_MODE_ATOMIC 0xc0

/* When set in imm, the atomic op also returns the previous memory value. */
#define EBPF_ATOMIC_OP_FETCH 0x01
/* Exchange and compare-exchange always return the previous value, so the
 * FETCH bit is folded into their encodings. */
#define EBPF_ATOMIC_OP_XCHG (0xe0 | EBPF_ATOMIC_OP_FETCH)
#define EBPF_ATOMIC_OP_CMPXCHG (0xf0 | EBPF_ATOMIC_OP_FETCH)

/* Other memory modes are not yet supported */
#define EBPF_MODE_IMM 0x00
#define EBPF_MODE_MEM 0x60

#define EBPF_OP_ADD_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x00)
#define EBPF_OP_ADD_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x00)
#define EBPF_OP_SUB_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x10)
#define EBPF_OP_SUB_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x10)
#define EBPF_OP_MUL_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x20)
#define EBPF_OP_MUL_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x20)
#define EBPF_OP_DIV_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x30)
#define EBPF_OP_DIV_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x30)
#define EBPF_OP_OR_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x40)
#define EBPF_OP_OR_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x40)
#define EBPF_OP_AND_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x50)
#define EBPF_OP_AND_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x50)
#define EBPF_OP_LSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x60)
#define EBPF_OP_LSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x60)
#define EBPF_OP_RSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x70)
#define EBPF_OP_RSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x70)
#define EBPF_OP_NEG (EBPF_CLS_ALU | 0x80)
#define EBPF_OP_MOD_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0x90)
#define EBPF_OP_MOD_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0x90)
#define EBPF_OP_XOR_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0xa0)
#define EBPF_OP_XOR_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0xa0)
#define EBPF_OP_MOV_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0xb0)
#define EBPF_OP_MOV_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0xb0)
#define EBPF_OP_ARSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | 0xc0)
#define EBPF_OP_ARSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | 0xc0)
#define EBPF_OP_LE (EBPF_CLS_ALU | EBPF_SRC_IMM | 0xd0)
#define EBPF_OP_BE (EBPF_CLS_ALU | EBPF_SRC_REG | 0xd0)

#define EBPF_OP_ADD64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x00)
#define EBPF_OP_ADD64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x00)
#define EBPF_OP_SUB64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x10)
#define EBPF_OP_SUB64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x10)
#define EBPF_OP_MUL64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x20)
#define EBPF_OP_MUL64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x20)
#define EBPF_OP_DIV64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x30)
#define EBPF_OP_DIV64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x30)
#define EBPF_OP_OR64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x40)
#define EBPF_OP_OR64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x40)
#define EBPF_OP_AND64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x50)
#define EBPF_OP_AND64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x50)
#define EBPF_OP_LSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x60)
#define EBPF_OP_LSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x60)
#define EBPF_OP_RSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x70)
#define EBPF_OP_RSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x70)
#define EBPF_OP_NEG64 (EBPF_CLS_ALU64 | 0x80)
#define EBPF_OP_MOD64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0x90)
#define EBPF_OP_MOD64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0x90)
#define EBPF_OP_XOR64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0xa0)
#define EBPF_OP_XOR64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0xa0)
#define EBPF_OP_MOV64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0xb0)
#define EBPF_OP_MOV64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0xb0)
#define EBPF_OP_ARSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | 0xc0)
#define EBPF_OP_ARSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | 0xc0)
/* ALU operation selectors (the high nibble of an ALU/ALU64 opcode).
 * These are OR'ed with a class (EBPF_CLS_ALU / EBPF_CLS_ALU64) and a source
 * (EBPF_SRC_IMM / EBPF_SRC_REG) to form the full opcodes below, and are also
 * used in the imm field of EBPF_MODE_ATOMIC stores to pick the operation. */
#define EBPF_ALU_OP_ADD 0x00
#define EBPF_ALU_OP_SUB 0x10
#define EBPF_ALU_OP_MUL 0x20
#define EBPF_ALU_OP_DIV 0x30
#define EBPF_ALU_OP_OR 0x40
#define EBPF_ALU_OP_AND 0x50
#define EBPF_ALU_OP_LSH 0x60
#define EBPF_ALU_OP_RSH 0x70
#define EBPF_ALU_OP_NEG 0x80
#define EBPF_ALU_OP_MOD 0x90
#define EBPF_ALU_OP_XOR 0xa0
#define EBPF_ALU_OP_MOV 0xb0
#define EBPF_ALU_OP_ARSH 0xc0
#define EBPF_ALU_OP_END 0xd0

/* 32-bit ALU opcodes: class EBPF_CLS_ALU combined with a source selector and
 * an EBPF_ALU_OP_* operation.  EBPF_OP_LE / EBPF_OP_BE reuse the END slot,
 * distinguished by the SRC bit. */
#define EBPF_OP_ADD_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_ADD)
#define EBPF_OP_ADD_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_ADD)
#define EBPF_OP_SUB_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_SUB)
#define EBPF_OP_SUB_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_SUB)
#define EBPF_OP_MUL_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_MUL)
#define EBPF_OP_MUL_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_MUL)
#define EBPF_OP_DIV_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_DIV)
#define EBPF_OP_DIV_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_DIV)
#define EBPF_OP_OR_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_OR)
#define EBPF_OP_OR_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_OR)
#define EBPF_OP_AND_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_AND)
#define EBPF_OP_AND_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_AND)
#define EBPF_OP_LSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_LSH)
#define EBPF_OP_LSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_LSH)
#define EBPF_OP_RSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_RSH)
#define EBPF_OP_RSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_RSH)
#define EBPF_OP_NEG (EBPF_CLS_ALU | EBPF_ALU_OP_NEG)
#define EBPF_OP_MOD_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_MOD)
#define EBPF_OP_MOD_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_MOD)
#define EBPF_OP_XOR_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_XOR)
#define EBPF_OP_XOR_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_XOR)
#define EBPF_OP_MOV_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_MOV)
#define EBPF_OP_MOV_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_MOV)
#define EBPF_OP_ARSH_IMM (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_ARSH)
#define EBPF_OP_ARSH_REG (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_ARSH)
#define EBPF_OP_LE (EBPF_CLS_ALU | EBPF_SRC_IMM | EBPF_ALU_OP_END)
#define EBPF_OP_BE (EBPF_CLS_ALU | EBPF_SRC_REG | EBPF_ALU_OP_END)

/* 64-bit ALU opcodes: same layout as the 32-bit table above, but with class
 * EBPF_CLS_ALU64. */
#define EBPF_OP_ADD64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_ADD)
#define EBPF_OP_ADD64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_ADD)
#define EBPF_OP_SUB64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_SUB)
#define EBPF_OP_SUB64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_SUB)
#define EBPF_OP_MUL64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_MUL)
#define EBPF_OP_MUL64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_MUL)
#define EBPF_OP_DIV64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_DIV)
#define EBPF_OP_DIV64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_DIV)
#define EBPF_OP_OR64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_OR)
#define EBPF_OP_OR64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_OR)
#define EBPF_OP_AND64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_AND)
#define EBPF_OP_AND64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_AND)
#define EBPF_OP_LSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_LSH)
#define EBPF_OP_LSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_LSH)
#define EBPF_OP_RSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_RSH)
#define EBPF_OP_RSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_RSH)
#define EBPF_OP_NEG64 (EBPF_CLS_ALU64 | EBPF_ALU_OP_NEG)
#define EBPF_OP_MOD64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_MOD)
#define EBPF_OP_MOD64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_MOD)
#define EBPF_OP_XOR64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_XOR)
#define EBPF_OP_XOR64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_XOR)
#define EBPF_OP_MOV64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_MOV)
#define EBPF_OP_MOV64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_MOV)
#define EBPF_OP_ARSH64_IMM (EBPF_CLS_ALU64 | EBPF_SRC_IMM | EBPF_ALU_OP_ARSH)
#define EBPF_OP_ARSH64_REG (EBPF_CLS_ALU64 | EBPF_SRC_REG | EBPF_ALU_OP_ARSH)

#define EBPF_OP_LDXW (EBPF_CLS_LDX | EBPF_MODE_MEM | EBPF_SIZE_W)
#define EBPF_OP_LDXH (EBPF_CLS_LDX | EBPF_MODE_MEM | EBPF_SIZE_H)
Expand Down Expand Up @@ -206,4 +230,7 @@ enum bpf_register
#define EBPF_OP_JSLE32_IMM (EBPF_CLS_JMP32 | EBPF_SRC_IMM | EBPF_MODE_JSLE)
#define EBPF_OP_JSLE32_REG (EBPF_CLS_JMP32 | EBPF_SRC_REG | EBPF_MODE_JSLE)

/* Atomic store opcodes (EBPF_CLS_STX with EBPF_MODE_ATOMIC); the specific
 * operation is carried in the instruction's imm field (EBPF_ALU_OP_* plus
 * optional EBPF_ATOMIC_OP_FETCH, or EBPF_ATOMIC_OP_XCHG / _CMPXCHG). */
#define EBPF_OP_ATOMIC32_STORE (EBPF_CLS_STX | EBPF_MODE_ATOMIC | EBPF_SIZE_W)
#define EBPF_OP_ATOMIC_STORE (EBPF_CLS_STX | EBPF_MODE_ATOMIC | EBPF_SIZE_DW)

#endif
33 changes: 33 additions & 0 deletions vm/ubpf_int.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,4 +208,37 @@ ubpf_instruction_has_fallthrough(const struct ebpf_inst inst)
return inst.opcode != EBPF_OP_EXIT;
}

// If either GNU C or Clang
#if defined(__GNUC__) || defined(__clang__)
/*
 * Portable atomic read-modify-write helpers used to implement the
 * EBPF_MODE_ATOMIC store instructions.  The *_FETCH and EXCHANGE macros
 * evaluate to the value previously stored at *ptr; the *32 variants operate
 * on 32-bit memory.  All macro arguments are parenthesized so that callers
 * may pass arbitrary expressions.
 *
 * NOTE(review): __sync_lock_test_and_set is documented as an acquire barrier
 * only (the other __sync builtins are full barriers) — confirm this is
 * acceptable for eBPF atomic-exchange semantics.
 */
#define UBPF_ATOMIC_ADD_FETCH(ptr, val) __sync_fetch_and_add((ptr), (val))
#define UBPF_ATOMIC_OR_FETCH(ptr, val) __sync_fetch_and_or((ptr), (val))
#define UBPF_ATOMIC_AND_FETCH(ptr, val) __sync_fetch_and_and((ptr), (val))
#define UBPF_ATOMIC_XOR_FETCH(ptr, val) __sync_fetch_and_xor((ptr), (val))
/* Fixed: removed a stray trailing ';' from the original definition, which
 * made the macro unusable inside an expression (e.g. x = UBPF_ATOMIC_EXCHANGE(...)
 * would previously expand to a double semicolon or a syntax error). */
#define UBPF_ATOMIC_EXCHANGE(ptr, val) __sync_lock_test_and_set((ptr), (val))
/* NOTE(review): this branch yields a bool ("swap happened") while the MSVC
 * branch yields the previous value; callers must not rely on the result in a
 * cross-compiler way — verify call sites or align the two branches. */
#define UBPF_ATOMIC_COMPARE_EXCHANGE(ptr, oldval, newval) __sync_bool_compare_and_swap((ptr), (oldval), (newval))
#define UBPF_ATOMIC_ADD_FETCH32(ptr, val) __sync_fetch_and_add((ptr), (val))
#define UBPF_ATOMIC_OR_FETCH32(ptr, val) __sync_fetch_and_or((ptr), (val))
#define UBPF_ATOMIC_AND_FETCH32(ptr, val) __sync_fetch_and_and((ptr), (val))
#define UBPF_ATOMIC_XOR_FETCH32(ptr, val) __sync_fetch_and_xor((ptr), (val))
/* Fixed: stray trailing ';' removed here as well. */
#define UBPF_ATOMIC_EXCHANGE32(ptr, val) __sync_lock_test_and_set((ptr), (val))
#define UBPF_ATOMIC_COMPARE_EXCHANGE32(ptr, oldval, newval) __sync_bool_compare_and_swap((ptr), (oldval), (newval))
// If Microsoft Visual C++
#elif defined(_MSC_VER)
#include <intrin.h>
/* The _Interlocked* intrinsics return the value previously stored at *ptr
 * and imply a full memory barrier. */
#define UBPF_ATOMIC_ADD_FETCH(ptr, val) _InterlockedExchangeAdd64((volatile int64_t*)(ptr), (val))
#define UBPF_ATOMIC_OR_FETCH(ptr, val) _InterlockedOr64((volatile int64_t*)(ptr), (val))
#define UBPF_ATOMIC_AND_FETCH(ptr, val) _InterlockedAnd64((volatile int64_t*)(ptr), (val))
#define UBPF_ATOMIC_XOR_FETCH(ptr, val) _InterlockedXor64((volatile int64_t*)(ptr), (val))
#define UBPF_ATOMIC_EXCHANGE(ptr, val) _InterlockedExchange64((volatile int64_t*)(ptr), (val))
/* Fixed: _InterlockedCompareExchange64 takes (Destination, Exchange,
 * Comparand); the original passed (oldval, newval), i.e. it tried to swap in
 * the *old* value whenever memory equalled the *new* value — the opposite of
 * the GCC branch.  Arguments are now ordered to match
 * __sync_bool_compare_and_swap(ptr, oldval, newval) semantics. */
#define UBPF_ATOMIC_COMPARE_EXCHANGE(ptr, oldval, newval) \
    _InterlockedCompareExchange64((volatile int64_t*)(ptr), (newval), (oldval))
#define UBPF_ATOMIC_ADD_FETCH32(ptr, val) _InterlockedExchangeAdd((volatile long*)(ptr), (val))
#define UBPF_ATOMIC_OR_FETCH32(ptr, val) _InterlockedOr((volatile long*)(ptr), (val))
#define UBPF_ATOMIC_AND_FETCH32(ptr, val) _InterlockedAnd((volatile long*)(ptr), (val))
#define UBPF_ATOMIC_XOR_FETCH32(ptr, val) _InterlockedXor((volatile long*)(ptr), (val))
#define UBPF_ATOMIC_EXCHANGE32(ptr, val) _InterlockedExchange((volatile long*)(ptr), (val))
/* Fixed: same Exchange/Comparand ordering correction as above. */
#define UBPF_ATOMIC_COMPARE_EXCHANGE32(ptr, oldval, newval) \
    _InterlockedCompareExchange((volatile long*)(ptr), (newval), (oldval))
#endif

#endif
87 changes: 87 additions & 0 deletions vm/ubpf_jit_x86_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -820,6 +820,93 @@ translate(struct ubpf_vm* vm, struct jit_state* state, char** errmsg)
emit_load_imm(state, dst, imm);
break;
}
// 64-bit atomic read-modify-write store.  The operation is encoded in
// inst.imm: the ALU-op bits select the arithmetic, and EBPF_ATOMIC_OP_FETCH
// requests the variant that also returns the previous memory value
// (presumably into the src register — confirm against the emit_atomic_fetch_*
// helpers, which are defined outside this view).
case EBPF_OP_ATOMIC_STORE: {
bool fetch = inst.imm & EBPF_ATOMIC_OP_FETCH;
switch (inst.imm & EBPF_ALU_OP_MASK) {
case EBPF_ALU_OP_ADD:
if (fetch) {
emit_atomic_fetch_add64(state, src, dst, inst.offset);
} else {
emit_atomic_add64(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_OR:
if (fetch) {
emit_atomic_fetch_or64(state, src, dst, inst.offset);
} else {
emit_atomic_or64(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_AND:
if (fetch) {
emit_atomic_fetch_and64(state, src, dst, inst.offset);
} else {
emit_atomic_and64(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_XOR:
if (fetch) {
emit_atomic_fetch_xor64(state, src, dst, inst.offset);
} else {
emit_atomic_xor64(state, src, dst, inst.offset);
}
break;
// The XCHG/CMPXCHG encodings have the FETCH bit folded in (see ebpf.h), so
// it is masked off to produce a distinct case label; these ops always fetch.
case (EBPF_ATOMIC_OP_XCHG & ~EBPF_ATOMIC_OP_FETCH):
emit_atomic_exchange64(state, src, dst, inst.offset);
break;
case (EBPF_ATOMIC_OP_CMPXCHG & ~EBPF_ATOMIC_OP_FETCH):
emit_atomic_compare_exchange64(state, src, dst, inst.offset);
break;
default:
// Reject unrecognized atomic sub-opcodes rather than emitting wrong code.
*errmsg = ubpf_error("Error: unknown atomic opcode %d at PC %d\n", inst.imm, i);
return -1;
}
} break;

// 32-bit atomic read-modify-write store.  Same imm-based dispatch as the
// 64-bit case above, but using the 32-bit emitters; registers holding a
// fetched 32-bit result are explicitly truncated afterwards.
case EBPF_OP_ATOMIC32_STORE: {
bool fetch = inst.imm & EBPF_ATOMIC_OP_FETCH;
switch (inst.imm & EBPF_ALU_OP_MASK) {
case EBPF_ALU_OP_ADD:
if (fetch) {
emit_atomic_fetch_add32(state, src, dst, inst.offset);
} else {
emit_atomic_add32(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_OR:
if (fetch) {
emit_atomic_fetch_or32(state, src, dst, inst.offset);
} else {
emit_atomic_or32(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_AND:
if (fetch) {
emit_atomic_fetch_and32(state, src, dst, inst.offset);
} else {
emit_atomic_and32(state, src, dst, inst.offset);
}
break;
case EBPF_ALU_OP_XOR:
if (fetch) {
emit_atomic_fetch_xor32(state, src, dst, inst.offset);
} else {
emit_atomic_xor32(state, src, dst, inst.offset);
}
break;
case (EBPF_ATOMIC_OP_XCHG & ~EBPF_ATOMIC_OP_FETCH):
emit_atomic_exchange32(state, src, dst, inst.offset);
// The previous value lands in src; clear its upper 32 bits so the eBPF
// register holds a zero-extended 32-bit result.
emit_truncate_u32(state, src);
break;
case (EBPF_ATOMIC_OP_CMPXCHG & ~EBPF_ATOMIC_OP_FETCH):
emit_atomic_compare_exchange32(state, src, dst, inst.offset);
// x86 CMPXCHG reports the original memory value in eax, which maps to
// eBPF r0 (map_register(0)); truncate it to 32 bits for the same reason.
emit_truncate_u32(state, map_register(0));
break;
default:
// Reject unrecognized atomic sub-opcodes rather than emitting wrong code.
*errmsg = ubpf_error("Error: unknown atomic opcode %d at PC %d\n", inst.imm, i);
return -1;
}
} break;

default:
state->jit_status = UnknownInstruction;
Expand Down
Loading

0 comments on commit f1ecb7a

Please sign in to comment.