diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 84f5c6c79..7ccddd01b 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -12,6 +12,14 @@ body: description: What is the problem? A clear and concise description of the bug. validations: required: true + - type: checkboxes + id: regression + attributes: + label: Regression Issue + description: What is a regression? If it worked in a previous version but doesn't in the latest version, it's considered a regression. In this case, please provide specific version number in the report. + options: + - label: Select this option if this issue appears to be a regression. + required: false - type: textarea id: expected attributes: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d83c1cd02..108b81098 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -159,16 +159,15 @@ jobs: macos: runs-on: macos-14 # latest strategy: + fail-fast: false matrix: - cmake_options: - - "-DAWS_USE_DISPATCH_QUEUE=ON" - - "-DAWS_USE_DISPATCH_QUEUE=OFF" + eventloop: ["-DAWS_USE_APPLE_NETWORK_FRAMEWORK=ON", "-DAWS_USE_APPLE_NETWORK_FRAMEWORK=OFF"] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder - ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=${{ matrix.cmake_options }} + ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=${{ matrix.eventloop }} macos-secitem: runs-on: macos-14 # latest @@ -177,7 +176,7 @@ jobs: run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder - ./builder build -p ${{ env.PACKAGE_NAME }} 
--cmake-extra=-DAWS_USE_DISPATCH_QUEUE=ON --cmake-extra=-DAWS_USE_SECITEM=ON + ./builder build -p ${{ env.PACKAGE_NAME }} --cmake-extra=-DAWS_USE_APPLE_NETWORK_FRAMEWORK=ON --cmake-extra=-DAWS_USE_SECITEM=ON macos-x64: runs-on: macos-14-large # latest @@ -191,8 +190,9 @@ jobs: macos-debug: runs-on: macos-14 # latest strategy: + fail-fast: false matrix: - eventloop: ["-DAWS_USE_DISPATCH_QUEUE=ON", "-DAWS_USE_DISPATCH_QUEUE=OFF"] + eventloop: ["-DAWS_USE_APPLE_NETWORK_FRAMEWORK=ON", "-DAWS_USE_APPLE_NETWORK_FRAMEWORK=OFF"] steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | diff --git a/.github/workflows/issue-regression-labeler.yml b/.github/workflows/issue-regression-labeler.yml new file mode 100644 index 000000000..bd000719d --- /dev/null +++ b/.github/workflows/issue-regression-labeler.yml @@ -0,0 +1,32 @@ +# Apply potential regression label on issues +name: issue-regression-label +on: + issues: + types: [opened, edited] +jobs: + add-regression-label: + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Fetch template body + id: check_regression + uses: actions/github-script@v7 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TEMPLATE_BODY: ${{ github.event.issue.body }} + with: + script: | + const regressionPattern = /\[x\] Select this option if this issue appears to be a regression\./i; + const template = `${process.env.TEMPLATE_BODY}` + const match = regressionPattern.test(template); + core.setOutput('is_regression', match); + - name: Manage regression label + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + if [ "${{ steps.check_regression.outputs.is_regression }}" == "true" ]; then + gh issue edit ${{ github.event.issue.number }} --add-label "potential-regression" -R ${{ github.repository }} + else + gh issue edit ${{ github.event.issue.number }} --remove-label "potential-regression" -R ${{ github.repository }} + fi diff --git a/.github/workflows/proof-alarm.yml b/.github/workflows/proof-alarm.yml index 
b6e34c10c..4e33d77f7 100644 --- a/.github/workflows/proof-alarm.yml +++ b/.github/workflows/proof-alarm.yml @@ -16,7 +16,7 @@ jobs: - name: Check run: | TMPFILE=$(mktemp) - echo "c624a28de5af7f851a240a1e65a26c01 source/linux/epoll_event_loop.c" > $TMPFILE + echo "1fdf8e7a914412cc7242b8d64732fa89 source/linux/epoll_event_loop.c" > $TMPFILE md5sum --check $TMPFILE # No further steps if successful diff --git a/CMakeLists.txt b/CMakeLists.txt index 3279a8a0d..251923554 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,7 @@ -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.9) project(aws-c-io C) -if (POLICY CMP0069) - cmake_policy(SET CMP0069 NEW) # Enable LTO/IPO if available in the compiler, see AwsCFlags -endif() - if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) endif() @@ -78,7 +74,7 @@ if (WIN32) ) list(APPEND AWS_IO_OS_SRC ${AWS_IO_IOCP_SRC}) - set(EVENT_LOOP_DEFINES "AWS_USE_IO_COMPLETION_PORTS") + list(APPEND EVENT_LOOP_DEFINES "IO_COMPLETION_PORTS") endif () if (MSVC) @@ -105,7 +101,7 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Androi ) set(PLATFORM_LIBS "") - set(EVENT_LOOP_DEFINES "-DAWS_USE_EPOLL") + list(APPEND EVENT_LOOP_DEFINES "EPOLL") set(USE_S2N ON) elseif (APPLE) @@ -129,19 +125,12 @@ elseif (APPLE) message(FATAL_ERROR "Network framework not found") endif () - #No choice on TLS for apple, darwinssl will always be used. 
list(APPEND PLATFORM_LIBS "-framework Security -framework Network") + list(APPEND EVENT_LOOP_DEFINES "DISPATCH_QUEUE") - if (AWS_USE_SECITEM OR IOS) - message("AWS_USE_SECITEM is set, using dispatch queue event loops") - set(EVENT_LOOP_DEFINES "-DAWS_USE_DISPATCH_QUEUE") - elseif (AWS_USE_DISPATCH_QUEUE) - message("AWS_USE_DISPATCH_QUEUE is set, using dispatch queue event loops") - set(EVENT_LOOP_DEFINES "-DAWS_USE_DISPATCH_QUEUE") - list(APPEND AWS_IO_OS_SRC ${AWS_IO_DISPATCH_QUEUE_SRC}) - else () - message("using default kqueue event loops") - set(EVENT_LOOP_DEFINES "-DAWS_USE_KQUEUE") + # Enable KQUEUE on MacOS only if AWS_USE_SECITEM is not declared. SecItem requires Dispatch Queue. + if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin" AND NOT DEFINED AWS_USE_SECITEM) + list(APPEND EVENT_LOOP_DEFINES "KQUEUE") endif() elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetBSD" OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") @@ -153,7 +142,7 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB "source/posix/*.c" ) - set(EVENT_LOOP_DEFINES "-DAWS_USE_KQUEUE") + list(APPEND EVENT_LOOP_DEFINES "KQUEUE") set(USE_S2N ON) endif() @@ -206,7 +195,9 @@ aws_add_sanitizers(${PROJECT_NAME}) # We are not ABI stable yet set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1.0.0) -target_compile_definitions(${PROJECT_NAME} PUBLIC "${EVENT_LOOP_DEFINES}") +foreach(EVENT_LOOP_DEFINE IN LISTS EVENT_LOOP_DEFINES) + target_compile_definitions(${PROJECT_NAME} PUBLIC "-DAWS_ENABLE_${EVENT_LOOP_DEFINE}") +endforeach() if (AWS_USE_SECITEM) target_compile_definitions(${PROJECT_NAME} PUBLIC "-DAWS_USE_SECITEM") @@ -228,6 +219,10 @@ if (USE_VSOCK) target_compile_definitions(${PROJECT_NAME} PUBLIC "-DUSE_VSOCK") endif() +if (AWS_USE_APPLE_NETWORK_FRAMEWORK) + target_compile_definitions(${PROJECT_NAME} PUBLIC "-DAWS_USE_APPLE_NETWORK_FRAMEWORK") +endif() + target_include_directories(${PROJECT_NAME} PUBLIC $ $) diff --git a/README.md b/README.md 
index 03446b54b..b4a0f0636 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ This library is licensed under the Apache 2.0 License. ### Building -CMake 3.1+ is required to build. +CMake 3.9+ is required to build. `` must be an absolute path in the following instructions. @@ -646,8 +646,7 @@ All exported functions, simply shim into the v-table and return. We include a cross-platform API for sockets. We support TCP and UDP using IPv4 and IPv6, and Unix Domain sockets. On Windows, we use Named Pipes to support the functionality of Unix Domain sockets. On Windows, this is implemented with winsock2, and on -all unix platforms we use the posix API. We can also enable Apple Network Framework along with the Apple Dispatch Queue by setting -the preprocessing flag `AWS_USE_DISPATCH_QUEUE`. Then we will use Apple Network Framework on Apple platforms. +all unix platforms we use the posix API. Then we will use Apple Network Framework on Apple platforms. Upon a connection being established, the new socket (either as the result of a `connect()` or `start_accept()` call) will not be attached to any event loops. It is your responsibility to register it with an event loop to begin receiving diff --git a/include/aws/io/event_loop.h b/include/aws/io/event_loop.h index 1926d25b4..7778edd7d 100644 --- a/include/aws/io/event_loop.h +++ b/include/aws/io/event_loop.h @@ -6,84 +6,28 @@ * SPDX-License-Identifier: Apache-2.0. 
*/ -#include -#include -#include - #include AWS_PUSH_SANE_WARNING_LEVEL -enum aws_io_event_type { - AWS_IO_EVENT_TYPE_READABLE = 1, - AWS_IO_EVENT_TYPE_WRITABLE = 2, - AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, - AWS_IO_EVENT_TYPE_CLOSED = 8, - AWS_IO_EVENT_TYPE_ERROR = 16, -}; - struct aws_event_loop; +struct aws_event_loop_group; +struct aws_event_loop_options; +struct aws_shutdown_callback_options; struct aws_task; -struct aws_thread_options; - -#if AWS_USE_IO_COMPLETION_PORTS - -struct aws_overlapped; - -typedef void(aws_event_loop_on_completion_fn)( - struct aws_event_loop *event_loop, - struct aws_overlapped *overlapped, - int status_code, - size_t num_bytes_transferred); /** - * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in . It is used - * here to avoid pulling in a dependency on which would also bring along a lot of bad macros, such - * as redefinitions of GetMessage and GetObject. Note that the OVERLAPPED struct layout in the Windows SDK can - * never be altered without breaking binary compatibility for every existing third-party executable, so there - * is no need to worry about keeping this definition in sync. + * @internal */ -struct aws_win32_OVERLAPPED { - uintptr_t Internal; - uintptr_t InternalHigh; - union { - struct { - uint32_t Offset; - uint32_t OffsetHigh; - } s; - void *Pointer; - } u; - void *hEvent; -}; - -/** - * Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct. - * OVERLAPPED structs are needed to make OS-level async I/O calls. - * When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread. - * While the I/O is pending, it is not safe to modify or delete aws_overlapped. - * Call aws_overlapped_init() before first use. If the aws_overlapped will be used multiple times, call - * aws_overlapped_reset() or aws_overlapped_init() between uses. 
- */ -struct aws_overlapped { - struct aws_win32_OVERLAPPED overlapped; - aws_event_loop_on_completion_fn *on_completion; - void *user_data; -}; - -#endif /* AWS_USE_IO_COMPLETION_PORTS */ - typedef void(aws_event_loop_on_event_fn)( struct aws_event_loop *event_loop, struct aws_io_handle *handle, int events, void *user_data); -enum aws_event_loop_style { - AWS_EVENT_LOOP_STYLE_UNDEFINED = 0, - AWS_EVENT_LOOP_STYLE_POLL_BASED = 1, - AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED = 2, -}; - +/** + * @internal + */ struct aws_event_loop_vtable { void (*destroy)(struct aws_event_loop *event_loop); int (*run)(struct aws_event_loop *event_loop); @@ -92,262 +36,84 @@ struct aws_event_loop_vtable { void (*schedule_task_now)(struct aws_event_loop *event_loop, struct aws_task *task); void (*schedule_task_future)(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); void (*cancel_task)(struct aws_event_loop *event_loop, struct aws_task *task); - union { - int (*connect_to_completion_port)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); - int (*subscribe_to_io_events)( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - aws_event_loop_on_event_fn *on_event, - void *user_data); - } register_style; - enum aws_event_loop_style event_loop_style; + int (*connect_to_io_completion_port)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + int (*subscribe_to_io_events)( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data); int (*unsubscribe_from_io_events)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); void (*free_io_event_resources)(void *user_data); bool (*is_on_callers_thread)(struct aws_event_loop *event_loop); }; -struct aws_event_loop { - struct aws_event_loop_vtable *vtable; - struct aws_allocator *alloc; - aws_io_clock_fn *clock; - struct aws_hash_table local_data; - struct 
aws_atomic_var current_load_factor; - uint64_t latest_tick_start; - size_t current_tick_latency_sum; - struct aws_atomic_var next_flush_time; - void *impl_data; -}; - -struct aws_event_loop_local_object; -typedef void(aws_event_loop_on_local_object_removed_fn)(struct aws_event_loop_local_object *); - -struct aws_event_loop_local_object { - const void *key; - void *object; - aws_event_loop_on_local_object_removed_fn *on_object_removed; +/** + * Event Loop Type. If set to `AWS_EVENT_LOOP_PLATFORM_DEFAULT`, the event loop will automatically use the platform’s + * default. + * + * Default Event Loop Type + * Linux | AWS_EVENT_LOOP_EPOLL + * Windows | AWS_EVENT_LOOP_IOCP + * BSD Variants| AWS_EVENT_LOOP_KQUEUE + * MacOS | AWS_EVENT_LOOP_KQUEUE + * iOS | AWS_EVENT_LOOP_DISPATCH_QUEUE + */ +enum aws_event_loop_type { + AWS_EVENT_LOOP_PLATFORM_DEFAULT = 0, + AWS_EVENT_LOOP_EPOLL, + AWS_EVENT_LOOP_IOCP, + AWS_EVENT_LOOP_KQUEUE, + AWS_EVENT_LOOP_DISPATCH_QUEUE, }; -struct aws_event_loop_options { - aws_io_clock_fn *clock; - struct aws_thread_options *thread_options; -}; +/** + * Event loop group configuration options + */ +struct aws_event_loop_group_options { -typedef struct aws_event_loop *(aws_new_event_loop_fn)(struct aws_allocator *alloc, - const struct aws_event_loop_options *options, - void *new_loop_user_data); + /** + * How many event loops that event loop group should contain. For most group types, this implies + * the creation and management of an analogous amount of managed threads + */ + uint16_t loop_count; -struct aws_event_loop_group { - struct aws_allocator *allocator; - struct aws_array_list event_loops; - struct aws_ref_count ref_count; - struct aws_shutdown_callback_options shutdown_options; -}; + /** + * Event loop type. If the event loop type is set to AWS_EVENT_LOOP_PLATFORM_DEFAULT, the + * creation function will automatically use the platform’s default event loop type. 
+ */ + enum aws_event_loop_type type; -typedef struct aws_event_loop *(aws_new_system_event_loop_fn)(struct aws_allocator *alloc, - const struct aws_event_loop_options *options); + /** + * Optional callback to invoke when the event loop group finishes destruction. + */ + const struct aws_shutdown_callback_options *shutdown_options; -struct aws_event_loop_configuration { - enum aws_event_loop_style style; - aws_new_system_event_loop_fn *event_loop_new_fn; - const char *name; /** - * TODO: Currently, we use pre-compile definitions to determine which event-loop we would like to use in aws-c-io. - * For future improvements, we would like to allow a runtime configuration to set the event loop, so that the user - * could make choice themselves. Once that's there, as we would have multiple event loop implementation enabled, - * the `is_default` would be used to set the default event loop configuration. + * Optional configuration to control how the event loop group's threads bind to CPU groups */ - bool is_default; -}; + const uint16_t *cpu_group; -struct aws_event_loop_configuration_group { - size_t configuration_count; - const struct aws_event_loop_configuration *configurations; + /** + * Override for the clock function that event loops should use. Defaults to the system's high resolution + * timer. + * + * Do not bind this value to managed code; it is only used in timing-sensitive tests. + */ + aws_io_clock_fn *clock_override; }; -AWS_EXTERN_C_BEGIN - -#ifdef AWS_USE_IO_COMPLETION_PORTS -/** - * Prepares aws_overlapped for use, and sets a function to call when the overlapped operation completes. - */ -AWS_IO_API -void aws_overlapped_init( - struct aws_overlapped *overlapped, - aws_event_loop_on_completion_fn *on_completion, - void *user_data); - -/** - * Prepares aws_overlapped for re-use without changing the assigned aws_event_loop_on_completion_fn. - * Call aws_overlapped_init(), instead of aws_overlapped_reset(), to change the aws_event_loop_on_completion_fn. 
- */ -AWS_IO_API -void aws_overlapped_reset(struct aws_overlapped *overlapped); - -/** - * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions - */ -AWS_IO_API -struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped); -#endif /* AWS_USE_IO_COMPLETION_PORTS */ - /** - * Get available event-loop configurations, this will return each available event-loop implementation for the current - * running system */ -AWS_IO_API const struct aws_event_loop_configuration_group *aws_event_loop_get_available_configurations(void); - -/** - * Creates an instance of the default event loop implementation for the current architecture and operating system. - */ -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock); - -/** - * Creates an instance of the default event loop implementation for the current architecture and operating system using - * extendable options. - */ -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_default_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); - -// TODO: Currently, we do not allow runtime switch between different event loop configurations. -// When that's enabled, we should expose or condense all these def specific function APIs and not -// make them defined specific. Consolidation of them should work and branched logic within due to -// all the arguments being the same. Let's move away from different API based on framework and -// instead raise an unsupported platform error or simply use branching in implementation. 
-#ifdef AWS_USE_IO_COMPLETION_PORTS -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_iocp_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); -#endif /* AWS_USE_IO_COMPLETION_PORTS */ - -#ifdef AWS_USE_DISPATCH_QUEUE -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_dispatch_queue_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); -#endif /* AWS_USE_DISPATCH_QUEUE */ - -#ifdef AWS_USE_KQUEUE -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_kqueue_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); -#endif /* AWS_USE_KQUEUE */ - -#ifdef AWS_USE_EPOLL -AWS_IO_API -struct aws_event_loop *aws_event_loop_new_epoll_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options); -#endif /* AWS_USE_EPOLL */ - -/** - * Invokes the destroy() fn for the event loop implementation. - * If the event loop is still in a running state, this function will block waiting on the event loop to shutdown. - * If you do not want this function to block, call aws_event_loop_stop() manually first. - * If the event loop is shared by multiple threads then destroy must be called by exactly one thread. All other threads - * must ensure their API calls to the event loop happen-before the call to destroy. - */ -AWS_IO_API -void aws_event_loop_destroy(struct aws_event_loop *event_loop); - -/** - * Initializes common event-loop data structures. - * This is only called from the *new() function of event loop implementations. - */ -AWS_IO_API -int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock); - -/** - * Common cleanup code for all implementations. - * This is only called from the *destroy() function of event loop implementations. 
- */ -AWS_IO_API -void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop); - -/** - * Fetches an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to - * by key. This function is not thread safe and should be called inside the event-loop's thread. - */ -AWS_IO_API -int aws_event_loop_fetch_local_object( - struct aws_event_loop *event_loop, - void *key, - struct aws_event_loop_local_object *obj); - -/** - * Puts an item object the event-loop's data store. Key will be taken as the memory address of the memory pointed to by - * key. The lifetime of item must live until remove or a put item overrides it. This function is not thread safe and - * should be called inside the event-loop's thread. - */ -AWS_IO_API -int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj); - -/** - * Removes an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to - * by key. If removed_item is not null, the removed item will be moved to it if it exists. Otherwise, the default - * deallocation strategy will be used. This function is not thread safe and should be called inside the event-loop's - * thread. - */ -AWS_IO_API -int aws_event_loop_remove_local_object( - struct aws_event_loop *event_loop, - void *key, - struct aws_event_loop_local_object *removed_obj); - -/** - * Triggers the running of the event loop. This function must not block. The event loop is not active until this - * function is invoked. This function can be called again on an event loop after calling aws_event_loop_stop() and - * aws_event_loop_wait_for_stop_completion(). - */ -AWS_IO_API -int aws_event_loop_run(struct aws_event_loop *event_loop); - -/** - * Triggers the event loop to stop, but does not wait for the loop to stop completely. - * This function may be called from outside or inside the event loop thread. It is safe to call multiple times. 
- * This function is called from destroy(). - * - * If you do not call destroy(), an event loop can be run again by calling stop(), wait_for_stop_completion(), run(). - */ -AWS_IO_API -int aws_event_loop_stop(struct aws_event_loop *event_loop); - -/** - * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the - * event-loop load balancer to take into account load when vending another event-loop to a caller. - * - * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks. - */ -AWS_IO_API -void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop); - -/** - * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the - * event-loop load balancer to take into account load when vending another event-loop to a caller. + * @internal - Don't use outside of testing. * - * Call this function at the end of your event-loop tick: after processing IO and tasks. + * Return the default event loop type. If the return value is `AWS_EVENT_LOOP_PLATFORM_DEFAULT`, the function failed to + * retrieve the default type value. + * If `aws_event_loop_override_default_type` has been called, return the override default type. */ AWS_IO_API -void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop); +enum aws_event_loop_type aws_event_loop_get_default_type(void); -/** - * Returns the current load factor (however that may be calculated). If the event-loop is not invoking - * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0. - */ -AWS_IO_API -size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop); - -/** - * Blocks until the event loop stops completely. - * If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop(). - * It is not safe to call this function from inside the event loop thread. 
- */ -AWS_IO_API -int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop); +AWS_EXTERN_C_BEGIN /** * The event loop will schedule the task and run it on the event loop thread as soon as possible. @@ -383,108 +149,62 @@ AWS_IO_API void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); /** - * Associates an aws_io_handle with the event loop's I/O Completion Port. - * - * The handle must use aws_overlapped for all async operations requiring an OVERLAPPED struct. - * When the operation completes, the aws_overlapped's completion function will run on the event loop thread. - * Note that completion functions will not be invoked while the event loop is stopped. Users should wait for all async - * operations on connected handles to complete before cleaning up or destroying the event loop. - * - * A handle may only be connected to one event loop in its lifetime. + * Returns true if the event loop's thread is the same thread that called this function, otherwise false. */ AWS_IO_API -int aws_event_loop_connect_handle_to_completion_port(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop); /** - * Subscribes on_event to events on the event-loop for handle. events is a bitwise concatenation of the events that were - * received. The definition for these values can be found in aws_io_event_type. Currently, only - * AWS_IO_EVENT_TYPE_READABLE and AWS_IO_EVENT_TYPE_WRITABLE are honored. You always are registered for error conditions - * and closure. This function may be called from outside or inside the event loop thread. However, the unsubscribe - * function must be called inside the event-loop's thread. + * Gets the current timestamp for the event loop's clock, in nanoseconds. This function is thread-safe. 
*/ AWS_IO_API -int aws_event_loop_subscribe_to_io_events( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - aws_event_loop_on_event_fn *on_event, - void *user_data); +int aws_event_loop_current_clock_time(const struct aws_event_loop *event_loop, uint64_t *time_nanos); /** - * Unsubscribes handle from event-loop notifications. - * This function is not thread safe and should be called inside the event-loop's thread. - * - * NOTE: if you are using io completion ports, this is a risky call. We use it in places, but only when we're certain - * there's no pending events. If you want to use it, it's your job to make sure you don't have pending events before - * calling it. + * Creation function for event loop groups. */ AWS_IO_API -int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +struct aws_event_loop_group *aws_event_loop_group_new( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options); /** - * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. This should only - * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already - * been joined. + * Increments the reference count on the event loop group, allowing the caller to take a reference to it. + * + * Returns the same event loop group passed in. */ AWS_IO_API -void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group); /** - * Returns true if the event loop's thread is the same thread that called this function, otherwise false. + * Decrements an event loop group's ref count. When the ref count drops to zero, the event loop group will be + * destroyed. 
*/ AWS_IO_API -bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop); +void aws_event_loop_group_release(struct aws_event_loop_group *el_group); /** - * Gets the current timestamp for the event loop's clock, in nanoseconds. This function is thread-safe. + * Returns the event loop at a particular index. If the index is out of bounds, null is returned. */ AWS_IO_API -int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos); +struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index); /** - * Creates an event loop group, with clock, number of loops to manage, and the function to call for creating a new - * event loop. + * Gets the number of event loops managed by an event loop group. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options); +size_t aws_event_loop_group_get_loop_count(const struct aws_event_loop_group *el_group); /** - * Creates an event loop group, with specified event loop configuration, max threads and shutdown options. - * If max_threads == 0, then the loop count will be the number of available processors on the machine / 2 (to exclude - * hyper-threads). Otherwise, max_threads will be the number of event loops in the group. - */ -AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_new_from_config( - struct aws_allocator *allocator, - const struct aws_event_loop_configuration *config, - uint16_t max_threads, - const struct aws_shutdown_callback_options *shutdown_options); - -/** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new - * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). 
Note: - * If el_count exceeds the number of hw threads in the cpu_group it will be ignored on the assumption that if you - * care about NUMA, you don't want hyper-threads doing your IO and you especially don't want IO on a different node. + * Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how + * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm + * based on the load factor of each loop. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options); +struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); /** - * Initializes an event loop group with platform defaults. If max_threads == 0, then the - * loop count will be the number of available processors on the machine / 2 (to exclude hyper-threads). - * Otherwise, max_threads will be the number of event loops in the group. + * @deprecated - use aws_event_loop_group_new() instead */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default( @@ -492,14 +212,8 @@ struct aws_event_loop_group *aws_event_loop_group_new_default( uint16_t max_threads, const struct aws_shutdown_callback_options *shutdown_options); -/** Creates an event loop group, with clock, number of loops to manage, the function to call for creating a new - * event loop, and also pins all loops to hw threads on the same cpu_group (e.g. NUMA nodes). Note: - * If el_count exceeds the number of hw threads in the cpu_group it will be clamped to the number of hw threads - * on the assumption that if you care about NUMA, you don't want hyper-threads doing your IO and you especially - * don't want IO on a different node. 
- * - * If max_threads == 0, then the - * loop count will be the number of available processors in the cpu_group / 2 (to exclude hyper-threads) +/** + * @deprecated - use aws_event_loop_group_new() instead */ AWS_IO_API struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( @@ -509,41 +223,49 @@ struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_grou const struct aws_shutdown_callback_options *shutdown_options); /** - * Increments the reference count on the event loop group, allowing the caller to take a reference to it. + * @internal - Don't use outside of testing. * - * Returns the same event loop group passed in. + * Returns the opaque internal user data of an event loop. Can be cast into a specific implementation by + * privileged consumers. */ AWS_IO_API -struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group); +void *aws_event_loop_get_impl(struct aws_event_loop *event_loop); /** - * Decrements an event loop group's ref count. When the ref count drops to zero, the event loop group will be - * destroyed. + * @internal - Don't use outside of testing. + * + * Initializes the base structure used by all event loop implementations with test-oriented overrides. */ AWS_IO_API -void aws_event_loop_group_release(struct aws_event_loop_group *el_group); +struct aws_event_loop *aws_event_loop_new_base( + struct aws_allocator *allocator, + aws_io_clock_fn *clock, + struct aws_event_loop_vtable *vtable, + void *impl); /** - * Return the event loop style. + * @internal - Don't use outside of testing. + * + * Common cleanup code for all implementations. + * This is only called from the *destroy() function of event loop implementations. 
*/ AWS_IO_API -enum aws_event_loop_style aws_event_loop_group_get_style(struct aws_event_loop_group *el_group); - -AWS_IO_API -struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index); - -AWS_IO_API -size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group); +void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop); /** - * Fetches the next loop for use. The purpose is to enable load balancing across loops. You should not depend on how - * this load balancing is done as it is subject to change in the future. Currently it uses the "best-of-two" algorithm - * based on the load factor of each loop. + * @internal - Don't use outside of testing. + * + * Invokes the destroy() fn for the event loop implementation. + * If the event loop is still in a running state, this function will block waiting on the event loop to shutdown. + * If you do not want this function to block, call aws_event_loop_stop() manually first. + * If the event loop is shared by multiple threads then destroy must be called by exactly one thread. All other threads + * must ensure their API calls to the event loop happen-before the call to destroy. 
*/ AWS_IO_API -struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group); +void aws_event_loop_destroy(struct aws_event_loop *event_loop); AWS_EXTERN_C_END + AWS_POP_SANE_WARNING_LEVEL #endif /* AWS_IO_EVENT_LOOP_H */ diff --git a/include/aws/io/io.h b/include/aws/io/io.h index 628d84f78..d35975a37 100644 --- a/include/aws/io/io.h +++ b/include/aws/io/io.h @@ -16,10 +16,8 @@ AWS_PUSH_SANE_WARNING_LEVEL struct aws_io_handle; -#ifdef AWS_USE_DISPATCH_QUEUE typedef void aws_io_set_queue_on_handle_fn(struct aws_io_handle *handle, void *queue); typedef void aws_io_clear_queue_on_handle_fn(struct aws_io_handle *handle); -#endif /* AWS_USE_DISPATCH_QUEUE */ struct aws_io_handle { union { @@ -28,10 +26,8 @@ struct aws_io_handle { void *handle; } data; void *additional_data; -#ifdef AWS_USE_DISPATCH_QUEUE aws_io_set_queue_on_handle_fn *set_queue; aws_io_clear_queue_on_handle_fn *clear_queue; -#endif /* AWS_USE_DISPATCH_QUEUE */ }; enum aws_io_message_type { diff --git a/include/aws/io/private/event_loop_impl.h b/include/aws/io/private/event_loop_impl.h new file mode 100644 index 000000000..ac5318a3c --- /dev/null +++ b/include/aws/io/private/event_loop_impl.h @@ -0,0 +1,313 @@ +#ifndef AWS_IO_EVENT_LOOP_IMPL_H +#define AWS_IO_EVENT_LOOP_IMPL_H + +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include +#include +#include +#include + +AWS_PUSH_SANE_WARNING_LEVEL + +struct aws_event_loop; +struct aws_overlapped; + +typedef void(aws_event_loop_on_completion_fn)( + struct aws_event_loop *event_loop, + struct aws_overlapped *overlapped, + int status_code, + size_t num_bytes_transferred); + +/** + * The aws_win32_OVERLAPPED struct is layout-compatible with OVERLAPPED as defined in . It is used + * here to avoid pulling in a dependency on which would also bring along a lot of bad macros, such + * as redefinitions of GetMessage and GetObject. 
Note that the OVERLAPPED struct layout in the Windows SDK can + * never be altered without breaking binary compatibility for every existing third-party executable, so there + * is no need to worry about keeping this definition in sync. + */ +struct aws_win32_OVERLAPPED { + uintptr_t Internal; + uintptr_t InternalHigh; + union { + struct { + uint32_t Offset; + uint32_t OffsetHigh; + } s; + void *Pointer; + } u; + void *hEvent; +}; + +/** + * Use aws_overlapped when a handle connected to the event loop needs an OVERLAPPED struct. + * OVERLAPPED structs are needed to make OS-level async I/O calls. + * When the I/O completes, the assigned aws_event_loop_on_completion_fn is called from the event_loop's thread. + * While the I/O is pending, it is not safe to modify or delete aws_overlapped. + * Call aws_overlapped_init() before first use. If the aws_overlapped will be used multiple times, call + * aws_overlapped_reset() or aws_overlapped_init() between uses. + */ +struct aws_overlapped { + struct aws_win32_OVERLAPPED overlapped; + aws_event_loop_on_completion_fn *on_completion; + void *user_data; +}; + +enum aws_io_event_type { + AWS_IO_EVENT_TYPE_READABLE = 1, + AWS_IO_EVENT_TYPE_WRITABLE = 2, + AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4, + AWS_IO_EVENT_TYPE_CLOSED = 8, + AWS_IO_EVENT_TYPE_ERROR = 16, +}; + +struct aws_event_loop { + struct aws_event_loop_vtable *vtable; + struct aws_allocator *alloc; + aws_io_clock_fn *clock; + struct aws_hash_table local_data; + struct aws_atomic_var current_load_factor; + uint64_t latest_tick_start; + size_t current_tick_latency_sum; + struct aws_atomic_var next_flush_time; + void *impl_data; +}; + +struct aws_event_loop_local_object; +typedef void(aws_event_loop_on_local_object_removed_fn)(struct aws_event_loop_local_object *); + +struct aws_event_loop_local_object { + const void *key; + void *object; + aws_event_loop_on_local_object_removed_fn *on_object_removed; +}; + +struct aws_event_loop_options { + aws_io_clock_fn *clock; + struct 
aws_thread_options *thread_options; + + /** + * Event loop type. If the event loop type is set to AWS_EVENT_LOOP_PLATFORM_DEFAULT, the + * creation function will automatically use the platform’s default event loop type. + */ + enum aws_event_loop_type type; +}; + +struct aws_event_loop *aws_event_loop_new_iocp_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options); +struct aws_event_loop *aws_event_loop_new_dispatch_queue_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options); +struct aws_event_loop *aws_event_loop_new_kqueue_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options); +struct aws_event_loop *aws_event_loop_new_epoll_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options); + +typedef struct aws_event_loop *(aws_new_event_loop_fn)(struct aws_allocator *alloc, + const struct aws_event_loop_options *options, + void *new_loop_user_data); + +struct aws_event_loop_group { + struct aws_allocator *allocator; + struct aws_array_list event_loops; + struct aws_ref_count ref_count; + struct aws_shutdown_callback_options shutdown_options; +}; + +AWS_EXTERN_C_BEGIN + +#ifdef AWS_ENABLE_IO_COMPLETION_PORTS + +/** + * Prepares aws_overlapped for use, and sets a function to call when the overlapped operation completes. + */ +AWS_IO_API +void aws_overlapped_init( + struct aws_overlapped *overlapped, + aws_event_loop_on_completion_fn *on_completion, + void *user_data); + +/** + * Prepares aws_overlapped for re-use without changing the assigned aws_event_loop_on_completion_fn. + * Call aws_overlapped_init(), instead of aws_overlapped_reset(), to change the aws_event_loop_on_completion_fn. 
+ */ +AWS_IO_API +void aws_overlapped_reset(struct aws_overlapped *overlapped); + +/** + * Casts an aws_overlapped pointer for use as a LPOVERLAPPED parameter to Windows API functions + */ +AWS_IO_API +struct _OVERLAPPED *aws_overlapped_to_windows_overlapped(struct aws_overlapped *overlapped); +#endif /* AWS_ENABLE_IO_COMPLETION_PORTS */ + +/** + * Associates an aws_io_handle with the event loop's I/O Completion Port. + * + * The handle must use aws_overlapped for all async operations requiring an OVERLAPPED struct. + * When the operation completes, the aws_overlapped's completion function will run on the event loop thread. + * Note that completion functions will not be invoked while the event loop is stopped. Users should wait for all async + * operations on connected handles to complete before cleaning up or destroying the event loop. + * + * A handle may only be connected to one event loop in its lifetime. + */ +AWS_IO_API +int aws_event_loop_connect_handle_to_io_completion_port( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle); + +/** + * Subscribes on_event to events on the event-loop for handle. events is a bitwise concatenation of the events that were + * received. The definition for these values can be found in aws_io_event_type. Currently, only + * AWS_IO_EVENT_TYPE_READABLE and AWS_IO_EVENT_TYPE_WRITABLE are honored. You always are registered for error conditions + * and closure. This function may be called from outside or inside the event loop thread. However, the unsubscribe + * function must be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_subscribe_to_io_events( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data); + +/** + * Creates an instance of the default event loop implementation for the current architecture and operating system. 
+ */ +AWS_IO_API +struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock); + +/** + * Creates an instance of the default event loop implementation for the current architecture and operating system using + * extendable options. + */ +AWS_IO_API +struct aws_event_loop *aws_event_loop_new(struct aws_allocator *alloc, const struct aws_event_loop_options *options); + +/** + * Initializes common event-loop data structures. + * This is only called from the *new() function of event loop implementations. + */ +AWS_IO_API +int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock); + +/** + * Fetches an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to + * by key. This function is not thread safe and should be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_fetch_local_object( + struct aws_event_loop *event_loop, + void *key, + struct aws_event_loop_local_object *obj); + +/** + * Puts an item object the event-loop's data store. Key will be taken as the memory address of the memory pointed to by + * key. The lifetime of item must live until remove or a put item overrides it. This function is not thread safe and + * should be called inside the event-loop's thread. + */ +AWS_IO_API +int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj); + +/** + * Removes an object from the event-loop's data store. Key will be taken as the memory address of the memory pointed to + * by key. If removed_item is not null, the removed item will be moved to it if it exists. Otherwise, the default + * deallocation strategy will be used. This function is not thread safe and should be called inside the event-loop's + * thread. 
+ */ +AWS_IO_API +int aws_event_loop_remove_local_object( + struct aws_event_loop *event_loop, + void *key, + struct aws_event_loop_local_object *removed_obj); + +/** + * Triggers the running of the event loop. This function must not block. The event loop is not active until this + * function is invoked. This function can be called again on an event loop after calling aws_event_loop_stop() and + * aws_event_loop_wait_for_stop_completion(). + */ +AWS_IO_API +int aws_event_loop_run(struct aws_event_loop *event_loop); + +/** + * Triggers the event loop to stop, but does not wait for the loop to stop completely. + * This function may be called from outside or inside the event loop thread. It is safe to call multiple times. + * This function is called from destroy(). + * + * If you do not call destroy(), an event loop can be run again by calling stop(), wait_for_stop_completion(), run(). + */ +AWS_IO_API +int aws_event_loop_stop(struct aws_event_loop *event_loop); + +/** + * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the + * event-loop load balancer to take into account load when vending another event-loop to a caller. + * + * Call this function at the beginning of your event-loop tick: after wake-up, but before processing any IO or tasks. + */ +AWS_IO_API +void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop); + +/** + * For event-loop implementations to use for providing metrics info to the base event-loop. This enables the + * event-loop load balancer to take into account load when vending another event-loop to a caller. + * + * Call this function at the end of your event-loop tick: after processing IO and tasks. + */ +AWS_IO_API +void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop); + +/** + * Returns the current load factor (however that may be calculated). 
If the event-loop is not invoking + * aws_event_loop_register_tick_start() and aws_event_loop_register_tick_end(), this value will always be 0. + */ +AWS_IO_API +size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop); + +/** + * Blocks until the event loop stops completely. + * If you want to call aws_event_loop_run() again, you must call this after aws_event_loop_stop(). + * It is not safe to call this function from inside the event loop thread. + */ +AWS_IO_API +int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop); + +/** + * Unsubscribes handle from event-loop notifications. + * This function is not thread safe and should be called inside the event-loop's thread. + * + * NOTE: if you are using io completion ports, this is a risky call. We use it in places, but only when we're certain + * there's no pending events. If you want to use it, it's your job to make sure you don't have pending events before + * calling it. + */ +AWS_IO_API +int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + +/** + * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. This should only + * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already + * been joined. 
+ */ +AWS_IO_API +void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + +AWS_IO_API +struct aws_event_loop_group *aws_event_loop_group_new_internal( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options, + aws_new_event_loop_fn *new_loop_fn, + void *new_loop_user_data); + +AWS_EXTERN_C_END + +AWS_POP_SANE_WARNING_LEVEL + +#endif /* AWS_IO_EVENT_LOOP_IMPL_H */ diff --git a/include/aws/io/socket.h b/include/aws/io/socket.h index e8eb874f8..0878ef4af 100644 --- a/include/aws/io/socket.h +++ b/include/aws/io/socket.h @@ -31,11 +31,30 @@ enum aws_socket_type { AWS_SOCKET_DGRAM, }; +/** + * Socket Implementation type. Decides which socket implementation is used. If set to + * `AWS_SOCKET_IMPL_PLATFORM_DEFAULT`, it will automatically use the platform’s default. + * + * PLATFORM DEFAULT SOCKET IMPLEMENTATION TYPE + * Linux | AWS_SOCKET_IMPL_POSIX + * Windows | AWS_SOCKET_IMPL_WINSOCK + * BSD Variants| AWS_SOCKET_IMPL_POSIX + * MacOS | AWS_SOCKET_IMPL_POSIX + * iOS | AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK + */ +enum aws_socket_impl_type { + AWS_SOCKET_IMPL_PLATFORM_DEFAULT = 0, + AWS_SOCKET_IMPL_POSIX, + AWS_SOCKET_IMPL_WINSOCK, + AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK, +}; + #define AWS_NETWORK_INTERFACE_NAME_MAX 16 struct aws_socket_options { enum aws_socket_type type; enum aws_socket_domain domain; + enum aws_socket_impl_type impl_type; uint32_t connect_timeout_ms; /* Keepalive properties are TCP only. * Set keepalive true to periodically transmit messages for detecting a disconnected peer. @@ -91,7 +110,7 @@ typedef void(aws_socket_retrieve_tls_options_fn)(struct tls_connection_context * * A user may want to call aws_socket_set_options() on the new socket if different options are desired. * * new_socket is not yet assigned to an event-loop. The user should call aws_socket_assign_to_event_loop() before - * performing IO operations. 
The user is resposnbile to releasing the socket memory after use. + * performing IO operations. The user must call `aws_socket_release()` when they're done with the socket, to free it. * * When error_code is AWS_ERROR_SUCCESS, new_socket is the recently accepted connection. * If error_code is non-zero, an error occurred and you should aws_socket_close() the socket. @@ -108,7 +127,8 @@ typedef void(aws_socket_on_accept_result_fn)( * Callback for when the data passed to a call to aws_socket_write() has either completed or failed. * On success, error_code will be AWS_ERROR_SUCCESS. * - * socket is possible to be a NULL pointer in the callback. + * `socket` may be NULL in the callback if the socket is released and cleaned up before a callback is triggered + * by the system I/O handler. */ typedef void( aws_socket_on_write_completed_fn)(struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data); @@ -185,7 +205,6 @@ struct aws_socket { struct aws_event_loop *event_loop; struct aws_channel_handler *handler; int state; - enum aws_event_loop_style event_loop_style; aws_socket_on_readable_fn *readable_fn; void *readable_user_data; aws_socket_on_connection_result_fn *connection_result_fn; @@ -205,6 +224,21 @@ aws_ms_fn_ptr aws_winsock_get_connectex_fn(void); aws_ms_fn_ptr aws_winsock_get_acceptex_fn(void); #endif +int aws_socket_init_posix( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options); + +int aws_socket_init_winsock( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options); + +int aws_socket_init_apple_nw_socket( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options); + AWS_EXTERN_C_BEGIN /** @@ -232,10 +266,9 @@ AWS_IO_API void aws_socket_clean_up(struct aws_socket *socket); * In TCP, LOCAL and VSOCK this function will not block.
If the return value is successful, then you must wait on the * `on_connection_result()` callback to be invoked before using the socket. * - * The function will failed with error if the endpoint is invalid. Except for Apple Network Framework (with - * AWS_USE_DISPATCH_QUEUE enabled). In Apple network framework, as connect is an async api, we would not know if the - * local endpoint is valid until we have the connection state returned in callback. The error will returned in - * `on_connection_result` callback + * The function will fail with error if the endpoint is invalid, except for Apple Network Framework. In Apple network + * framework, as connect is an async api, we would not know if the local endpoint is valid until we have the connection + * state returned in callback. The error will be returned in `on_connection_result` callback * * If an event_loop is provided for UDP sockets, a notification will be sent on * on_connection_result in the event-loop's thread. Upon completion, the socket will already be assigned diff --git a/include/aws/io/tls_channel_handler.h b/include/aws/io/tls_channel_handler.h index bd25a1a6b..ea014ed02 100644 --- a/include/aws/io/tls_channel_handler.h +++ b/include/aws/io/tls_channel_handler.h @@ -34,14 +34,14 @@ enum aws_tls_cipher_pref { /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_02 = 3, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_SIKE_TLSv1_0_2020_02 = 4, /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_KMS_PQ_TLSv1_0_2020_07 = 5, + /* Deprecated */ AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05 = 6, /* - * This TLS cipher preference list contains post-quantum key exchange algorithms that have been submitted to NIST - * for potential future standardization. Support for this preference list, or PQ algorithms present in it, may be - * removed at any time in the future. PQ algorithms in this preference list will be used in hybrid mode, and always - * combined with a classical ECDHE key exchange.
+ * This TLS cipher preference list contains post-quantum key exchange algorithms that have been standardized by + * NIST. PQ algorithms in this preference list will be used in hybrid mode, and always combined with a classical + * ECDHE key exchange. */ - AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05 = 6, + AWS_IO_TLS_CIPHER_PREF_PQ_TLSV1_2_2024_10 = 7, AWS_IO_TLS_CIPHER_PREF_END_RANGE = 0xFFFF }; diff --git a/include/aws/testing/io_testing_channel.h b/include/aws/testing/io_testing_channel.h index d2f1c13a5..8fa118ca4 100644 --- a/include/aws/testing/io_testing_channel.h +++ b/include/aws/testing/io_testing_channel.h @@ -13,6 +13,7 @@ #include struct testing_loop { + struct aws_allocator *allocator; struct aws_task_scheduler scheduler; bool mock_on_callers_thread; }; @@ -33,7 +34,7 @@ static int s_testing_loop_wait_for_stop_completion(struct aws_event_loop *event_ } static void s_testing_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { - struct testing_loop *testing_loop = event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_schedule_now(&testing_loop->scheduler, task); } @@ -42,26 +43,27 @@ static void s_testing_loop_schedule_task_future( struct aws_task *task, uint64_t run_at_nanos) { - struct testing_loop *testing_loop = event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_schedule_future(&testing_loop->scheduler, task, run_at_nanos); } static void s_testing_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { - struct testing_loop *testing_loop = event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); aws_task_scheduler_cancel_task(&testing_loop->scheduler, task); } static bool s_testing_loop_is_on_callers_thread(struct aws_event_loop *event_loop) { - struct testing_loop 
*testing_loop = event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); return testing_loop->mock_on_callers_thread; } static void s_testing_loop_destroy(struct aws_event_loop *event_loop) { - struct testing_loop *testing_loop = event_loop->impl_data; + struct testing_loop *testing_loop = (struct testing_loop *)aws_event_loop_get_impl(event_loop); + struct aws_allocator *allocator = testing_loop->allocator; aws_task_scheduler_clean_up(&testing_loop->scheduler); - aws_mem_release(event_loop->alloc, testing_loop); + aws_mem_release(allocator, testing_loop); aws_event_loop_clean_up_base(event_loop); - aws_mem_release(event_loop->alloc, event_loop); + aws_mem_release(allocator, event_loop); } static struct aws_event_loop_vtable s_testing_loop_vtable = { @@ -76,16 +78,14 @@ static struct aws_event_loop_vtable s_testing_loop_vtable = { }; static struct aws_event_loop *s_testing_loop_new(struct aws_allocator *allocator, aws_io_clock_fn clock) { - struct aws_event_loop *event_loop = aws_mem_acquire(allocator, sizeof(struct aws_event_loop)); - aws_event_loop_init_base(event_loop, allocator, clock); + struct testing_loop *testing_loop = + (struct testing_loop *)aws_mem_calloc(allocator, 1, sizeof(struct testing_loop)); - struct testing_loop *testing_loop = aws_mem_calloc(allocator, 1, sizeof(struct testing_loop)); aws_task_scheduler_init(&testing_loop->scheduler, allocator); testing_loop->mock_on_callers_thread = true; - event_loop->impl_data = testing_loop; - event_loop->vtable = &s_testing_loop_vtable; + testing_loop->allocator = allocator; - return event_loop; + return aws_event_loop_new_base(allocator, clock, &s_testing_loop_vtable, testing_loop); } typedef void(testing_channel_handler_on_shutdown_fn)( @@ -113,7 +113,7 @@ static int s_testing_channel_handler_process_read_message( (void)slot; (void)message; - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler 
*testing_handler = (struct testing_channel_handler *)handler->impl; aws_linked_list_push_back(&testing_handler->messages, &message->queueing_handle); return AWS_OP_SUCCESS; } @@ -124,7 +124,7 @@ static int s_testing_channel_handler_process_write_message( struct aws_io_message *message) { (void)slot; - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; aws_linked_list_push_back(&testing_handler->messages, &message->queueing_handle); /* Invoke completion callback if this is the left-most handler */ @@ -142,7 +142,7 @@ static int s_testing_channel_handler_increment_read_window( size_t size) { (void)slot; - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; testing_handler->latest_window_update = size; return AWS_OP_SUCCESS; } @@ -154,7 +154,7 @@ static int s_testing_channel_handler_shutdown( int error_code, bool free_scarce_resources_immediately) { - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; /* If user has registered a callback, invoke it */ if (testing_handler->on_shutdown) { @@ -183,7 +183,7 @@ static int s_testing_channel_handler_shutdown( } static size_t s_testing_channel_handler_initial_window_size(struct aws_channel_handler *handler) { - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; return testing_handler->initial_window; } @@ -193,7 +193,7 @@ static size_t s_testing_channel_handler_message_overhead(struct aws_channel_hand } static void s_testing_channel_handler_destroy(struct aws_channel_handler *handler) { - struct testing_channel_handler *testing_handler = handler->impl; + struct 
testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; while (!aws_linked_list_empty(&testing_handler->messages)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&testing_handler->messages); @@ -206,7 +206,7 @@ static void s_testing_channel_handler_destroy(struct aws_channel_handler *handle } static void s_testing_channel_handler_reset_statistics(struct aws_channel_handler *handler) { - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; aws_crt_statistics_socket_reset(&testing_handler->stats); } @@ -214,7 +214,7 @@ static void s_testing_channel_handler_reset_statistics(struct aws_channel_handle static void s_testing_channel_handler_gather_statistics( struct aws_channel_handler *handler, struct aws_array_list *stats) { - struct testing_channel_handler *testing_handler = handler->impl; + struct testing_channel_handler *testing_handler = (struct testing_channel_handler *)handler->impl; void *stats_base = &testing_handler->stats; aws_array_list_push_back(stats, &stats_base); @@ -235,9 +235,10 @@ static struct aws_channel_handler_vtable s_testing_channel_handler_vtable = { static struct aws_channel_handler *s_new_testing_channel_handler( struct aws_allocator *allocator, size_t initial_window) { - struct aws_channel_handler *handler = aws_mem_calloc(allocator, 1, sizeof(struct aws_channel_handler)); + struct aws_channel_handler *handler = + (struct aws_channel_handler *)aws_mem_calloc(allocator, 1, sizeof(struct aws_channel_handler)); struct testing_channel_handler *testing_handler = - aws_mem_calloc(allocator, 1, sizeof(struct testing_channel_handler)); + (struct testing_channel_handler *)aws_mem_calloc(allocator, 1, sizeof(struct testing_channel_handler)); aws_linked_list_init(&testing_handler->messages); testing_handler->initial_window = initial_window; testing_handler->latest_window_update = 0; @@ 
-270,14 +271,14 @@ struct testing_channel { static void s_testing_channel_on_setup_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; - struct testing_channel *testing = user_data; + struct testing_channel *testing = (struct testing_channel *)user_data; testing->channel_setup_completed = true; } static void s_testing_channel_on_shutdown_completed(struct aws_channel *channel, int error_code, void *user_data) { (void)channel; (void)error_code; - struct testing_channel *testing = user_data; + struct testing_channel *testing = (struct testing_channel *)user_data; testing->channel_shutdown_completed = true; testing->channel_shutdown_error_code = error_code; @@ -393,7 +394,7 @@ static inline int testing_channel_init( AWS_ZERO_STRUCT(*testing); testing->loop = s_testing_loop_new(allocator, options->clock_fn); - testing->loop_impl = testing->loop->impl_data; + testing->loop_impl = (struct testing_loop *)aws_event_loop_get_impl(testing->loop); struct aws_channel_options args = { .on_setup_completed = s_testing_channel_on_setup_completed, @@ -411,8 +412,9 @@ static inline int testing_channel_init( ASSERT_TRUE(testing->channel_setup_completed); testing->left_handler_slot = aws_channel_slot_new(testing->channel); - struct aws_channel_handler *handler = s_new_testing_channel_handler(allocator, 16 * 1024); - testing->left_handler_impl = handler->impl; + struct aws_channel_handler *handler = + (struct aws_channel_handler *)s_new_testing_channel_handler(allocator, 16 * 1024); + testing->left_handler_impl = (struct testing_channel_handler *)handler->impl; ASSERT_SUCCESS(aws_channel_slot_set_handler(testing->left_handler_slot, handler)); return AWS_OP_SUCCESS; @@ -445,7 +447,7 @@ static inline int testing_channel_install_downstream_handler(struct testing_chan struct aws_channel_handler *handler = s_new_testing_channel_handler(testing->left_handler_slot->alloc, initial_window); ASSERT_NOT_NULL(handler); - 
testing->right_handler_impl = handler->impl; + testing->right_handler_impl = (struct testing_channel_handler *)handler->impl; ASSERT_SUCCESS(aws_channel_slot_set_handler(testing->right_handler_slot, handler)); return AWS_OP_SUCCESS; diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 981cedf73..0cd2a04bc 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -5,14 +5,14 @@ #include -#include - #include #include #include #include #include #include +#include +#include #if defined(__FreeBSD__) || defined(__NetBSD__) # define __BSD_VISIBLE 1 @@ -124,14 +124,14 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .wait_for_stop_completion = s_wait_for_stop_completion, .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, - .register_style.subscribe_to_io_events = s_subscribe_to_io_events, - .event_loop_style = AWS_EVENT_LOOP_STYLE_POLL_BASED, + .subscribe_to_io_events = s_subscribe_to_io_events, .cancel_task = s_cancel_task, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_event_thread, }; +#ifdef AWS_ENABLE_KQUEUE struct aws_event_loop *aws_event_loop_new_kqueue_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { @@ -292,6 +292,7 @@ struct aws_event_loop *aws_event_loop_new_kqueue_with_options( } return NULL; } +#endif // AWS_ENABLE_KQUEUE static void s_destroy(struct aws_event_loop *event_loop) { AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: destroying event_loop", (void *)event_loop); diff --git a/source/channel.c b/source/channel.c index e9cc835a8..bd594cf4d 100644 --- a/source/channel.c +++ b/source/channel.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifdef _MSC_VER diff --git a/include/aws/io/private/aws_apple_network_framework.h b/source/darwin/aws_apple_network_framework.h similarity index 88% rename from 
include/aws/io/private/aws_apple_network_framework.h rename to source/darwin/aws_apple_network_framework.h index d04c1f969..348316e7f 100644 --- a/include/aws/io/private/aws_apple_network_framework.h +++ b/source/darwin/aws_apple_network_framework.h @@ -5,14 +5,13 @@ * SPDX-License-Identifier: Apache-2.0. */ +#include +#include #include -#ifdef AWS_OS_APPLE -/* It's ok to include external headers because this is a PRIVATE header file */ -# include -# include -# include -# include +/* This header will only be compiled on Apple platforms where these are available. */ +#include +#include struct secure_transport_ctx { struct aws_tls_ctx ctx; @@ -66,6 +65,4 @@ struct dispatch_loop { bool is_destroying; }; -#endif /* AWS_OS_APPLE */ - #endif /* #ifndef AWS_IO_PRIVATE_AWS_APPLE_NETWORK_FRAMEWORK_H */ diff --git a/source/darwin/dispatch_queue_event_loop.c b/source/darwin/dispatch_queue_event_loop.c index d4a62a9b2..fdafc099a 100644 --- a/source/darwin/dispatch_queue_event_loop.c +++ b/source/darwin/dispatch_queue_event_loop.c @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -14,8 +15,8 @@ #include +#include "aws_apple_network_framework.h" #include -#include #include #include @@ -41,8 +42,7 @@ static struct aws_event_loop_vtable s_vtable = { .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .cancel_task = s_cancel_task, - .register_style.connect_to_completion_port = s_connect_to_dispatch_queue, - .event_loop_style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED, + .connect_to_io_completion_port = s_connect_to_dispatch_queue, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_on_callers_thread, @@ -465,7 +465,6 @@ static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *ta static int s_connect_to_dispatch_queue(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { (void)event_loop; (void)handle; -#ifdef
AWS_USE_DISPATCH_QUEUE AWS_PRECONDITION(handle->set_queue && handle->clear_queue); AWS_LOGF_TRACE( @@ -475,7 +474,6 @@ static int s_connect_to_dispatch_queue(struct aws_event_loop *event_loop, struct (void *)handle->data.handle); struct dispatch_loop *dispatch_loop = event_loop->impl_data; handle->set_queue(handle, dispatch_loop->dispatch_queue); -#endif // #ifdef AWS_USE_DISPATCH_QUEUE return AWS_OP_SUCCESS; } @@ -485,9 +483,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc "id=%p: un-subscribing from events on handle %p", (void *)event_loop, (void *)handle->data.handle); -#ifdef AWS_USE_DISPATCH_QUEUE handle->clear_queue(handle); -#endif return AWS_OP_SUCCESS; } diff --git a/source/darwin/nw_socket.c b/source/darwin/nw_socket.c index a621bac02..4e64e56f4 100644 --- a/source/darwin/nw_socket.c +++ b/source/darwin/nw_socket.c @@ -3,21 +3,20 @@ * SPDX-License-Identifier: Apache-2.0. */ -#ifdef AWS_USE_DISPATCH_QUEUE +#include -# include +#include +#include +#include +#include -# include -# include -# include -# include +#include +#include +#include -# include -# include -# include - -# include -# include +#include "aws_apple_network_framework.h" +#include +#include const char *aws_sec_trust_result_type_to_string(SecTrustResultType trust_result) { switch (trust_result) { @@ -457,11 +456,11 @@ static int s_setup_socket_params(struct nw_socket *nw_socket, const struct aws_s bool setup_tls = false; struct secure_transport_ctx *transport_ctx = NULL; -# ifdef AWS_USE_SECITEM +#ifdef AWS_USE_SECITEM if (nw_socket->tls_ctx) { setup_tls = true; } -# endif /* AWS_USE_SECITEM*/ +#endif /* AWS_USE_SECITEM*/ if (setup_tls) { transport_ctx = nw_socket->tls_ctx->impl; @@ -690,7 +689,10 @@ static void s_socket_impl_destroy(void *sock_ptr) { nw_socket = NULL; } -int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { +int aws_socket_init_apple_nw_socket( + struct aws_socket *socket, + 
struct aws_allocator *alloc, + const struct aws_socket_options *options) { AWS_ASSERT(options); AWS_ZERO_STRUCT(*socket); @@ -722,7 +724,6 @@ int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, cons socket->options = *options; socket->impl = nw_socket; socket->vtable = &s_vtable; - socket->event_loop_style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED; aws_mutex_init(&nw_socket->synced_data.lock); aws_mutex_lock(&nw_socket->synced_data.lock); @@ -1282,7 +1283,7 @@ static int s_socket_connect_fn( socket->io_handle.set_queue = s_client_set_dispatch_queue; socket->io_handle.clear_queue = s_client_clear_dispatch_queue; - aws_event_loop_connect_handle_to_completion_port(event_loop, &socket->io_handle); + aws_event_loop_connect_handle_to_io_completion_port(event_loop, &socket->io_handle); socket->event_loop = event_loop; nw_socket->on_connection_result_fn = on_connection_result; @@ -1655,7 +1656,7 @@ static int s_socket_start_accept_fn( return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); } - aws_event_loop_connect_handle_to_completion_port(accept_loop, &socket->io_handle); + aws_event_loop_connect_handle_to_io_completion_port(accept_loop, &socket->io_handle); socket->event_loop = accept_loop; socket->accept_result_fn = on_accept_result; socket->connect_accept_user_data = user_data; @@ -1818,7 +1819,7 @@ static int s_socket_assign_to_event_loop_fn(struct aws_socket *socket, struct aw // aws_mutex_lock(&nw_socket->synced_data.lock); nw_socket->synced_data.event_loop = event_loop; - if (!aws_event_loop_connect_handle_to_completion_port(event_loop, &socket->io_handle)) { + if (!aws_event_loop_connect_handle_to_io_completion_port(event_loop, &socket->io_handle)) { nw_connection_start(socket->io_handle.data.handle); aws_mutex_unlock(&nw_socket->synced_data.lock); return AWS_OP_SUCCESS; @@ -2101,34 +2102,3 @@ static struct aws_string *s_socket_get_server_name_fn(const struct aws_socket *s struct nw_socket *nw_socket = socket->impl; return 
nw_socket->host_name; } - -void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { - struct aws_uuid uuid; - AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); - char uuid_str[AWS_UUID_STR_LEN] = {0}; - struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); - AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".local", AWS_BYTE_BUF_PRI(uuid_buf)); -} - -int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { - if (socket->local_endpoint.address[0] == 0) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Socket has no local address. Socket must be bound first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - *out_address = socket->local_endpoint; - return AWS_OP_SUCCESS; -} -#else - -/* This is here because ISO C requires at least one declaration. We do not simply not include this file at build - * in the CMakeLists because it'd require even more tinkering in aws-crt-swift to manage the inclusion/exclusion of - * this file across a number of platforms. 
- */ -int aws_nw_socket_declaration; - -#endif /* AWS_USE_DISPATCH_QUEUE */ diff --git a/source/darwin/secure_transport_tls_channel_handler.c b/source/darwin/secure_transport_tls_channel_handler.c index 0a893ff2d..cef47319f 100644 --- a/source/darwin/secure_transport_tls_channel_handler.c +++ b/source/darwin/secure_transport_tls_channel_handler.c @@ -6,7 +6,6 @@ #include #include -#include #include #include #include @@ -27,7 +26,7 @@ #include #include -#include +#include "aws_apple_network_framework.h" #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" diff --git a/source/event_loop.c b/source/event_loop.c index e8b04e254..e49515d73 100644 --- a/source/event_loop.c +++ b/source/event_loop.c @@ -5,84 +5,59 @@ #include +#include +#include +#include + #include #include +#include #include #include -#ifdef __APPLE__ -// DEBUG WIP we may need to wrap this for iOS specific -# include +#ifdef AWS_USE_APPLE_NETWORK_FRAMEWORK +static enum aws_event_loop_type s_default_event_loop_type_override = AWS_EVENT_LOOP_DISPATCH_QUEUE; +#else +static enum aws_event_loop_type s_default_event_loop_type_override = AWS_EVENT_LOOP_PLATFORM_DEFAULT; #endif -static const struct aws_event_loop_configuration s_available_configurations[] = { -#ifdef AWS_USE_IO_COMPLETION_PORTS - { - .name = "WinNT IO Completion Ports", - .event_loop_new_fn = aws_event_loop_new_iocp_with_options, - .style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED, - .is_default = true, - }, -#endif /* AWS_USE_IO_COMPLETION_PORTS */ -#ifdef AWS_USE_DISPATCH_QUEUE - /* use kqueue on OSX and dispatch_queues everywhere else */ - { - .name = "Apple Dispatch Queue", - .event_loop_new_fn = aws_event_loop_new_dispatch_queue_with_options, - .style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED, - .is_default = true, - }, -#endif /* AWS_USE_DISPATCH_QUEUE */ -#ifdef AWS_USE_KQUEUE - { - .name = "BSD Edge-Triggered KQueue", - .event_loop_new_fn = aws_event_loop_new_kqueue_with_options, - .style = 
AWS_EVENT_LOOP_STYLE_POLL_BASED, - .is_default = true, - }, -#endif /* AWS_USE_KQUEUE */ -#ifdef AWS_USE_EPOLL - { - .name = "Linux Edge-Triggered Epoll", - .event_loop_new_fn = aws_event_loop_new_epoll_with_options, - .style = AWS_EVENT_LOOP_STYLE_POLL_BASED, - .is_default = true, - }, -#endif /* AWS_USE_EPOLL */ -}; - -static struct aws_event_loop_configuration_group s_available_configuration_group = { - .configuration_count = AWS_ARRAY_SIZE(s_available_configurations), - .configurations = s_available_configurations, -}; - -const struct aws_event_loop_configuration_group *aws_event_loop_get_available_configurations(void) { - return &s_available_configuration_group; -} - struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) { struct aws_event_loop_options options = { .thread_options = NULL, .clock = clock, + .type = AWS_EVENT_LOOP_PLATFORM_DEFAULT, }; - return aws_event_loop_new_default_with_options(alloc, &options); + return aws_event_loop_new(alloc, &options); } -struct aws_event_loop *aws_event_loop_new_default_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options) { +static int aws_event_loop_type_validate_platform(enum aws_event_loop_type type); +struct aws_event_loop *aws_event_loop_new(struct aws_allocator *alloc, const struct aws_event_loop_options *options) { - const struct aws_event_loop_configuration_group *default_configs = aws_event_loop_get_available_configurations(); + enum aws_event_loop_type type = options->type; + if (type == AWS_EVENT_LOOP_PLATFORM_DEFAULT) { + type = aws_event_loop_get_default_type(); + } - for (size_t i = 0; i < default_configs->configuration_count; ++i) { - if (default_configs[i].configurations->is_default) { - return default_configs[i].configurations->event_loop_new_fn(alloc, options); - } + if (aws_event_loop_type_validate_platform(type)) { + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Invalid event loop type on the platform."); + return NULL; 
} - AWS_FATAL_ASSERT(!"no available configurations found!"); - return NULL; + switch (type) { + case AWS_EVENT_LOOP_EPOLL: + return aws_event_loop_new_epoll_with_options(alloc, options); + case AWS_EVENT_LOOP_IOCP: + return aws_event_loop_new_iocp_with_options(alloc, options); + case AWS_EVENT_LOOP_KQUEUE: + return aws_event_loop_new_kqueue_with_options(alloc, options); + case AWS_EVENT_LOOP_DISPATCH_QUEUE: + return aws_event_loop_new_dispatch_queue_with_options(alloc, options); + default: + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Invalid event loop type on the platform."); + aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; + } } static void s_event_loop_group_thread_exit(void *user_data) { @@ -138,30 +113,32 @@ static void s_aws_event_loop_group_shutdown_async(struct aws_event_loop_group *e aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options); } -static struct aws_event_loop_group *s_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - bool pin_threads, +struct aws_event_loop_group *aws_event_loop_group_new_internal( + struct aws_allocator *allocator, + const struct aws_event_loop_group_options *options, aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - AWS_ASSERT(new_loop_fn); + void *new_loop_user_data) { + AWS_FATAL_ASSERT(new_loop_fn); + + aws_io_clock_fn *clock = options->clock_override; + if (!clock) { + clock = aws_high_res_clock_get_ticks; + } size_t group_cpu_count = 0; struct aws_cpu_info *usable_cpus = NULL; + bool pin_threads = options->cpu_group != NULL; if (pin_threads) { + uint16_t cpu_group = *options->cpu_group; group_cpu_count = aws_get_cpu_count_for_group(cpu_group); - if (!group_cpu_count) { + // LOG THIS aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } - usable_cpus = aws_mem_calloc(alloc, group_cpu_count, sizeof(struct 
aws_cpu_info)); - + usable_cpus = aws_mem_calloc(allocator, group_cpu_count, sizeof(struct aws_cpu_info)); if (usable_cpus == NULL) { return NULL; } @@ -169,16 +146,23 @@ static struct aws_event_loop_group *s_event_loop_group_new( aws_get_cpu_ids_for_group(cpu_group, usable_cpus, group_cpu_count); } - struct aws_event_loop_group *el_group = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop_group)); + struct aws_event_loop_group *el_group = aws_mem_calloc(allocator, 1, sizeof(struct aws_event_loop_group)); if (el_group == NULL) { return NULL; } - el_group->allocator = alloc; + el_group->allocator = allocator; aws_ref_count_init( &el_group->ref_count, el_group, (aws_simple_completion_callback *)s_aws_event_loop_group_shutdown_async); - if (aws_array_list_init_dynamic(&el_group->event_loops, alloc, el_count, sizeof(struct aws_event_loop *))) { + uint16_t el_count = options->loop_count; + if (el_count == 0) { + uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); + /* cut them in half to avoid using hyper threads for the IO work. */ + el_count = processor_count > 1 ? 
processor_count / 2 : processor_count; + } + + if (aws_array_list_init_dynamic(&el_group->event_loops, allocator, el_count, sizeof(struct aws_event_loop *))) { goto on_error; } @@ -187,10 +171,8 @@ static struct aws_event_loop_group *s_event_loop_group_new( if (!pin_threads || (i < group_cpu_count && !usable_cpus[i].suspected_hyper_thread)) { struct aws_thread_options thread_options = *aws_default_thread_options(); - struct aws_event_loop_options options = { - .clock = clock, - .thread_options = &thread_options, - }; + struct aws_event_loop_options el_options = { + .clock = clock, .thread_options = &thread_options, .type = options->type}; if (pin_threads) { thread_options.cpu_id = usable_cpus[i].cpu_id; @@ -204,8 +186,7 @@ static struct aws_event_loop_group *s_event_loop_group_new( } thread_options.name = aws_byte_cursor_from_c_str(thread_name); - struct aws_event_loop *loop = new_loop_fn(alloc, &options, new_loop_user_data); - + struct aws_event_loop *loop = new_loop_fn(allocator, &el_options, new_loop_user_data); if (!loop) { goto on_error; } @@ -221,12 +202,12 @@ static struct aws_event_loop_group *s_event_loop_group_new( } } - if (shutdown_options != NULL) { - el_group->shutdown_options = *shutdown_options; + if (options->shutdown_options != NULL) { + el_group->shutdown_options = *options->shutdown_options; } if (pin_threads) { - aws_mem_release(alloc, usable_cpus); + aws_mem_release(allocator, usable_cpus); } return el_group; @@ -235,7 +216,7 @@ on_error:; /* cache the error code to prevent any potential side effects */ int cached_error_code = aws_last_error(); - aws_mem_release(alloc, usable_cpus); + aws_mem_release(allocator, usable_cpus); s_aws_event_loop_group_shutdown_sync(el_group); s_event_loop_group_thread_exit(el_group); @@ -244,103 +225,20 @@ on_error:; return NULL; } -struct aws_event_loop_group *aws_event_loop_group_new( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - aws_new_event_loop_fn *new_loop_fn, - void 
*new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - - AWS_ASSERT(new_loop_fn); - AWS_ASSERT(el_count); - - return s_event_loop_group_new(alloc, clock, el_count, 0, false, new_loop_fn, new_loop_user_data, shutdown_options); -} - static struct aws_event_loop *s_default_new_event_loop( struct aws_allocator *allocator, const struct aws_event_loop_options *options, void *user_data) { (void)user_data; - return aws_event_loop_new_default_with_options(allocator, options); + return aws_event_loop_new(allocator, options); } -struct aws_event_loop_group *aws_event_loop_group_new_default( - struct aws_allocator *alloc, - uint16_t max_threads, - const struct aws_shutdown_callback_options *shutdown_options) { - if (!max_threads) { - uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); - /* cut them in half to avoid using hyper threads for the IO work. */ - max_threads = processor_count > 1 ? processor_count / 2 : processor_count; - } - - return aws_event_loop_group_new( - alloc, aws_high_res_clock_get_ticks, max_threads, s_default_new_event_loop, NULL, shutdown_options); -} - -static struct aws_event_loop *s_default_new_config_based_event_loop( - struct aws_allocator *allocator, - const struct aws_event_loop_options *options, - void *user_data) { - - const struct aws_event_loop_configuration *config = user_data; - return config->event_loop_new_fn(allocator, options); -} - -struct aws_event_loop_group *aws_event_loop_group_new_from_config( +struct aws_event_loop_group *aws_event_loop_group_new( struct aws_allocator *allocator, - const struct aws_event_loop_configuration *config, - uint16_t max_threads, - const struct aws_shutdown_callback_options *shutdown_options) { - if (!max_threads) { - uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); - /* cut them in half to avoid using hyper threads for the IO work. */ - max_threads = processor_count > 1 ? 
processor_count / 2 : processor_count; - } - - return s_event_loop_group_new( - allocator, - aws_high_res_clock_get_ticks, - max_threads, - 0, - false, - s_default_new_config_based_event_loop, - (void *)config, - shutdown_options); -} + const struct aws_event_loop_group_options *options) { -struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group( - struct aws_allocator *alloc, - aws_io_clock_fn *clock, - uint16_t el_count, - uint16_t cpu_group, - aws_new_event_loop_fn *new_loop_fn, - void *new_loop_user_data, - const struct aws_shutdown_callback_options *shutdown_options) { - AWS_ASSERT(new_loop_fn); - AWS_ASSERT(el_count); - - return s_event_loop_group_new( - alloc, clock, el_count, cpu_group, true, new_loop_fn, new_loop_user_data, shutdown_options); -} - -struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( - struct aws_allocator *alloc, - uint16_t max_threads, - uint16_t cpu_group, - const struct aws_shutdown_callback_options *shutdown_options) { - - if (!max_threads) { - uint16_t processor_count = (uint16_t)aws_system_info_processor_count(); - /* cut them in half to avoid using hyper threads for the IO work. */ - max_threads = processor_count > 1 ? 
processor_count / 2 : processor_count; - } - - return aws_event_loop_group_new_pinned_to_cpu_group( - alloc, aws_high_res_clock_get_ticks, max_threads, cpu_group, s_default_new_event_loop, NULL, shutdown_options); + return aws_event_loop_group_new_internal(allocator, options, s_default_new_event_loop, NULL); } struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group) { @@ -357,14 +255,7 @@ void aws_event_loop_group_release(struct aws_event_loop_group *el_group) { } } -enum aws_event_loop_style aws_event_loop_group_get_style(struct aws_event_loop_group *el_group) { - AWS_PRECONDITION(aws_event_loop_group_get_loop_count(el_group) > 0); - - struct aws_event_loop *event_loop = aws_event_loop_group_get_loop_at(el_group, 0); - return event_loop->vtable->event_loop_style; -} - -size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group) { +size_t aws_event_loop_group_get_loop_count(const struct aws_event_loop_group *el_group) { return aws_array_list_length(&el_group->event_loops); } @@ -588,11 +479,15 @@ void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_ta event_loop->vtable->cancel_task(event_loop, task); } -int aws_event_loop_connect_handle_to_completion_port(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { - AWS_ASSERT( - event_loop->vtable && event_loop->vtable->event_loop_style == AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED && - event_loop->vtable->register_style.connect_to_completion_port); - return event_loop->vtable->register_style.connect_to_completion_port(event_loop, handle); +int aws_event_loop_connect_handle_to_io_completion_port( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle) { + + if (event_loop->vtable && event_loop->vtable->connect_to_io_completion_port) { + return event_loop->vtable->connect_to_io_completion_port(event_loop, handle); + } + + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } int 
aws_event_loop_subscribe_to_io_events( @@ -602,10 +497,10 @@ int aws_event_loop_subscribe_to_io_events( aws_event_loop_on_event_fn *on_event, void *user_data) { - AWS_ASSERT( - event_loop->vtable && event_loop->vtable->event_loop_style == AWS_EVENT_LOOP_STYLE_POLL_BASED && - event_loop->vtable->register_style.subscribe_to_io_events); - return event_loop->vtable->register_style.subscribe_to_io_events(event_loop, handle, events, on_event, user_data); + if (event_loop->vtable && event_loop->vtable->subscribe_to_io_events) { + return event_loop->vtable->subscribe_to_io_events(event_loop, handle, events, on_event, user_data); + } + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); } int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { @@ -624,7 +519,182 @@ bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop) return event_loop->vtable->is_on_callers_thread(event_loop); } -int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos) { +int aws_event_loop_current_clock_time(const struct aws_event_loop *event_loop, uint64_t *time_nanos) { AWS_ASSERT(event_loop->clock); return event_loop->clock(time_nanos); } + +struct aws_event_loop_group *aws_event_loop_group_new_default( + struct aws_allocator *alloc, + uint16_t max_threads, + const struct aws_shutdown_callback_options *shutdown_options) { + + struct aws_event_loop_group_options elg_options = { + .loop_count = max_threads, + .shutdown_options = shutdown_options, + }; + + return aws_event_loop_group_new(alloc, &elg_options); +} + +struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group( + struct aws_allocator *alloc, + uint16_t max_threads, + uint16_t cpu_group, + const struct aws_shutdown_callback_options *shutdown_options) { + + struct aws_event_loop_group_options elg_options = { + .loop_count = max_threads, + .shutdown_options = shutdown_options, + .cpu_group = 
&cpu_group, + }; + + return aws_event_loop_group_new(alloc, &elg_options); +} + +void *aws_event_loop_get_impl(struct aws_event_loop *event_loop) { + return event_loop->impl_data; +} + +struct aws_event_loop *aws_event_loop_new_base( + struct aws_allocator *allocator, + aws_io_clock_fn *clock, + struct aws_event_loop_vtable *vtable, + void *impl) { + struct aws_event_loop *event_loop = aws_mem_acquire(allocator, sizeof(struct aws_event_loop)); + aws_event_loop_init_base(event_loop, allocator, clock); + event_loop->impl_data = impl; + event_loop->vtable = vtable; + + return event_loop; +} + +/** + * Override default event loop type. Only used internally in tests. + * + * If the defined type is not supported on the current platform, the event loop type would reset to + * AWS_EVENT_LOOP_PLATFORM_DEFAULT. + */ +void aws_event_loop_override_default_type(enum aws_event_loop_type default_type_override) { + if (aws_event_loop_type_validate_platform(default_type_override) == AWS_OP_SUCCESS) { + s_default_event_loop_type_override = default_type_override; + } else { + s_default_event_loop_type_override = AWS_EVENT_LOOP_PLATFORM_DEFAULT; + } +} + +/** + * Return the default event loop type. If the return value is `AWS_EVENT_LOOP_PLATFORM_DEFAULT`, the function failed to + * retrieve the default type value. + * If `aws_event_loop_override_default_type` has been called, return the override default type. + */ +enum aws_event_loop_type aws_event_loop_get_default_type(void) { + if (s_default_event_loop_type_override != AWS_EVENT_LOOP_PLATFORM_DEFAULT) { + return s_default_event_loop_type_override; + } +/** + * Ideally we should use the platform definition (e.x.: AWS_OS_APPLE) here, however the platform + * definition was declared in aws-c-common. We probably do not want to introduce extra dependency here. 
+ */ +#ifdef AWS_ENABLE_KQUEUE + return AWS_EVENT_LOOP_KQUEUE; +#endif +#ifdef AWS_ENABLE_DISPATCH_QUEUE + return AWS_EVENT_LOOP_DISPATCH_QUEUE; +#endif +#ifdef AWS_ENABLE_EPOLL + return AWS_EVENT_LOOP_EPOLL; +#endif +#ifdef AWS_OS_WINDOWS + return AWS_EVENT_LOOP_IOCP; +#endif +} + +static int aws_event_loop_type_validate_platform(enum aws_event_loop_type type) { + switch (type) { + case AWS_EVENT_LOOP_EPOLL: +#ifndef AWS_ENABLE_EPOLL + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Event loop type EPOLL is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_EPOLL + break; + case AWS_EVENT_LOOP_IOCP: +#ifndef AWS_ENABLE_IO_COMPLETION_PORTS + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Event loop type IOCP is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_IO_COMPLETION_PORTS + break; + case AWS_EVENT_LOOP_KQUEUE: +#ifndef AWS_ENABLE_KQUEUE + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Event loop type KQUEUE is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_KQUEUE + break; + case AWS_EVENT_LOOP_DISPATCH_QUEUE: +#ifndef AWS_ENABLE_DISPATCH_QUEUE + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Event loop type Dispatch Queue is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_DISPATCH_QUEUE + break; + default: + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Invalid event loop type."); + return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION); + break; + } + return AWS_OP_SUCCESS; +} + +#ifndef AWS_ENABLE_DISPATCH_QUEUE +struct aws_event_loop *aws_event_loop_new_dispatch_queue_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options) { + (void)alloc; + (void)options; + AWS_ASSERT(0); + + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Dispatch Queue is not supported on the platform"); + 
aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; +} +#endif // AWS_ENABLE_DISPATCH_QUEUE + +#ifndef AWS_ENABLE_IO_COMPLETION_PORTS +struct aws_event_loop *aws_event_loop_new_iocp_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options) { + (void)alloc; + (void)options; + AWS_ASSERT(0); + + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "IOCP is not supported on the platform"); + aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; +} +#endif // AWS_ENABLE_IO_COMPLETION_PORTS + +#ifndef AWS_ENABLE_KQUEUE +struct aws_event_loop *aws_event_loop_new_kqueue_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options) { + (void)alloc; + (void)options; + AWS_ASSERT(0); + + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Kqueue is not supported on the platform"); + aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + return NULL; +} +#endif // AWS_ENABLE_KQUEUE + +#ifndef AWS_ENABLE_EPOLL +struct aws_event_loop *aws_event_loop_new_epoll_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options) { + (void)alloc; + (void)options; + AWS_ASSERT(0); + + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "Epoll is not supported on the platform"); + return NULL; +} +#endif // AWS_ENABLE_EPOLL diff --git a/source/exponential_backoff_retry_strategy.c b/source/exponential_backoff_retry_strategy.c index cf2472269..2110cbd46 100644 --- a/source/exponential_backoff_retry_strategy.c +++ b/source/exponential_backoff_retry_strategy.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -55,7 +56,7 @@ static void s_exponential_retry_destroy(struct aws_retry_strategy *retry_strateg if (completion_callback != NULL) { completion_callback(completion_user_data); } - aws_ref_count_release(&el_group->ref_count); + aws_event_loop_group_release(el_group); } } @@ -361,7 +362,7 @@ struct aws_retry_strategy *aws_retry_strategy_new_exponential_backoff(
aws_atomic_init_int(&exponential_backoff_strategy->base.ref_count, 1); exponential_backoff_strategy->config = *config; exponential_backoff_strategy->config.el_group = - aws_ref_count_acquire(&exponential_backoff_strategy->config.el_group->ref_count); + aws_event_loop_group_acquire(exponential_backoff_strategy->config.el_group); if (!exponential_backoff_strategy->config.generate_random && !exponential_backoff_strategy->config.generate_random_impl) { diff --git a/source/future.c b/source/future.c index 96c88ef6a..47af607ce 100644 --- a/source/future.c +++ b/source/future.c @@ -56,6 +56,15 @@ struct aws_future_impl { }; static void s_future_impl_result_dtor(struct aws_future_impl *future, void *result_addr) { + +/* + * On ARM machines, the compiler complains about the array bounds warning for aws_future_bool, even though + * aws_future_bool will never go into any destroy or release branch. Disable the warning since it's a false positive. + */ +#ifndef _MSC_VER +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Warray-bounds" +#endif switch (future->type) { case AWS_FUTURE_T_BY_VALUE_WITH_CLEAN_UP: { future->result_dtor.clean_up(result_addr); @@ -79,6 +88,9 @@ static void s_future_impl_result_dtor(struct aws_future_impl *future, void *resu default: break; } +#ifndef _MSC_VER +# pragma GCC diagnostic pop +#endif } static void s_future_impl_destroy(void *user_data) { @@ -472,60 +484,7 @@ bool aws_future_impl_wait(const struct aws_future_impl *future, uint64_t timeout return is_done; } -// AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_bool, bool) -struct aws_future_bool *aws_future_bool_new(struct aws_allocator *alloc) { - return (struct aws_future_bool *)aws_future_impl_new_by_value(alloc, sizeof(_Bool)); -} -void aws_future_bool_set_result(struct aws_future_bool *future, _Bool result) { - aws_future_impl_set_result_by_move((struct aws_future_impl *)future, &result); -} -_Bool aws_future_bool_get_result(const struct aws_future_bool *future) { - return 
*(_Bool *)aws_future_impl_get_result_address((const struct aws_future_impl *)future); -} -struct aws_future_bool *aws_future_bool_acquire(struct aws_future_bool *future) { - return (struct aws_future_bool *)aws_future_impl_acquire((struct aws_future_impl *)future); -} -struct aws_future_bool *aws_future_bool_release(struct aws_future_bool *future) { - return (struct aws_future_bool *)aws_future_impl_release((struct aws_future_impl *)future); -} -void aws_future_bool_set_error(struct aws_future_bool *future, int error_code) { - aws_future_impl_set_error((struct aws_future_impl *)future, error_code); -} -_Bool aws_future_bool_is_done(const struct aws_future_bool *future) { - return aws_future_impl_is_done((const struct aws_future_impl *)future); -} -int aws_future_bool_get_error(const struct aws_future_bool *future) { - return aws_future_impl_get_error((const struct aws_future_impl *)future); -} -void aws_future_bool_register_callback( - struct aws_future_bool *future, - aws_future_callback_fn *on_done, - void *user_data) { - aws_future_impl_register_callback((struct aws_future_impl *)future, on_done, user_data); -} -_Bool aws_future_bool_register_callback_if_not_done( - struct aws_future_bool *future, - aws_future_callback_fn *on_done, - void *user_data) { - return aws_future_impl_register_callback_if_not_done((struct aws_future_impl *)future, on_done, user_data); -} -void aws_future_bool_register_event_loop_callback( - struct aws_future_bool *future, - struct aws_event_loop *event_loop, - aws_future_callback_fn *on_done, - void *user_data) { - aws_future_impl_register_event_loop_callback((struct aws_future_impl *)future, event_loop, on_done, user_data); -} -void aws_future_bool_register_channel_callback( - struct aws_future_bool *future, - struct aws_channel *channel, - aws_future_callback_fn *on_done, - void *user_data) { - aws_future_impl_register_channel_callback((struct aws_future_impl *)future, channel, on_done, user_data); -} -_Bool 
aws_future_bool_wait(struct aws_future_bool *future, uint64_t timeout_ns) { - return aws_future_impl_wait((struct aws_future_impl *)future, timeout_ns); -} +AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_bool, bool) AWS_FUTURE_T_BY_VALUE_IMPLEMENTATION(aws_future_size, size_t) diff --git a/source/linux/epoll_event_loop.c b/source/linux/epoll_event_loop.c index 2076d7153..b0f6d7334 100644 --- a/source/linux/epoll_event_loop.c +++ b/source/linux/epoll_event_loop.c @@ -3,17 +3,16 @@ * SPDX-License-Identifier: Apache-2.0. */ -#include - #include #include #include #include #include #include -#include - +#include #include +#include +#include #include @@ -72,8 +71,7 @@ static struct aws_event_loop_vtable s_vtable = { .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .cancel_task = s_cancel_task, - .register_style.subscribe_to_io_events = s_subscribe_to_io_events, - .event_loop_style = AWS_EVENT_LOOP_STYLE_POLL_BASED, + .subscribe_to_io_events = s_subscribe_to_io_events, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_on_callers_thread, diff --git a/source/posix/pipe.c b/source/posix/pipe.c index f727b021c..449ab1318 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -6,6 +6,7 @@ #include #include +#include #ifdef __GLIBC__ # define __USE_GNU diff --git a/source/posix/socket.c b/source/posix/socket.c index e9ebe8c57..c0631ab9e 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -226,7 +227,7 @@ static bool s_socket_is_open(struct aws_socket *socket); static struct aws_byte_buf s_socket_get_protocol_fn(const struct aws_socket *socket); static struct aws_string *s_socket_get_server_name_fn(const struct aws_socket *socket); -static struct aws_socket_vtable s_vtable = { +struct aws_socket_vtable g_posix_socket_vtable = { .socket_cleanup_fn = 
s_socket_clean_up, .socket_connect_fn = s_socket_connect, .socket_bind_fn = s_socket_bind, @@ -271,8 +272,7 @@ static int s_socket_init( socket->state = INIT; socket->options = *options; socket->impl = posix_socket; - socket->vtable = &s_vtable; - socket->event_loop_style = AWS_EVENT_LOOP_STYLE_POLL_BASED; + socket->vtable = &g_posix_socket_vtable; if (existing_socket_fd < 0) { int err = s_create_socket(socket, options); @@ -301,12 +301,13 @@ static int s_socket_init( return AWS_OP_SUCCESS; } -#if defined(AWS_USE_KQUEUE) || defined(AWS_USE_EPOLL) -int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { +int aws_socket_init_posix( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options) { AWS_ASSERT(options); return s_socket_init(socket, alloc, options, -1); } -#endif // #ifdef AWS_USE_KQUEUE || AWS_USE_EPOLL static void s_socket_clean_up(struct aws_socket *socket) { if (!socket->impl) { @@ -966,21 +967,6 @@ static int s_socket_bind( return AWS_OP_ERR; } -#if defined(AWS_USE_KQUEUE) || defined(AWS_USE_EPOLL) -int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { - if (socket->local_endpoint.address[0] == 0) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Socket has no local address. 
Socket must be bound first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - *out_address = socket->local_endpoint; - return AWS_OP_SUCCESS; -} -#endif // AWS_USE_KQUEUE || AWS_USE_EPOLL - static int s_socket_listen(struct aws_socket *socket, int backlog_size) { if (socket->state != BOUND) { AWS_LOGF_ERROR( @@ -2089,17 +2075,6 @@ static struct aws_string *s_socket_get_server_name_fn(const struct aws_socket *s return NULL; } -#if defined(AWS_USE_KQUEUE) || defined(AWS_USE_EPOLL) -void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { - struct aws_uuid uuid; - AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); - char uuid_str[AWS_UUID_STR_LEN] = {0}; - struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); - AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); -} -#endif // AWS_USE_KQUEUE || AWS_USE_EPOLL - bool aws_is_network_interface_name_valid(const char *interface_name) { if (if_nametoindex(interface_name) == 0) { AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "network_interface_name(%s) is invalid with errno: %d", interface_name, errno); diff --git a/source/s2n/s2n_tls_channel_handler.c b/source/s2n/s2n_tls_channel_handler.c index 018e6c069..af8fbd834 100644 --- a/source/s2n/s2n_tls_channel_handler.c +++ b/source/s2n/s2n_tls_channel_handler.c @@ -5,21 +5,20 @@ #include #include +#include #include - +#include +#include +#include #include #include #include #include +#include #include #include #include -#include -#include -#include -#include - #include #ifdef AWS_S2N_INSOURCE_PATH # include @@ -255,7 +254,7 @@ void aws_tls_init_static_state(struct aws_allocator *alloc) { void aws_tls_clean_up_static_state(void) { /* only clean up s2n if we were the ones that initialized it */ if 
(!s_s2n_initialized_externally) { - s2n_cleanup(); + s2n_cleanup_final(); } } @@ -271,6 +270,8 @@ bool aws_tls_is_cipher_pref_supported(enum aws_tls_cipher_pref cipher_pref) { #ifndef ANDROID case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05: return true; + case AWS_IO_TLS_CIPHER_PREF_PQ_TLSV1_2_2024_10: + return true; #endif default: @@ -1537,6 +1538,9 @@ static struct aws_tls_ctx *s_tls_ctx_new( case AWS_IO_TLS_CIPHER_PREF_PQ_TLSv1_0_2021_05: security_policy = "PQ-TLS-1-0-2021-05-26"; break; + case AWS_IO_TLS_CIPHER_PREF_PQ_TLSV1_2_2024_10: + security_policy = "AWS-CRT-SDK-TLSv1.2-2023-PQ"; + break; default: AWS_LOGF_ERROR(AWS_LS_IO_TLS, "Unrecognized TLS Cipher Preference: %d", options->cipher_pref); aws_raise_error(AWS_IO_TLS_CIPHER_PREF_UNSUPPORTED); diff --git a/source/socket.c b/source/socket.c index 8fee07d25..31df02dc4 100644 --- a/source/socket.c +++ b/source/socket.c @@ -2,6 +2,10 @@ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. 
*/ + +#include +#include +#include #include void aws_socket_clean_up(struct aws_socket *socket) { @@ -17,7 +21,6 @@ int aws_socket_connect( aws_socket_retrieve_tls_options_fn *retrieve_tls_options, void *user_data) { AWS_PRECONDITION(socket->vtable && socket->vtable->socket_connect_fn); - AWS_PRECONDITION(socket->event_loop_style & event_loop->vtable->event_loop_style); return socket->vtable->socket_connect_fn( socket, remote_endpoint, event_loop, on_connection_result, retrieve_tls_options, user_data); } @@ -67,7 +70,6 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { AWS_PRECONDITION(socket->vtable && socket->vtable->socket_assign_to_event_loop_fn); - AWS_PRECONDITION(socket->event_loop_style & event_loop->vtable->event_loop_style); return socket->vtable->socket_assign_to_event_loop_fn(socket, event_loop); } @@ -107,3 +109,166 @@ bool aws_socket_is_open(struct aws_socket *socket) { AWS_PRECONDITION(socket->vtable && socket->vtable->socket_is_open_fn); return socket->vtable->socket_is_open_fn(socket); } + +static enum aws_socket_impl_type aws_socket_get_default_impl_type(void); +static int aws_socket_impl_type_validate_platform(enum aws_socket_impl_type type); +int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { + + // 1. get socket type & validate type is avliable the platform + enum aws_socket_impl_type type = options->impl_type; + if (type == AWS_SOCKET_IMPL_PLATFORM_DEFAULT) { + type = aws_socket_get_default_impl_type(); + } + + if (aws_socket_impl_type_validate_platform(type)) { + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Invalid event loop type on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } + + // 2. 
setup vtable based on socket type + switch (type) { + case AWS_SOCKET_IMPL_POSIX: + return aws_socket_init_posix(socket, alloc, options); + case AWS_SOCKET_IMPL_WINSOCK: + return aws_socket_init_winsock(socket, alloc, options); + case AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK: + return aws_socket_init_apple_nw_socket(socket, alloc, options); + default: + AWS_ASSERT(false && "Invalid socket implementation on platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + } +} + +int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { + if (socket->local_endpoint.address[0] == 0) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: Socket has no local address. Socket must be bound first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + *out_address = socket->local_endpoint; + return AWS_OP_SUCCESS; +} + +void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { + (void)endpoint; + struct aws_uuid uuid; + AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); + char uuid_str[AWS_UUID_STR_LEN] = {0}; + struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); + AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); + +#if defined(WS_USE_APPLE_NETWORK_FRAMEWORK) + snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".local", AWS_BYTE_BUF_PRI(uuid_buf)); + return; +#endif + +#if defined(AWS_ENABLE_KQUEUE) || defined(AWS_ENABLE_EPOLL) + snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); + return; +#endif + +#if defined(AWS_ENABLE_IO_COMPLETION_PORTS) + snprintf(endpoint->address, sizeof(endpoint->address), "\\\\.\\pipe\\testsock" PRInSTR, AWS_BYTE_BUF_PRI(uuid_buf)); + return; +#endif +} + +/** + * Return the default socket implementation type. 
If the return value is `AWS_SOCKET_IMPL_PLATFORM_DEFAULT`, the + * function failed to retrieve the default type value. + */ +static enum aws_socket_impl_type aws_socket_get_default_impl_type(void) { + enum aws_socket_impl_type type = AWS_SOCKET_IMPL_PLATFORM_DEFAULT; +// override default socket +#ifdef AWS_USE_APPLE_NETWORK_FRAMEWORK + type = AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK; +#endif // AWS_USE_APPLE_NETWORK_FRAMEWORK + if (type != AWS_SOCKET_IMPL_PLATFORM_DEFAULT) { + return type; + } +/** + * Ideally we should use the platform definition (e.x.: AWS_OS_APPLE) here, however the platform + * definition was declared in aws-c-common. We probably do not want to introduce extra dependency here. + */ +#if defined(AWS_ENABLE_KQUEUE) || defined(AWS_ENABLE_EPOLL) + return AWS_SOCKET_IMPL_POSIX; +#endif +#ifdef AWS_ENABLE_DISPATCH_QUEUE + return AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK; +#endif +#ifdef AWS_ENABLE_IO_COMPLETION_PORTS + return AWS_SOCKET_IMPL_WINSOCK; +#else + return AWS_SOCKET_IMPL_PLATFORM_DEFAULT; +#endif +} + +static int aws_socket_impl_type_validate_platform(enum aws_socket_impl_type type) { + switch (type) { + case AWS_SOCKET_IMPL_POSIX: +#if !defined(AWS_ENABLE_EPOLL) && !defined(AWS_ENABLE_KQUEUE) + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Posix socket is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_SOCKET_IMPL_POSIX + break; + case AWS_SOCKET_IMPL_WINSOCK: +#ifndef AWS_ENABLE_IO_COMPLETION_PORTS + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "WINSOCK is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_IO_COMPLETION_PORTS + break; + case AWS_SOCKET_IMPL_APPLE_NETWORK_FRAMEWORK: +#ifndef AWS_ENABLE_DISPATCH_QUEUE + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Apple Network Framework is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif // AWS_ENABLE_DISPATCH_QUEUE + break; + default: + 
AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Invalid socket implementation type."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); + break; + } + return AWS_OP_SUCCESS; +} + +#if !defined(AWS_ENABLE_EPOLL) && !defined(AWS_ENABLE_KQUEUE) && !defined(AWS_USE_SECITEM) +int aws_socket_init_posix( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options) { + (void)socket; + (void)alloc; + (void)options; + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Posix socket is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +} +#endif + +#ifndef AWS_ENABLE_IO_COMPLETION_PORTS +int aws_socket_init_winsock( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options) { + (void)socket; + (void)alloc; + (void)options; + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "WINSOCK is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +} +#endif + +#ifndef AWS_ENABLE_DISPATCH_QUEUE +int aws_socket_init_apple_nw_socket( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options) { + (void)socket; + (void)alloc; + (void)options; + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "Apple Network Framework is not supported on the platform."); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +} +#endif diff --git a/source/windows/iocp/iocp_event_loop.c b/source/windows/iocp/iocp_event_loop.c index c7875f799..473629de9 100644 --- a/source/windows/iocp/iocp_event_loop.c +++ b/source/windows/iocp/iocp_event_loop.c @@ -11,6 +11,7 @@ #include #include +#include #include @@ -137,8 +138,7 @@ struct aws_event_loop_vtable s_iocp_vtable = { .schedule_task_now = s_schedule_task_now, .schedule_task_future = s_schedule_task_future, .cancel_task = s_cancel_task, - .register_style.connect_to_completion_port = s_connect_to_io_completion_port, - .event_loop_style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED, + 
.connect_to_io_completion_port = s_connect_to_io_completion_port, .is_on_callers_thread = s_is_event_thread, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, .free_io_event_resources = s_free_io_event_resources, diff --git a/source/windows/iocp/pipe.c b/source/windows/iocp/pipe.c index a534c7e20..a9e2185e5 100644 --- a/source/windows/iocp/pipe.c +++ b/source/windows/iocp/pipe.c @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -251,7 +252,7 @@ int aws_pipe_init( } } - int err = aws_event_loop_connect_handle_to_completion_port(write_end_event_loop, &write_impl->handle); + int err = aws_event_loop_connect_handle_to_io_completion_port(write_end_event_loop, &write_impl->handle); if (err) { goto clean_up; } @@ -282,7 +283,7 @@ int aws_pipe_init( goto clean_up; } - err = aws_event_loop_connect_handle_to_completion_port(read_end_event_loop, &read_impl->handle); + err = aws_event_loop_connect_handle_to_io_completion_port(read_end_event_loop, &read_impl->handle); if (err) { goto clean_up; } diff --git a/source/windows/iocp/socket.c b/source/windows/iocp/socket.c index e9d019082..7e01ac48b 100644 --- a/source/windows/iocp/socket.c +++ b/source/windows/iocp/socket.c @@ -21,11 +21,11 @@ below, clang-format doesn't work (at least on my version) with the c-style comme #include #include #include -#include #include #include #include +#include #include #include @@ -56,7 +56,7 @@ below, clang-format doesn't work (at least on my version) with the c-style comme #define PIPE_BUFFER_SIZE 512 -struct iocp_socket_vtable { +struct winsock_vtable { int (*connection_success)(struct aws_socket *socket); void (*connection_error)(struct aws_socket *socket, int error_code); int (*close)(struct aws_socket *socket); @@ -199,7 +199,7 @@ static int s_determine_socket_error(int error); as well thought out. 
There were so many branches to handle three entirely different APIs we decided it was less painful to just have a bunch of function pointers in a table than to want to gouge our eyes out while looking at a ridiculous number of branches. */ -static struct iocp_socket_vtable s_iocp_vtables[3][2] = { +static struct winsock_vtable s_winsock_vtables[3][2] = { [AWS_SOCKET_IPV4] = { [AWS_SOCKET_STREAM] = @@ -277,7 +277,7 @@ static struct iocp_socket_vtable s_iocp_vtables[3][2] = { }, }; -static struct aws_socket_vtable s_socket_vtable = { +struct aws_socket_vtable g_winsock_vtable = { .socket_cleanup_fn = s_socket_clean_up, .socket_connect_fn = s_socket_connect, .socket_bind_fn = s_socket_bind, @@ -356,7 +356,7 @@ struct io_operation_data { }; struct iocp_socket { - struct iocp_socket_vtable *iocp_vtable; + struct winsock_vtable *winsock_vtable; struct io_operation_data *read_io_data; struct aws_socket *incoming_socket; uint8_t accept_buffer[SOCK_STORAGE_SIZE * 2]; @@ -415,10 +415,10 @@ static int s_socket_init( return AWS_OP_ERR; } - socket->vtable = &s_socket_vtable; + socket->vtable = &g_winsock_vtable; - impl->iocp_vtable = &s_iocp_vtables[options->domain][options->type]; - if (!impl->iocp_vtable || !impl->iocp_vtable->connection_success) { + impl->winsock_vtable = &s_winsock_vtables[options->domain][options->type]; + if (!impl->winsock_vtable || !impl->winsock_vtable->connection_success) { aws_mem_release(alloc, impl); socket->impl = NULL; return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); @@ -441,7 +441,6 @@ static int s_socket_init( socket->state = INIT; socket->impl = impl; socket->options = *options; - socket->event_loop_style = AWS_EVENT_LOOP_STYLE_COMPLETION_PORT_BASED; if (options->domain != AWS_SOCKET_LOCAL && create_underlying_socket) { if (s_create_socket(socket, options)) { @@ -454,8 +453,10 @@ static int s_socket_init( return AWS_OP_SUCCESS; } -#ifdef AWS_USE_IO_COMPLETION_PORTS -int aws_socket_init(struct aws_socket *socket, struct aws_allocator 
*alloc, const struct aws_socket_options *options) { +int aws_socket_init_winsock( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options) { AWS_ASSERT(options); aws_check_and_init_winsock(); @@ -464,7 +465,6 @@ int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, cons return err; } -#endif // AWS_USE_IO_COMPLETION_PORTS static void s_socket_clean_up(struct aws_socket *socket) { if (!socket->impl) { @@ -477,7 +477,7 @@ static void s_socket_clean_up(struct aws_socket *socket) { (void *)socket, (void *)socket->io_handle.data.handle); struct iocp_socket *socket_impl = socket->impl; - socket_impl->iocp_vtable->close(socket); + socket_impl->winsock_vtable->close(socket); if (socket_impl->incoming_socket) { aws_socket_clean_up(socket_impl->incoming_socket); @@ -520,7 +520,7 @@ static int s_socket_connect( return AWS_OP_ERR; } - return socket_impl->iocp_vtable->connect(socket, remote_endpoint, event_loop, on_connection_result, user_data); + return socket_impl->winsock_vtable->connect(socket, remote_endpoint, event_loop, on_connection_result, user_data); } static int s_socket_bind( @@ -540,23 +540,8 @@ static int s_socket_bind( } struct iocp_socket *socket_impl = socket->impl; - return socket_impl->iocp_vtable->bind(socket, local_endpoint); -} - -#ifdef AWS_USE_IO_COMPLETION_PORTS -int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { - if (socket->local_endpoint.address[0] == 0) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Socket has no local address. 
Socket must be bound first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - *out_address = socket->local_endpoint; - return AWS_OP_SUCCESS; + return socket_impl->winsock_vtable->bind(socket, local_endpoint); } -#endif // AWS_USE_IO_COMPLETION_PORTS /* Update IPV4 or IPV6 socket->local_endpoint based on the results of getsockname() */ static int s_update_local_endpoint_ipv4_ipv6(struct aws_socket *socket) { @@ -617,7 +602,7 @@ static int s_update_local_endpoint_ipv4_ipv6(struct aws_socket *socket) { static int s_socket_listen(struct aws_socket *socket, int backlog_size) { struct iocp_socket *socket_impl = socket->impl; - return socket_impl->iocp_vtable->listen(socket, backlog_size); + return socket_impl->winsock_vtable->listen(socket, backlog_size); } static int s_socket_start_accept( @@ -626,17 +611,17 @@ static int s_socket_start_accept( aws_socket_on_accept_result_fn *on_accept_result, void *user_data) { struct iocp_socket *socket_impl = socket->impl; - return socket_impl->iocp_vtable->start_accept(socket, accept_loop, on_accept_result, user_data); + return socket_impl->winsock_vtable->start_accept(socket, accept_loop, on_accept_result, user_data); } static int s_socket_stop_accept(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; - return socket_impl->iocp_vtable->stop_accept(socket); + return socket_impl->winsock_vtable->stop_accept(socket); } static int s_socket_close(struct aws_socket *socket) { struct iocp_socket *socket_impl = socket->impl; - return socket_impl->iocp_vtable->close(socket); + return socket_impl->winsock_vtable->close(socket); } static int s_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { @@ -678,7 +663,7 @@ static int s_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } - return socket_impl->iocp_vtable->read(socket, buffer, 
amount_read); + return socket_impl->winsock_vtable->read(socket, buffer, amount_read); } static int s_socket_subscribe_to_readable_events( @@ -698,7 +683,7 @@ static int s_socket_subscribe_to_readable_events( return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); } - return socket_impl->iocp_vtable->subscribe_to_read(socket, on_readable, user_data); + return socket_impl->winsock_vtable->subscribe_to_read(socket, on_readable, user_data); } static int s_determine_socket_error(int error) { @@ -808,7 +793,7 @@ static int s_ipv4_stream_connection_success(struct aws_socket *socket) { return AWS_OP_SUCCESS; error: socket->state = ERRORED; - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); return AWS_OP_ERR; } @@ -871,7 +856,7 @@ static int s_ipv6_stream_connection_success(struct aws_socket *socket) { error: socket->state = ERRORED; - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); return AWS_OP_ERR; } @@ -944,7 +929,7 @@ void s_socket_connection_completion( socket_args->socket = NULL; if (!status_code) { - socket_impl->iocp_vtable->connection_success(socket); + socket_impl->winsock_vtable->connection_success(socket); } else { AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, @@ -953,7 +938,7 @@ void s_socket_connection_completion( (void *)socket->io_handle.data.handle, status_code); int error = s_determine_socket_error(status_code); - socket_impl->iocp_vtable->connection_error(socket, error); + socket_impl->winsock_vtable->connection_error(socket, error); } } @@ -1248,7 +1233,7 @@ static void s_connection_success_task(struct aws_task *task, void *arg, enum aws struct aws_socket *socket = io_data->socket; struct iocp_socket *socket_impl = socket->impl; - socket_impl->iocp_vtable->connection_success(socket); + socket_impl->winsock_vtable->connection_success(socket); } /* initiate the client end of a 
named pipe. */ @@ -1736,7 +1721,7 @@ static void s_incoming_pipe_connection_event( socket->state = ERRORED; } - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); operation_data->in_use = false; return; } @@ -1754,7 +1739,7 @@ static void s_incoming_pipe_connection_event( if (!new_socket) { socket->state = ERRORED; operation_data->in_use = false; - socket_impl->iocp_vtable->connection_error(socket, AWS_ERROR_OOM); + socket_impl->winsock_vtable->connection_error(socket, AWS_ERROR_OOM); return; } @@ -1762,7 +1747,7 @@ static void s_incoming_pipe_connection_event( aws_mem_release(socket->allocator, new_socket); socket->state = ERRORED; operation_data->in_use = false; - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); return; } @@ -1794,7 +1779,7 @@ static void s_incoming_pipe_connection_event( (int)GetLastError()); socket->state = ERRORED; operation_data->in_use = false; - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); return; } @@ -1804,7 +1789,7 @@ static void s_incoming_pipe_connection_event( socket->state = ERRORED; operation_data->in_use = false; aws_socket_clean_up(new_socket); - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); return; } @@ -1835,7 +1820,7 @@ static void s_incoming_pipe_connection_event( socket->state = ERRORED; socket_impl->read_io_data->in_use = false; int aws_err = s_determine_socket_error(error_code); - socket_impl->iocp_vtable->connection_error(socket, aws_err); + socket_impl->winsock_vtable->connection_error(socket, aws_err); return; } else if (error_code == ERROR_PIPE_CONNECTED) { continue_accept_loop = true; @@ -2026,7 +2011,7 @@ static void s_tcp_accept_event( 
if (err) { if (aws_last_error() != AWS_IO_READ_WOULD_BLOCK) { socket->state = ERRORED; - socket_impl->iocp_vtable->connection_error(socket, aws_last_error()); + socket_impl->winsock_vtable->connection_error(socket, aws_last_error()); } return; } @@ -2041,7 +2026,7 @@ static void s_tcp_accept_event( socket->state = ERRORED; int aws_error = s_determine_socket_error(status_code); aws_raise_error(aws_error); - socket_impl->iocp_vtable->connection_error(socket, aws_error); + socket_impl->winsock_vtable->connection_error(socket, aws_error); operation_data->in_use = false; } } @@ -2610,7 +2595,7 @@ int aws_socket_half_close(struct aws_socket *socket, enum aws_channel_direction int error = WSAGetLastError(); int aws_error = s_determine_socket_error(error); aws_raise_error(aws_error); - socket_impl->iocp_vtable->connection_error(socket, aws_error); + socket_impl->winsock_vtable->connection_error(socket, aws_error); return AWS_OP_ERR; } @@ -2627,7 +2612,7 @@ static int s_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_e } socket->event_loop = event_loop; - return aws_event_loop_connect_handle_to_completion_port(event_loop, &socket->io_handle); + return aws_event_loop_connect_handle_to_io_completion_port(event_loop, &socket->io_handle); } struct read_cb_args { @@ -3350,17 +3335,6 @@ static struct aws_string *s_socket_get_server_name_fn(const struct aws_socket *s return NULL; } -#ifdef AWS_USE_IO_COMPLETION_PORTS -void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { - struct aws_uuid uuid; - AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); - char uuid_str[AWS_UUID_STR_LEN] = {0}; - struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); - AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "\\\\.\\pipe\\testsock" PRInSTR, AWS_BYTE_BUF_PRI(uuid_buf)); -} -#endif - bool 
aws_is_network_interface_name_valid(const char *interface_name) { (void)interface_name; AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "network_interface_names are not supported on Windows"); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c84722c5e..868812189 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -17,8 +17,8 @@ endmacro() add_test_case(io_library_init) add_test_case(io_library_init_cleanup_init_cleanup) -# Apple Dispatch Queue does not support pipe -if(NOT AWS_USE_DISPATCH_QUEUE) +# Dispatch Queue does not support pipe +if(NOT AWS_USE_APPLE_NETWORK_FRAMEWORK) add_pipe_test_case(pipe_open_close) add_pipe_test_case(pipe_read_write) add_pipe_test_case(pipe_read_write_large_buffer) @@ -39,7 +39,7 @@ add_test_case(event_loop_canceled_tasks_run_in_el_thread) if(USE_IO_COMPLETION_PORTS) add_test_case(event_loop_completion_events) -elseif(NOT AWS_USE_DISPATCH_QUEUE) # TODO: setup a test for dispatch queue once pipe is there. +elseif(NOT AWS_USE_APPLE_NETWORK_FRAMEWORK) # Dispatch Queue does not support pipe add_test_case(event_loop_subscribe_unsubscribe) add_test_case(event_loop_writable_event_on_subscribe) add_test_case(event_loop_no_readable_event_before_write) @@ -83,7 +83,7 @@ add_test_case(cleanup_in_write_cb_doesnt_explode) add_test_case(sock_write_cb_is_async) add_test_case(socket_validate_port) -if(NOT AWS_USE_DISPATCH_QUEUE) +if(NOT AWS_USE_APPLE_NETWORK_FRAMEWORK) # Apple Network Framework does not support bind+connect add_test_case(udp_bind_connect_communication) # The read/write will always run a different thread for Apple Network Framework diff --git a/tests/alpn_handler_test.c b/tests/alpn_handler_test.c index 5d83bad4e..fa6d88e27 100644 --- a/tests/alpn_handler_test.c +++ b/tests/alpn_handler_test.c @@ -5,6 +5,7 @@ #include #include +#include #include #include diff --git a/tests/byo_crypto_test.c b/tests/byo_crypto_test.c index 878889646..1414f8652 100644 --- a/tests/byo_crypto_test.c +++ b/tests/byo_crypto_test.c @@ -54,7 +54,11 
@@ static struct byo_crypto_common_tester c_tester; static int s_byo_crypto_common_tester_init(struct aws_allocator *allocator, struct byo_crypto_common_tester *tester) { AWS_ZERO_STRUCT(*tester); aws_io_library_init(allocator); - tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); + + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + }; + tester->el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; diff --git a/tests/channel_test.c b/tests/channel_test.c index e86d1c62b..95b1e5520 100644 --- a/tests/channel_test.c +++ b/tests/channel_test.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -684,7 +685,10 @@ static int s_test_channel_connect_some_hosts_timeout(struct aws_allocator *alloc .shutdown = false, }; - struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options); /* resolve our s3 test bucket and an EC2 host with an ACL that blackholes the connection */ const struct aws_string *addr1_ipv4 = NULL; diff --git a/tests/default_host_resolver_test.c b/tests/default_host_resolver_test.c index 2d0178a73..f47b346bf 100644 --- a/tests/default_host_resolver_test.c +++ b/tests/default_host_resolver_test.c @@ -96,7 +96,10 @@ static int s_test_default_with_ipv6_lookup_fn(struct aws_allocator *allocator, v aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct 
aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -189,7 +192,10 @@ static int s_test_default_host_resolver_ipv6_address_variations_fn(struct aws_al }; - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -263,7 +269,10 @@ static int s_test_default_with_ipv4_only_lookup_fn(struct aws_allocator *allocat aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -333,7 +342,10 @@ static int s_test_default_with_multiple_lookups_fn(struct aws_allocator *allocat aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -460,7 +472,10 @@ static int s_test_resolver_ttls_fn(struct aws_allocator *allocator, void *ctx) { s_set_time(0); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, .max_entries = 10, 
.system_clock_override_fn = s_clock_fn}; @@ -672,7 +687,10 @@ static int s_test_resolver_connect_failure_recording_fn(struct aws_allocator *al aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -864,7 +882,10 @@ static int s_test_resolver_ttl_refreshes_on_resolve_fn(struct aws_allocator *all aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -1044,7 +1065,10 @@ static int s_test_resolver_ipv4_address_lookup_fn(struct aws_allocator *allocato aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -1105,7 +1129,10 @@ static int s_test_resolver_purge_host_cache(struct aws_allocator *allocator, voi (void)ctx; aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ 
-1220,7 +1247,10 @@ static int s_test_resolver_purge_cache(struct aws_allocator *allocator, void *ct (void)ctx; aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -1369,7 +1399,10 @@ static int s_test_resolver_ipv6_address_lookup_fn(struct aws_allocator *allocato aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, @@ -1431,7 +1464,10 @@ static int s_test_resolver_low_frequency_starvation_fn(struct aws_allocator *all aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = el_group, diff --git a/tests/event_loop_test.c b/tests/event_loop_test.c index 8818eba0b..6fa75ef02 100644 --- a/tests/event_loop_test.c +++ b/tests/event_loop_test.c @@ -8,9 +8,9 @@ #include #include #include -#include - #include +#include +#include #include struct task_args { @@ -78,11 +78,11 @@ static int s_test_event_loop_xthread_scheduled_tasks_execute(struct aws_allocato ASSERT_TRUE(task_args.invoked); aws_mutex_unlock(&task_args.mutex); -// The dispatch queue will schedule tasks on thread pools, it is unpredicatable which 
thread we run the task on, -// therefore we do not validate the thread id for dispatch queue. -#ifndef AWS_USE_DISPATCH_QUEUE - ASSERT_FALSE(aws_thread_thread_id_equal(task_args.thread_id, aws_thread_current_thread_id())); -#endif + // The dispatch queue will schedule tasks on thread pools, it is unpredicatable which thread we run the task on, + // therefore we do not validate the thread id for dispatch queue. + if (aws_event_loop_get_default_type() != AWS_EVENT_LOOP_DISPATCH_QUEUE) { + ASSERT_FALSE(aws_thread_thread_id_equal(task_args.thread_id, aws_thread_current_thread_id())); + } /* Test "now" tasks */ task_args.invoked = false; @@ -154,11 +154,11 @@ static int s_test_event_loop_canceled_tasks_run_in_el_thread(struct aws_allocato &task1_args.condition_variable, &task1_args.mutex, s_task_ran_predicate, &task1_args)); ASSERT_TRUE(task1_args.invoked); ASSERT_TRUE(task1_args.was_in_thread); -// The dispatch queue will schedule tasks on thread pools, it is unpredicatable which thread we run the task on, -// therefore we do not validate the thread id for dispatch queue. -#ifndef AWS_USE_DISPATCH_QUEUE - ASSERT_FALSE(aws_thread_thread_id_equal(task1_args.thread_id, aws_thread_current_thread_id())); -#endif + // The dispatch queue will schedule tasks on thread pools, it is unpredicatable which thread we run the task on, + // therefore we do not validate the thread id for dispatch queue. 
+ if (aws_event_loop_get_default_type() != AWS_EVENT_LOOP_DISPATCH_QUEUE) { + ASSERT_FALSE(aws_thread_thread_id_equal(task1_args.thread_id, aws_thread_current_thread_id())); + } ASSERT_INT_EQUALS(AWS_TASK_STATUS_RUN_READY, task1_args.status); aws_mutex_unlock(&task1_args.mutex); @@ -172,11 +172,11 @@ static int s_test_event_loop_canceled_tasks_run_in_el_thread(struct aws_allocato aws_mutex_unlock(&task2_args.mutex); ASSERT_TRUE(task2_args.was_in_thread); -// The dispatch queue will schedule tasks on thread pools, it is unpredictable which thread we run the task on, -// therefore we do not validate the thread id for dispatch queue. -#ifndef AWS_USE_DISPATCH_QUEUE - ASSERT_TRUE(aws_thread_thread_id_equal(task2_args.thread_id, aws_thread_current_thread_id())); -#endif + // The dispatch queue will schedule tasks on thread pools, it is unpredicatable which thread we run the task on, + // therefore we do not validate the thread id for dispatch queue. + if (aws_event_loop_get_default_type() != AWS_EVENT_LOOP_DISPATCH_QUEUE) { + ASSERT_TRUE(aws_thread_thread_id_equal(task2_args.thread_id, aws_thread_current_thread_id())); + } ASSERT_INT_EQUALS(AWS_TASK_STATUS_CANCELED, task2_args.status); return AWS_OP_SUCCESS; @@ -184,7 +184,7 @@ static int s_test_event_loop_canceled_tasks_run_in_el_thread(struct aws_allocato AWS_TEST_CASE(event_loop_canceled_tasks_run_in_el_thread, s_test_event_loop_canceled_tasks_run_in_el_thread) -#if AWS_USE_IO_COMPLETION_PORTS +#if AWS_ENABLE_IO_COMPLETION_PORTS int aws_pipe_get_unique_name(char *dst, size_t dst_size); @@ -282,7 +282,7 @@ static int s_test_event_loop_completion_events(struct aws_allocator *allocator, ASSERT_SUCCESS(s_async_pipe_init(&read_handle, &write_handle)); /* Connect to event-loop */ - ASSERT_SUCCESS(aws_event_loop_connect_handle_to_completion_port(event_loop, &write_handle)); + ASSERT_SUCCESS(aws_event_loop_connect_handle_to_io_completion_port(event_loop, &write_handle)); /* Set up an async (overlapped) write that will result 
in s_on_overlapped_operation_complete() getting run * and filling out `completion_data` */ @@ -323,7 +323,7 @@ static int s_test_event_loop_completion_events(struct aws_allocator *allocator, AWS_TEST_CASE(event_loop_completion_events, s_test_event_loop_completion_events) -#else /* !AWS_USE_IO_COMPLETION_PORTS */ +#else /* !AWS_ENABLE_IO_COMPLETION_PORTS */ # include @@ -983,7 +983,7 @@ static int s_test_event_loop_readable_event_on_2nd_time_readable(struct aws_allo } AWS_TEST_CASE(event_loop_readable_event_on_2nd_time_readable, s_test_event_loop_readable_event_on_2nd_time_readable); -#endif /* AWS_USE_IO_COMPLETION_PORTS */ +#endif /* AWS_ENABLE_IO_COMPLETION_PORTS */ static int s_event_loop_test_stop_then_restart(struct aws_allocator *allocator, void *ctx) { (void)ctx; @@ -1053,7 +1053,10 @@ static int test_event_loop_group_setup_and_shutdown(struct aws_allocator *alloca (void)ctx; aws_io_library_init(allocator); - struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + }; + struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options); size_t cpu_count = aws_system_info_processor_count(); size_t el_count = aws_event_loop_group_get_loop_count(event_loop_group); @@ -1086,10 +1089,16 @@ static int test_numa_aware_event_loop_group_setup_and_shutdown(struct aws_alloca size_t cpus_for_group = aws_get_cpu_count_for_group(0); size_t el_count = 1; - /* pass UINT16_MAX here to check the boundary conditions on numa cpu detection. It should never create more threads - * than hw cpus available */ - struct aws_event_loop_group *event_loop_group = - aws_event_loop_group_new_default_pinned_to_cpu_group(allocator, UINT16_MAX, 0, NULL); + uint16_t cpu_group = 0; + struct aws_event_loop_group_options elg_options = { + /* + * pass UINT16_MAX here to check the boundary conditions on numa cpu detection. 
It should never create more + * threads than hw cpus available + */ + .loop_count = UINT16_MAX, + .cpu_group = &cpu_group, + }; + struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options); el_count = aws_event_loop_group_get_loop_count(event_loop_group); @@ -1166,8 +1175,12 @@ static int test_event_loop_group_setup_and_shutdown_async(struct aws_allocator * async_shutdown_options.shutdown_callback_user_data = &task_args; async_shutdown_options.shutdown_callback_fn = s_async_shutdown_complete_callback; - struct aws_event_loop_group *event_loop_group = - aws_event_loop_group_new_default(allocator, 0, &async_shutdown_options); + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + .shutdown_options = &async_shutdown_options, + }; + + struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(event_loop_group); diff --git a/tests/exponential_backoff_retry_test.c b/tests/exponential_backoff_retry_test.c index a3bf7bde0..779a4f50f 100644 --- a/tests/exponential_backoff_retry_test.c +++ b/tests/exponential_backoff_retry_test.c @@ -66,7 +66,10 @@ static int s_test_exponential_backoff_retry_too_many_retries_for_jitter_mode( aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = jitter_mode, @@ -157,7 +160,10 @@ static int s_test_exponential_backoff_retry_client_errors_do_not_count_fn(struct aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + 
.loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_exponential_backoff_retry_options config = { .el_group = el_group, .max_retries = 3, @@ -201,7 +207,10 @@ static int s_test_exponential_backoff_retry_no_jitter_time_taken_fn(struct aws_a aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, @@ -253,7 +262,10 @@ static int s_test_exponential_max_backoff_retry_no_jitter_fn(struct aws_allocato aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_exponential_backoff_retry_options config = { .max_retries = 3, .jitter_mode = AWS_EXPONENTIAL_BACKOFF_JITTER_NONE, @@ -310,7 +322,10 @@ static int s_test_exponential_backoff_retry_invalid_options_fn(struct aws_alloca aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_exponential_backoff_retry_options config = { .max_retries = 64, .el_group = el_group, diff --git a/tests/future_test.c b/tests/future_test.c index 1ac94b551..795d30bb5 100644 --- a/tests/future_test.c +++ b/tests/future_test.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "future_test.h" diff 
--git a/tests/pipe_test.c b/tests/pipe_test.c index 053c5aefd..f15f4da33 100644 --- a/tests/pipe_test.c +++ b/tests/pipe_test.c @@ -8,6 +8,7 @@ #include #include #include +#include #include enum pipe_loop_setup { diff --git a/tests/pkcs11_test.c b/tests/pkcs11_test.c index 792ed5fa4..4af9d0fb0 100644 --- a/tests/pkcs11_test.c +++ b/tests/pkcs11_test.c @@ -1653,8 +1653,10 @@ static int s_test_pkcs11_tls_negotiation_succeeds_common( ASSERT_SUCCESS(aws_mutex_init(&s_tls_tester.synced.mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&s_tls_tester.synced.cvar)); - struct aws_event_loop_group *event_loop_group = - aws_event_loop_group_new_default(allocator, 1, NULL /*shutdown_opts*/); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new(allocator, &elg_options); ASSERT_NOT_NULL(event_loop_group); struct aws_host_resolver_default_options resolver_opts = { diff --git a/tests/socket_handler_test.c b/tests/socket_handler_test.c index e5f38411e..9950ac102 100644 --- a/tests/socket_handler_test.c +++ b/tests/socket_handler_test.c @@ -4,6 +4,7 @@ */ #include #include +#include #include #include #include @@ -59,7 +60,10 @@ static int s_socket_common_tester_init(struct aws_allocator *allocator, struct s AWS_ZERO_STRUCT(*tester); aws_io_library_init(allocator); - tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + }; + tester->el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, @@ -1000,7 +1004,7 @@ static struct aws_event_loop *s_default_new_event_loop( void *user_data) { (void)user_data; - return aws_event_loop_new_default_with_options(allocator, options); + return aws_event_loop_new(allocator, options); } static int s_statistic_test_clock_fn(uint64_t *timestamp) { @@ -1016,8 
+1020,13 @@ static int s_socket_common_tester_statistics_init( aws_io_library_init(allocator); AWS_ZERO_STRUCT(*tester); - tester->el_group = - aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, s_default_new_event_loop, NULL, NULL); + + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + .clock_override = s_statistic_test_clock_fn, + }; + tester->el_group = aws_event_loop_group_new_internal(allocator, &elg_options, s_default_new_event_loop, NULL); + struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; tester->mutex = mutex; diff --git a/tests/socket_test.c b/tests/socket_test.c index 2d88c67f4..bdcb16685 100644 --- a/tests/socket_test.c +++ b/tests/socket_test.c @@ -12,6 +12,7 @@ #include #include +#include #include #ifdef _MSC_VER @@ -22,13 +23,6 @@ # include #endif -// DEBUG WIP -#ifdef AWS_USE_DISPATCH_QUEUE -static bool s_use_dispatch_queue = true; -#else -static bool s_use_dispatch_queue = false; -#endif - struct local_listener_args { struct aws_socket *incoming; struct aws_mutex *mutex; @@ -253,7 +247,7 @@ static int s_test_socket_ex( // The Apple Network Framework always require a "start listener/start connection" // for setup a server socket - if (options->type == AWS_SOCKET_STREAM || s_use_dispatch_queue) { + if (options->type == AWS_SOCKET_STREAM || aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE) { ASSERT_SUCCESS(aws_socket_listen(&listener, 1024)); ASSERT_SUCCESS(aws_socket_start_accept(&listener, event_loop, s_local_listener_incoming, &listener_args)); } @@ -269,7 +263,8 @@ static int s_test_socket_ex( ASSERT_SUCCESS( aws_socket_connect(&outgoing, endpoint, event_loop, s_local_outgoing_connection, NULL, &outgoing_args)); - if (listener.options.type == AWS_SOCKET_STREAM || s_use_dispatch_queue) { + if (listener.options.type == AWS_SOCKET_STREAM || + aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE) { 
ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); @@ -282,7 +277,7 @@ static int s_test_socket_ex( struct aws_socket *server_sock = &listener; - if (options->type == AWS_SOCKET_STREAM || s_use_dispatch_queue) { + if (options->type == AWS_SOCKET_STREAM || aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE) { ASSERT_TRUE(listener_args.incoming_invoked); ASSERT_FALSE(listener_args.error_invoked); server_sock = listener_args.incoming; @@ -494,7 +489,8 @@ static int s_test_socket_udp_dispatch_queue( ASSERT_SUCCESS(aws_mutex_unlock(&mutex)); ASSERT_INT_EQUALS(AWS_OP_SUCCESS, io_args.error_code); - if (listener.options.type == AWS_SOCKET_STREAM || s_use_dispatch_queue) { + if (listener.options.type == AWS_SOCKET_STREAM || + aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE) { ASSERT_SUCCESS(aws_mutex_lock(&mutex)); ASSERT_SUCCESS( aws_condition_variable_wait_pred(&condition_variable, &mutex, s_incoming_predicate, &listener_args)); @@ -594,7 +590,7 @@ static int s_test_socket( struct aws_socket_options *options, struct aws_socket_endpoint *endpoint) { - if (s_use_dispatch_queue && options->type == AWS_SOCKET_DGRAM) + if (aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE && options->type == AWS_SOCKET_DGRAM) return s_test_socket_udp_dispatch_queue(allocator, options, endpoint); else return s_test_socket_ex(allocator, options, NULL, endpoint); @@ -695,7 +691,7 @@ static int s_test_socket_with_bind_to_invalid_interface(struct aws_allocator *al options.domain = AWS_SOCKET_IPV4; strncpy(options.network_interface_name, "invalid", AWS_NETWORK_INTERFACE_NAME_MAX); struct aws_socket outgoing; -#if (defined(AWS_OS_APPLE) && defined(AWS_USE_KQUEUE)) || defined(AWS_OS_LINUX) +#if (defined(AWS_OS_APPLE) && !defined(AWS_USE_APPLE_NETWORK_FRAMEWORK)) || defined(AWS_OS_LINUX) ASSERT_ERROR(AWS_IO_SOCKET_INVALID_OPTIONS, 
aws_socket_init(&outgoing, allocator, &options)); #else ASSERT_ERROR(AWS_ERROR_PLATFORM_NOT_SUPPORTED, aws_socket_init(&outgoing, allocator, &options)); @@ -832,7 +828,10 @@ static int s_test_connect_timeout(struct aws_allocator *allocator, void *ctx) { aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); @@ -913,7 +912,10 @@ static int s_test_connect_timeout_cancelation(struct aws_allocator *allocator, v aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); @@ -1054,7 +1056,7 @@ static int s_test_outgoing_local_sock_errors(struct aws_allocator *allocator, vo int socket_connect_result = aws_socket_connect(&outgoing, &endpoint, event_loop, s_null_sock_connection, NULL, &args); // As Apple network framework has a async API design, we would not get the error back on connect - if (!s_use_dispatch_queue) { + if (aws_event_loop_get_default_type() != AWS_EVENT_LOOP_DISPATCH_QUEUE) { ASSERT_FAILS(socket_connect_result); ASSERT_TRUE( aws_last_error() == AWS_IO_SOCKET_CONNECTION_REFUSED || aws_last_error() == AWS_ERROR_FILE_INVALID_PATH); @@ -1268,7 +1270,7 @@ static int s_test_bind_on_zero_port( 
ASSERT_SUCCESS(aws_socket_get_bound_address(&incoming, &local_address1)); - if (s_use_dispatch_queue) { + if (aws_event_loop_get_default_type() == AWS_EVENT_LOOP_DISPATCH_QUEUE) { struct aws_mutex mutex = AWS_MUTEX_INIT; struct aws_condition_variable condition_variable = AWS_CONDITION_VARIABLE_INIT; @@ -1429,7 +1431,10 @@ static int s_cleanup_before_connect_or_timeout_doesnt_explode(struct aws_allocat aws_io_library_init(allocator); - struct aws_event_loop_group *el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + }; + struct aws_event_loop_group *el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_event_loop *event_loop = aws_event_loop_group_get_next_loop(el_group); ASSERT_NOT_NULL(event_loop, "Event loop creation failed with error: %s", aws_error_debug_str(aws_last_error())); diff --git a/tests/standard_retry_test.c b/tests/standard_retry_test.c index bb62de691..11991a3e0 100644 --- a/tests/standard_retry_test.c +++ b/tests/standard_retry_test.c @@ -8,6 +8,7 @@ #include #include +#include #include @@ -49,7 +50,12 @@ static int s_fixture_setup(struct aws_allocator *allocator, void *ctx) { .shutdown_callback_user_data = ctx, }; - test_data->el_group = aws_event_loop_group_new_default(allocator, 1, &shutdown_options); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + .shutdown_options = &shutdown_options, + }; + test_data->el_group = aws_event_loop_group_new(allocator, &elg_options); + ASSERT_NOT_NULL(test_data->el_group); struct aws_standard_retry_options retry_options = { .initial_bucket_capacity = 15, diff --git a/tests/tls_handler_test.c b/tests/tls_handler_test.c index be9f51a08..5ad701f44 100644 --- a/tests/tls_handler_test.c +++ b/tests/tls_handler_test.c @@ -10,6 +10,7 @@ # include # include # include +# include # include # include @@ -177,7 +178,10 @@ static int s_tls_common_tester_init(struct aws_allocator *allocator, 
struct tls_ aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); - tester->el_group = aws_event_loop_group_new_default(allocator, 0, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + }; + tester->el_group = aws_event_loop_group_new(allocator, &elg_options); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group, @@ -535,7 +539,11 @@ static int s_tls_channel_server_client_tester_init(struct aws_allocator *allocat AWS_ZERO_STRUCT(s_server_client_tester); ASSERT_SUCCESS(aws_mutex_init(&s_server_client_tester.server_mutex)); ASSERT_SUCCESS(aws_condition_variable_init(&s_server_client_tester.server_condition_variable)); - s_server_client_tester.client_el_group = aws_event_loop_group_new_default(allocator, 0, NULL); + + struct aws_event_loop_group_options elg_options = { + .loop_count = 0, + }; + s_server_client_tester.client_el_group = aws_event_loop_group_new(allocator, &elg_options); ASSERT_SUCCESS(s_tls_rw_args_init( &s_server_client_tester.server_rw_args, @@ -896,7 +904,7 @@ static int s_tls_channel_shutdown_with_cache_test_helper(struct aws_allocator *a ASSERT_SUCCESS(s_tls_channel_server_client_tester_cleanup()); // wait for socket ref count drop and released - aws_thread_current_sleep(1000000000); + aws_thread_current_sleep(3000000000); return AWS_OP_SUCCESS; } @@ -1889,7 +1897,7 @@ static struct aws_event_loop *s_default_new_event_loop( void *user_data) { (void)user_data; - return aws_event_loop_new_default_with_options(allocator, options); + return aws_event_loop_new(allocator, options); } static int s_statistic_test_clock_fn(uint64_t *timestamp) { @@ -1911,8 +1919,11 @@ static int s_tls_common_tester_statistics_init(struct aws_allocator *allocator, aws_atomic_store_int(&tester->current_time_ns, 0); aws_atomic_store_ptr(&tester->stats_handler, NULL); - tester->el_group = - aws_event_loop_group_new(allocator, s_statistic_test_clock_fn, 1, 
s_default_new_event_loop, NULL, NULL); + struct aws_event_loop_group_options elg_options = { + .loop_count = 1, + .clock_override = s_statistic_test_clock_fn, + }; + tester->el_group = aws_event_loop_group_new_internal(allocator, &elg_options, s_default_new_event_loop, NULL); struct aws_host_resolver_default_options resolver_options = { .el_group = tester->el_group,