diff --git a/libcudacxx/include/cuda/annotated_ptr b/libcudacxx/include/cuda/annotated_ptr index f5e04e56623..bd9f26ad591 100644 --- a/libcudacxx/include/cuda/annotated_ptr +++ b/libcudacxx/include/cuda/annotated_ptr @@ -3,128 +3,50 @@ * * NVIDIA SOFTWARE LICENSE * - * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the - * NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). * - * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. - * If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By - * taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of - * this license, and you take legal and financial responsibility for the actions of your permitted users. + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. * - * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, - * regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. * - * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install - * and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this - * license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under - * this license. + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. * * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: - * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, - * including (without limitation) terms relating to the license grant and license restrictions and protection of - * NVIDIA’s intellectual property rights. b. You agree to notify NVIDIA in writing of any known or suspected - * distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms - * of your agreements with respect to distributed SOFTWARE. + * a. 
The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. * * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. - * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from - * any portion of the SOFTWARE or copies of the SOFTWARE. c. You may not modify or create derivative works of any - * portion of the SOFTWARE. d. You may not bypass, disable, or circumvent any technical measure, encryption, - * security, digital rights management or authentication mechanism in the SOFTWARE. e. You may not use the SOFTWARE - * in any manner that would cause it to become subject to an open source software license. As examples, licenses that - * require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in - * source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. f. - * Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or - * application where the use or failure of the system or application can reasonably be expected to threaten or result in - * personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life - * support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these - * critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or - * damages arising from such uses. g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, - * and their respective employees, contractors, agents, officers and directors, from and against any and all claims, - * damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited - * to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use - * of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. 
Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. * - * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may - * not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, - * availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use - * a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in - * production or business-critical systems. + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. * - * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and - * exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United - * States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time - * without notice, but is not obligated to support or update the SOFTWARE. + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. * - * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal - * notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is - * a conflict between the terms in this license and the license terms associated with a component, the license terms - * associated with the components control only to the extent necessary to resolve the conflict. + * 6. COMPONENTS UNDER OTHER LICENSES. 
The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. * - * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, - * enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you - * voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable - * license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute - * (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA - * will use Feedback at its choice. + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. * - * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT - * NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT - * WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR - * ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. * - * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE - * FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, - * LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH - * THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON - * BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION - * OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE - * POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING - * OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE - * OR EXTEND THIS LIMIT. + * 9. LIMITATIONS OF LIABILITY. 
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. * - * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail - * to comply with any term and condition of this license or if you commence or participate in any legal proceeding - * against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if - * NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of - * it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of - * the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this - * license are not affected by the termination of this license. All provisions of this license will survive termination, - * except for the license granted to you. + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. * - * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State - * of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware - * residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the - * International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English - * language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction - * over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be - * allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * 11. APPLICABLE LAW. 
This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. * - * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or - * operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be - * void and of no effect. + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. * - * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, - * transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States - * Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s - * Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws - * include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not - * a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from - * receiving the SOFTWARE. + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. * - * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting - * of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. - * Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the - * restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the - * Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is - * NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * 14. GOVERNMENT USE. 
The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. * - * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the - * subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to - * this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of - * this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. - * This license may only be modified in a writing signed by an authorized representative of each party. + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. * * (v. 
August 20, 2021)
 */
@@ -149,96 +71,56 @@ _LIBCUDACXX_BEGIN_NAMESPACE_CUDA
-class access_property
-{
-private:
-  std::uint64_t __descriptor = 0;
-
-public:
-  struct shared
-  {};
-  struct global
-  {};
-  struct persisting
-  {
-    _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept
-    {
-      return cudaAccessProperty::cudaAccessPropertyPersisting;
-    }
-  };
-  struct streaming
-  {
-    _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept
-    {
-      return cudaAccessProperty::cudaAccessPropertyStreaming;
-    }
-  };
-  struct normal
-  {
-    _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept
-    {
-      return cudaAccessProperty::cudaAccessPropertyNormal;
-    }
-  };
-
-  _CCCL_HOST_DEVICE constexpr access_property(global) noexcept
-      : __descriptor(__detail_ap::__sm_80::__interleave_normal())
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property() noexcept
-      : __descriptor(__detail_ap::__sm_80::__interleave_normal())
-  {}
-  constexpr access_property(access_property const&) noexcept = default;
-  access_property& operator=(const access_property& other) noexcept = default;
-
-  _CCCL_HOST_DEVICE constexpr access_property(normal, float __fraction)
-      : __descriptor(__detail_ap::__interleave(normal{}, __fraction))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(streaming, float __fraction)
-      : __descriptor(__detail_ap::__interleave(streaming{}, __fraction))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(persisting, float __fraction)
-      : __descriptor(__detail_ap::__interleave(persisting{}, __fraction))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(normal, float __fraction, streaming)
-      : __descriptor(__detail_ap::__interleave(normal{}, __fraction, streaming{}))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(persisting, float __fraction, streaming)
-      : __descriptor(__detail_ap::__interleave(persisting{}, __fraction, streaming{}))
-  {}
-
-  _CCCL_HOST_DEVICE constexpr access_property(normal) noexcept
-      : access_property(normal{}, 1.0)
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(streaming) noexcept
-      : access_property(streaming{}, 1.0)
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(persisting) noexcept
-      : access_property(persisting{}, 1.0)
-  {}
-
-  _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal)
-      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{}))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, streaming)
-      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, streaming{}))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(
-    void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting)
-      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{}))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(
-    void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal, streaming)
-      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{}, streaming{}))
-  {}
-  _CCCL_HOST_DEVICE constexpr access_property(
-    void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting, streaming)
-      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{}, streaming{}))
-  {}
-
-  _CCCL_HOST_DEVICE constexpr explicit operator std::uint64_t() const noexcept
-  {
-    return __descriptor;
-  }
+class access_property {
+  private:
+    std::uint64_t __descriptor = 0;
+
+  public:
+    struct shared {};
+    struct global {};
+    struct persisting {
+      _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept {
+        return cudaAccessProperty::cudaAccessPropertyPersisting;
+      }
+    };
+    struct streaming {
+      _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept {
+        return cudaAccessProperty::cudaAccessPropertyStreaming;
+      }
+    };
+    struct normal {
+      _CCCL_HOST_DEVICE constexpr operator cudaAccessProperty() const noexcept {
+        return cudaAccessProperty::cudaAccessPropertyNormal;
+      }
+    };
+
+    _CCCL_HOST_DEVICE constexpr access_property(global) noexcept : __descriptor(__detail_ap::__sm_80::__interleave_normal()) {}
+    _CCCL_HOST_DEVICE constexpr access_property() noexcept : __descriptor(__detail_ap::__sm_80::__interleave_normal()) {}
+    constexpr access_property(access_property const&) noexcept = default;
+    access_property& operator=(const access_property& other) noexcept = default;
+
+    _CCCL_HOST_DEVICE constexpr access_property(normal, float __fraction) : __descriptor(__detail_ap::__interleave(normal{}, __fraction)) {}
+    _CCCL_HOST_DEVICE constexpr access_property(streaming, float __fraction) : __descriptor(__detail_ap::__interleave(streaming{}, __fraction)) {}
+    _CCCL_HOST_DEVICE constexpr access_property(persisting, float __fraction) : __descriptor(__detail_ap::__interleave(persisting{}, __fraction)) {}
+    _CCCL_HOST_DEVICE constexpr access_property(normal, float __fraction, streaming) : __descriptor(__detail_ap::__interleave(normal{}, __fraction, streaming{})) {}
+    _CCCL_HOST_DEVICE constexpr access_property(persisting, float __fraction, streaming) : __descriptor(__detail_ap::__interleave(persisting{}, __fraction, streaming{})) {}
+
+    _CCCL_HOST_DEVICE constexpr access_property(normal) noexcept : access_property(normal{}, 1.0) {}
+    _CCCL_HOST_DEVICE constexpr access_property(streaming) noexcept : access_property(streaming{}, 1.0) {}
+    _CCCL_HOST_DEVICE constexpr access_property(persisting) noexcept : access_property(persisting{}, 1.0) {}
+
+    _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal)
+      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{})) {}
+    _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, streaming)
+      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, streaming{})) {}
+    _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting)
+      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{})) {}
+    _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, normal, streaming)
+      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, normal{}, streaming{})) {}
+    _CCCL_HOST_DEVICE constexpr access_property(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, persisting, streaming)
+      : __descriptor(__detail_ap::__block(__ptr, __hit_bytes, __total_bytes, persisting{}, streaming{})) {}
+
+    _CCCL_HOST_DEVICE constexpr explicit operator std::uint64_t() const noexcept { return __descriptor; }
 };
 _LIBCUDACXX_END_NAMESPACE_CUDA
@@ -248,201 +130,195 @@ _LIBCUDACXX_END_NAMESPACE_CUDA
 _LIBCUDACXX_BEGIN_NAMESPACE_CUDA
 template <class _Tp, class _Property>
-_CCCL_HOST_DEVICE _Tp* associate_access_property(_Tp* __ptr, _Property __prop)
-{
+_CCCL_HOST_DEVICE
+_Tp* associate_access_property(_Tp* __ptr, _Property __prop) {
   static_assert(
-    std::is_same<_Property, access_property>::value || std::is_same<_Property, access_property::persisting>::value
-      || std::is_same<_Property, access_property::streaming>::value
-      || std::is_same<_Property, access_property::normal>::value
-      || std::is_same<_Property, access_property::global>::value
-      || std::is_same<_Property, access_property::shared>::value,
-    "property is not convertible to cuda::access_property");
+    std::is_same<_Property, access_property>::value ||
+    std::is_same<_Property, access_property::persisting>::value ||
+    std::is_same<_Property, access_property::streaming>::value ||
+    std::is_same<_Property, access_property::normal>::value ||
+    std::is_same<_Property, access_property::global>::value ||
+    std::is_same<_Property, access_property::shared>::value
+    , "property is not convertible to cuda::access_property");
   return __detail_ap::__associate(__ptr, __prop);
 }
 template <class _Shape>
-_CCCL_HOST_DEVICE void
-apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::persisting __prop) noexcept
-{
-  NV_IF_TARGET(
-    NV_PROVIDES_SM_80,
-    (if (!__isGlobal((void*) __ptr)) return;
-
-     char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
-     static constexpr std::size_t _LINE_SIZE = 128;
-     std::size_t __nbytes = static_cast<std::size_t>(__shape);
-     std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ? __nbytes + _LINE_SIZE : __nbytes;
-     __end /= _LINE_SIZE;
-
-     // Apply to all 128 bytes aligned cache lines inclusive of __p
-     for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) {
-       asm volatile("prefetch.global.L2::evict_last [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :);
-     }))
+_CCCL_HOST_DEVICE
+void apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::persisting __prop) noexcept {
+  NV_IF_TARGET(NV_PROVIDES_SM_80,(
+    if (!__isGlobal((void*)__ptr)) return;
+
+    char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
+    static constexpr std::size_t _LINE_SIZE = 128;
+    std::size_t __nbytes = static_cast<std::size_t>(__shape);
+    std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ? __nbytes + _LINE_SIZE : __nbytes;
+    __end /= _LINE_SIZE;
+
+    //Apply to all 128 bytes aligned cache lines inclusive of __p
+    for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) {
+      asm volatile ("prefetch.global.L2::evict_last [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :);
+    }
+  ))
 }
 template <class _Shape>
-_CCCL_HOST_DEVICE void
-apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::normal __prop) noexcept
-{
-  NV_IF_TARGET(
-    NV_PROVIDES_SM_80,
-    (if (!__isGlobal((void*) __ptr)) return;
-
-     char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
-     static constexpr std::size_t _LINE_SIZE = 128;
-     std::size_t __nbytes = static_cast<std::size_t>(__shape);
-     std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ? __nbytes + _LINE_SIZE : __nbytes;
-     __end /= _LINE_SIZE;
-
-     // Apply to all 128 bytes aligned cache lines inclusive of __p
-     for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) {
-       asm volatile("prefetch.global.L2::evict_normal [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :);
-     }))
+_CCCL_HOST_DEVICE
+void apply_access_property(const volatile void* __ptr, const _Shape __shape, access_property::normal __prop) noexcept {
+  NV_IF_TARGET(NV_PROVIDES_SM_80,(
+    if (!__isGlobal((void*)__ptr)) return;
+
+    char* __p = reinterpret_cast<char*>(const_cast<void*>(__ptr));
+    static constexpr std::size_t _LINE_SIZE = 128;
+    std::size_t __nbytes = static_cast<std::size_t>(__shape);
+    std::size_t __end = ((std::uintptr_t)(__p + __nbytes) % _LINE_SIZE) ?
__nbytes + _LINE_SIZE : __nbytes; + __end /= _LINE_SIZE; + + //Apply to all 128 bytes aligned cache lines inclusive of __p + for (std::size_t __i = 0; __i < __end; __i += _LINE_SIZE) { + asm volatile ("prefetch.global.L2::evict_normal [%0];" ::"l"(__p + (__i * _LINE_SIZE)) :); + } + )) } -template -class annotated_ptr : public __detail_ap::__annotated_ptr_base<_Property> -{ -public: - using value_type = _Tp; - using size_type = std::size_t; - using reference = value_type&; - using pointer = value_type*; - using const_pointer = value_type const*; - using difference_type = std::ptrdiff_t; - -private: - using __self = annotated_ptr<_Tp, _Property>; - - // Converting from a 64-bit to 32-bit shared pointer and maybe back just for storage might or might not be profitable. - pointer __repr = (pointer) ((size_type) nullptr); - - _CCCL_HOST_DEVICE pointer __get(bool __skip_prop = false, difference_type __n = 0) const - { - NV_IF_TARGET(NV_IS_DEVICE, (if (!__skip_prop) { - return static_cast( - this->__apply_prop(const_cast(static_cast(__repr + __n)))); - })) - return __repr + __n; - } - _CCCL_HOST_DEVICE pointer __offset(difference_type __n, bool __skip_prop = false) const - { - return __get(__skip_prop, __n); - } - -public: - _CCCL_HOST_DEVICE pointer operator->() const - { - return __get(); - } - - _CCCL_HOST_DEVICE reference operator*() const - { - return *__get(); - } - - _CCCL_HOST_DEVICE reference operator[](difference_type __n) const - { - return *__offset(__n); - } - - _CCCL_HOST_DEVICE constexpr difference_type operator-(annotated_ptr o) const - { - return __repr - o.__repr; - } - - constexpr annotated_ptr() noexcept = default; - constexpr annotated_ptr(annotated_ptr const&) noexcept = default; - // No constexpr for c11 as the method can't be const - _CCCL_CONSTEXPR_CXX14 annotated_ptr& operator=(annotated_ptr const& other) noexcept = default; - - _CCCL_HOST_DEVICE explicit annotated_ptr(pointer __p) +template +class annotated_ptr: public __detail_ap::__annotated_ptr_base<_Property> { + public: + using value_type = _Tp; + using size_type = std::size_t; + using reference = value_type&; + using pointer = value_type*; + using const_pointer = value_type const*; + using difference_type = std::ptrdiff_t; + + private: + using __self = annotated_ptr<_Tp, _Property>; + + // Converting from a 64-bit to 32-bit shared pointer and maybe back just for storage might or might not be profitable. 
+ pointer __repr = (pointer)((size_type)nullptr); + + _CCCL_HOST_DEVICE pointer __get(bool __skip_prop = false, difference_type __n = 0) const { + NV_IF_TARGET(NV_IS_DEVICE,( + if (!__skip_prop) { + return static_cast(this->__apply_prop(const_cast(static_cast(__repr + __n)))); + } + )) + return __repr + __n; + } + _CCCL_HOST_DEVICE pointer __offset(difference_type __n, bool __skip_prop = false) const { + return __get(__skip_prop, __n); + } + + public: + _CCCL_HOST_DEVICE pointer operator->() const { + return __get(); + } + + _CCCL_HOST_DEVICE reference operator*() const { + return *__get(); + } + + _CCCL_HOST_DEVICE reference operator[](difference_type __n) const { + return *__offset(__n); + } + + _CCCL_HOST_DEVICE constexpr difference_type operator-(annotated_ptr o) const { + return __repr - o.__repr; + } + + constexpr annotated_ptr() noexcept = default; + constexpr annotated_ptr(annotated_ptr const&) noexcept = default; + // No constexpr for c11 as the method can't be const + _CCCL_CONSTEXPR_CXX14 annotated_ptr& operator=(annotated_ptr const& other) noexcept = default; + + _CCCL_HOST_DEVICE explicit annotated_ptr(pointer __p) : __repr(__p) - { - NV_IF_TARGET( - NV_IS_DEVICE, - (_LIBCUDACXX_DEBUG_ASSERT((std::is_same<_Property, shared>::value && __isShared(__p) || __isGlobal(__p)), "");)) - } - - template - _CCCL_HOST_DEVICE annotated_ptr(pointer __p, _RuntimeProperty __prop) - : __detail_ap::__annotated_ptr_base<_Property>(static_cast(access_property(__prop))) - , __repr(__p) - { - static_assert(std::is_same<_Property, access_property>::value, - "This method requires annotated_ptr"); - static_assert( - std::is_same<_RuntimeProperty, access_property::global>::value - || std::is_same<_RuntimeProperty, access_property::normal>::value - || std::is_same<_RuntimeProperty, access_property::streaming>::value - || std::is_same<_RuntimeProperty, access_property::persisting>::value - || std::is_same<_RuntimeProperty, access_property>::value, - "This method requires RuntimeProperty=global|normal|streaming|persisting|access_property"); - NV_IF_TARGET(NV_IS_DEVICE, (_LIBCUDACXX_DEBUG_ASSERT((__isGlobal(__p) == true), "");)) - } - - template - _CCCL_HOST_DEVICE annotated_ptr(const annotated_ptr<_TTp, _Prop>& __other); - - _CCCL_HOST_DEVICE constexpr explicit operator bool() const noexcept - { - return __repr != nullptr; - } - - _CCCL_HOST_DEVICE pointer get() const noexcept - { - constexpr bool __is_shared = std::is_same<_Property, access_property::shared>::value; - return __is_shared ? 
__repr : &(*annotated_ptr(__repr)); - } - - _CCCL_HOST_DEVICE _Property __property() const noexcept - { - return this->__get_property(); - } + { + NV_IF_TARGET(NV_IS_DEVICE,( + _LIBCUDACXX_DEBUG_ASSERT((std::is_same<_Property, shared>::value && __isShared(__p) || __isGlobal(__p)), ""); + )) + } + + template + _CCCL_HOST_DEVICE annotated_ptr(pointer __p, _RuntimeProperty __prop) + : __detail_ap::__annotated_ptr_base<_Property>(static_cast(access_property(__prop))), __repr(__p) + { + static_assert(std::is_same<_Property, access_property>::value, + "This method requires annotated_ptr"); + static_assert(std::is_same<_RuntimeProperty, access_property::global>::value || + std::is_same<_RuntimeProperty, access_property::normal>::value || + std::is_same<_RuntimeProperty, access_property::streaming>::value || + std::is_same<_RuntimeProperty, access_property::persisting>::value || + std::is_same<_RuntimeProperty, access_property>::value, + "This method requires RuntimeProperty=global|normal|streaming|persisting|access_property"); + NV_IF_TARGET(NV_IS_DEVICE,( + _LIBCUDACXX_DEBUG_ASSERT((__isGlobal(__p) == true), ""); + )) + } + + template + _CCCL_HOST_DEVICE annotated_ptr(const annotated_ptr<_TTp,_Prop>& __other); + + _CCCL_HOST_DEVICE constexpr explicit operator bool() const noexcept { + return __repr != nullptr; + } + + _CCCL_HOST_DEVICE pointer get() const noexcept { + constexpr bool __is_shared = std::is_same<_Property, access_property::shared>::value; + return __is_shared ? __repr : &(*annotated_ptr(__repr)); + } + + _CCCL_HOST_DEVICE _Property __property() const noexcept { + return this->__get_property(); + } }; -template -template -_CCCL_HOST_DEVICE annotated_ptr<_Tp, _Property>::annotated_ptr(const annotated_ptr<_TTp, _Prop>& __other) - : __detail_ap::__annotated_ptr_base<_Property>(__other.__property()) - , __repr(__other.get()) + +template +template +_CCCL_HOST_DEVICE annotated_ptr<_Tp, _Property>::annotated_ptr(const annotated_ptr<_TTp,_Prop>& __other) + : __detail_ap::__annotated_ptr_base<_Property>(__other.__property()), __repr(__other.get()) { static_assert(std::is_assignable::value, "pointer must be assignable from other pointer"); - static_assert( - (std::is_same<_Property, access_property>::value && !std::is_same<_Prop, access_property::shared>::value) - || std::is_same<_Property, _Prop>::value, - "Property must be either access_property or other property, and both properties must have same address space"); + static_assert((std::is_same<_Property, access_property>::value && !std::is_same<_Prop, access_property::shared>::value) || + std::is_same<_Property, _Prop>::value, "Property must be either access_property or other property, and both properties must have same address space"); // note: precondition "__other.__rep must be compatible with _Property" currently always holds } -template -_CCCL_HOST_DEVICE void memcpy_async(_Dst* __dst, annotated_ptr<_Src, _SrcProperty> __src, _Shape __shape, _Sync& __sync) -{ +template +_CCCL_HOST_DEVICE +void memcpy_async(_Dst* __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { memcpy_async(__dst, &(*__src), __shape, __sync); } -template -_CCCL_HOST_DEVICE void memcpy_async( - annotated_ptr<_Dst, _DstProperty> __dst, annotated_ptr<_Src, _SrcProperty> __src, _Shape __shape, _Sync& __sync) -{ +template +_CCCL_HOST_DEVICE +void memcpy_async(annotated_ptr<_Dst,_DstProperty> __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync){ memcpy_async(&(*__dst), &(*__src), __shape, __sync); } -template 
-_CCCL_HOST_DEVICE void -memcpy_async(const _Group& __group, _Dst* __dst, annotated_ptr<_Src, _SrcProperty> __src, _Shape __shape, _Sync& __sync) -{ +template +_CCCL_HOST_DEVICE +void memcpy_async(const _Group & __group, + _Dst * __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { memcpy_async(__group, __dst, &(*__src), __shape, __sync); } -template -_CCCL_HOST_DEVICE void memcpy_async( - const _Group& __group, - annotated_ptr<_Dst, _DstProperty> __dst, - annotated_ptr<_Src, _SrcProperty> __src, - _Shape __shape, - _Sync& __sync) -{ +template +_CCCL_HOST_DEVICE +void memcpy_async(const _Group & __group, + annotated_ptr<_Dst,_DstProperty> __dst, + annotated_ptr<_Src,_SrcProperty> __src, + _Shape __shape, _Sync & __sync) { memcpy_async(__group, &(*__dst), &(*__src), __shape, __sync); } diff --git a/libcudacxx/include/cuda/barrier b/libcudacxx/include/cuda/barrier index 99117dde90b..e19684cfece 100644 --- a/libcudacxx/include/cuda/barrier +++ b/libcudacxx/include/cuda/barrier @@ -21,8 +21,8 @@ # pragma system_header #endif // no system header -#include #include +#include // Forward-declare CUtensorMap for use in cp_async_bulk_tensor_* PTX wrapping // functions. These functions take a pointer to CUtensorMap, so do not need to @@ -54,185 +54,175 @@ _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL #ifdef __cccl_lib_experimental_ctk12_cp_async_exposure // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk -inline _CCCL_DEVICE void cp_async_bulk_global_to_shared( - void* __dest, const void* __src, _CUDA_VSTD::uint32_t __size, ::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_global_to_shared(void *__dest, const void *__src, _CUDA_VSTD::uint32_t __size, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16."); - _LIBCUDACXX_DEBUG_ASSERT(__isShared(__dest), "Destination must be shared memory address."); - _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__src), "Source must be global memory address."); - - _CUDA_VPTX::cp_async_bulk( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __src, - __size, - ::cuda::device::barrier_native_handle(__bar)); + _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16."); + _LIBCUDACXX_DEBUG_ASSERT(__isShared(__dest), "Destination must be shared memory address."); + _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__src), "Source must be global memory address."); + + _CUDA_VPTX::cp_async_bulk( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __src, __size, + ::cuda::device::barrier_native_handle(__bar)); } + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk -inline _CCCL_DEVICE void cp_async_bulk_shared_to_global(void* __dest, const void* __src, _CUDA_VSTD::uint32_t __size) +inline _CCCL_DEVICE +void cp_async_bulk_shared_to_global(void *__dest, const void * __src, _CUDA_VSTD::uint32_t __size) { - _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16."); - _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__dest), "Destination must be global memory address."); - _LIBCUDACXX_DEBUG_ASSERT(__isShared(__src), "Source must be shared memory address."); + _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16."); + _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__dest), "Destination must be global memory address."); + 
_LIBCUDACXX_DEBUG_ASSERT(__isShared(__src), "Source must be shared memory address."); - _CUDA_VPTX::cp_async_bulk(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __dest, __src, __size); + _CUDA_VPTX::cp_async_bulk( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __dest, __src, __size); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_1d_global_to_shared( - void* __dest, const CUtensorMap* __tensor_map, int __c0, ::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_1d_global_to_shared( + void *__dest, const CUtensorMap *__tensor_map , int __c0, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - const _CUDA_VSTD::int32_t __coords[]{__c0}; - - _CUDA_VPTX::cp_async_bulk_tensor( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __tensor_map, - __coords, - ::cuda::device::barrier_native_handle(__bar)); + const _CUDA_VSTD::int32_t __coords[]{__c0}; + + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __tensor_map, __coords, + ::cuda::device::barrier_native_handle(__bar)); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_2d_global_to_shared( - void* __dest, const CUtensorMap* __tensor_map, int __c0, int __c1, ::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_2d_global_to_shared( + void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1}; - - _CUDA_VPTX::cp_async_bulk_tensor( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __tensor_map, - __coords, - ::cuda::device::barrier_native_handle(__bar)); + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1}; + + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __tensor_map, __coords, + ::cuda::device::barrier_native_handle(__bar)); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_3d_global_to_shared( - void* __dest, - const CUtensorMap* __tensor_map, - int __c0, - int __c1, - int __c2, - ::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_3d_global_to_shared( + void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2}; - - _CUDA_VPTX::cp_async_bulk_tensor( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __tensor_map, - __coords, - ::cuda::device::barrier_native_handle(__bar)); + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2}; + + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __tensor_map, __coords, + ::cuda::device::barrier_native_handle(__bar)); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_4d_global_to_shared( - void* __dest, - const CUtensorMap* __tensor_map, - int __c0, - int __c1, - int __c2, - int __c3, - 
::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_4d_global_to_shared( + void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, int __c2, int __c3, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3}; - - _CUDA_VPTX::cp_async_bulk_tensor( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __tensor_map, - __coords, - ::cuda::device::barrier_native_handle(__bar)); + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3}; + + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __tensor_map, __coords, + ::cuda::device::barrier_native_handle(__bar)); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_5d_global_to_shared( - void* __dest, - const CUtensorMap* __tensor_map, - int __c0, - int __c1, - int __c2, - int __c3, - int __c4, - ::cuda::barrier<::cuda::thread_scope_block>& __bar) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_5d_global_to_shared( + void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, int __c2, int __c3, int __c4, ::cuda::barrier<::cuda::thread_scope_block> &__bar) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3, __c4}; - - _CUDA_VPTX::cp_async_bulk_tensor( - _CUDA_VPTX::space_cluster, - _CUDA_VPTX::space_global, - __dest, - __tensor_map, - __coords, - ::cuda::device::barrier_native_handle(__bar)); + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3, __c4}; + + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_cluster, _CUDA_VPTX::space_global, + __dest, __tensor_map, __coords, + ::cuda::device::barrier_native_handle(__bar)); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void -cp_async_bulk_tensor_1d_shared_to_global(const CUtensorMap* __tensor_map, int __c0, const void* __src) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_1d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, const void *__src) { - const _CUDA_VSTD::int32_t __coords[]{__c0}; + const _CUDA_VSTD::int32_t __coords[]{__c0}; - _CUDA_VPTX::cp_async_bulk_tensor(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __tensor_map, __coords, __src); + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __tensor_map, __coords, __src); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void -cp_async_bulk_tensor_2d_shared_to_global(const CUtensorMap* __tensor_map, int __c0, int __c1, const void* __src) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_2d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, const void *__src) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1}; + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1}; - _CUDA_VPTX::cp_async_bulk_tensor(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __tensor_map, __coords, __src); + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __tensor_map, __coords, __src); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_3d_shared_to_global( - const 
CUtensorMap* __tensor_map, int __c0, int __c1, int __c2, const void* __src) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_3d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, const void *__src) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2}; + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2}; - _CUDA_VPTX::cp_async_bulk_tensor(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __tensor_map, __coords, __src); + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __tensor_map, __coords, __src); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_4d_shared_to_global( - const CUtensorMap* __tensor_map, int __c0, int __c1, int __c2, int __c3, const void* __src) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_4d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, const void *__src) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3}; + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3}; - _CUDA_VPTX::cp_async_bulk_tensor(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __tensor_map, __coords, __src); + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __tensor_map, __coords, __src); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor -inline _CCCL_DEVICE void cp_async_bulk_tensor_5d_shared_to_global( - const CUtensorMap* __tensor_map, int __c0, int __c1, int __c2, int __c3, int __c4, const void* __src) +inline _CCCL_DEVICE +void cp_async_bulk_tensor_5d_shared_to_global( + const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, int __c4, const void *__src) { - const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3, __c4}; + const _CUDA_VSTD::int32_t __coords[]{__c0, __c1, __c2, __c3, __c4}; - _CUDA_VPTX::cp_async_bulk_tensor(_CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, __tensor_map, __coords, __src); + _CUDA_VPTX::cp_async_bulk_tensor( + _CUDA_VPTX::space_global, _CUDA_VPTX::space_shared, + __tensor_map, __coords, __src); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-membar -inline _CCCL_DEVICE void fence_proxy_async_shared_cta() -{ - _CUDA_VPTX::fence_proxy_async(_CUDA_VPTX::space_shared); +inline _CCCL_DEVICE +void fence_proxy_async_shared_cta() { + _CUDA_VPTX::fence_proxy_async(_CUDA_VPTX::space_shared); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-commit-group -inline _CCCL_DEVICE void cp_async_bulk_commit_group() +inline _CCCL_DEVICE +void cp_async_bulk_commit_group() { - _CUDA_VPTX::cp_async_bulk_commit_group(); + _CUDA_VPTX::cp_async_bulk_commit_group(); } // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-wait-group template -inline _CCCL_DEVICE void cp_async_bulk_wait_group_read() +inline _CCCL_DEVICE +void cp_async_bulk_wait_group_read() { static_assert(__n_prior <= 63, "cp_async_bulk_wait_group_read: waiting for more than 63 groups is not supported."); _CUDA_VPTX::cp_async_bulk_wait_group_read(_CUDA_VPTX::n32_t<__n_prior>{}); diff --git a/libcudacxx/include/cuda/discard_memory b/libcudacxx/include/cuda/discard_memory index 
d6c772d57a2..5893bf6108e 100644 --- a/libcudacxx/include/cuda/discard_memory +++ b/libcudacxx/include/cuda/discard_memory @@ -21,8 +21,8 @@ # pragma system_header #endif // no system header -#include #include +#include _LIBCUDACXX_BEGIN_NAMESPACE_CUDA @@ -37,14 +37,14 @@ inline _CCCL_HOST_DEVICE void discard_memory(volatile void* __ptr, size_t __nbyt NV_PROVIDES_SM_80, (if (!__isGlobal((void*) __ptr)) return; - char* __p = reinterpret_cast(const_cast(__ptr)); - char* const __end_p = __p + __nbytes; + char* __p = reinterpret_cast(const_cast(__ptr)); + char* const __end_p = __p + __nbytes; static constexpr size_t _LINE_SIZE = 128; // Trim the first block and last block if they're not 128 bytes aligned - size_t __misalignment = reinterpret_cast(__p) % _LINE_SIZE; - char* __start_aligned = __misalignment == 0 ? __p : __p + (_LINE_SIZE - __misalignment); - char* const __end_aligned = __end_p - (reinterpret_cast(__end_p) % _LINE_SIZE); + size_t __misalignment = reinterpret_cast(__p) % _LINE_SIZE; + char* __start_aligned = __misalignment == 0 ? __p : __p + (_LINE_SIZE - __misalignment); + char* const __end_aligned = __end_p - (reinterpret_cast(__end_p) % _LINE_SIZE); while (__start_aligned < __end_aligned) { asm volatile("discard.global.L2 [%0], 128;" ::"l"(__start_aligned) :); diff --git a/libcudacxx/include/cuda/functional b/libcudacxx/include/cuda/functional index f8aaef4f0a9..955631e23a5 100644 --- a/libcudacxx/include/cuda/functional +++ b/libcudacxx/include/cuda/functional @@ -4,128 +4,50 @@ * * NVIDIA SOFTWARE LICENSE * - * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the - * NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). * - * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. - * If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By - * taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of - * this license, and you take legal and financial responsibility for the actions of your permitted users. + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. * - * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, - * regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. * - * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install - * and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this - * license. 
NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under - * this license. + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. * * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: - * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, - * including (without limitation) terms relating to the license grant and license restrictions and protection of - * NVIDIA’s intellectual property rights. b. You agree to notify NVIDIA in writing of any known or suspected - * distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms - * of your agreements with respect to distributed SOFTWARE. + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. * * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. - * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from - * any portion of the SOFTWARE or copies of the SOFTWARE. c. You may not modify or create derivative works of any - * portion of the SOFTWARE. d. You may not bypass, disable, or circumvent any technical measure, encryption, - * security, digital rights management or authentication mechanism in the SOFTWARE. e. You may not use the SOFTWARE - * in any manner that would cause it to become subject to an open source software license. As examples, licenses that - * require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in - * source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. f. - * Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or - * application where the use or failure of the system or application can reasonably be expected to threaten or result in - * personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life - * support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these - * critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or - * damages arising from such uses. g. 
You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, - * and their respective employees, contractors, agents, officers and directors, from and against any and all claims, - * damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited - * to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use - * of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. - * - * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may - * not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, - * availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use - * a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in - * production or business-critical systems. - * - * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and - * exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United - * States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time - * without notice, but is not obligated to support or update the SOFTWARE. - * - * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal - * notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is - * a conflict between the terms in this license and the license terms associated with a component, the license terms - * associated with the components control only to the extent necessary to resolve the conflict. - * - * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, - * enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you - * voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable - * license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute - * (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA - * will use Feedback at its choice. - * - * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT - * NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT - * WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR - * ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. - * - * 9. LIMITATIONS OF LIABILITY. 
TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE - * FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, - * LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH - * THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON - * BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION - * OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE - * POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING - * OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE - * OR EXTEND THIS LIMIT. - * - * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail - * to comply with any term and condition of this license or if you commence or participate in any legal proceeding - * against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if - * NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of - * it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of - * the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this - * license are not affected by the termination of this license. All provisions of this license will survive termination, - * except for the license granted to you. - * - * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State - * of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware - * residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the - * International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English - * language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction - * over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be - * allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. - * - * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or - * operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be - * void and of no effect. - * - * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, - * transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States - * Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s - * Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws - * include restrictions on destinations, end users and end use. 
By accepting this license, you confirm that you are not - * a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from - * receiving the SOFTWARE. - * - * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting - * of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. - * Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the - * restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the - * Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is - * NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. - * - * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the - * subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to - * this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of - * this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. - * This license may only be modified in a writing signed by an authorized representative of each party. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * + * 4. PRE-RELEASE. 
SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + * + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + * + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + * + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + * + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + * + * 10. TERMINATION. 
Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + * + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + * + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + * + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. 
If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. * * (v. August 20, 2021) */ @@ -143,8 +65,8 @@ # pragma system_header #endif // no system header -#include #include +#include #include _LIBCUDACXX_BEGIN_NAMESPACE_CUDA @@ -152,72 +74,90 @@ namespace __detail { template -class __return_type_wrapper -{ -private: +class __return_type_wrapper { + private: _DecayFn __fn_; -public: + public: __return_type_wrapper() = delete; template , _DecayFn>::value>> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit __return_type_wrapper(_Fn&& __fn) noexcept - : __fn_(_CUDA_VSTD::forward<_Fn>(__fn)) - {} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + explicit __return_type_wrapper(_Fn &&__fn) noexcept + : __fn_(_CUDA_VSTD::forward<_Fn>(__fn)) {} template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Ret operator()(_As&&... __as) & noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + _Ret operator()(_As&&... __as) & noexcept { #if !defined(__NVCC__) || defined(__CUDA_ARCH__) - static_assert(_CUDA_VSTD::is_same<_Ret, typename _CUDA_VSTD::__invoke_of<_DecayFn&, _As...>::type>::value, - "Return type shall match the proclaimed one exactly"); + static_assert( + _CUDA_VSTD::is_same< + _Ret, + typename _CUDA_VSTD::__invoke_of<_DecayFn&, _As...>::type + >::value, + "Return type shall match the proclaimed one exactly"); #endif return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Ret operator()(_As&&... __as) && noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + _Ret operator()(_As&&... __as) && noexcept { #if !defined(__NVCC__) || defined(__CUDA_ARCH__) - static_assert(_CUDA_VSTD::is_same<_Ret, typename _CUDA_VSTD::__invoke_of<_DecayFn, _As...>::type>::value, - "Return type shall match the proclaimed one exactly"); + static_assert( + _CUDA_VSTD::is_same< + _Ret, + typename _CUDA_VSTD::__invoke_of<_DecayFn, _As...>::type + >::value, + "Return type shall match the proclaimed one exactly"); #endif - return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_), _CUDA_VSTD::forward<_As>(__as)...); + return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_), + _CUDA_VSTD::forward<_As>(__as)...); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Ret operator()(_As&&... __as) const& noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + _Ret operator()(_As&&... __as) const& noexcept { #if !defined(__NVCC__) || defined(__CUDA_ARCH__) - static_assert(_CUDA_VSTD::is_same<_Ret, typename _CUDA_VSTD::__invoke_of::type>::value, - "Return type shall match the proclaimed one exactly"); + static_assert( + _CUDA_VSTD::is_same< + _Ret, + typename _CUDA_VSTD::__invoke_of::type + >::value, + "Return type shall match the proclaimed one exactly"); #endif return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Ret operator()(_As&&... __as) const&& noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + _Ret operator()(_As&&... 
__as) const&& noexcept { #if !defined(__NVCC__) || defined(__CUDA_ARCH__) - static_assert(_CUDA_VSTD::is_same<_Ret, typename _CUDA_VSTD::__invoke_of::type>::value, - "Return type shall match the proclaimed one exactly"); + static_assert( + _CUDA_VSTD::is_same< + _Ret, + typename _CUDA_VSTD::__invoke_of::type + >::value, + "Return type shall match the proclaimed one exactly"); #endif - return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_), _CUDA_VSTD::forward<_As>(__as)...); + return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_), + _CUDA_VSTD::forward<_As>(__as)...); } }; -} // namespace __detail +} // __detail template -inline _LIBCUDACXX_INLINE_VISIBILITY __detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>> -proclaim_return_type(_Fn&& __fn) noexcept -{ - return __detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>(_CUDA_VSTD::forward<_Fn>(__fn)); +inline _LIBCUDACXX_INLINE_VISIBILITY +__detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>> +proclaim_return_type(_Fn&& __fn) noexcept { + return __detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>( + _CUDA_VSTD::forward<_Fn>(__fn)); } _LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/libcudacxx/include/cuda/memory_resource b/libcudacxx/include/cuda/memory_resource index 894fd9eb2dd..a138995aa5f 100644 --- a/libcudacxx/include/cuda/memory_resource +++ b/libcudacxx/include/cuda/memory_resource @@ -80,17 +80,17 @@ class resource_ref { */ // clang-format on -#include // cuda_runtime_api needs to come first +# include // cuda_runtime_api needs to come first -#include "__cccl_config" +# include "__cccl_config" -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header +# if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +# elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +# elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +# endif // no system header #include #include diff --git a/libcudacxx/include/cuda/pipeline b/libcudacxx/include/cuda/pipeline index 583a6fb6c72..509dfd65cbe 100644 --- a/libcudacxx/include/cuda/pipeline +++ b/libcudacxx/include/cuda/pipeline @@ -3,128 +3,50 @@ * * NVIDIA SOFTWARE LICENSE * - * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the - * NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). * - * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. - * If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By - * taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of - * this license, and you take legal and financial responsibility for the actions of your permitted users. + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. 
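// Usage sketch (not part of the patch): the <cuda/functional> hunk above
// restyles cuda::proclaim_return_type, whose __return_type_wrapper lets host
// code hand an extended __device__ lambda to a library algorithm even though
// NVCC cannot deduce the lambda's return type on the host. A minimal sketch,
// assuming Thrust is available and the translation unit is compiled with
// --extended-lambda; `scale_on_device` is an illustrative name.
#include <cuda/functional>
#include <thrust/device_vector.h>
#include <thrust/transform.h>

void scale_on_device(thrust::device_vector<float>& v)
{
  // The wrapper's operator() statically checks, in device compilation, that
  // the lambda's actual result type matches the proclaimed `float`.
  auto twice = cuda::proclaim_return_type<float>(
    [] __device__ (float x) { return 2.0f * x; });
  thrust::transform(v.begin(), v.end(), v.begin(), twice);
}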
By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. * - * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, - * regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. * - * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install - * and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this - * license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under - * this license. + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. * * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: - * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, - * including (without limitation) terms relating to the license grant and license restrictions and protection of - * NVIDIA’s intellectual property rights. b. You agree to notify NVIDIA in writing of any known or suspected - * distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms - * of your agreements with respect to distributed SOFTWARE. + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. * * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. - * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from - * any portion of the SOFTWARE or copies of the SOFTWARE. c. You may not modify or create derivative works of any - * portion of the SOFTWARE. d. You may not bypass, disable, or circumvent any technical measure, encryption, - * security, digital rights management or authentication mechanism in the SOFTWARE. e. You may not use the SOFTWARE - * in any manner that would cause it to become subject to an open source software license. As examples, licenses that - * require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in - * source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. f. 
- * Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or - * application where the use or failure of the system or application can reasonably be expected to threaten or result in - * personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life - * support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these - * critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or - * damages arising from such uses. g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, - * and their respective employees, contractors, agents, officers and directors, from and against any and all claims, - * damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited - * to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use - * of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. * - * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may - * not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, - * availability, and reliability standards relative to commercial versions of NVIDIA software and materials. 
You may use - * a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in - * production or business-critical systems. + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. * - * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and - * exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United - * States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time - * without notice, but is not obligated to support or update the SOFTWARE. + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. * - * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal - * notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is - * a conflict between the terms in this license and the license terms associated with a component, the license terms - * associated with the components control only to the extent necessary to resolve the conflict. + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. * - * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, - * enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you - * voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable - * license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute - * (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA - * will use Feedback at its choice. + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. 
For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. * - * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT - * NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT - * WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR - * ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. * - * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE - * FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, - * LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH - * THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON - * BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION - * OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE - * POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING - * OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE - * OR EXTEND THIS LIMIT. + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. * - * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail - * to comply with any term and condition of this license or if you commence or participate in any legal proceeding - * against NVIDIA with respect to the SOFTWARE. 
NVIDIA may terminate this license with advance written notice to you if - * NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of - * it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of - * the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this - * license are not affected by the termination of this license. All provisions of this license will survive termination, - * except for the license granted to you. + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. * - * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State - * of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware - * residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the - * International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English - * language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction - * over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be - * allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. * - * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or - * operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be - * void and of no effect. + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. 
Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. * - * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, - * transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States - * Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s - * Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws - * include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not - * a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from - * receiving the SOFTWARE. + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. * - * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting - * of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. - * Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the - * restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the - * Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is - * NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. * - * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the - * subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to - * this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of - * this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. - * This license may only be modified in a writing signed by an authorized representative of each party. + * 15. ENTIRE AGREEMENT. 
This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. * * (v. August 20, 2021) */ @@ -141,563 +63,532 @@ # pragma system_header #endif // no system header -#include #include +#include #include _LIBCUDACXX_BEGIN_NAMESPACE_CUDA -// Forward declaration in barrier of pipeline -enum class pipeline_role -{ - producer, - consumer -}; - -template -struct __pipeline_stage -{ - barrier<_Scope> __produced; - barrier<_Scope> __consumed; -}; - -template -class pipeline_shared_state -{ -public: - pipeline_shared_state() = default; - pipeline_shared_state(const pipeline_shared_state&) = delete; - pipeline_shared_state(pipeline_shared_state&&) = delete; - pipeline_shared_state& operator=(pipeline_shared_state&&) = delete; - pipeline_shared_state& operator=(const pipeline_shared_state&) = delete; - -private: - __pipeline_stage<_Scope> __stages[_Stages_count]; - atomic __refcount; - - template - friend class pipeline; - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, - pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state, - size_t __producer_count); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, - pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state, - pipeline_role __role); -}; - -struct __pipeline_asm_helper -{ - _CCCL_DEVICE static inline uint32_t __lane_id() - { - NV_IF_ELSE_TARGET( - NV_IS_DEVICE, - (uint32_t __lane_id; asm volatile("mov.u32 %0, %%laneid;" - : "=r"(__lane_id)); - return __lane_id;), - (return 0;)) - } -}; - -template -class pipeline -{ -public: - pipeline(pipeline&&) = default; - pipeline(const pipeline&) = delete; - pipeline& operator=(pipeline&&) = delete; - pipeline& operator=(const pipeline&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY ~pipeline() - { - if (__active) - { - (void) quit(); - } - } - - _LIBCUDACXX_INLINE_VISIBILITY bool quit() - { - bool __elected; - uint32_t __sub_count; - NV_IF_TARGET( - NV_IS_DEVICE, - const uint32_t __match_mask = - __match_any_sync(__activemask(), reinterpret_cast(__shared_state_get_refcount())); - const uint32_t __elected_id = __ffs(__match_mask) - 1; - __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); - __sub_count = __popc(__match_mask); - , __elected = true; - __sub_count = 1;) - bool __released = false; - if (__elected) - { - const uint32_t __old = __shared_state_get_refcount()->fetch_sub(__sub_count); - const bool __last = (__old == __sub_count); - if (__last) - { - for (uint8_t __stage = 0; __stage < __stages_count; ++__stage) + // Forward declaration in barrier of pipeline + enum class pipeline_role { + producer, + consumer + }; + + template + struct __pipeline_stage { + barrier<_Scope> __produced; + barrier<_Scope> __consumed; + }; + + template + class 
pipeline_shared_state { + public: + pipeline_shared_state() = default; + pipeline_shared_state(const pipeline_shared_state &) = delete; + pipeline_shared_state(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(pipeline_shared_state &&) = delete; + pipeline_shared_state & operator=(const pipeline_shared_state &) = delete; + + private: + __pipeline_stage<_Scope> __stages[_Stages_count]; + atomic __refcount; + + template + friend class pipeline; + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role); + }; + + struct __pipeline_asm_helper { + _CCCL_DEVICE + static inline uint32_t __lane_id() { - __shared_state_get_stage(__stage)->__produced.~barrier(); - __shared_state_get_stage(__stage)->__consumed.~barrier(); + NV_IF_ELSE_TARGET( + NV_IS_DEVICE, + ( + uint32_t __lane_id; + asm volatile ("mov.u32 %0, %%laneid;" : "=r"(__lane_id)); + return __lane_id; + ), + ( + return 0; + ) + ) } - __released = true; - } - } - __active = false; - return __released; - } - - _LIBCUDACXX_INLINE_VISIBILITY void producer_acquire() - { - barrier<_Scope>& __stage_barrier = __shared_state_get_stage(__head)->__consumed; - __stage_barrier.wait_parity(__consumed_phase_parity); - } - - _LIBCUDACXX_INLINE_VISIBILITY void producer_commit() - { - barrier<_Scope>& __stage_barrier = __shared_state_get_stage(__head)->__produced; - (void) __memcpy_completion_impl::__defer( - __completion_mechanism::__async_group, __single_thread_group{}, 0, __stage_barrier); - (void) __stage_barrier.arrive(); - if (++__head == __stages_count) - { - __head = 0; - __consumed_phase_parity = !__consumed_phase_parity; - } - } - - _LIBCUDACXX_INLINE_VISIBILITY void consumer_wait() - { - barrier<_Scope>& __stage_barrier = __shared_state_get_stage(__tail)->__produced; - __stage_barrier.wait_parity(__produced_phase_parity); - } - - _LIBCUDACXX_INLINE_VISIBILITY void consumer_release() - { - (void) __shared_state_get_stage(__tail)->__consumed.arrive(); - if (++__tail == __stages_count) - { - __tail = 0; - __produced_phase_parity = !__produced_phase_parity; - } - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period>& __duration) - { - barrier<_Scope>& __stage_barrier = __shared_state_get_stage(__tail)->__produced; - return _CUDA_VSTD::__libcpp_thread_poll_with_backoff( - _CUDA_VSTD::__barrier_poll_tester_parity>(&__stage_barrier, __produced_phase_parity), - _CUDA_VSTD::chrono::duration_cast<_CUDA_VSTD::chrono::nanoseconds>(__duration)); - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool - consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration>& __time_point) - { - return consumer_wait_for(__time_point - _Clock::now()); - } - -private: - uint8_t __head : 8; - uint8_t __tail : 8; - const uint8_t __stages_count : 8; - bool __consumed_phase_parity : 1; - bool __produced_phase_parity : 1; - bool __active : 1; - // TODO: Remove partitioned on next ABI break - const bool 
__partitioned : 1; - char* const __shared_state; - - _LIBCUDACXX_INLINE_VISIBILITY pipeline(char* __shared_state, uint8_t __stages_count, bool __partitioned) - : __head(0) - , __tail(0) - , __stages_count(__stages_count) - , __consumed_phase_parity(true) - , __produced_phase_parity(false) - , __active(true) - , __partitioned(__partitioned) - , __shared_state(__shared_state) - {} - - _LIBCUDACXX_INLINE_VISIBILITY __pipeline_stage<_Scope>* __shared_state_get_stage(uint8_t __stage) - { - ptrdiff_t __stage_offset = __stage * sizeof(__pipeline_stage<_Scope>); - return reinterpret_cast<__pipeline_stage<_Scope>*>(__shared_state + __stage_offset); - } - - _LIBCUDACXX_INLINE_VISIBILITY atomic* __shared_state_get_refcount() - { - ptrdiff_t __refcount_offset = __stages_count * sizeof(__pipeline_stage<_Scope>); - return reinterpret_cast*>(__shared_state + __refcount_offset); - } - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, - pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state, - size_t __producer_count); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> - make_pipeline(const _Group& __group, - pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state, - pipeline_role __role); -}; - -template -_LIBCUDACXX_INLINE_VISIBILITY pipeline<_Scope> -make_pipeline(const _Group& __group, pipeline_shared_state<_Scope, _Stages_count>* __shared_state) -{ - const uint32_t __group_size = static_cast(__group.size()); - const uint32_t __thread_rank = static_cast(__group.thread_rank()); - - if (__thread_rank == 0) - { - for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) - { - init(&__shared_state->__stages[__stage].__consumed, __group_size); - init(&__shared_state->__stages[__stage].__produced, __group_size); - } - __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); - } - __group.sync(); - - return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, false); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY pipeline<_Scope> make_pipeline( - const _Group& __group, pipeline_shared_state<_Scope, _Stages_count>* __shared_state, size_t __producer_count) -{ - const uint32_t __group_size = static_cast(__group.size()); - const uint32_t __thread_rank = static_cast(__group.thread_rank()); - - if (__thread_rank == 0) - { - const size_t __consumer_count = __group_size - __producer_count; - for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) + }; + + template + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() + { + if (__active) { + (void)quit(); + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + bool __elected; + uint32_t __sub_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(__shared_state_get_refcount())); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __sub_count = __popc(__match_mask); +, + __elected = true; + __sub_count = 1; +) + bool __released = false; + if (__elected) { + 
const uint32_t __old = __shared_state_get_refcount()->fetch_sub(__sub_count); + const bool __last = (__old == __sub_count); + if (__last) { + for (uint8_t __stage = 0; __stage < __stages_count; ++__stage) { + __shared_state_get_stage(__stage)->__produced.~barrier(); + __shared_state_get_stage(__stage)->__consumed.~barrier(); + } + __released = true; + } + } + __active = false; + return __released; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__consumed; + __stage_barrier.wait_parity(__consumed_phase_parity); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__produced; + (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __stage_barrier); + (void)__stage_barrier.arrive(); + if (++__head == __stages_count) { + __head = 0; + __consumed_phase_parity = !__consumed_phase_parity; + } + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_wait() + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced; + __stage_barrier.wait_parity(__produced_phase_parity); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_release() + { + (void)__shared_state_get_stage(__tail)->__consumed.arrive(); + if (++__tail == __stages_count) { + __tail = 0; + __produced_phase_parity = !__produced_phase_parity; + } + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration) + { + barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced; + return _CUDA_VSTD::__libcpp_thread_poll_with_backoff( + _CUDA_VSTD::__barrier_poll_tester_parity>( + &__stage_barrier, + __produced_phase_parity), + _CUDA_VSTD::chrono::duration_cast<_CUDA_VSTD::chrono::nanoseconds>(__duration) + ); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point) + { + return consumer_wait_for(__time_point - _Clock::now()); + } + + private: + uint8_t __head : 8; + uint8_t __tail : 8; + const uint8_t __stages_count : 8; + bool __consumed_phase_parity : 1; + bool __produced_phase_parity : 1; + bool __active : 1; + // TODO: Remove partitioned on next ABI break + const bool __partitioned : 1; + char * const __shared_state; + + + _LIBCUDACXX_INLINE_VISIBILITY + pipeline(char * __shared_state, uint8_t __stages_count, bool __partitioned) + : __head(0) + , __tail(0) + , __stages_count(__stages_count) + , __consumed_phase_parity(true) + , __produced_phase_parity(false) + , __active(true) + , __partitioned(__partitioned) + , __shared_state(__shared_state) + {} + + _LIBCUDACXX_INLINE_VISIBILITY + __pipeline_stage<_Scope> * __shared_state_get_stage(uint8_t __stage) + { + ptrdiff_t __stage_offset = __stage * sizeof(__pipeline_stage<_Scope>); + return reinterpret_cast<__pipeline_stage<_Scope>*>(__shared_state + __stage_offset); + } + + _LIBCUDACXX_INLINE_VISIBILITY + atomic * __shared_state_get_refcount() + { + ptrdiff_t __refcount_offset = __stages_count * sizeof(__pipeline_stage<_Scope>); + return reinterpret_cast*>(__shared_state + __refcount_offset); + } + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + 
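The quit() path above performs a warp-aggregated release of the shared-state refcount: __match_any_sync groups the active lanes that target the same refcount address, __ffs picks the lowest such lane as the elected leader, and only that lane issues a single fetch_sub of __popc(mask). A minimal standalone sketch of the same election pattern (sm_70+ intrinsics; the global counter and function name below are illustrative, not part of this header):

// Sketch only: one atomic per warp-group instead of one per lane.
__device__ unsigned int g_refcount;   // illustrative counter, not the pipeline's refcount

__device__ void aggregated_release()
{
  const unsigned int __mask    = __activemask();
  // Lanes that would decrement the same address end up in the same group.
  const unsigned int __matched = __match_any_sync(__mask, reinterpret_cast<unsigned long long>(&g_refcount));
  const unsigned int __leader  = __ffs(__matched) - 1;   // lowest participating lane

  unsigned int __lane;
  asm volatile("mov.u32 %0, %%laneid;" : "=r"(__lane));  // same idiom as __pipeline_asm_helper::__lane_id()

  if (__lane == __leader) {
    atomicSub(&g_refcount, __popc(__matched));           // one decrement covers the whole group
  }
}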
pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role); + }; + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state) { - init(&__shared_state->__stages[__stage].__consumed, __consumer_count); - init(&__shared_state->__stages[__stage].__produced, __producer_count); + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __group_size); + init(&__shared_state->__stages[__stage].__produced, __group_size); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, false); } - __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); - } - __group.sync(); - - return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY pipeline<_Scope> -make_pipeline(const _Group& __group, pipeline_shared_state<_Scope, _Stages_count>* __shared_state, pipeline_role __role) -{ - const uint32_t __group_size = static_cast(__group.size()); - const uint32_t __thread_rank = static_cast(__group.thread_rank()); - - if (__thread_rank == 0) - { - __shared_state->__refcount.store(0, std::memory_order_relaxed); - } - __group.sync(); - - if (__role == pipeline_role::producer) - { - bool __elected; - uint32_t __add_count; - NV_IF_TARGET( - NV_IS_DEVICE, - const uint32_t __match_mask = - __match_any_sync(__activemask(), reinterpret_cast(&__shared_state->__refcount)); - const uint32_t __elected_id = __ffs(__match_mask) - 1; - __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); - __add_count = __popc(__match_mask); - , __elected = true; - __add_count = 1;) - if (__elected) + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, size_t __producer_count) { - (void) __shared_state->__refcount.fetch_add(__add_count, std::memory_order_relaxed); + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + const size_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); + + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); } - } - __group.sync(); - - if (__thread_rank == 0) - { - const uint32_t __producer_count = __shared_state->__refcount.load(std::memory_order_relaxed); - const uint32_t __consumer_count = __group_size - __producer_count; - for (uint8_t __stage = 0; __stage < _Stages_count; 
++__stage) + + template + _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, pipeline_role __role) { - init(&__shared_state->__stages[__stage].__consumed, __consumer_count); - init(&__shared_state->__stages[__stage].__produced, __producer_count); - } - __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); - } - __group.sync(); + const uint32_t __group_size = static_cast(__group.size()); + const uint32_t __thread_rank = static_cast(__group.thread_rank()); + + if (__thread_rank == 0) { + __shared_state->__refcount.store(0, std::memory_order_relaxed); + } + __group.sync(); + + if (__role == pipeline_role::producer) { + bool __elected; + uint32_t __add_count; +NV_IF_TARGET(NV_IS_DEVICE, + const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast(&__shared_state->__refcount)); + const uint32_t __elected_id = __ffs(__match_mask) - 1; + __elected = (__pipeline_asm_helper::__lane_id() == __elected_id); + __add_count = __popc(__match_mask); +, + __elected = true; + __add_count = 1; +) + if (__elected) { + (void)__shared_state->__refcount.fetch_add(__add_count, std::memory_order_relaxed); + } + } + __group.sync(); + + if (__thread_rank == 0) { + const uint32_t __producer_count = __shared_state->__refcount.load(std::memory_order_relaxed); + const uint32_t __consumer_count = __group_size - __producer_count; + for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) { + init(&__shared_state->__stages[__stage].__consumed, __consumer_count); + init(&__shared_state->__stages[__stage].__produced, __producer_count); + } + __shared_state->__refcount.store(__group_size, std::memory_order_relaxed); + } + __group.sync(); - return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); -} + return pipeline<_Scope>(reinterpret_cast(__shared_state->__stages), _Stages_count, true); + } _LIBCUDACXX_END_NAMESPACE_CUDA _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE -template -_CCCL_DEVICE void __pipeline_consumer_wait(pipeline& __pipeline); + template + _CCCL_DEVICE + void __pipeline_consumer_wait(pipeline & __pipeline); -_CCCL_DEVICE inline void __pipeline_consumer_wait(pipeline& __pipeline, uint8_t __prior); + _CCCL_DEVICE + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior); _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE _LIBCUDACXX_BEGIN_NAMESPACE_CUDA -template <> -class pipeline -{ -public: - pipeline(pipeline&&) = default; - pipeline(const pipeline&) = delete; - pipeline& operator=(pipeline&&) = delete; - pipeline& operator=(const pipeline&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY ~pipeline() {} - - _LIBCUDACXX_INLINE_VISIBILITY bool quit() - { - return true; - } - - _LIBCUDACXX_INLINE_VISIBILITY void producer_acquire() {} - - _LIBCUDACXX_INLINE_VISIBILITY void producer_commit() - { - NV_IF_TARGET(NV_PROVIDES_SM_80, asm volatile("cp.async.commit_group;"); ++__head;) - } - - _LIBCUDACXX_INLINE_VISIBILITY void consumer_wait() - { - NV_IF_TARGET( - NV_PROVIDES_SM_80, - if (__head == __tail) { return; } - - const uint8_t __prior = __head - __tail - 1; - device::__pipeline_consumer_wait(*this, __prior); - ++__tail;) - } - - _LIBCUDACXX_INLINE_VISIBILITY void consumer_release() {} - - template - _LIBCUDACXX_INLINE_VISIBILITY bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period>& __duration) - { - (void) __duration; - consumer_wait(); - return true; - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool - 
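Taken together, the three make_pipeline overloads above initialize the per-stage consumed/produced barriers inside a pipeline_shared_state and hand back a block-scoped pipeline. A rough usage sketch of the unified overload (every thread both produces and consumes); the kernel, buffer shape, stage count and the +1 transform are illustrative, and it assumes blockDim.x == 256 with n a multiple of 256:

#include <cuda/pipeline>
#include <cooperative_groups.h>

__global__ void stage_and_add_one(const int* in, int* out, size_t n)
{
  constexpr int stages = 2;                          // illustrative stage count
  namespace cg = cooperative_groups;
  auto block = cg::this_thread_block();

  __shared__ cuda::pipeline_shared_state<cuda::thread_scope_block, stages> state;
  __shared__ int buf[stages][256];

  auto pipe = cuda::make_pipeline(block, &state);    // unified producer/consumer pipeline

  for (size_t base = 0, stage = 0; base < n; base += 256, stage ^= 1) {
    pipe.producer_acquire();                         // wait until this stage's buffer is free
    cuda::memcpy_async(block, buf[stage], in + base, sizeof(int) * 256, pipe);
    pipe.producer_commit();

    pipe.consumer_wait();                            // copy for this stage has landed
    out[base + block.thread_rank()] = buf[stage][block.thread_rank()] + 1;
    pipe.consumer_release();                         // buffer may be reused
  }
}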
consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration>& __time_point) - { - (void) __time_point; - consumer_wait(); - return true; - } - -private: - uint8_t __head; - uint8_t __tail; - - _LIBCUDACXX_INLINE_VISIBILITY pipeline() - : __head(0) - , __tail(0) - {} - - friend _LIBCUDACXX_INLINE_VISIBILITY inline pipeline make_pipeline(); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY void pipeline_consumer_wait_prior(pipeline& __pipeline); - - template - friend _LIBCUDACXX_INLINE_VISIBILITY pipeline<_Pipeline_scope> __make_pipeline( - const _Group& __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count>* __shared_state); -}; + template<> + class pipeline { + public: + pipeline(pipeline &&) = default; + pipeline(const pipeline &) = delete; + pipeline & operator=(pipeline &&) = delete; + pipeline & operator=(const pipeline &) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + ~pipeline() {} + + _LIBCUDACXX_INLINE_VISIBILITY + bool quit() + { + return true; + } + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_acquire() {} + + _LIBCUDACXX_INLINE_VISIBILITY + void producer_commit() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + asm volatile ("cp.async.commit_group;"); + ++__head; +) + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_wait() + { +NV_IF_TARGET(NV_PROVIDES_SM_80, + if (__head == __tail) { + return; + } + + const uint8_t __prior = __head - __tail - 1; + device::__pipeline_consumer_wait(*this, __prior); + ++__tail; +) + } + + _LIBCUDACXX_INLINE_VISIBILITY + void consumer_release() {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration) + { + (void)__duration; + consumer_wait(); + return true; + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point) + { + (void)__time_point; + consumer_wait(); + return true; + } + + private: + uint8_t __head; + uint8_t __tail; + + _LIBCUDACXX_INLINE_VISIBILITY + pipeline() + : __head(0) + , __tail(0) + {} + + friend _LIBCUDACXX_INLINE_VISIBILITY inline pipeline make_pipeline(); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline); + + template + friend _LIBCUDACXX_INLINE_VISIBILITY + pipeline<_Pipeline_scope> __make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state); + }; _LIBCUDACXX_END_NAMESPACE_CUDA _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE -template -_CCCL_DEVICE void __pipeline_consumer_wait(pipeline& __pipeline) -{ - (void) __pipeline; - NV_IF_TARGET(NV_PROVIDES_SM_80, constexpr uint8_t __max_prior = 8; - - asm volatile("cp.async.wait_group %0;" - : - : "n"(_Prior < __max_prior ? 
_Prior : __max_prior));) -} - -_CCCL_DEVICE inline void __pipeline_consumer_wait(pipeline& __pipeline, uint8_t __prior) -{ - switch (__prior) - { - case 0: - device::__pipeline_consumer_wait<0>(__pipeline); - break; - case 1: - device::__pipeline_consumer_wait<1>(__pipeline); - break; - case 2: - device::__pipeline_consumer_wait<2>(__pipeline); - break; - case 3: - device::__pipeline_consumer_wait<3>(__pipeline); - break; - case 4: - device::__pipeline_consumer_wait<4>(__pipeline); - break; - case 5: - device::__pipeline_consumer_wait<5>(__pipeline); - break; - case 6: - device::__pipeline_consumer_wait<6>(__pipeline); - break; - case 7: - device::__pipeline_consumer_wait<7>(__pipeline); - break; - default: - device::__pipeline_consumer_wait<8>(__pipeline); - break; - } -} + template + _CCCL_DEVICE + void __pipeline_consumer_wait(pipeline & __pipeline) + { + (void)__pipeline; +NV_IF_TARGET(NV_PROVIDES_SM_80, + constexpr uint8_t __max_prior = 8; + + asm volatile ("cp.async.wait_group %0;" + : + : "n"(_Prior < __max_prior ? _Prior : __max_prior)); +) + } + + _CCCL_DEVICE + inline void __pipeline_consumer_wait(pipeline & __pipeline, uint8_t __prior) + { + switch (__prior) { + case 0: device::__pipeline_consumer_wait<0>(__pipeline); break; + case 1: device::__pipeline_consumer_wait<1>(__pipeline); break; + case 2: device::__pipeline_consumer_wait<2>(__pipeline); break; + case 3: device::__pipeline_consumer_wait<3>(__pipeline); break; + case 4: device::__pipeline_consumer_wait<4>(__pipeline); break; + case 5: device::__pipeline_consumer_wait<5>(__pipeline); break; + case 6: device::__pipeline_consumer_wait<6>(__pipeline); break; + case 7: device::__pipeline_consumer_wait<7>(__pipeline); break; + default: device::__pipeline_consumer_wait<8>(__pipeline); break; + } + } _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE _LIBCUDACXX_BEGIN_NAMESPACE_CUDA -_LIBCUDACXX_INLINE_VISIBILITY inline pipeline make_pipeline() -{ - return pipeline(); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY void pipeline_consumer_wait_prior(pipeline& __pipeline) -{ - NV_IF_TARGET(NV_PROVIDES_SM_80, device::__pipeline_consumer_wait<_Prior>(__pipeline); - __pipeline.__tail = __pipeline.__head - _Prior;) -} - -template -_LIBCUDACXX_INLINE_VISIBILITY void -pipeline_producer_commit(pipeline& __pipeline, barrier<_Scope>& __barrier) -{ - (void) __pipeline; - NV_IF_TARGET(NV_PROVIDES_SM_80, - ((void) __memcpy_completion_impl::__defer( - __completion_mechanism::__async_group, __single_thread_group{}, 0, __barrier);)); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment __memcpy_async_pipeline( - _Group const& __group, _Tp* __destination, _Tp const* __source, _Size __size, pipeline<_Scope>& __pipeline) -{ - // 1. Set the completion mechanisms that can be used. - // - // Do not (yet) allow async_bulk_group completion. Do not allow - // mbarrier_complete_tx completion, even though it may be possible if - // the pipeline has stage barriers in shared memory. - _CUDA_VSTD::uint32_t __allowed_completions = _CUDA_VSTD::uint32_t(__completion_mechanism::__async_group); - - // Alignment: Use the maximum of the alignment of _Tp and that of a possible cuda::aligned_size_t. - constexpr _CUDA_VSTD::size_t __size_align = __get_size_align<_Size>::align; - constexpr _CUDA_VSTD::size_t __align = (alignof(_Tp) < __size_align) ? __size_align : alignof(_Tp); - // Cast to char pointers. We don't need the type for alignment anymore and - // erasing the types reduces the number of instantiations of down-stream - // functions. 
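In the thread_scope_thread specialization above, producer_commit maps to cp.async.commit_group and pipeline_consumer_wait_prior<N> to cp.async.wait_group with N clamped to 8, so a single thread can keep several async-copy batches in flight. A hedged sketch of that idiom with illustrative buffer sizes (on targets before SM80 the copies fall back to synchronous completion, so the waits are trivially satisfied):

#include <cuda/pipeline>

// Sketch: two async-copy batches in flight from one thread.
__device__ void per_thread_double_buffer(const float* gmem, float* smem /* room for 64 floats */)
{
  auto pipe = cuda::make_pipeline();                 // pipeline<cuda::thread_scope_thread>

  pipe.producer_acquire();
  cuda::memcpy_async(smem, gmem, sizeof(float) * 32, pipe);
  pipe.producer_commit();                            // commit batch 0

  pipe.producer_acquire();
  cuda::memcpy_async(smem + 32, gmem + 32, sizeof(float) * 32, pipe);
  pipe.producer_commit();                            // commit batch 1

  cuda::pipeline_consumer_wait_prior<1>(pipe);       // allow at most 1 batch in flight: batch 0 is done
  // ... consume smem[0..31] here ...

  pipe.consumer_wait();                              // drain batch 1 as well
  pipe.consumer_release();
}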
- char* __dest_char = reinterpret_cast(__destination); - char const* __src_char = reinterpret_cast(__source); - - // 2. Issue actual copy instructions. - auto __cm = __dispatch_memcpy_async<__align>(__group, __dest_char, __src_char, __size, __allowed_completions); - - // 3. No need to synchronize with copy instructions. - return __memcpy_completion_impl::__defer(__cm, __group, __size, __pipeline); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment memcpy_async( - _Group const& __group, _Type* __destination, _Type const* __source, std::size_t __size, pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); -} - -template _Alignment) ? alignof(_Type) : _Alignment> -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment memcpy_async( - _Group const& __group, - _Type* __destination, - _Type const* __source, - aligned_size_t<_Alignment> __size, - pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment -memcpy_async(_Type* __destination, _Type const* __source, _Size __size, pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline(__single_thread_group{}, __destination, __source, __size, __pipeline); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment memcpy_async( - _Group const& __group, void* __destination, void const* __source, std::size_t __size, pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline( - __group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment memcpy_async( - _Group const& __group, - void* __destination, - void const* __source, - aligned_size_t<_Alignment> __size, - pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline( - __group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); -} - -template -_LIBCUDACXX_INLINE_VISIBILITY async_contract_fulfillment -memcpy_async(void* __destination, void const* __source, _Size __size, pipeline<_Scope>& __pipeline) -{ - return __memcpy_async_pipeline( - __single_thread_group{}, - reinterpret_cast(__destination), - reinterpret_cast(__source), - __size, - __pipeline); -} + _LIBCUDACXX_INLINE_VISIBILITY + inline pipeline make_pipeline() + { + return pipeline(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_consumer_wait_prior(pipeline & __pipeline) + { + NV_IF_TARGET(NV_PROVIDES_SM_80, + device::__pipeline_consumer_wait<_Prior>(__pipeline); + __pipeline.__tail = __pipeline.__head - _Prior; + ) + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + void pipeline_producer_commit(pipeline & __pipeline, barrier<_Scope> & __barrier) + { + (void)__pipeline; + NV_IF_TARGET(NV_PROVIDES_SM_80,( + (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __barrier); + )); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment __memcpy_async_pipeline(_Group const & __group, _Tp * __destination, _Tp const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + // 1. Set the completion mechanisms that can be used. + // + // Do not (yet) allow async_bulk_group completion. Do not allow + // mbarrier_complete_tx completion, even though it may be possible if + // the pipeline has stage barriers in shared memory. 
+ _CUDA_VSTD::uint32_t __allowed_completions = _CUDA_VSTD::uint32_t(__completion_mechanism::__async_group); + + // Alignment: Use the maximum of the alignment of _Tp and that of a possible cuda::aligned_size_t. + constexpr _CUDA_VSTD::size_t __size_align = __get_size_align<_Size>::align; + constexpr _CUDA_VSTD::size_t __align = (alignof(_Tp) < __size_align) ? __size_align : alignof(_Tp); + // Cast to char pointers. We don't need the type for alignment anymore and + // erasing the types reduces the number of instantiations of down-stream + // functions. + char * __dest_char = reinterpret_cast(__destination); + char const * __src_char = reinterpret_cast(__source); + + // 2. Issue actual copy instructions. + auto __cm = __dispatch_memcpy_async<__align>(__group, __dest_char, __src_char, __size, __allowed_completions); + + // 3. No need to synchronize with copy instructions. + return __memcpy_completion_impl::__defer(__cm, __group, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); + } + + template _Alignment) ? alignof(_Type) : _Alignment> + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Type * __destination, _Type const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__single_thread_group{}, __destination, __source, __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__group, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + async_contract_fulfillment memcpy_async(void * __destination, void const * __source, _Size __size, pipeline<_Scope> & __pipeline) { + return __memcpy_async_pipeline(__single_thread_group{}, reinterpret_cast(__destination), reinterpret_cast(__source), __size, __pipeline); + } _LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/libcudacxx/include/cuda/std/__algorithm_ b/libcudacxx/include/cuda/std/__algorithm_ index a1762b79ae5..91c4160a8b5 100644 --- a/libcudacxx/include/cuda/std/__algorithm_ +++ b/libcudacxx/include/cuda/std/__algorithm_ @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_ALGORITHM diff --git a/libcudacxx/include/cuda/std/__exception_ b/libcudacxx/include/cuda/std/__exception_ index c9b2b855f5c..e5aedc1d49d 100644 --- a/libcudacxx/include/cuda/std/__exception_ +++ b/libcudacxx/include/cuda/std/__exception_ @@ -12,8 +12,11 @@ #define 
_CUDA_STD_NEW #include "detail/__config" -#include "detail/__pragma_pop" + #include "detail/__pragma_push" + #include "detail/libcxx/include/exception" +#include "detail/__pragma_pop" + #endif // _CUDA_STD_NEW diff --git a/libcudacxx/include/cuda/std/__memory_ b/libcudacxx/include/cuda/std/__memory_ index 077c795e1ed..1bff78d6773 100644 --- a/libcudacxx/include/cuda/std/__memory_ +++ b/libcudacxx/include/cuda/std/__memory_ @@ -12,8 +12,11 @@ #define _CUDA_STD_MEMORY #include "detail/__config" -#include "detail/__pragma_pop" + #include "detail/__pragma_push" + #include "detail/libcxx/include/memory" +#include "detail/__pragma_pop" + #endif // _CUDA_STD_MEMORY diff --git a/libcudacxx/include/cuda/std/__new_ b/libcudacxx/include/cuda/std/__new_ index daaf0f48084..3e8aefcdb6f 100644 --- a/libcudacxx/include/cuda/std/__new_ +++ b/libcudacxx/include/cuda/std/__new_ @@ -12,8 +12,11 @@ #define _CUDA_STD_NEW #include "detail/__config" -#include "detail/__pragma_pop" + #include "detail/__pragma_push" + #include "detail/libcxx/include/new" +#include "detail/__pragma_pop" + #endif // _CUDA_STD_NEW diff --git a/libcudacxx/include/cuda/std/array b/libcudacxx/include/cuda/std/array index 4dd41a43020..f0bd5785600 100644 --- a/libcudacxx/include/cuda/std/array +++ b/libcudacxx/include/cuda/std/array @@ -12,8 +12,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_ARRAY diff --git a/libcudacxx/include/cuda/std/atomic b/libcudacxx/include/cuda/std/atomic index 7908a2274ea..0daab5f2cb5 100644 --- a/libcudacxx/include/cuda/std/atomic +++ b/libcudacxx/include/cuda/std/atomic @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_ATOMIC diff --git a/libcudacxx/include/cuda/std/barrier b/libcudacxx/include/cuda/std/barrier index 94ab6e65df4..415c3f80acf 100644 --- a/libcudacxx/include/cuda/std/barrier +++ b/libcudacxx/include/cuda/std/barrier @@ -17,8 +17,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_BARRIER diff --git a/libcudacxx/include/cuda/std/bit b/libcudacxx/include/cuda/std/bit index a80f1d5d1df..491b346c576 100644 --- a/libcudacxx/include/cuda/std/bit +++ b/libcudacxx/include/cuda/std/bit @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_BIT diff --git a/libcudacxx/include/cuda/std/cassert b/libcudacxx/include/cuda/std/cassert index b6400ae2694..af8af80e43d 100644 --- a/libcudacxx/include/cuda/std/cassert +++ b/libcudacxx/include/cuda/std/cassert @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CASSERT diff --git a/libcudacxx/include/cuda/std/cfloat b/libcudacxx/include/cuda/std/cfloat index 13f64607bf3..31a9f8e4e61 100644 --- a/libcudacxx/include/cuda/std/cfloat +++ b/libcudacxx/include/cuda/std/cfloat @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CFLOAT diff --git a/libcudacxx/include/cuda/std/chrono b/libcudacxx/include/cuda/std/chrono index 38eff65fb16..f8d62efb4f6 100644 --- a/libcudacxx/include/cuda/std/chrono +++ b/libcudacxx/include/cuda/std/chrono @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CHRONO diff --git a/libcudacxx/include/cuda/std/climits b/libcudacxx/include/cuda/std/climits index fa981537469..f7934b665a9 100644 --- a/libcudacxx/include/cuda/std/climits +++ b/libcudacxx/include/cuda/std/climits @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CLIMITS diff --git 
a/libcudacxx/include/cuda/std/cmath b/libcudacxx/include/cuda/std/cmath index 68524be4bad..a6a05ef2430 100644 --- a/libcudacxx/include/cuda/std/cmath +++ b/libcudacxx/include/cuda/std/cmath @@ -12,8 +12,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CMATH diff --git a/libcudacxx/include/cuda/std/complex b/libcudacxx/include/cuda/std/complex index 4940f7cb2bc..7c8ea6b5b46 100644 --- a/libcudacxx/include/cuda/std/complex +++ b/libcudacxx/include/cuda/std/complex @@ -12,8 +12,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_COMPLEX diff --git a/libcudacxx/include/cuda/std/concepts b/libcudacxx/include/cuda/std/concepts index eee16d9b100..d3f9eb25dde 100644 --- a/libcudacxx/include/cuda/std/concepts +++ b/libcudacxx/include/cuda/std/concepts @@ -12,8 +12,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CONCEPTS diff --git a/libcudacxx/include/cuda/std/cstddef b/libcudacxx/include/cuda/std/cstddef index 5fe32da86d8..95aae77de22 100644 --- a/libcudacxx/include/cuda/std/cstddef +++ b/libcudacxx/include/cuda/std/cstddef @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CSTDDEF diff --git a/libcudacxx/include/cuda/std/cstdint b/libcudacxx/include/cuda/std/cstdint index f62a90d93ee..22c0754e481 100644 --- a/libcudacxx/include/cuda/std/cstdint +++ b/libcudacxx/include/cuda/std/cstdint @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CSTDINT diff --git a/libcudacxx/include/cuda/std/cstdlib b/libcudacxx/include/cuda/std/cstdlib index 36c3d976657..af85815be27 100644 --- a/libcudacxx/include/cuda/std/cstdlib +++ b/libcudacxx/include/cuda/std/cstdlib @@ -12,8 +12,11 @@ #define _CUDA_STD_CSTDLIB #include "detail/__config" -#include "detail/__pragma_pop" + #include "detail/__pragma_push" + #include "detail/libcxx/include/cstdlib" +#include "detail/__pragma_pop" + #endif // _CUDA_STD_CSTDLIB diff --git a/libcudacxx/include/cuda/std/ctime b/libcudacxx/include/cuda/std/ctime index 72275a6bdf3..d610c831077 100644 --- a/libcudacxx/include/cuda/std/ctime +++ b/libcudacxx/include/cuda/std/ctime @@ -13,8 +13,10 @@ #include -#include #include + #include +#include + #endif // _CUDA_STD_CTIME diff --git a/libcudacxx/include/cuda/std/detail/__access_property b/libcudacxx/include/cuda/std/detail/__access_property index c63ec342df9..7d9718503e9 100644 --- a/libcudacxx/include/cuda/std/detail/__access_property +++ b/libcudacxx/include/cuda/std/detail/__access_property @@ -3,445 +3,325 @@ * * NVIDIA SOFTWARE LICENSE * - * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the - * NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). * - * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. - * If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By - * taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of - * this license, and you take legal and financial responsibility for the actions of your permitted users. 
+ * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. * - * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, - * regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. * - * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install - * and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this - * license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under - * this license. + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. * * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: - * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, - * including (without limitation) terms relating to the license grant and license restrictions and protection of - * NVIDIA’s intellectual property rights. b. You agree to notify NVIDIA in writing of any known or suspected - * distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms - * of your agreements with respect to distributed SOFTWARE. + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. * * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. - * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from - * any portion of the SOFTWARE or copies of the SOFTWARE. c. You may not modify or create derivative works of any - * portion of the SOFTWARE. d. You may not bypass, disable, or circumvent any technical measure, encryption, - * security, digital rights management or authentication mechanism in the SOFTWARE. e. You may not use the SOFTWARE - * in any manner that would cause it to become subject to an open source software license. 
As examples, licenses that - * require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in - * source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. f. - * Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or - * application where the use or failure of the system or application can reasonably be expected to threaten or result in - * personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life - * support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these - * critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or - * damages arising from such uses. g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, - * and their respective employees, contractors, agents, officers and directors, from and against any and all claims, - * damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited - * to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use - * of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. * - * 4. PRE-RELEASE. 
SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may - * not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, - * availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use - * a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in - * production or business-critical systems. + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. * - * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and - * exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United - * States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time - * without notice, but is not obligated to support or update the SOFTWARE. + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. * - * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal - * notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is - * a conflict between the terms in this license and the license terms associated with a component, the license terms - * associated with the components control only to the extent necessary to resolve the conflict. + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. * - * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, - * enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you - * voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable - * license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute - * (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA - * will use Feedback at its choice. + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. 
“Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. * - * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT - * NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT - * WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR - * ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. * - * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE - * FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, - * LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH - * THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON - * BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION - * OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE - * POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING - * OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE - * OR EXTEND THIS LIMIT. + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. * - * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail - * to comply with any term and condition of this license or if you commence or participate in any legal proceeding - * against NVIDIA with respect to the SOFTWARE. 
NVIDIA may terminate this license with advance written notice to you if - * NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of - * it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of - * the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this - * license are not affected by the termination of this license. All provisions of this license will survive termination, - * except for the license granted to you. + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. * - * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State - * of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware - * residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the - * International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English - * language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction - * over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be - * allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. * - * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or - * operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be - * void and of no effect. + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. 
Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. * - * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, - * transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States - * Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s - * Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws - * include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not - * a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from - * receiving the SOFTWARE. + * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. * - * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting - * of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. - * Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the - * restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the - * Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is - * NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. * - * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the - * subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to - * this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of - * this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. - * This license may only be modified in a writing signed by an authorized representative of each party. + * 15. ENTIRE AGREEMENT. 
This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. * * (v. August 20, 2021) */ _LIBCUDACXX_BEGIN_NAMESPACE_CUDA -namespace __detail_ap -{ +namespace __detail_ap { -_CCCL_HOST_DEVICE constexpr uint32_t __ap_floor_log2(uint32_t __x) -{ - return (__x == 1 | __x == 0) ? 0 : 1 + __ap_floor_log2(__x >> 1); -} + _CCCL_HOST_DEVICE + constexpr uint32_t __ap_floor_log2(uint32_t __x) { + return (__x == 1 | __x == 0) ? 0 : 1 + __ap_floor_log2(__x >> 1); + } -_CCCL_HOST_DEVICE constexpr uint32_t __ap_ceil_log2(uint32_t __x) -{ - return (__x == 1 | __x == 0) ? 0 : __ap_floor_log2(__x - 1) + 1; -} + _CCCL_HOST_DEVICE + constexpr uint32_t __ap_ceil_log2(uint32_t __x) { + return (__x == 1 | __x == 0) ? 0 : __ap_floor_log2(__x - 1) + 1; + } -_CCCL_HOST_DEVICE constexpr uint32_t __ap_min(uint32_t __a, uint32_t __b) noexcept -{ - return (__a < __b) ? __a : __b; -} + _CCCL_HOST_DEVICE + constexpr uint32_t __ap_min(uint32_t __a, uint32_t __b) noexcept { + return (__a < __b) ? __a : __b; + } -_CCCL_HOST_DEVICE constexpr uint32_t __ap_max(uint32_t __a, uint32_t __b) noexcept -{ - return (__a > __b) ? __a : __b; -} + _CCCL_HOST_DEVICE + constexpr uint32_t __ap_max(uint32_t __a, uint32_t __b) noexcept { + return (__a > __b) ? __a : __b; + } // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 // Specifically search for 8.4 and 9.3 and above to guarantee uint64_t enum. 
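The __ap_floor_log2 / __ap_ceil_log2 helpers above are single-return recursions so they remain usable in C++11 constant expressions: floor peels one bit per step, and ceil is defined as floor_log2(x - 1) + 1 for x > 1. A few illustrative compile-time spot checks, assuming the enclosing namespace resolves to cuda::__detail_ap:

// Illustrative spot checks, not part of the header.
static_assert(cuda::__detail_ap::__ap_floor_log2(1)  == 0, "");
static_assert(cuda::__detail_ap::__ap_floor_log2(32) == 5, "");
static_assert(cuda::__detail_ap::__ap_ceil_log2(32)  == 5, "");
static_assert(cuda::__detail_ap::__ap_ceil_log2(33)  == 6, "");
static_assert(cuda::__detail_ap::__ap_min(3, 7) == 3, "");
static_assert(cuda::__detail_ap::__ap_max(3, 7) == 7, "");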
-#if defined(_CCCL_COMPILER_GCC) && (((_GNUC_VER < 804)) || ((_GNUC_VER < 903))) -# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION +#if defined(_CCCL_COMPILER_GCC) && ( \ + ((_GNUC_VER < 804)) || \ + ((_GNUC_VER < 903)) \ + ) +# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION #else -# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION : uint64_t +# define _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION : uint64_t #endif -namespace __sm_80 -{ -namespace __off -{ -enum __l2_cop_off_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION -{ - _L2_EVICT_NORMAL = 0, - _L2_EVICT_FIRST = 1, -}; -} // namespace __off - -namespace __on -{ -enum __l2_cop_on_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION -{ - _L2_EVICT_NORMAL = 0, - _L2_EVICT_FIRST = 1, - _L2_EVICT_LAST = 2, - _L2_EVICT_NORMAL_DEMOTE = 3, -}; -} // namespace __on - -enum __l2_descriptor_mode_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION -{ - _DESC_IMPLICIT = 0, - _DESC_INTERLEAVED = 2, - _DESC_BLOCK_TYPE = 3, -}; - -enum __l2_eviction_max_way_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION -{ - _CUDA_AMPERE_MAX_L2_WAYS = std::uint32_t{16}, -}; - -enum __block_size_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION -{ - _BLOCKSIZE_4K = 0, - _BLOCKSIZE_8K = 1, - _BLOCKSIZE_16K = 2, - _BLOCKSIZE_32K = 3, - _BLOCKSIZE_64K = 4, - _BLOCKSIZE_128K = 5, - _BLOCKSIZE_256K = 6, - _BLOCKSIZE_512K = 7, - _BLOCKSIZE_1M = 8, - _BLOCKSIZE_2M = 9, - _BLOCKSIZE_4M = 10, - _BLOCKSIZE_8M = 11, - _BLOCKSIZE_16M = 12, - _BLOCKSIZE_32M = 13, -}; - -struct __block_desc_t -{ - uint64_t __ap_reserved : 37; - uint64_t __block_count : 7; - uint64_t __block_start : 7; - uint64_t __ap_reserved2 : 1; - __block_size_t __block_size : 4; - __off::__l2_cop_off_t __l2_cop_off : 1; - __on::__l2_cop_on_t __l2_cop_on : 2; - __l2_descriptor_mode_t __l2_descriptor_mode : 2; - uint64_t __l1_inv_dont_allocate : 1; - uint64_t __l2_sector_promote_256B : 1; - uint64_t __ap_reserved3 : 1; - - _CCCL_HOST_DEVICE constexpr std::uint64_t __get_descriptor_cexpr() const noexcept - { - return std::uint64_t(__ap_reserved) << 0 | std::uint64_t(__block_count) << 37 | std::uint64_t(__block_start) << 44 - | std::uint64_t(__ap_reserved2) << 51 | std::uint64_t(__block_size) << 52 | std::uint64_t(__l2_cop_off) << 56 - | std::uint64_t(__l2_cop_on) << 57 | std::uint64_t(__l2_descriptor_mode) << 59 - | std::uint64_t(__l1_inv_dont_allocate) << 61 | std::uint64_t(__l2_sector_promote_256B) << 62 - | std::uint64_t(__ap_reserved3) << 63; - } - - inline _CCCL_HOST_DEVICE std::uint64_t __get_descriptor_non_cexpr() const noexcept - { - return *reinterpret_cast(this); - } - - _CCCL_HOST_DEVICE constexpr std::uint64_t __get_descriptor() const noexcept - { + namespace __sm_80 { + namespace __off { + enum __l2_cop_off_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _L2_EVICT_NORMAL = 0, + _L2_EVICT_FIRST = 1, + }; + } // namespace __off + + namespace __on { + enum __l2_cop_on_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _L2_EVICT_NORMAL = 0, + _L2_EVICT_FIRST = 1, + _L2_EVICT_LAST = 2, + _L2_EVICT_NORMAL_DEMOTE = 3, + }; + } // namespace __on + + enum __l2_descriptor_mode_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _DESC_IMPLICIT = 0, + _DESC_INTERLEAVED = 2, + _DESC_BLOCK_TYPE = 3, + }; + + enum __l2_eviction_max_way_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _CUDA_AMPERE_MAX_L2_WAYS = std::uint32_t{16}, + }; + + enum __block_size_t _LIBCUDACXX_AP_ENUM_TYPE_ANNOTATION { + _BLOCKSIZE_4K = 0, + _BLOCKSIZE_8K = 1, + _BLOCKSIZE_16K = 2, + _BLOCKSIZE_32K = 3, + _BLOCKSIZE_64K = 4, + _BLOCKSIZE_128K = 5, + _BLOCKSIZE_256K = 6, + _BLOCKSIZE_512K = 7, + _BLOCKSIZE_1M = 8, + _BLOCKSIZE_2M = 9, + _BLOCKSIZE_4M = 
10, + _BLOCKSIZE_8M = 11, + _BLOCKSIZE_16M = 12, + _BLOCKSIZE_32M = 13, + }; + + struct __block_desc_t { + uint64_t __ap_reserved : 37; + uint64_t __block_count: 7; + uint64_t __block_start: 7; + uint64_t __ap_reserved2 : 1; + __block_size_t __block_size : 4; + __off::__l2_cop_off_t __l2_cop_off : 1; + __on::__l2_cop_on_t __l2_cop_on : 2; + __l2_descriptor_mode_t __l2_descriptor_mode : 2; + uint64_t __l1_inv_dont_allocate : 1; + uint64_t __l2_sector_promote_256B : 1; + uint64_t __ap_reserved3 : 1; + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __get_descriptor_cexpr() const noexcept { + return + std::uint64_t(__ap_reserved) << 0 | + std::uint64_t(__block_count) << 37 | + std::uint64_t(__block_start) << 44 | + std::uint64_t(__ap_reserved2) << 51 | + std::uint64_t(__block_size) << 52 | + std::uint64_t(__l2_cop_off) << 56 | + std::uint64_t(__l2_cop_on) << 57 | + std::uint64_t(__l2_descriptor_mode) << 59 | + std::uint64_t(__l1_inv_dont_allocate) << 61 | + std::uint64_t(__l2_sector_promote_256B) << 62 | + std::uint64_t(__ap_reserved3) << 63; + } + + inline + _CCCL_HOST_DEVICE + std::uint64_t __get_descriptor_non_cexpr() const noexcept { return *reinterpret_cast(this); } + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __get_descriptor() const noexcept { #if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) - return cuda::std::is_constant_evaluated() ? __get_descriptor_cexpr() : __get_descriptor_non_cexpr(); + return cuda::std::is_constant_evaluated() ? + __get_descriptor_cexpr() : + __get_descriptor_non_cexpr(); #else - return __get_descriptor_cexpr(); + return __get_descriptor_cexpr(); #endif - } -}; -static_assert(sizeof(__block_desc_t) == 8, "__block_desc_t should be 8 bytes"); -static_assert(sizeof(__block_desc_t) == sizeof(std::uint64_t), ""); -static_assert( - __block_desc_t{ - (uint64_t) 1, - (uint64_t) 1, - (uint64_t) 1, - (uint64_t) 1, - __block_size_t::_BLOCKSIZE_8K, - __off::_L2_EVICT_FIRST, - __on::_L2_EVICT_FIRST, - __l2_descriptor_mode_t::_DESC_INTERLEAVED, - (uint64_t) 1, - (uint64_t) 1, - (uint64_t) 1} - .__get_descriptor() - == 0xF318102000000001, - ""); - -/* Factory like struct to build a __block_desc_t due to constexpr C++11 - */ -struct __block_descriptor_builder -{ // variable declaration order matters == usage order - std::uint32_t __offset; - __block_size_t __block_size; - std::uint32_t __block_start, __end_hit; - std::uint32_t __block_count; - __off::__l2_cop_off_t __l2_cop_off; - __on::__l2_cop_on_t __l2_cop_on; - __l2_descriptor_mode_t __l2_descriptor_mode; - bool __l1_inv_dont_allocate, __l2_sector_promote_256B; - - _CCCL_HOST_DEVICE static constexpr std::uint32_t __calc_offset(std::size_t __total_bytes) - { - return __ap_max( - std::uint32_t{12}, - static_cast(__ap_ceil_log2(static_cast(__total_bytes))) - std::uint32_t{7}); - } - - _CCCL_HOST_DEVICE static constexpr std::uint32_t __calc_block_start(std::uintptr_t __ptr, std::size_t __total_bytes) - { - return static_cast(__ptr >> __calc_offset(static_cast(__total_bytes))); - } - - _CCCL_HOST_DEVICE static constexpr std::uint32_t - __calc_end_hit(std::uintptr_t __ptr, std::size_t __hit_bytes, std::size_t __total_bytes) - { - return static_cast( - (__ptr + __hit_bytes + (std::uintptr_t{1} << (__calc_offset(static_cast(__total_bytes)))) - 1) - >> __calc_offset(static_cast(__total_bytes))); - } - - _CCCL_HOST_DEVICE constexpr __block_descriptor_builder( - std::uintptr_t __ptr, - std::size_t __hit_bytes, - std::size_t __total_bytes, - __on::__l2_cop_on_t __hit_prop, - __off::__l2_cop_off_t __miss_prop) - : 
__offset(__calc_offset(__total_bytes)) - , __block_size(static_cast<__block_size_t>(__calc_offset(__total_bytes) - std::uint32_t{12})) - , __block_start(__calc_block_start(__ptr, __total_bytes)) - , __end_hit(__calc_end_hit(__ptr, __hit_bytes, __total_bytes)) - , __block_count(__calc_end_hit(__ptr, __hit_bytes, __total_bytes) - __calc_block_start(__ptr, __total_bytes)) - , __l2_cop_off(__miss_prop) - , __l2_cop_on(__hit_prop) - , __l2_descriptor_mode(_DESC_BLOCK_TYPE) - , __l1_inv_dont_allocate(false) - , __l2_sector_promote_256B(false) - {} - - _CCCL_HOST_DEVICE constexpr __block_desc_t __get_block() const noexcept - { - return __block_desc_t{ - 0, - __ap_min(std::uint32_t{0x7f}, __block_count), - (__block_start & std::uint32_t{0x7f}), - 0, - __block_size, - __l2_cop_off, - __l2_cop_on, - _DESC_BLOCK_TYPE, - false, - false, - 0}; - } -}; -static_assert(sizeof(std::uintptr_t) > 4, "std::uintptr_t needs at least 5 bytes for this code to work"); - -struct __interleave_descriptor_t -{ - uint64_t __ap_reserved : 52; - uint64_t __fraction : 4; - __off::__l2_cop_off_t __l2_cop_off : 1; - __on::__l2_cop_on_t __l2_cop_on : 2; - __l2_descriptor_mode_t __l2_descriptor_mode : 2; - uint64_t __l1_inv_dont_allocate : 1; - uint64_t __l2_sector_promote_256B : 1; - uint64_t __ap_reserved2 : 1; - - _CCCL_HOST_DEVICE constexpr __interleave_descriptor_t( - __on::__l2_cop_on_t __hit_prop, std::uint32_t __hit_ratio, __off::__l2_cop_off_t __miss_prop) noexcept - : __ap_reserved(0x0) - , __fraction(__hit_ratio) - , __l2_cop_off(__miss_prop) - , __l2_cop_on(__hit_prop) - , __l2_descriptor_mode(_DESC_INTERLEAVED) - , __l1_inv_dont_allocate(0x0) - , __l2_sector_promote_256B(0x0) - , __ap_reserved2(0x0) - {} - - _CCCL_HOST_DEVICE constexpr std::uint64_t __get_descriptor_cexpr() const - { - return std::uint64_t(__ap_reserved) << 0 | std::uint64_t(__fraction) << 52 | std::uint64_t(__l2_cop_off) << 56 - | std::uint64_t(__l2_cop_on) << 57 | std::uint64_t(__l2_descriptor_mode) << 59 - | std::uint64_t(__l1_inv_dont_allocate) << 61 | std::uint64_t(__l2_sector_promote_256B) << 62 - | std::uint64_t(__ap_reserved2) << 63; - } - - inline _CCCL_HOST_DEVICE std::uint64_t __get_descriptor_non_cexpr() const noexcept - { - return *reinterpret_cast(this); - } - - _CCCL_HOST_DEVICE constexpr std::uint64_t __get_descriptor() const noexcept - { + } + }; + static_assert(sizeof(__block_desc_t) == 8, "__block_desc_t should be 8 bytes"); + static_assert(sizeof(__block_desc_t) == sizeof(std::uint64_t), ""); + static_assert( + __block_desc_t{(uint64_t)1, (uint64_t)1, (uint64_t)1, (uint64_t)1, __block_size_t::_BLOCKSIZE_8K, __off::_L2_EVICT_FIRST, __on::_L2_EVICT_FIRST, __l2_descriptor_mode_t::_DESC_INTERLEAVED, (uint64_t)1, (uint64_t)1, (uint64_t)1}.__get_descriptor() + == 0xF318102000000001, ""); + + /* Factory like struct to build a __block_desc_t due to constexpr C++11 + */ + struct __block_descriptor_builder { //variable declaration order matters == usage order + std::uint32_t __offset; + __block_size_t __block_size; + std::uint32_t __block_start, __end_hit; + std::uint32_t __block_count; + __off::__l2_cop_off_t __l2_cop_off; + __on::__l2_cop_on_t __l2_cop_on; + __l2_descriptor_mode_t __l2_descriptor_mode; + bool __l1_inv_dont_allocate, __l2_sector_promote_256B; + + _CCCL_HOST_DEVICE static constexpr std::uint32_t __calc_offset(std::size_t __total_bytes) { + return __ap_max(std::uint32_t{12}, static_cast(__ap_ceil_log2(static_cast(__total_bytes))) - std::uint32_t{7}); + } + + _CCCL_HOST_DEVICE static constexpr std::uint32_t 
__calc_block_start(std::uintptr_t __ptr, std::size_t __total_bytes) { + return static_cast(__ptr >> __calc_offset(static_cast(__total_bytes))); + } + + _CCCL_HOST_DEVICE static constexpr std::uint32_t __calc_end_hit(std::uintptr_t __ptr, std::size_t __hit_bytes, std::size_t __total_bytes) { + return static_cast((__ptr + __hit_bytes + (std::uintptr_t{1} << (__calc_offset(static_cast(__total_bytes)))) - 1) >> __calc_offset(static_cast(__total_bytes))); + } + + _CCCL_HOST_DEVICE constexpr __block_descriptor_builder(std::uintptr_t __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, __on::__l2_cop_on_t __hit_prop, __off::__l2_cop_off_t __miss_prop) + : __offset(__calc_offset(__total_bytes)) + , __block_size(static_cast<__block_size_t>(__calc_offset(__total_bytes) - std::uint32_t{12})) + , __block_start(__calc_block_start(__ptr, __total_bytes)) + , __end_hit(__calc_end_hit(__ptr, __hit_bytes, __total_bytes)) + , __block_count(__calc_end_hit(__ptr, __hit_bytes, __total_bytes) - __calc_block_start(__ptr, __total_bytes)) + , __l2_cop_off(__miss_prop) + , __l2_cop_on(__hit_prop) + , __l2_descriptor_mode(_DESC_BLOCK_TYPE) + , __l1_inv_dont_allocate(false) + , __l2_sector_promote_256B(false) + {} + + _CCCL_HOST_DEVICE + constexpr __block_desc_t __get_block() const noexcept { + return __block_desc_t { 0, __ap_min(std::uint32_t{0x7f}, __block_count), (__block_start & std::uint32_t{0x7f}), 0, __block_size, __l2_cop_off, __l2_cop_on, _DESC_BLOCK_TYPE, false, false, 0 }; + } + }; + static_assert(sizeof(std::uintptr_t) > 4, "std::uintptr_t needs at least 5 bytes for this code to work"); + + struct __interleave_descriptor_t { + uint64_t __ap_reserved : 52; + uint64_t __fraction : 4; + __off::__l2_cop_off_t __l2_cop_off : 1; + __on::__l2_cop_on_t __l2_cop_on : 2; + __l2_descriptor_mode_t __l2_descriptor_mode : 2; + uint64_t __l1_inv_dont_allocate : 1; + uint64_t __l2_sector_promote_256B : 1; + uint64_t __ap_reserved2 : 1; + + _CCCL_HOST_DEVICE + constexpr __interleave_descriptor_t( + __on::__l2_cop_on_t __hit_prop, + std::uint32_t __hit_ratio, + __off::__l2_cop_off_t __miss_prop) noexcept + : __ap_reserved(0x0), + __fraction(__hit_ratio), + __l2_cop_off(__miss_prop), + __l2_cop_on(__hit_prop), + __l2_descriptor_mode(_DESC_INTERLEAVED), + __l1_inv_dont_allocate(0x0), + __l2_sector_promote_256B(0x0), + __ap_reserved2(0x0) {} + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __get_descriptor_cexpr() const { + return + std::uint64_t(__ap_reserved) << 0 | + std::uint64_t(__fraction) << 52 | + std::uint64_t(__l2_cop_off) << 56 | + std::uint64_t(__l2_cop_on) << 57 | + std::uint64_t(__l2_descriptor_mode) << 59 | + std::uint64_t(__l1_inv_dont_allocate) << 61 | + std::uint64_t(__l2_sector_promote_256B) << 62 | + std::uint64_t(__ap_reserved2) << 63; + } + + inline + _CCCL_HOST_DEVICE + std::uint64_t __get_descriptor_non_cexpr() const noexcept { return *reinterpret_cast(this); } + + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __get_descriptor() const noexcept { #if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) - return cuda::std::is_constant_evaluated() ? __get_descriptor_cexpr() : __get_descriptor_non_cexpr(); + return cuda::std::is_constant_evaluated() ? 
+ __get_descriptor_cexpr() : + __get_descriptor_non_cexpr(); #else - return __get_descriptor_cexpr(); + return __get_descriptor_cexpr(); #endif + } + }; + static_assert(sizeof(__interleave_descriptor_t) == 8, "__interleave_descriptor_t should be 8 bytes"); + static_assert(sizeof(__interleave_descriptor_t) == sizeof(std::uint64_t), ""); + + _CCCL_HOST_DEVICE + static constexpr std::uint64_t __interleave_normal() noexcept { + return 0x10F0000000000000; + } + + _CCCL_HOST_DEVICE + static constexpr std::uint64_t __interleave_streaming() noexcept { + return 0x12F0000000000000; + } + + _CCCL_HOST_DEVICE + static constexpr std::uint64_t __interleave_persisting() noexcept { + return 0x14F0000000000000; + } + + _CCCL_HOST_DEVICE + static constexpr std::uint64_t __interleave_normal_demote() noexcept { + return 0x16F0000000000000; + } + + } // namespace __sm_80 + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __interleave(cudaAccessProperty __hit_prop, float __hit_ratio, cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) { + return __sm_80::__interleave_descriptor_t( + ((__hit_prop == cudaAccessPropertyNormal) ? __sm_80::__on::__l2_cop_on_t::_L2_EVICT_NORMAL_DEMOTE : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop)), + __ap_min((static_cast(__hit_ratio) * __sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS), static_cast(__sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS - 1)), + static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop) + ).__get_descriptor(); + } + + _CCCL_HOST_DEVICE + constexpr std::uint64_t __block(void* __ptr, std::size_t __hit_bytes, std::size_t __total_bytes, cudaAccessProperty __hit_prop, cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) { + return (__total_bytes <= (size_t{0xFFFFFFFF}) & __total_bytes != 0 & __hit_bytes <= __total_bytes) ? __sm_80::__block_descriptor_builder( + reinterpret_cast(__ptr), + __hit_bytes, + __total_bytes, + (__hit_prop == cudaAccessPropertyNormal) ? __sm_80::__on::_L2_EVICT_NORMAL_DEMOTE : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop), + static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop) + ).__get_block().__get_descriptor() + : __sm_80::__interleave_normal(); } -}; -static_assert(sizeof(__interleave_descriptor_t) == 8, "__interleave_descriptor_t should be 8 bytes"); -static_assert(sizeof(__interleave_descriptor_t) == sizeof(std::uint64_t), ""); - -_CCCL_HOST_DEVICE static constexpr std::uint64_t __interleave_normal() noexcept -{ - return 0x10F0000000000000; -} - -_CCCL_HOST_DEVICE static constexpr std::uint64_t __interleave_streaming() noexcept -{ - return 0x12F0000000000000; -} - -_CCCL_HOST_DEVICE static constexpr std::uint64_t __interleave_persisting() noexcept -{ - return 0x14F0000000000000; -} - -_CCCL_HOST_DEVICE static constexpr std::uint64_t __interleave_normal_demote() noexcept -{ - return 0x16F0000000000000; -} - -} // namespace __sm_80 - -_CCCL_HOST_DEVICE constexpr std::uint64_t __interleave( - cudaAccessProperty __hit_prop, float __hit_ratio, cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) -{ - return __sm_80::__interleave_descriptor_t( - ((__hit_prop == cudaAccessPropertyNormal) ? 
__sm_80::__on::__l2_cop_on_t::_L2_EVICT_NORMAL_DEMOTE - : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop)), - __ap_min( - (static_cast(__hit_ratio) * __sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS), - static_cast(__sm_80::__l2_eviction_max_way_t::_CUDA_AMPERE_MAX_L2_WAYS - 1)), - static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop)) - .__get_descriptor(); -} - -_CCCL_HOST_DEVICE constexpr std::uint64_t __block( - void* __ptr, - std::size_t __hit_bytes, - std::size_t __total_bytes, - cudaAccessProperty __hit_prop, - cudaAccessProperty __miss_prop = cudaAccessPropertyNormal) -{ - return (__total_bytes <= (size_t{0xFFFFFFFF}) & __total_bytes != 0 & __hit_bytes <= __total_bytes) - ? __sm_80::__block_descriptor_builder( - reinterpret_cast(__ptr), - __hit_bytes, - __total_bytes, - (__hit_prop == cudaAccessPropertyNormal) - ? __sm_80::__on::_L2_EVICT_NORMAL_DEMOTE - : static_cast<__sm_80::__on::__l2_cop_on_t>(__hit_prop), - static_cast<__sm_80::__off::__l2_cop_off_t>(__miss_prop)) - .__get_block() - .__get_descriptor() - : __sm_80::__interleave_normal(); -} } // namespace __detail_ap _LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/libcudacxx/include/cuda/std/detail/__annotated_ptr b/libcudacxx/include/cuda/std/detail/__annotated_ptr index eb84a309f45..f1d4b166b6e 100644 --- a/libcudacxx/include/cuda/std/detail/__annotated_ptr +++ b/libcudacxx/include/cuda/std/detail/__annotated_ptr @@ -3,327 +3,229 @@ * * NVIDIA SOFTWARE LICENSE * - * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the - * NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). + * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”). * - * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. - * If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By - * taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of - * this license, and you take legal and financial responsibility for the actions of your permitted users. + * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. * - * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, - * regulation or generally accepted practices or guidelines in the relevant jurisdictions. + * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. * - * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install - * and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this - * license. 
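The hard-coded __interleave_*() constants above follow directly from the bit layout of __interleave_descriptor_t. A standalone sketch (illustrative names; field positions read off the struct in this hunk, so treat anything not named there as an assumption) that re-derives them:

    // Re-derivation of the interleave descriptor constants (illustrative only).
    #include <cstdint>

    constexpr int fraction_shift  = 52;  // 4-bit hit-ratio fraction: min(hit_ratio * 16, 15)
    constexpr int cop_off_shift   = 56;  // 1-bit miss property (__l2_cop_off_t)
    constexpr int cop_on_shift    = 57;  // 2-bit hit property (__l2_cop_on_t)
    constexpr int desc_mode_shift = 59;  // 2-bit descriptor mode (_DESC_INTERLEAVED == 2)

    constexpr std::uint64_t interleave(std::uint64_t cop_on,
                                       std::uint64_t fraction = 0xF,
                                       std::uint64_t cop_off  = 0) {
      return (fraction << fraction_shift) | (cop_off << cop_off_shift) |
             (cop_on << cop_on_shift) | (std::uint64_t{2} << desc_mode_shift);
    }

    // __l2_cop_on_t: 0 = evict-normal, 1 = evict-first, 2 = evict-last, 3 = normal-demote.
    static_assert(interleave(0) == 0x10F0000000000000, "== __interleave_normal()");
    static_assert(interleave(1) == 0x12F0000000000000, "== __interleave_streaming()");
    static_assert(interleave(2) == 0x14F0000000000000, "== __interleave_persisting()");
    static_assert(interleave(3) == 0x16F0000000000000, "== __interleave_normal_demote()");

The __block() path is analogous: __calc_offset() clamps ceil_log2(total_bytes) - 7 to a minimum of 12, so the hit window is described in at most 127 blocks of no less than 4 KB each, and anything outside the 32-bit total-bytes limit falls back to __interleave_normal().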
NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under - * this license. + * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. * * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: - * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, - * including (without limitation) terms relating to the license grant and license restrictions and protection of - * NVIDIA’s intellectual property rights. b. You agree to notify NVIDIA in writing of any known or suspected - * distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms - * of your agreements with respect to distributed SOFTWARE. + * a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. + * b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. * * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: * a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. - * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from - * any portion of the SOFTWARE or copies of the SOFTWARE. c. You may not modify or create derivative works of any - * portion of the SOFTWARE. d. You may not bypass, disable, or circumvent any technical measure, encryption, - * security, digital rights management or authentication mechanism in the SOFTWARE. e. You may not use the SOFTWARE - * in any manner that would cause it to become subject to an open source software license. As examples, licenses that - * require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in - * source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. f. - * Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or - * application where the use or failure of the system or application can reasonably be expected to threaten or result in - * personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life - * support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these - * critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or - * damages arising from such uses. g. 
You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, - * and their respective employees, contractors, agents, officers and directors, from and against any and all claims, - * damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited - * to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use - * of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + * b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. + * c. You may not modify or create derivative works of any portion of the SOFTWARE. + * d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. + * e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + * f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. + * g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. * - * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may - * not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, - * availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use - * a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in - * production or business-critical systems. + * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. * - * 5. OWNERSHIP. 
The SOFTWARE and the related intellectual property rights therein are and will remain the sole and - * exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United - * States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time - * without notice, but is not obligated to support or update the SOFTWARE. + * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. * - * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal - * notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is - * a conflict between the terms in this license and the license terms associated with a component, the license terms - * associated with the components control only to the extent necessary to resolve the conflict. + * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. * - * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, - * enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you - * voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable - * license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute - * (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA - * will use Feedback at its choice. + * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. * - * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT - * NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT - * WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR - * ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + * 8. NO WARRANTIES. 
THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. * - * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE - * FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, - * LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH - * THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON - * BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION - * OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE - * POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING - * OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE - * OR EXTEND THIS LIMIT. + * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. * - * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail - * to comply with any term and condition of this license or if you commence or participate in any legal proceeding - * against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if - * NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of - * it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of - * the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this - * license are not affected by the termination of this license. All provisions of this license will survive termination, - * except for the license granted to you. + * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. 
NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. * - * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State - * of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware - * residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the - * International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English - * language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction - * over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be - * allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. * - * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or - * operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be - * void and of no effect. + * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. * - * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, - * transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States - * Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s - * Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws - * include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not - * a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from - * receiving the SOFTWARE. + * 13. EXPORT. 
The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. * - * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting - * of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. - * Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the - * restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the - * Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is - * NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. * - * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the - * subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to - * this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of - * this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. - * This license may only be modified in a writing signed by an authorized representative of each party. + * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. * * (v. 
August 20, 2021) */ _LIBCUDACXX_BEGIN_NAMESPACE_CUDA -namespace __detail_ap -{ +namespace __detail_ap { -template -_CCCL_DEVICE void* __associate_address_space(void* __ptr, _Property __prop) -{ - if (std::is_same<_Property, access_property::shared>::value == true) - { - bool __b = __isShared(__ptr); - _LIBCUDACXX_ASSERT(__b, ""); + template + _CCCL_DEVICE + void* __associate_address_space(void* __ptr, _Property __prop) { + if (std::is_same<_Property, access_property::shared>::value == true) { + bool __b = __isShared(__ptr); + _LIBCUDACXX_ASSERT(__b, ""); #if !defined(_CCCL_CUDACC_BELOW_11_2) - __builtin_assume(__b); + __builtin_assume(__b); #else // ^^^ !_CCCL_CUDACC_BELOW_11_2 ^^^ / vvv _CCCL_CUDACC_BELOW_11_2 vvv - (void) __b; + (void)__b; #endif // _CCCL_CUDACC_BELOW_11_2 - } - else if (std::is_same<_Property, access_property::global>::value == true - || std::is_same<_Property, access_property::normal>::value == true - || std::is_same<_Property, access_property::persisting>::value == true - || std::is_same<_Property, access_property::streaming>::value == true - || std::is_same<_Property, access_property>::value) - { - bool __b = __isGlobal(__ptr); - _LIBCUDACXX_ASSERT(__b, ""); + } else if (std::is_same<_Property, access_property::global>::value == true || + std::is_same<_Property, access_property::normal>::value == true || + std::is_same<_Property, access_property::persisting>::value == true || + std::is_same<_Property, access_property::streaming>::value == true || + std::is_same<_Property, access_property>::value) { + bool __b = __isGlobal(__ptr); + _LIBCUDACXX_ASSERT(__b, ""); #if !defined(_CCCL_CUDACC_BELOW_11_2) - __builtin_assume(__b); + __builtin_assume(__b); #else // ^^^ !_CCCL_CUDACC_BELOW_11_2 ^^^ / vvv _CCCL_CUDACC_BELOW_11_2 vvv - (void) __b; + (void)__b; #endif // _CCCL_CUDACC_BELOW_11_2 - } - - return __ptr; -} - -template -_CCCL_DEVICE void* __associate_descriptor(void* __ptr, __Prop __prop) -{ - return __associate_descriptor(__ptr, static_cast(access_property(__prop))); -} - -template <> -inline _CCCL_DEVICE void* __associate_descriptor(void* __ptr, std::uint64_t __prop) -{ - NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __nv_associate_access_property(__ptr, __prop);), (return __ptr;)) -} + } -template <> -inline _CCCL_DEVICE void* __associate_descriptor(void* __ptr, access_property::shared) -{ - return __ptr; -} - -template -_CCCL_HOST_DEVICE _Type* __associate(_Type* __ptr, _Property __prop) -{ - NV_IF_ELSE_TARGET(NV_IS_DEVICE, - (return static_cast<_Type*>(__associate_descriptor( - __associate_address_space(const_cast(static_cast(__ptr)), __prop), __prop));), - (return __ptr;)) -} - -template -class __annotated_ptr_base -{ - using __error = typename _Property::__unknown_access_property_type; -}; - -template <> -class __annotated_ptr_base -{ -protected: - static constexpr std::uint64_t __prop = 0; - - constexpr __annotated_ptr_base() noexcept = default; - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::shared) noexcept {} - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, access_property::shared{}); - } - _CCCL_HOST_DEVICE constexpr access_property::shared __get_property() const noexcept - { - return access_property::shared{}; + return __ptr; } -}; -template <> -class __annotated_ptr_base -{ -protected: - static constexpr std::uint64_t __prop = 
__sm_80::__interleave_normal(); - - constexpr __annotated_ptr_base() noexcept = default; - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::global) noexcept {} - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, access_property::global{}); - } - _CCCL_HOST_DEVICE constexpr access_property::global __get_property() const noexcept - { - return access_property::global{}; + template + _CCCL_DEVICE + void* __associate_descriptor(void* __ptr, __Prop __prop) { + return __associate_descriptor(__ptr, static_cast(access_property(__prop))); } -}; - -template <> -class __annotated_ptr_base -{ -protected: - static constexpr std::uint64_t __prop = __sm_80::__interleave_normal_demote(); - constexpr __annotated_ptr_base() noexcept = default; - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::normal) noexcept {} - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, access_property::normal{}); + template <> + inline _CCCL_DEVICE + void* __associate_descriptor(void* __ptr, std::uint64_t __prop) { + NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,( + return __nv_associate_access_property(__ptr, __prop); + ),( + return __ptr; + )) } - _CCCL_HOST_DEVICE constexpr access_property::normal __get_property() const noexcept - { - return access_property::normal{}; - } -}; - -template <> -class __annotated_ptr_base -{ -protected: - static constexpr std::uint64_t __prop = __sm_80::__interleave_persisting(); - constexpr __annotated_ptr_base() noexcept = default; - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::persisting) noexcept {} - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, access_property::persisting{}); + template<> + inline _CCCL_DEVICE + void* __associate_descriptor(void* __ptr, access_property::shared) { + return __ptr; } - _CCCL_HOST_DEVICE constexpr access_property::persisting __get_property() const noexcept - { - return access_property::persisting{}; - } -}; - -template <> -class __annotated_ptr_base -{ -protected: - static constexpr std::uint64_t __prop = __sm_80::__interleave_streaming(); - constexpr __annotated_ptr_base() noexcept = default; - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::streaming) noexcept {} - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, access_property::streaming{}); - } - _CCCL_HOST_DEVICE constexpr access_property::streaming __get_property() const noexcept - { - return access_property::streaming{}; + template + _CCCL_HOST_DEVICE + _Type* __associate(_Type* __ptr, _Property __prop) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE,( + return static_cast<_Type*>(__associate_descriptor( + __associate_address_space(const_cast(static_cast(__ptr)), __prop), + __prop)); + ),( + return __ptr; + )) } -}; -template <> -class 
__annotated_ptr_base -{ -protected: - std::uint64_t __prop; - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base() noexcept - : __prop(access_property()) - {} - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(std::uint64_t __property) noexcept - : __prop(__property) - {} - _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property __property) noexcept - : __annotated_ptr_base(static_cast(__property)) - {} - constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; - _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; - inline _CCCL_DEVICE void* __apply_prop(void* __p) const - { - return __associate(__p, __prop); - } - _CCCL_HOST_DEVICE access_property __get_property() const noexcept - { - return reinterpret_cast(const_cast(__prop)); - } -}; + template + class __annotated_ptr_base { + using __error = typename _Property::__unknown_access_property_type; + }; + + template<> + class __annotated_ptr_base { + protected: + static constexpr std::uint64_t __prop = 0; + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::shared) noexcept {} + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, access_property::shared{}); + } + _CCCL_HOST_DEVICE constexpr access_property::shared __get_property() const noexcept { + return access_property::shared{}; + } + }; + + template<> + class __annotated_ptr_base { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_normal(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::global) noexcept {} + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, access_property::global{}); + } + _CCCL_HOST_DEVICE constexpr access_property::global __get_property() const noexcept { + return access_property::global{}; + } + }; + + template<> + class __annotated_ptr_base { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_normal_demote(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::normal) noexcept {} + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, access_property::normal{}); + } + _CCCL_HOST_DEVICE constexpr access_property::normal __get_property() const noexcept { + return access_property::normal{}; + } + }; + + template<> + class __annotated_ptr_base { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_persisting(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::persisting) noexcept {} + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, 
access_property::persisting{}); + } + _CCCL_HOST_DEVICE constexpr access_property::persisting __get_property() const noexcept { + return access_property::persisting{}; + } + }; + + template<> + class __annotated_ptr_base { + protected: + static constexpr std::uint64_t __prop = __sm_80::__interleave_streaming(); + + constexpr __annotated_ptr_base() noexcept = default; + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property::streaming) noexcept {} + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, access_property::streaming{}); + } + _CCCL_HOST_DEVICE constexpr access_property::streaming __get_property() const noexcept { + return access_property::streaming{}; + } + }; + + template<> + class __annotated_ptr_base { + protected: + std::uint64_t __prop; + + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base() noexcept : __prop(access_property()) {} + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(std::uint64_t __property) noexcept : __prop(__property) {} + _CCCL_HOST_DEVICE constexpr __annotated_ptr_base(access_property __property) noexcept + : __annotated_ptr_base(static_cast(__property)) {} + constexpr __annotated_ptr_base(__annotated_ptr_base const&) = default; + _CCCL_CONSTEXPR_CXX14 __annotated_ptr_base& operator=(const __annotated_ptr_base&) = default; + inline _CCCL_DEVICE void* __apply_prop(void* __p) const { + return __associate(__p, __prop); + } + _CCCL_HOST_DEVICE access_property __get_property() const noexcept { + return reinterpret_cast(const_cast(__prop)); + } + }; } // namespace __detail_ap _LIBCUDACXX_END_NAMESPACE_CUDA diff --git a/libcudacxx/include/cuda/std/detail/__config b/libcudacxx/include/cuda/std/detail/__config index aaa22d7cf6d..f4fba1f24d6 100644 --- a/libcudacxx/include/cuda/std/detail/__config +++ b/libcudacxx/include/cuda/std/detail/__config @@ -13,7 +13,7 @@ #include -#define _LIBCUDACXX_CUDA_API_VERSION CCCL_VERSION +#define _LIBCUDACXX_CUDA_API_VERSION CCCL_VERSION #define _LIBCUDACXX_CUDA_API_VERSION_MAJOR CCCL_MAJOR_VERSION #define _LIBCUDACXX_CUDA_API_VERSION_MINOR CCCL_MINOR_VERSION #define _LIBCUDACXX_CUDA_API_VERSION_PATCH CCCL_PATCH_VERSION diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__assert b/libcudacxx/include/cuda/std/detail/libcxx/include/__assert index 3568b3b746f..ad54f46dfd6 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__assert +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__assert @@ -27,28 +27,28 @@ // assertions through the Debug mode previously. // TODO: In LLVM 16, make it an error to define _LIBCUDACXX_DEBUG #if defined(_LIBCUDACXX_DEBUG) -# ifndef _LIBCUDACXX_ENABLE_ASSERTIONS -# define _LIBCUDACXX_ENABLE_ASSERTIONS 1 -# endif +# ifndef _LIBCUDACXX_ENABLE_ASSERTIONS +# define _LIBCUDACXX_ENABLE_ASSERTIONS 1 +# endif #endif // Automatically enable assertions when the debug mode is enabled. 
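A condensed, host-compilable sketch of how the association layer above fits together. The names below are invented for the example and the template parameter lists are inferred from the call sites in this hunk, not copied from the shipped header:

    // Sketch of the property-dispatch pattern used by __annotated_ptr_base/__associate.
    #include <cstdint>

    namespace sketch {

    struct global_tag {};      // stands in for cuda::access_property::global
    struct persisting_tag {};  // stands in for cuda::access_property::persisting

    // Primary template left undefined: an unknown property type fails to compile,
    // mirroring __annotated_ptr_base's __unknown_access_property_type member.
    template <typename Property>
    struct ptr_base;

    template <>
    struct ptr_base<global_tag> {
      static constexpr std::uint64_t descriptor = 0x10F0000000000000;  // __interleave_normal()
    };

    template <>
    struct ptr_base<persisting_tag> {
      static constexpr std::uint64_t descriptor = 0x14F0000000000000;  // __interleave_persisting()
    };

    // The association step: device code would hand the descriptor to the L2 hint
    // intrinsic; on the host it is a no-op, which is the NV_IF_ELSE_TARGET split
    // inside __associate() above.
    template <typename T, typename Property>
    T* associate(T* p, Property) {
      std::uint64_t policy = ptr_base<Property>::descriptor;
      (void)policy;  // host build: nothing to apply
      return p;
    }

    }  // namespace sketch

The public entry points layered on top of this are cuda::annotated_ptr<T, Property> and cuda::associate_access_property(); the __annotated_ptr_base specializations exist to give each statically-known property a fixed 64-bit descriptor, and the dynamic cuda::access_property specialization stores one at runtime instead.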
#if defined(_LIBCUDACXX_ENABLE_DEBUG_MODE) -# ifndef _LIBCUDACXX_ENABLE_ASSERTIONS -# define _LIBCUDACXX_ENABLE_ASSERTIONS 1 -# endif +# ifndef _LIBCUDACXX_ENABLE_ASSERTIONS +# define _LIBCUDACXX_ENABLE_ASSERTIONS 1 +# endif #endif #ifndef _LIBCUDACXX_ENABLE_ASSERTIONS -# define _LIBCUDACXX_ENABLE_ASSERTIONS _LIBCUDACXX_ENABLE_ASSERTIONS_DEFAULT +# define _LIBCUDACXX_ENABLE_ASSERTIONS _LIBCUDACXX_ENABLE_ASSERTIONS_DEFAULT #endif #if _LIBCUDACXX_ENABLE_ASSERTIONS != 0 && _LIBCUDACXX_ENABLE_ASSERTIONS != 1 -# error "_LIBCUDACXX_ENABLE_ASSERTIONS must be set to 0 or 1" +# error "_LIBCUDACXX_ENABLE_ASSERTIONS must be set to 0 or 1" #endif #if _LIBCUDACXX_ENABLE_ASSERTIONS -# define _LIBCUDACXX_ASSERT(expression, message) \ +# define _LIBCUDACXX_ASSERT(expression, message) \ (_CCCL_DIAG_PUSH \ _CCCL_DIAG_SUPPRESS_CLANG("-Wassume") \ __builtin_expect(static_cast(expression), 1) ? \ @@ -56,11 +56,13 @@ ::_CUDA_VSTD::__libcpp_verbose_abort("%s:%d: assertion %s failed: %s", __FILE__, __LINE__, #expression, message) _CCCL_DIAG_POP) #elif 0 // !defined(_LIBCUDACXX_ASSERTIONS_DISABLE_ASSUME) && __has_builtin(__builtin_assume) -# define _LIBCUDACXX_ASSERT(expression, message) \ - (_CCCL_DIAG_PUSH _CCCL_DIAG_SUPPRESS_CLANG("-Wassume") __builtin_assume(static_cast(expression)) \ - _CCCL_DIAG_POP) +# define _LIBCUDACXX_ASSERT(expression, message) \ + (_CCCL_DIAG_PUSH \ + _CCCL_DIAG_SUPPRESS_CLANG("-Wassume") \ + __builtin_assume(static_cast(expression)) \ + _CCCL_DIAG_POP) #else -# define _LIBCUDACXX_ASSERT(expression, message) ((void) 0) +# define _LIBCUDACXX_ASSERT(expression, message) ((void)0) #endif #endif // _LIBCUDACXX___ASSERT diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__availability b/libcudacxx/include/cuda/std/detail/libcxx/include/__availability index f89d2abf1a0..37ac58934ea 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__availability +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__availability @@ -63,230 +63,226 @@ // // [1]: https://clang.llvm.org/docs/AttributeReference.html#availability + // For backwards compatibility, allow users to define _LIBCUDACXX_DISABLE_AVAILABILITY // for a while. #if defined(_LIBCUDACXX_DISABLE_AVAILABILITY) -# if !defined(_LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif +# if !defined(_LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif #endif // Availability markup is disabled when building the library, or when the compiler // doesn't support the proper attributes. 
-#if defined(_LIBCUDACXX_BUILDING_LIBRARY) || defined(_LIBCXXABI_BUILDING_LIBRARY) \ - || !__has_feature(attribute_availability_with_strict) || !__has_feature(attribute_availability_in_templates) \ - || !__has_extension(pragma_clang_attribute_external_declaration) -# if !defined(_LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif +#if defined(_LIBCUDACXX_BUILDING_LIBRARY) || \ + defined(_LIBCXXABI_BUILDING_LIBRARY) || \ + !__has_feature(attribute_availability_with_strict) || \ + !__has_feature(attribute_availability_in_templates) || \ + !__has_extension(pragma_clang_attribute_external_declaration) +# if !defined(_LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif #endif #if defined(_LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -// This controls the availability of std::shared_mutex and std::shared_timed_mutex, -// which were added to the dylib later. -# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX + // This controls the availability of std::shared_mutex and std::shared_timed_mutex, + // which were added to the dylib later. +# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex -// These macros control the availability of std::bad_optional_access and -// other exception types. These were put in the shared library to prevent -// code bloat from every user program defining the vtable for these exception -// types. -// -// Note that when exceptions are disabled, the methods that normally throw -// these exceptions can be used even on older deployment targets, but those -// methods will abort instead of throwing. -# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS -# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST - -// This controls the availability of std::uncaught_exceptions(). -# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS - -// This controls the availability of the sized version of ::operator delete, -// ::operator delete[], and their align_val_t variants, which were all added -// in C++17, and hence not present in early dylibs. -# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE - -// This controls the availability of the std::future_error exception. -// -// Note that when exceptions are disabled, the methods that normally throw -// std::future_error can be used even on older deployment targets, but those -// methods will abort instead of throwing. -# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR - -// This controls the availability of std::type_info's vtable. -// I can't imagine how using std::type_info can work at all if -// this isn't supported. -# define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE - -// This controls the availability of std::locale::category members -// (e.g. std::locale::collate), which are defined in the dylib. -# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY - -// This controls the availability of atomic operations on std::shared_ptr -// (e.g. `std::atomic_store(std::shared_ptr)`), which require a shared -// lock table located in the dylib. -# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR - -// These macros control the availability of all parts of that -// depend on something in the dylib. 
-# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM -# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH -# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP + // These macros control the availability of std::bad_optional_access and + // other exception types. These were put in the shared library to prevent + // code bloat from every user program defining the vtable for these exception + // types. + // + // Note that when exceptions are disabled, the methods that normally throw + // these exceptions can be used even on older deployment targets, but those + // methods will abort instead of throwing. +# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST + + // This controls the availability of std::uncaught_exceptions(). +# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS + + // This controls the availability of the sized version of ::operator delete, + // ::operator delete[], and their align_val_t variants, which were all added + // in C++17, and hence not present in early dylibs. +# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE + + // This controls the availability of the std::future_error exception. + // + // Note that when exceptions are disabled, the methods that normally throw + // std::future_error can be used even on older deployment targets, but those + // methods will abort instead of throwing. +# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR + + // This controls the availability of std::type_info's vtable. + // I can't imagine how using std::type_info can work at all if + // this isn't supported. +# define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE + + // This controls the availability of std::locale::category members + // (e.g. std::locale::collate), which are defined in the dylib. +# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY + + // This controls the availability of atomic operations on std::shared_ptr + // (e.g. `std::atomic_store(std::shared_ptr)`), which require a shared + // lock table located in the dylib. +# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR + + // These macros control the availability of all parts of that + // depend on something in the dylib. +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem -// This controls the availability of floating-point std::to_chars functions. -// These overloads were added later than the integer overloads. -# define _LIBCUDACXX_AVAILABILITY_TO_CHARS_FLOATING_POINT + // This controls the availability of floating-point std::to_chars functions. + // These overloads were added later than the integer overloads. +# define _LIBCUDACXX_AVAILABILITY_TO_CHARS_FLOATING_POINT -// This controls the availability of the C++20 synchronization library, -// which requires shared library support for various operations -// (see libcxx/src/atomic.cpp). This includes , , -// , and notification functions on std::atomic. -# define _LIBCUDACXX_AVAILABILITY_SYNC + // This controls the availability of the C++20 synchronization library, + // which requires shared library support for various operations + // (see libcxx/src/atomic.cpp). This includes , , + // , and notification functions on std::atomic. 
+# define _LIBCUDACXX_AVAILABILITY_SYNC // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_latch // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore -// This controls the availability of the C++20 format library. -// The library is in development and not ABI stable yet. P2216 is -// retroactively accepted in C++20. This paper contains ABI breaking -// changes. -# define _LIBCUDACXX_AVAILABILITY_FORMAT + // This controls the availability of the C++20 format library. + // The library is in development and not ABI stable yet. P2216 is + // retroactively accepted in C++20. This paper contains ABI breaking + // changes. +# define _LIBCUDACXX_AVAILABILITY_FORMAT // # define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_format -// This controls whether the default verbose termination function is -// provided by the library. -// -// Note that when users provide their own custom function, it doesn't -// matter whether the dylib provides a default function, and the -// availability markup can actually give a false positive diagnostic -// (it will think that no function is provided, when in reality the -// user has provided their own). -// -// Users can pass -D_LIBCUDACXX_AVAILABILITY_CUSTOM_VERBOSE_ABORT_PROVIDED -// to the compiler to tell the library not to define its own verbose abort. -// Note that defining this macro but failing to define a custom function -// will lead to a load-time error on back-deployment targets, so it should -// be avoided. + // This controls whether the default verbose termination function is + // provided by the library. + // + // Note that when users provide their own custom function, it doesn't + // matter whether the dylib provides a default function, and the + // availability markup can actually give a false positive diagnostic + // (it will think that no function is provided, when in reality the + // user has provided their own). + // + // Users can pass -D_LIBCUDACXX_AVAILABILITY_CUSTOM_VERBOSE_ABORT_PROVIDED + // to the compiler to tell the library not to define its own verbose abort. + // Note that defining this macro but failing to define a custom function + // will lead to a load-time error on back-deployment targets, so it should + // be avoided. 
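The #elif defined(__APPLE__) branch just below attaches Clang availability attributes, with the strict keyword, to each of these macros. As a rough, compilable illustration of that attribute shape only (DEMO_AVAILABILITY_SYNC and demo_notify_all are hypothetical stand-ins, not library names):

// Illustration of a strict availability annotation, as documented at the Clang
// AttributeReference link cited above. On non-Clang or non-Apple toolchains the
// macro expands to nothing, so this translation unit compiles everywhere.
#if defined(__clang__) && defined(__APPLE__)
#  define DEMO_AVAILABILITY_SYNC __attribute__((availability(macos, strict, introduced = 11.0)))
#else
#  define DEMO_AVAILABILITY_SYNC
#endif

// With 'strict', using this declaration while targeting a macOS deployment
// version older than 11.0 is a hard error rather than a warning, so a missing
// dylib symbol is caught at compile time instead of at load time.
DEMO_AVAILABILITY_SYNC void demo_notify_all();

int main()
{
  // The declaration is deliberately left unused; the annotation only fires
  // when the entity is actually referenced.
  return 0;
}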
// # define _LIBCUDACXX_HAS_NO_VERBOSE_ABORT_IN_LIBRARY #elif defined(__APPLE__) -# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX \ - __attribute__((availability(macos, strict, introduced = 10.12))) \ - __attribute__((availability(ios, strict, introduced = 10.0))) \ - __attribute__((availability(tvos, strict, introduced = 10.0))) \ - __attribute__((availability(watchos, strict, introduced = 3.0))) -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200) \ - || (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 100000) \ - || (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 100000) \ - || (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 30000) -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex -# endif - -// Note: bad_optional_access & friends were not introduced in the matching -// macOS and iOS versions, so the version mismatch between macOS and others -// is intended. -# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS \ - __attribute__((availability(macos, strict, introduced = 10.13))) \ - __attribute__((availability(ios, strict, introduced = 12.0))) \ - __attribute__((availability(tvos, strict, introduced = 12.0))) \ - __attribute__((availability(watchos, strict, introduced = 5.0))) -# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS - -# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS \ - __attribute__((availability(macos, strict, introduced = 10.12))) \ - __attribute__((availability(ios, strict, introduced = 10.0))) \ - __attribute__((availability(tvos, strict, introduced = 10.0))) \ - __attribute__((availability(watchos, strict, introduced = 3.0))) - -# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE \ - __attribute__((availability(macos, strict, introduced = 10.12))) \ - __attribute__((availability(ios, strict, introduced = 10.0))) \ - __attribute__((availability(tvos, strict, introduced = 10.0))) \ - __attribute__((availability(watchos, strict, introduced = 3.0))) - -# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR __attribute__((availability(ios, strict, introduced = 6.0))) - -# define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE \ - __attribute__((availability(macos, strict, introduced = 10.9))) \ - __attribute__((availability(ios, strict, introduced = 7.0))) - -# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY \ - __attribute__((availability(macos, strict, introduced = 10.9))) \ - __attribute__((availability(ios, strict, introduced = 7.0))) - -# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR \ - __attribute__((availability(macos, strict, introduced = 10.9))) \ - __attribute__((availability(ios, strict, introduced = 7.0))) - -# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM \ - __attribute__((availability(macos, strict, introduced = 10.15))) \ - __attribute__((availability(ios, strict, introduced = 13.0))) \ - __attribute__((availability(tvos, strict, introduced = 13.0))) \ - __attribute__((availability(watchos, strict, introduced = 6.0))) -# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH \ - _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), " \ - 
"apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), " \ - "apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), " \ - "apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), " \ - "apply_to=any(function,record))") -# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP \ - _Pragma("clang attribute pop") _Pragma("clang attribute pop") _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) \ - || (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) \ - || (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) \ - || (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem -# endif - -# define _LIBCUDACXX_AVAILABILITY_TO_CHARS_FLOATING_POINT __attribute__((unavailable)) - -# define _LIBCUDACXX_AVAILABILITY_SYNC \ - __attribute__((availability(macos, strict, introduced = 11.0))) \ - __attribute__((availability(ios, strict, introduced = 14.0))) \ - __attribute__((availability(tvos, strict, introduced = 14.0))) \ - __attribute__((availability(watchos, strict, introduced = 7.0))) -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) \ - || (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) \ - || (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) \ - || (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_latch -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore -# endif - -# define _LIBCUDACXX_AVAILABILITY_FORMAT __attribute__((unavailable)) -# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_format - -# define _LIBCUDACXX_HAS_NO_VERBOSE_ABORT_IN_LIBRARY +# define _LIBCUDACXX_AVAILABILITY_SHARED_MUTEX \ + __attribute__((availability(macos,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 100000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 100000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 30000) +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex +# endif + + // Note: bad_optional_access & friends were not introduced 
in the matching + // macOS and iOS versions, so the version mismatch between macOS and others + // is intended. +# define _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS \ + __attribute__((availability(macos,strict,introduced=10.13))) \ + __attribute__((availability(ios,strict,introduced=12.0))) \ + __attribute__((availability(tvos,strict,introduced=12.0))) \ + __attribute__((availability(watchos,strict,introduced=5.0))) +# define _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS \ + _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST \ + _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS + +# define _LIBCUDACXX_AVAILABILITY_UNCAUGHT_EXCEPTIONS \ + __attribute__((availability(macos,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) + +# define _LIBCUDACXX_AVAILABILITY_SIZED_NEW_DELETE \ + __attribute__((availability(macos,strict,introduced=10.12))) \ + __attribute__((availability(ios,strict,introduced=10.0))) \ + __attribute__((availability(tvos,strict,introduced=10.0))) \ + __attribute__((availability(watchos,strict,introduced=3.0))) + +# define _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR \ + __attribute__((availability(ios,strict,introduced=6.0))) + +# define _LIBCUDACXX_AVAILABILITY_TYPEINFO_VTABLE \ + __attribute__((availability(macos,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) + +# define _LIBCUDACXX_AVAILABILITY_LOCALE_CATEGORY \ + __attribute__((availability(macos,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) + +# define _LIBCUDACXX_AVAILABILITY_ATOMIC_SHARED_PTR \ + __attribute__((availability(macos,strict,introduced=10.9))) \ + __attribute__((availability(ios,strict,introduced=7.0))) + +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM \ + __attribute__((availability(macos,strict,introduced=10.15))) \ + __attribute__((availability(ios,strict,introduced=13.0))) \ + __attribute__((availability(tvos,strict,introduced=13.0))) \ + __attribute__((availability(watchos,strict,introduced=6.0))) +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_PUSH \ + _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") +# define _LIBCUDACXX_AVAILABILITY_FILESYSTEM_POP \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem +# endif + +# define 
_LIBCUDACXX_AVAILABILITY_TO_CHARS_FLOATING_POINT \ + __attribute__((unavailable)) + +# define _LIBCUDACXX_AVAILABILITY_SYNC \ + __attribute__((availability(macos,strict,introduced=11.0))) \ + __attribute__((availability(ios,strict,introduced=14.0))) \ + __attribute__((availability(tvos,strict,introduced=14.0))) \ + __attribute__((availability(watchos,strict,introduced=7.0))) +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_latch +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore +# endif + +# define _LIBCUDACXX_AVAILABILITY_FORMAT \ + __attribute__((unavailable)) +# define _LIBCUDACXX_AVAILABILITY_DISABLE_FTM___cpp_lib_format + +# define _LIBCUDACXX_HAS_NO_VERBOSE_ABORT_IN_LIBRARY #else // ...New vendors can add availability markup here... -# error \ - "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" +# error "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" #endif @@ -294,15 +290,15 @@ // Those are defined in terms of the availability attributes above, and // should not be vendor-specific. 
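The hunk just below maps the generic _LIBCUDACXX_AVAILABILITY_FUTURE and _LIBCUDACXX_AVAILABILITY_THROW_* helpers onto the vendor macros above, and leaves them empty when _LIBCUDACXX_NO_EXCEPTIONS is defined. As a standalone sketch of the idea described in the comments earlier in this file (the normally-throwing helpers abort instead of throwing when exceptions are off), using hypothetical names only:

// Sketch of the "throw when exceptions are enabled, abort otherwise" pattern.
// DEMO_NO_EXCEPTIONS and demo_throw_bad_access are hypothetical, not library names.
#include <cstdio>
#include <cstdlib>
#include <stdexcept>

#if defined(DEMO_NO_EXCEPTIONS)
[[noreturn]] inline void demo_throw_bad_access()
{
  // Nothing from a shared library is referenced on this path, which is why the
  // comments above note that it works even on older deployment targets.
  std::fprintf(stderr, "bad access\n");
  std::abort();
}
#else
[[noreturn]] inline void demo_throw_bad_access()
{
  // This is the path where availability markup matters in the real library,
  // since the exception type's vtable lives in the dylib.
  throw std::runtime_error("bad access");
}
#endif

int main()
{
#if defined(DEMO_NO_EXCEPTIONS)
  demo_throw_bad_access(); // aborts
#else
  try
  {
    demo_throw_bad_access();
  }
  catch (const std::exception& e)
  {
    std::printf("caught: %s\n", e.what());
  }
#endif
  return 0;
}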
#if defined(_LIBCUDACXX_NO_EXCEPTIONS) -# define _LIBCUDACXX_AVAILABILITY_FUTURE -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS +# define _LIBCUDACXX_AVAILABILITY_FUTURE +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS #else -# define _LIBCUDACXX_AVAILABILITY_FUTURE _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS +# define _LIBCUDACXX_AVAILABILITY_FUTURE _LIBCUDACXX_AVAILABILITY_FUTURE_ERROR +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCUDACXX_AVAILABILITY_BAD_ANY_CAST +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCUDACXX_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCUDACXX_AVAILABILITY_BAD_VARIANT_ACCESS #endif #endif // _LIBCUDACXX___AVAILABILITY diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference b/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference index 88325c3d5c9..4ce42eb4c6a 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__bit_reference @@ -10,9 +10,9 @@ #ifndef _LIBCUDACXX___BIT_REFERENCE #define _LIBCUDACXX___BIT_REFERENCE -##include -#include +##include #include +#include #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) # pragma GCC system_header @@ -22,259 +22,229 @@ # pragma system_header #endif // no system header - _LIBCUDACXX_PUSH_MACROS +_LIBCUDACXX_PUSH_MACROS #include <__undef_macros> - _LIBCUDACXX_BEGIN_NAMESPACE_STD -template -class __bit_iterator; -template -class __bit_const_reference; +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +template class __bit_iterator; +template class __bit_const_reference; template struct __has_storage_type { - static const bool value = false; + static const bool value = false; }; template ::value> class __bit_reference { - typedef typename _Cp::__storage_type __storage_type; - typedef typename _Cp::__storage_pointer __storage_pointer; - - __storage_pointer __seg_; - __storage_type __mask_; + typedef typename _Cp::__storage_type __storage_type; + typedef typename _Cp::__storage_pointer __storage_pointer; - friend typename _Cp::__self; + __storage_pointer __seg_; + __storage_type __mask_; - friend class __bit_const_reference<_Cp>; - friend class __bit_iterator<_Cp, false>; + friend typename _Cp::__self; + friend class __bit_const_reference<_Cp>; + friend class __bit_iterator<_Cp, false>; public: - _LIBCUDACXX_INLINE_VISIBILITY __bit_reference(const __bit_reference&) = default; - - _LIBCUDACXX_INLINE_VISIBILITY operator bool() const noexcept - { - return static_cast(*__seg_ & __mask_); - } - _LIBCUDACXX_INLINE_VISIBILITY bool operator~() const noexcept - { - return !static_cast(*this); - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_reference& operator=(bool __x) noexcept - { - if (__x) - { - *__seg_ |= __mask_; - } - else + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference(const __bit_reference&) = default; + + _LIBCUDACXX_INLINE_VISIBILITY operator bool() const noexcept + {return static_cast(*__seg_ & __mask_);} + 
_LIBCUDACXX_INLINE_VISIBILITY bool operator ~() const noexcept + {return !static_cast(*this);} + + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference& operator=(bool __x) noexcept { - *__seg_ &= ~__mask_; + if (__x) + *__seg_ |= __mask_; + else + *__seg_ &= ~__mask_; + return *this; } - return *this; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_reference& operator=(const __bit_reference& __x) noexcept - { - return operator=(static_cast(__x)); - } - - _LIBCUDACXX_INLINE_VISIBILITY void flip() noexcept - { - *__seg_ ^= __mask_; - } - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> operator&() const noexcept - { - return __bit_iterator<_Cp, false>(__seg_, static_cast(__libcpp_ctz(__mask_))); - } + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference& operator=(const __bit_reference& __x) noexcept + {return operator=(static_cast(__x));} + + _LIBCUDACXX_INLINE_VISIBILITY void flip() noexcept {*__seg_ ^= __mask_;} + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> operator&() const noexcept + {return __bit_iterator<_Cp, false>(__seg_, static_cast(__libcpp_ctz(__mask_)));} private: - _LIBCUDACXX_INLINE_VISIBILITY __bit_reference(__storage_pointer __s, __storage_type __m) noexcept - : __seg_(__s) - , __mask_(__m) - {} + _LIBCUDACXX_INLINE_VISIBILITY + __bit_reference(__storage_pointer __s, __storage_type __m) noexcept + : __seg_(__s), __mask_(__m) {} }; template class __bit_reference<_Cp, false> -{}; +{ +}; template -inline _LIBCUDACXX_INLINE_VISIBILITY void swap(__bit_reference<_Cp> __x, __bit_reference<_Cp> __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, __bit_reference<_Cp> __y) noexcept { - bool __t = __x; - __x = __y; - __y = __t; + bool __t = __x; + __x = __y; + __y = __t; } template -inline _LIBCUDACXX_INLINE_VISIBILITY void swap(__bit_reference<_Cp> __x, __bit_reference<_Dp> __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, __bit_reference<_Dp> __y) noexcept { - bool __t = __x; - __x = __y; - __y = __t; + bool __t = __x; + __x = __y; + __y = __t; } template -inline _LIBCUDACXX_INLINE_VISIBILITY void swap(__bit_reference<_Cp> __x, bool& __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(__bit_reference<_Cp> __x, bool& __y) noexcept { - bool __t = __x; - __x = __y; - __y = __t; + bool __t = __x; + __x = __y; + __y = __t; } template -inline _LIBCUDACXX_INLINE_VISIBILITY void swap(bool& __x, __bit_reference<_Cp> __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +swap(bool& __x, __bit_reference<_Cp> __y) noexcept { - bool __t = __x; - __x = __y; - __y = __t; + bool __t = __x; + __x = __y; + __y = __t; } template class __bit_const_reference { - typedef typename _Cp::__storage_type __storage_type; - typedef typename _Cp::__const_storage_pointer __storage_pointer; - - __storage_pointer __seg_; - __storage_type __mask_; + typedef typename _Cp::__storage_type __storage_type; + typedef typename _Cp::__const_storage_pointer __storage_pointer; - friend typename _Cp::__self; - friend class __bit_iterator<_Cp, true>; + __storage_pointer __seg_; + __storage_type __mask_; + friend typename _Cp::__self; + friend class __bit_iterator<_Cp, true>; public: - _LIBCUDACXX_INLINE_VISIBILITY __bit_const_reference(const __bit_const_reference&) = default; - - _LIBCUDACXX_INLINE_VISIBILITY __bit_const_reference(const __bit_reference<_Cp>& __x) noexcept - : __seg_(__x.__seg_) - , __mask_(__x.__mask_) - {} + _LIBCUDACXX_INLINE_VISIBILITY + __bit_const_reference(const __bit_const_reference&) = default; - 
_LIBCUDACXX_INLINE_VISIBILITY constexpr operator bool() const noexcept - { - return static_cast(*__seg_ & __mask_); - } + _LIBCUDACXX_INLINE_VISIBILITY + __bit_const_reference(const __bit_reference<_Cp>& __x) noexcept + : __seg_(__x.__seg_), __mask_(__x.__mask_) {} - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, true> operator&() const noexcept - { - return __bit_iterator<_Cp, true>(__seg_, static_cast(__libcpp_ctz(__mask_))); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr operator bool() const noexcept + {return static_cast(*__seg_ & __mask_);} + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, true> operator&() const noexcept + {return __bit_iterator<_Cp, true>(__seg_, static_cast(__libcpp_ctz(__mask_)));} private: - _LIBCUDACXX_INLINE_VISIBILITY constexpr __bit_const_reference(__storage_pointer __s, __storage_type __m) noexcept - : __seg_(__s) - , __mask_(__m) - {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr + __bit_const_reference(__storage_pointer __s, __storage_type __m) noexcept + : __seg_(__s), __mask_(__m) {} - __bit_const_reference& operator=(const __bit_const_reference&) = delete; + __bit_const_reference& operator=(const __bit_const_reference&) = delete; }; // find template -__bit_iterator<_Cp, _IsConst> __find_bool_true(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +__bit_iterator<_Cp, _IsConst> +__find_bool_true(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, _IsConst> _It; - typedef typename _It::__storage_type __storage_type; - static const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __storage_type __b = *__first.__seg_ & __m; - if (__b) - { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); - } - if (__n == __dn) - { - return __first + __n; - } - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) - { - if (*__first.__seg_) + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + static const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(*__first.__seg_))); + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = *__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + if (__n == __dn) + return __first + __n; + __n -= __dn; + ++__first.__seg_; } - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = *__first.__seg_ & __m; - if (__b) + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + if (*__first.__seg_) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(*__first.__seg_))); + // do last partial word + if (__n > 0) { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + __storage_type 
__m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); } - } - return _It(__first.__seg_, static_cast(__n)); + return _It(__first.__seg_, static_cast(__n)); } template -__bit_iterator<_Cp, _IsConst> __find_bool_false(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) +__bit_iterator<_Cp, _IsConst> +__find_bool_false(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, _IsConst> _It; - typedef typename _It::__storage_type __storage_type; - const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __storage_type __b = ~*__first.__seg_ & __m; - if (__b) - { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); - } - if (__n == __dn) + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) { - return __first + __n; + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = ~*__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + if (__n == __dn) + return __first + __n; + __n -= __dn; + ++__first.__seg_; } - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) - { - __storage_type __b = ~*__first.__seg_; - if (__b) + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + __storage_type __b = ~*__first.__seg_; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); } - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = ~*__first.__seg_ & __m; - if (__b) + // do last partial word + if (__n > 0) { - return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = ~*__first.__seg_ & __m; + if (__b) + return _It(__first.__seg_, static_cast(_CUDA_VSTD::__libcpp_ctz(__b))); } - } - return _It(__first.__seg_, static_cast(__n)); + return _It(__first.__seg_, static_cast(__n)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, _IsConst> +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, _IsConst> find(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value_) { - if (static_cast(__value_)) - { - return __find_bool_true(__first, static_cast(__last - __first)); - } - return __find_bool_false(__first, static_cast(__last - __first)); + if (static_cast(__value_)) + return __find_bool_true(__first, static_cast(__last - __first)); + return __find_bool_false(__first, static_cast(__last - __first)); } // count @@ 
-283,633 +253,627 @@ template typename __bit_iterator<_Cp, _IsConst>::difference_type __count_bool_true(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, _IsConst> _It; - typedef typename _It::__storage_type __storage_type; - typedef typename _It::difference_type difference_type; - const int __bits_per_word = _It::__bits_per_word; - difference_type __r = 0; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __r = _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) - { - __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_); - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); - } - return __r; + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + typedef typename _It::difference_type difference_type; + const int __bits_per_word = _It::__bits_per_word; + difference_type __r = 0; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __r = _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_); + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __r += _CUDA_VSTD::__libcpp_popcount(*__first.__seg_ & __m); + } + return __r; } template typename __bit_iterator<_Cp, _IsConst>::difference_type __count_bool_false(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, _IsConst> _It; - typedef typename _It::__storage_type __storage_type; - typedef typename _It::difference_type difference_type; - const int __bits_per_word = _It::__bits_per_word; - difference_type __r = 0; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __r = _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) - { - __r += _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_); - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __r += _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); - } - return __r; + typedef __bit_iterator<_Cp, _IsConst> _It; + typedef typename _It::__storage_type __storage_type; + typedef typename _It::difference_type difference_type; + const int __bits_per_word = 
_It::__bits_per_word; + difference_type __r = 0; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __r = _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + for (; __n >= __bits_per_word; ++__first.__seg_, __n -= __bits_per_word) + __r += _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_); + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __r += _CUDA_VSTD::__libcpp_popcount(~*__first.__seg_ & __m); + } + return __r; } template -inline _LIBCUDACXX_INLINE_VISIBILITY typename __bit_iterator<_Cp, _IsConst>::difference_type +inline _LIBCUDACXX_INLINE_VISIBILITY +typename __bit_iterator<_Cp, _IsConst>::difference_type count(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value_) { - if (static_cast(__value_)) - { - return __count_bool_true(__first, static_cast(__last - __first)); - } - return __count_bool_false(__first, static_cast(__last - __first)); + if (static_cast(__value_)) + return __count_bool_true(__first, static_cast(__last - __first)); + return __count_bool_false(__first, static_cast(__last - __first)); } // fill_n template -void __fill_n_false(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) +void +__fill_n_false(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, false> _It; - typedef typename _It::__storage_type __storage_type; - const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - *__first.__seg_ &= ~__m; - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - __storage_type __nw = __n / __bits_per_word; - _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), 0, __nw * sizeof(__storage_type)); - __n -= __nw * __bits_per_word; - // do last partial word - if (__n > 0) - { - __first.__seg_ += __nw; - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - *__first.__seg_ &= ~__m; - } + typedef __bit_iterator<_Cp, false> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + *__first.__seg_ &= ~__m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), 0, __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) + { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + *__first.__seg_ &= ~__m; + } } template -void __fill_n_true(__bit_iterator<_Cp, false> __first, typename 
_Cp::size_type __n) +void +__fill_n_true(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { - typedef __bit_iterator<_Cp, false> _It; - typedef typename _It::__storage_type __storage_type; - const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) - { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - *__first.__seg_ |= __m; - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - __storage_type __nw = __n / __bits_per_word; - _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), -1, __nw * sizeof(__storage_type)); - __n -= __nw * __bits_per_word; - // do last partial word - if (__n > 0) - { - __first.__seg_ += __nw; - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - *__first.__seg_ |= __m; - } + typedef __bit_iterator<_Cp, false> _It; + typedef typename _It::__storage_type __storage_type; + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) + { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = _CUDA_VSTD::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + *__first.__seg_ |= __m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + _CUDA_VSTD::memset(_CUDA_VSTD::__to_raw_pointer(__first.__seg_), -1, __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) + { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + *__first.__seg_ |= __m; + } } template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n, bool __value_) { - if (__n > 0) - { - if (__value_) - { - __fill_n_true(__first, __n); - } - else + if (__n > 0) { - __fill_n_false(__first, __n); + if (__value_) + __fill_n_true(__first, __n); + else + __fill_n_false(__first, __n); } - } } // fill template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void fill(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __last, bool __value_) { - _CUDA_VSTD::fill_n(__first, static_cast(__last - __first), __value_); + _CUDA_VSTD::fill_n(__first, static_cast(__last - __first), __value_); } // copy template -__bit_iterator<_Cp, false> __copy_aligned( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +__bit_iterator<_Cp, false> +__copy_aligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, + __bit_iterator<_Cp, false> __result) { - typedef __bit_iterator<_Cp, _IsConst> _In; - typedef typename _In::difference_type difference_type; - typedef typename _In::__storage_type __storage_type; - const int __bits_per_word = _In::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__first.__ctz_ != 0) - { - unsigned __clz = __bits_per_word - __first.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> 
(__clz - __dn)); - __storage_type __b = *__first.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b; - __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); - ++__first.__seg_; - // __first.__ctz_ = 0; - } - // __first.__ctz_ == 0; - // do middle words - __storage_type __nw = __n / __bits_per_word; - _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_), - _CUDA_VSTD::__to_raw_pointer(__first.__seg_), - __nw * sizeof(__storage_type)); - __n -= __nw * __bits_per_word; - __result.__seg_ += __nw; - // do last word + typedef __bit_iterator<_Cp, _IsConst> _In; + typedef typename _In::difference_type difference_type; + typedef typename _In::__storage_type __storage_type; + const int __bits_per_word = _In::__bits_per_word; + difference_type __n = __last - __first; if (__n > 0) { - __first.__seg_ += __nw; - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = *__first.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b; - __result.__ctz_ = static_cast(__n); + // do first word + if (__first.__ctz_ != 0) + { + unsigned __clz = __bits_per_word - __first.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn)); + __storage_type __b = *__first.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b; + __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); + ++__first.__seg_; + // __first.__ctz_ = 0; + } + // __first.__ctz_ == 0; + // do middle words + __storage_type __nw = __n / __bits_per_word; + _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_), + _CUDA_VSTD::__to_raw_pointer(__first.__seg_), + __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + __result.__seg_ += __nw; + // do last word + if (__n > 0) + { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__first.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b; + __result.__ctz_ = static_cast(__n); + } } - } - return __result; + return __result; } template -__bit_iterator<_Cp, false> __copy_unaligned( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +__bit_iterator<_Cp, false> +__copy_unaligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, + __bit_iterator<_Cp, false> __result) { - typedef __bit_iterator<_Cp, _IsConst> _In; - typedef typename _In::difference_type difference_type; - typedef typename _In::__storage_type __storage_type; - static const int __bits_per_word = _In::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__first.__ctz_ != 0) - { - unsigned __clz_f = __bits_per_word - __first.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __storage_type __b = *__first.__seg_ & __m; - unsigned __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); - __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); - *__result.__seg_ &= ~__m; - if 
(__result.__ctz_ > __first.__ctz_) - { - *__result.__seg_ |= __b << (__result.__ctz_ - __first.__ctz_); - } - else - { - *__result.__seg_ |= __b >> (__first.__ctz_ - __result.__ctz_); - } - __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__ddn + __result.__ctz_) % __bits_per_word); - __dn -= __ddn; - if (__dn > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __dn); - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b >> (__first.__ctz_ + __ddn); - __result.__ctz_ = static_cast(__dn); - } - ++__first.__seg_; - // __first.__ctz_ = 0; - } - // __first.__ctz_ == 0; - // do middle words - unsigned __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __m = ~__storage_type(0) << __result.__ctz_; - for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_) - { - __storage_type __b = *__first.__seg_; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b << __result.__ctz_; - ++__result.__seg_; - *__result.__seg_ &= __m; - *__result.__seg_ |= __b >> __clz_r; - } - // do last word + typedef __bit_iterator<_Cp, _IsConst> _In; + typedef typename _In::difference_type difference_type; + typedef typename _In::__storage_type __storage_type; + static const int __bits_per_word = _In::__bits_per_word; + difference_type __n = __last - __first; if (__n > 0) { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = *__first.__seg_ & __m; - __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__clz_r)); - __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b << __result.__ctz_; - __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); - __n -= __dn; - if (__n > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b >> __dn; - __result.__ctz_ = static_cast(__n); - } + // do first word + if (__first.__ctz_ != 0) + { + unsigned __clz_f = __bits_per_word - __first.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = *__first.__seg_ & __m; + unsigned __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); + __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); + *__result.__seg_ &= ~__m; + if (__result.__ctz_ > __first.__ctz_) + *__result.__seg_ |= __b << (__result.__ctz_ - __first.__ctz_); + else + *__result.__seg_ |= __b >> (__first.__ctz_ - __result.__ctz_); + __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__ddn + __result.__ctz_) % __bits_per_word); + __dn -= __ddn; + if (__dn > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __dn); + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b >> (__first.__ctz_ + __ddn); + __result.__ctz_ = static_cast(__dn); + } + ++__first.__seg_; + // __first.__ctz_ = 0; + } + // __first.__ctz_ == 0; + // do middle words + unsigned __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __m = ~__storage_type(0) << __result.__ctz_; + for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_) + { + __storage_type __b = *__first.__seg_; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b << __result.__ctz_; + 
++__result.__seg_; + *__result.__seg_ &= __m; + *__result.__seg_ |= __b >> __clz_r; + } + // do last word + if (__n > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__first.__seg_ & __m; + __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__clz_r)); + __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b << __result.__ctz_; + __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); + __n -= __dn; + if (__n > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __n); + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b >> __dn; + __result.__ctz_ = static_cast(__n); + } + } } - } - return __result; + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, false> copy(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) { - if (__first.__ctz_ == __result.__ctz_) - { - return __copy_aligned(__first, __last, __result); - } - return __copy_unaligned(__first, __last, __result); + if (__first.__ctz_ == __result.__ctz_) + return __copy_aligned(__first, __last, __result); + return __copy_unaligned(__first, __last, __result); } // copy_backward template -__bit_iterator<_Cp, false> __copy_backward_aligned( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +__bit_iterator<_Cp, false> +__copy_backward_aligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, + __bit_iterator<_Cp, false> __result) { - typedef __bit_iterator<_Cp, _IsConst> _In; - typedef typename _In::difference_type difference_type; - typedef typename _In::__storage_type __storage_type; - const int __bits_per_word = _In::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__last.__ctz_ != 0) - { - difference_type __dn = _CUDA_VSTD::min(static_cast(__last.__ctz_), __n); - __n -= __dn; - unsigned __clz = __bits_per_word - __last.__ctz_; - __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz); - __storage_type __b = *__last.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b; - __result.__ctz_ = static_cast(((-__dn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word); - // __last.__ctz_ = 0 - } - // __last.__ctz_ == 0 || __n == 0 - // __result.__ctz_ == 0 || __n == 0 - // do middle words - __storage_type __nw = __n / __bits_per_word; - __result.__seg_ -= __nw; - __last.__seg_ -= __nw; - _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_), - _CUDA_VSTD::__to_raw_pointer(__last.__seg_), - __nw * sizeof(__storage_type)); - __n -= __nw * __bits_per_word; - // do last word + typedef __bit_iterator<_Cp, _IsConst> _In; + typedef typename _In::difference_type difference_type; + typedef typename _In::__storage_type __storage_type; + const int __bits_per_word = _In::__bits_per_word; + difference_type __n = __last - __first; if (__n > 0) { - __storage_type __m = ~__storage_type(0) << (__bits_per_word - __n); - __storage_type __b = *--__last.__seg_ & __m; - *--__result.__seg_ &= ~__m; - *__result.__seg_ |= __b; - __result.__ctz_ = static_cast(-__n & (__bits_per_word - 1)); + // do first word + if (__last.__ctz_ != 0) + { + difference_type __dn = 
_CUDA_VSTD::min(static_cast(__last.__ctz_), __n); + __n -= __dn; + unsigned __clz = __bits_per_word - __last.__ctz_; + __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz); + __storage_type __b = *__last.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b; + __result.__ctz_ = static_cast(((-__dn & (__bits_per_word - 1)) + + __result.__ctz_) % __bits_per_word); + // __last.__ctz_ = 0 + } + // __last.__ctz_ == 0 || __n == 0 + // __result.__ctz_ == 0 || __n == 0 + // do middle words + __storage_type __nw = __n / __bits_per_word; + __result.__seg_ -= __nw; + __last.__seg_ -= __nw; + _CUDA_VSTD::memmove(_CUDA_VSTD::__to_raw_pointer(__result.__seg_), + _CUDA_VSTD::__to_raw_pointer(__last.__seg_), + __nw * sizeof(__storage_type)); + __n -= __nw * __bits_per_word; + // do last word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) << (__bits_per_word - __n); + __storage_type __b = *--__last.__seg_ & __m; + *--__result.__seg_ &= ~__m; + *__result.__seg_ |= __b; + __result.__ctz_ = static_cast(-__n & (__bits_per_word - 1)); + } } - } - return __result; + return __result; } template -__bit_iterator<_Cp, false> __copy_backward_unaligned( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +__bit_iterator<_Cp, false> +__copy_backward_unaligned(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, + __bit_iterator<_Cp, false> __result) { - typedef __bit_iterator<_Cp, _IsConst> _In; - typedef typename _In::difference_type difference_type; - typedef typename _In::__storage_type __storage_type; - const int __bits_per_word = _In::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__last.__ctz_ != 0) + typedef __bit_iterator<_Cp, _IsConst> _In; + typedef typename _In::difference_type difference_type; + typedef typename _In::__storage_type __storage_type; + const int __bits_per_word = _In::__bits_per_word; + difference_type __n = __last - __first; + if (__n > 0) { - difference_type __dn = _CUDA_VSTD::min(static_cast(__last.__ctz_), __n); - __n -= __dn; - unsigned __clz_l = __bits_per_word - __last.__ctz_; - __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_l); - __storage_type __b = *__last.__seg_ & __m; - unsigned __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __ddn = _CUDA_VSTD::min(__dn, static_cast(__result.__ctz_)); - if (__ddn > 0) - { - __m = (~__storage_type(0) << (__result.__ctz_ - __ddn)) & (~__storage_type(0) >> __clz_r); - *__result.__seg_ &= ~__m; - if (__result.__ctz_ > __last.__ctz_) + // do first word + if (__last.__ctz_ != 0) + { + difference_type __dn = _CUDA_VSTD::min(static_cast(__last.__ctz_), __n); + __n -= __dn; + unsigned __clz_l = __bits_per_word - __last.__ctz_; + __storage_type __m = (~__storage_type(0) << (__last.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_l); + __storage_type __b = *__last.__seg_ & __m; + unsigned __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __ddn = _CUDA_VSTD::min(__dn, static_cast(__result.__ctz_)); + if (__ddn > 0) + { + __m = (~__storage_type(0) << (__result.__ctz_ - __ddn)) & (~__storage_type(0) >> __clz_r); + *__result.__seg_ &= ~__m; + if (__result.__ctz_ > __last.__ctz_) + *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_); + else + *__result.__seg_ |= __b >> (__last.__ctz_ - __result.__ctz_); + __result.__ctz_ = static_cast(((-__ddn & (__bits_per_word - 
1)) + + __result.__ctz_) % __bits_per_word); + __dn -= __ddn; + } + if (__dn > 0) + { + // __result.__ctz_ == 0 + --__result.__seg_; + __result.__ctz_ = static_cast(-__dn & (__bits_per_word - 1)); + __m = ~__storage_type(0) << __result.__ctz_; + *__result.__seg_ &= ~__m; + __last.__ctz_ -= __dn + __ddn; + *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_); + } + // __last.__ctz_ = 0 + } + // __last.__ctz_ == 0 || __n == 0 + // __result.__ctz_ != 0 || __n == 0 + // do middle words + unsigned __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __m = ~__storage_type(0) >> __clz_r; + for (; __n >= __bits_per_word; __n -= __bits_per_word) { - *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_); + __storage_type __b = *--__last.__seg_; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b >> __clz_r; + *--__result.__seg_ &= __m; + *__result.__seg_ |= __b << __result.__ctz_; } - else + // do last word + if (__n > 0) { - *__result.__seg_ |= __b >> (__last.__ctz_ - __result.__ctz_); + __m = ~__storage_type(0) << (__bits_per_word - __n); + __storage_type __b = *--__last.__seg_ & __m; + __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__result.__ctz_)); + __m = (~__storage_type(0) << (__result.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_r); + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b >> (__bits_per_word - __result.__ctz_); + __result.__ctz_ = static_cast(((-__dn & (__bits_per_word - 1)) + + __result.__ctz_) % __bits_per_word); + __n -= __dn; + if (__n > 0) + { + // __result.__ctz_ == 0 + --__result.__seg_; + __result.__ctz_ = static_cast(-__n & (__bits_per_word - 1)); + __m = ~__storage_type(0) << __result.__ctz_; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b << (__result.__ctz_ - (__bits_per_word - __n - __dn)); + } } - __result.__ctz_ = static_cast(((-__ddn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word); - __dn -= __ddn; - } - if (__dn > 0) - { - // __result.__ctz_ == 0 - --__result.__seg_; - __result.__ctz_ = static_cast(-__dn & (__bits_per_word - 1)); - __m = ~__storage_type(0) << __result.__ctz_; - *__result.__seg_ &= ~__m; - __last.__ctz_ -= __dn + __ddn; - *__result.__seg_ |= __b << (__result.__ctz_ - __last.__ctz_); - } - // __last.__ctz_ = 0 - } - // __last.__ctz_ == 0 || __n == 0 - // __result.__ctz_ != 0 || __n == 0 - // do middle words - unsigned __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __m = ~__storage_type(0) >> __clz_r; - for (; __n >= __bits_per_word; __n -= __bits_per_word) - { - __storage_type __b = *--__last.__seg_; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b >> __clz_r; - *--__result.__seg_ &= __m; - *__result.__seg_ |= __b << __result.__ctz_; } - // do last word - if (__n > 0) - { - __m = ~__storage_type(0) << (__bits_per_word - __n); - __storage_type __b = *--__last.__seg_ & __m; - __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__result.__ctz_)); - __m = (~__storage_type(0) << (__result.__ctz_ - __dn)) & (~__storage_type(0) >> __clz_r); - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b >> (__bits_per_word - __result.__ctz_); - __result.__ctz_ = static_cast(((-__dn & (__bits_per_word - 1)) + __result.__ctz_) % __bits_per_word); - __n -= __dn; - if (__n > 0) - { - // __result.__ctz_ == 0 - --__result.__seg_; - __result.__ctz_ = static_cast(-__n & (__bits_per_word - 1)); - __m = ~__storage_type(0) << __result.__ctz_; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b << 
(__result.__ctz_ - (__bits_per_word - __n - __dn)); - } - } - } - return __result; + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> copy_backward( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, false> +copy_backward(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) { - if (__last.__ctz_ == __result.__ctz_) - { - return __copy_backward_aligned(__first, __last, __result); - } - return __copy_backward_unaligned(__first, __last, __result); + if (__last.__ctz_ == __result.__ctz_) + return __copy_backward_aligned(__first, __last, __result); + return __copy_backward_unaligned(__first, __last, __result); } // move template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, false> move(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) { - return _CUDA_VSTD::copy(__first, __last, __result); + return _CUDA_VSTD::copy(__first, __last, __result); } // move_backward template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<_Cp, false> move_backward( - __bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<_Cp, false> +move_backward(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, __bit_iterator<_Cp, false> __result) { - return _CUDA_VSTD::copy_backward(__first, __last, __result); + return _CUDA_VSTD::copy_backward(__first, __last, __result); } // swap_ranges template -__bit_iterator<__C2, false> __swap_ranges_aligned( - __bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last, __bit_iterator<__C2, false> __result) +__bit_iterator<__C2, false> +__swap_ranges_aligned(__bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last, + __bit_iterator<__C2, false> __result) { - typedef __bit_iterator<__C1, false> _I1; - typedef typename _I1::difference_type difference_type; - typedef typename _I1::__storage_type __storage_type; - const int __bits_per_word = _I1::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__first.__ctz_ != 0) - { - unsigned __clz = __bits_per_word - __first.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn)); - __storage_type __b1 = *__first.__seg_ & __m; - *__first.__seg_ &= ~__m; - __storage_type __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b1; - *__first.__seg_ |= __b2; - __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); - ++__first.__seg_; - // __first.__ctz_ = 0; - } - // __first.__ctz_ == 0; - // do middle words - for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_, ++__result.__seg_) - { - swap(*__first.__seg_, *__result.__seg_); - } - // do last word + typedef __bit_iterator<__C1, false> _I1; + typedef typename _I1::difference_type difference_type; + typedef typename _I1::__storage_type __storage_type; + const int __bits_per_word = _I1::__bits_per_word; + difference_type __n = __last - __first; if (__n > 0) { - 
__storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b1 = *__first.__seg_ & __m; - *__first.__seg_ &= ~__m; - __storage_type __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b1; - *__first.__seg_ |= __b2; - __result.__ctz_ = static_cast(__n); + // do first word + if (__first.__ctz_ != 0) + { + unsigned __clz = __bits_per_word - __first.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz - __dn)); + __storage_type __b1 = *__first.__seg_ & __m; + *__first.__seg_ &= ~__m; + __storage_type __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b1; + *__first.__seg_ |= __b2; + __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); + ++__first.__seg_; + // __first.__ctz_ = 0; + } + // __first.__ctz_ == 0; + // do middle words + for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_, ++__result.__seg_) + swap(*__first.__seg_, *__result.__seg_); + // do last word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b1 = *__first.__seg_ & __m; + *__first.__seg_ &= ~__m; + __storage_type __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b1; + *__first.__seg_ |= __b2; + __result.__ctz_ = static_cast(__n); + } } - } - return __result; + return __result; } template -__bit_iterator<__C2, false> __swap_ranges_unaligned( - __bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last, __bit_iterator<__C2, false> __result) +__bit_iterator<__C2, false> +__swap_ranges_unaligned(__bit_iterator<__C1, false> __first, __bit_iterator<__C1, false> __last, + __bit_iterator<__C2, false> __result) { - typedef __bit_iterator<__C1, false> _I1; - typedef typename _I1::difference_type difference_type; - typedef typename _I1::__storage_type __storage_type; - const int __bits_per_word = _I1::__bits_per_word; - difference_type __n = __last - __first; - if (__n > 0) - { - // do first word - if (__first.__ctz_ != 0) - { - unsigned __clz_f = __bits_per_word - __first.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __storage_type __b1 = *__first.__seg_ & __m; - *__first.__seg_ &= ~__m; - unsigned __clz_r = __bits_per_word - __result.__ctz_; - __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); - __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); - __storage_type __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - if (__result.__ctz_ > __first.__ctz_) - { - unsigned __s = __result.__ctz_ - __first.__ctz_; - *__result.__seg_ |= __b1 << __s; - *__first.__seg_ |= __b2 >> __s; - } - else - { - unsigned __s = __first.__ctz_ - __result.__ctz_; - *__result.__seg_ |= __b1 >> __s; - *__first.__seg_ |= __b2 << __s; - } - __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__ddn + __result.__ctz_) % __bits_per_word); - __dn -= __ddn; - if (__dn > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __dn); - __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - unsigned __s = __first.__ctz_ + __ddn; - *__result.__seg_ |= __b1 >> __s; - *__first.__seg_ 
|= __b2 << __s; - __result.__ctz_ = static_cast(__dn); - } - ++__first.__seg_; - // __first.__ctz_ = 0; - } - // __first.__ctz_ == 0; - // do middle words - __storage_type __m = ~__storage_type(0) << __result.__ctz_; - unsigned __clz_r = __bits_per_word - __result.__ctz_; - for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first.__seg_) - { - __storage_type __b1 = *__first.__seg_; - __storage_type __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b1 << __result.__ctz_; - *__first.__seg_ = __b2 >> __result.__ctz_; - ++__result.__seg_; - __b2 = *__result.__seg_ & ~__m; - *__result.__seg_ &= __m; - *__result.__seg_ |= __b1 >> __clz_r; - *__first.__seg_ |= __b2 << __clz_r; - } - // do last word + typedef __bit_iterator<__C1, false> _I1; + typedef typename _I1::difference_type difference_type; + typedef typename _I1::__storage_type __storage_type; + const int __bits_per_word = _I1::__bits_per_word; + difference_type __n = __last - __first; if (__n > 0) { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b1 = *__first.__seg_ & __m; - *__first.__seg_ &= ~__m; - __storage_type __dn = _CUDA_VSTD::min<__storage_type>(__n, __clz_r); - __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); - __storage_type __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b1 << __result.__ctz_; - *__first.__seg_ |= __b2 >> __result.__ctz_; - __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; - __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); - __n -= __dn; - if (__n > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - __b2 = *__result.__seg_ & __m; - *__result.__seg_ &= ~__m; - *__result.__seg_ |= __b1 >> __dn; - *__first.__seg_ |= __b2 << __dn; - __result.__ctz_ = static_cast(__n); - } + // do first word + if (__first.__ctz_ != 0) + { + unsigned __clz_f = __bits_per_word - __first.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b1 = *__first.__seg_ & __m; + *__first.__seg_ &= ~__m; + unsigned __clz_r = __bits_per_word - __result.__ctz_; + __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); + __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); + __storage_type __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + if (__result.__ctz_ > __first.__ctz_) + { + unsigned __s = __result.__ctz_ - __first.__ctz_; + *__result.__seg_ |= __b1 << __s; + *__first.__seg_ |= __b2 >> __s; + } + else + { + unsigned __s = __first.__ctz_ - __result.__ctz_; + *__result.__seg_ |= __b1 >> __s; + *__first.__seg_ |= __b2 << __s; + } + __result.__seg_ += (__ddn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__ddn + __result.__ctz_) % __bits_per_word); + __dn -= __ddn; + if (__dn > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __dn); + __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + unsigned __s = __first.__ctz_ + __ddn; + *__result.__seg_ |= __b1 >> __s; + *__first.__seg_ |= __b2 << __s; + __result.__ctz_ = static_cast(__dn); + } + ++__first.__seg_; + // __first.__ctz_ = 0; + } + // __first.__ctz_ == 0; + // do middle words + __storage_type __m = ~__storage_type(0) << __result.__ctz_; + unsigned __clz_r = __bits_per_word - __result.__ctz_; + for (; __n >= __bits_per_word; __n -= 
__bits_per_word, ++__first.__seg_) + { + __storage_type __b1 = *__first.__seg_; + __storage_type __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b1 << __result.__ctz_; + *__first.__seg_ = __b2 >> __result.__ctz_; + ++__result.__seg_; + __b2 = *__result.__seg_ & ~__m; + *__result.__seg_ &= __m; + *__result.__seg_ |= __b1 >> __clz_r; + *__first.__seg_ |= __b2 << __clz_r; + } + // do last word + if (__n > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b1 = *__first.__seg_ & __m; + *__first.__seg_ &= ~__m; + __storage_type __dn = _CUDA_VSTD::min<__storage_type>(__n, __clz_r); + __m = (~__storage_type(0) << __result.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); + __storage_type __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b1 << __result.__ctz_; + *__first.__seg_ |= __b2 >> __result.__ctz_; + __result.__seg_ += (__dn + __result.__ctz_) / __bits_per_word; + __result.__ctz_ = static_cast((__dn + __result.__ctz_) % __bits_per_word); + __n -= __dn; + if (__n > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __n); + __b2 = *__result.__seg_ & __m; + *__result.__seg_ &= ~__m; + *__result.__seg_ |= __b1 >> __dn; + *__first.__seg_ |= __b2 << __dn; + __result.__ctz_ = static_cast(__n); + } + } } - } - return __result; + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<__C2, false> swap_ranges( - __bit_iterator<__C1, false> __first1, __bit_iterator<__C1, false> __last1, __bit_iterator<__C2, false> __first2) +inline _LIBCUDACXX_INLINE_VISIBILITY +__bit_iterator<__C2, false> +swap_ranges(__bit_iterator<__C1, false> __first1, __bit_iterator<__C1, false> __last1, + __bit_iterator<__C2, false> __first2) { - if (__first1.__ctz_ == __first2.__ctz_) - { - return __swap_ranges_aligned(__first1, __last1, __first2); - } - return __swap_ranges_unaligned(__first1, __last1, __first2); + if (__first1.__ctz_ == __first2.__ctz_) + return __swap_ranges_aligned(__first1, __last1, __first2); + return __swap_ranges_unaligned(__first1, __last1, __first2); } // rotate @@ -917,469 +881,413 @@ inline _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator<__C2, false> swap_ranges( template struct __bit_array { - typedef typename _Cp::difference_type difference_type; - typedef typename _Cp::__storage_type __storage_type; - typedef typename _Cp::__storage_pointer __storage_pointer; - typedef typename _Cp::iterator iterator; - static const unsigned __bits_per_word = _Cp::__bits_per_word; - static const unsigned _Np = 4; - - difference_type __size_; - __storage_type __word_[_Np]; - - _LIBCUDACXX_INLINE_VISIBILITY static difference_type capacity() - { - return static_cast(_Np * __bits_per_word); - } - _LIBCUDACXX_INLINE_VISIBILITY explicit __bit_array(difference_type __s) - : __size_(__s) - {} - _LIBCUDACXX_INLINE_VISIBILITY iterator begin() - { - return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]), 0); - } - _LIBCUDACXX_INLINE_VISIBILITY iterator end() - { - return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]) + __size_ / __bits_per_word, - static_cast(__size_ % __bits_per_word)); - } + typedef typename _Cp::difference_type difference_type; + typedef typename _Cp::__storage_type __storage_type; + typedef typename _Cp::__storage_pointer __storage_pointer; + typedef typename _Cp::iterator iterator; + static const unsigned __bits_per_word = _Cp::__bits_per_word; + static const unsigned _Np = 4; + + difference_type __size_; + __storage_type __word_[_Np]; + 
+ _LIBCUDACXX_INLINE_VISIBILITY static difference_type capacity() + {return static_cast(_Np * __bits_per_word);} + _LIBCUDACXX_INLINE_VISIBILITY explicit __bit_array(difference_type __s) : __size_(__s) {} + _LIBCUDACXX_INLINE_VISIBILITY iterator begin() + { + return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]), 0); + } + _LIBCUDACXX_INLINE_VISIBILITY iterator end() + { + return iterator(pointer_traits<__storage_pointer>::pointer_to(__word_[0]) + __size_ / __bits_per_word, + static_cast(__size_ % __bits_per_word)); + } }; template __bit_iterator<_Cp, false> rotate(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __middle, __bit_iterator<_Cp, false> __last) { - typedef __bit_iterator<_Cp, false> _I1; - typedef typename _I1::difference_type difference_type; - difference_type __d1 = __middle - __first; - difference_type __d2 = __last - __middle; - _I1 __r = __first + __d2; - while (__d1 != 0 && __d2 != 0) - { - if (__d1 <= __d2) - { - if (__d1 <= __bit_array<_Cp>::capacity()) - { - __bit_array<_Cp> __b(__d1); - _CUDA_VSTD::copy(__first, __middle, __b.begin()); - _CUDA_VSTD::copy(__b.begin(), __b.end(), _CUDA_VSTD::copy(__middle, __last, __first)); - break; - } - else - { - __bit_iterator<_Cp, false> __mp = _CUDA_VSTD::swap_ranges(__first, __middle, __middle); - __first = __middle; - __middle = __mp; - __d2 -= __d1; - } - } - else + typedef __bit_iterator<_Cp, false> _I1; + typedef typename _I1::difference_type difference_type; + difference_type __d1 = __middle - __first; + difference_type __d2 = __last - __middle; + _I1 __r = __first + __d2; + while (__d1 != 0 && __d2 != 0) { - if (__d2 <= __bit_array<_Cp>::capacity()) - { - __bit_array<_Cp> __b(__d2); - _CUDA_VSTD::copy(__middle, __last, __b.begin()); - _CUDA_VSTD::copy_backward(__b.begin(), __b.end(), _CUDA_VSTD::copy_backward(__first, __middle, __last)); - break; - } - else - { - __bit_iterator<_Cp, false> __mp = __first + __d2; - _CUDA_VSTD::swap_ranges(__first, __mp, __middle); - __first = __mp; - __d1 -= __d2; - } + if (__d1 <= __d2) + { + if (__d1 <= __bit_array<_Cp>::capacity()) + { + __bit_array<_Cp> __b(__d1); + _CUDA_VSTD::copy(__first, __middle, __b.begin()); + _CUDA_VSTD::copy(__b.begin(), __b.end(), _CUDA_VSTD::copy(__middle, __last, __first)); + break; + } + else + { + __bit_iterator<_Cp, false> __mp = _CUDA_VSTD::swap_ranges(__first, __middle, __middle); + __first = __middle; + __middle = __mp; + __d2 -= __d1; + } + } + else + { + if (__d2 <= __bit_array<_Cp>::capacity()) + { + __bit_array<_Cp> __b(__d2); + _CUDA_VSTD::copy(__middle, __last, __b.begin()); + _CUDA_VSTD::copy_backward(__b.begin(), __b.end(), _CUDA_VSTD::copy_backward(__first, __middle, __last)); + break; + } + else + { + __bit_iterator<_Cp, false> __mp = __first + __d2; + _CUDA_VSTD::swap_ranges(__first, __mp, __middle); + __first = __mp; + __d1 -= __d2; + } + } } - } - return __r; + return __r; } // equal template -bool __equal_unaligned( - __bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, __bit_iterator<_Cp, _IC2> __first2) +bool +__equal_unaligned(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, + __bit_iterator<_Cp, _IC2> __first2) { - typedef __bit_iterator<_Cp, _IC1> _It; - typedef typename _It::difference_type difference_type; - typedef typename _It::__storage_type __storage_type; - static const int __bits_per_word = _It::__bits_per_word; - difference_type __n = __last1 - __first1; - if (__n > 0) - { - // do first word - if (__first1.__ctz_ != 0) + typedef __bit_iterator<_Cp, 
_IC1> _It; + typedef typename _It::difference_type difference_type; + typedef typename _It::__storage_type __storage_type; + static const int __bits_per_word = _It::__bits_per_word; + difference_type __n = __last1 - __first1; + if (__n > 0) { - unsigned __clz_f = __bits_per_word - __first1.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - __storage_type __b = *__first1.__seg_ & __m; - unsigned __clz_r = __bits_per_word - __first2.__ctz_; - __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); - __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); - if (__first2.__ctz_ > __first1.__ctz_) - { - if ((*__first2.__seg_ & __m) != (__b << (__first2.__ctz_ - __first1.__ctz_))) + // do first word + if (__first1.__ctz_ != 0) { - return false; + unsigned __clz_f = __bits_per_word - __first1.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz_f), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + __storage_type __b = *__first1.__seg_ & __m; + unsigned __clz_r = __bits_per_word - __first2.__ctz_; + __storage_type __ddn = _CUDA_VSTD::min<__storage_type>(__dn, __clz_r); + __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __ddn)); + if (__first2.__ctz_ > __first1.__ctz_) + { + if ((*__first2.__seg_ & __m) != (__b << (__first2.__ctz_ - __first1.__ctz_))) + return false; + } + else + { + if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ - __first2.__ctz_))) + return false; + } + __first2.__seg_ += (__ddn + __first2.__ctz_) / __bits_per_word; + __first2.__ctz_ = static_cast((__ddn + __first2.__ctz_) % __bits_per_word); + __dn -= __ddn; + if (__dn > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __dn); + if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ + __ddn))) + return false; + __first2.__ctz_ = static_cast(__dn); + } + ++__first1.__seg_; + // __first1.__ctz_ = 0; } - } - else - { - if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ - __first2.__ctz_))) + // __first1.__ctz_ == 0; + // do middle words + unsigned __clz_r = __bits_per_word - __first2.__ctz_; + __storage_type __m = ~__storage_type(0) << __first2.__ctz_; + for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_) { - return false; + __storage_type __b = *__first1.__seg_; + if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_)) + return false; + ++__first2.__seg_; + if ((*__first2.__seg_ & ~__m) != (__b >> __clz_r)) + return false; } - } - __first2.__seg_ += (__ddn + __first2.__ctz_) / __bits_per_word; - __first2.__ctz_ = static_cast((__ddn + __first2.__ctz_) % __bits_per_word); - __dn -= __ddn; - if (__dn > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __dn); - if ((*__first2.__seg_ & __m) != (__b >> (__first1.__ctz_ + __ddn))) + // do last word + if (__n > 0) { - return false; + __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__first1.__seg_ & __m; + __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__clz_r)); + __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); + if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_)) + return false; + __first2.__seg_ += (__dn + __first2.__ctz_) / __bits_per_word; + __first2.__ctz_ = static_cast((__dn + __first2.__ctz_) % __bits_per_word); + __n -= 
__dn; + if (__n > 0) + { + __m = ~__storage_type(0) >> (__bits_per_word - __n); + if ((*__first2.__seg_ & __m) != (__b >> __dn)) + return false; + } } - __first2.__ctz_ = static_cast(__dn); - } - ++__first1.__seg_; - // __first1.__ctz_ = 0; } - // __first1.__ctz_ == 0; - // do middle words - unsigned __clz_r = __bits_per_word - __first2.__ctz_; - __storage_type __m = ~__storage_type(0) << __first2.__ctz_; - for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_) - { - __storage_type __b = *__first1.__seg_; - if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_)) - { - return false; - } - ++__first2.__seg_; - if ((*__first2.__seg_ & ~__m) != (__b >> __clz_r)) - { - return false; - } - } - // do last word - if (__n > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = *__first1.__seg_ & __m; - __storage_type __dn = _CUDA_VSTD::min(__n, static_cast(__clz_r)); - __m = (~__storage_type(0) << __first2.__ctz_) & (~__storage_type(0) >> (__clz_r - __dn)); - if ((*__first2.__seg_ & __m) != (__b << __first2.__ctz_)) - { - return false; - } - __first2.__seg_ += (__dn + __first2.__ctz_) / __bits_per_word; - __first2.__ctz_ = static_cast((__dn + __first2.__ctz_) % __bits_per_word); - __n -= __dn; - if (__n > 0) - { - __m = ~__storage_type(0) >> (__bits_per_word - __n); - if ((*__first2.__seg_ & __m) != (__b >> __dn)) - { - return false; - } - } - } - } - return true; + return true; } template -bool __equal_aligned( - __bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, __bit_iterator<_Cp, _IC2> __first2) +bool +__equal_aligned(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, + __bit_iterator<_Cp, _IC2> __first2) { - typedef __bit_iterator<_Cp, _IC1> _It; - typedef typename _It::difference_type difference_type; - typedef typename _It::__storage_type __storage_type; - static const int __bits_per_word = _It::__bits_per_word; - difference_type __n = __last1 - __first1; - if (__n > 0) - { - // do first word - if (__first1.__ctz_ != 0) - { - unsigned __clz = __bits_per_word - __first1.__ctz_; - difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); - __n -= __dn; - __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz - __dn)); - if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m)) - { - return false; - } - ++__first2.__seg_; - ++__first1.__seg_; - // __first1.__ctz_ = 0; - // __first2.__ctz_ = 0; - } - // __first1.__ctz_ == 0; - // __first2.__ctz_ == 0; - // do middle words - for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_, ++__first2.__seg_) - { - if (*__first2.__seg_ != *__first1.__seg_) - { - return false; - } - } - // do last word + typedef __bit_iterator<_Cp, _IC1> _It; + typedef typename _It::difference_type difference_type; + typedef typename _It::__storage_type __storage_type; + static const int __bits_per_word = _It::__bits_per_word; + difference_type __n = __last1 - __first1; if (__n > 0) { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m)) - { - return false; - } + // do first word + if (__first1.__ctz_ != 0) + { + unsigned __clz = __bits_per_word - __first1.__ctz_; + difference_type __dn = _CUDA_VSTD::min(static_cast(__clz), __n); + __n -= __dn; + __storage_type __m = (~__storage_type(0) << __first1.__ctz_) & (~__storage_type(0) >> (__clz - __dn)); + if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m)) + return false; + ++__first2.__seg_; + 
++__first1.__seg_; + // __first1.__ctz_ = 0; + // __first2.__ctz_ = 0; + } + // __first1.__ctz_ == 0; + // __first2.__ctz_ == 0; + // do middle words + for (; __n >= __bits_per_word; __n -= __bits_per_word, ++__first1.__seg_, ++__first2.__seg_) + if (*__first2.__seg_ != *__first1.__seg_) + return false; + // do last word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + if ((*__first2.__seg_ & __m) != (*__first1.__seg_ & __m)) + return false; + } } - } - return true; + return true; } template -inline _LIBCUDACXX_INLINE_VISIBILITY bool +inline _LIBCUDACXX_INLINE_VISIBILITY +bool equal(__bit_iterator<_Cp, _IC1> __first1, __bit_iterator<_Cp, _IC1> __last1, __bit_iterator<_Cp, _IC2> __first2) { - if (__first1.__ctz_ == __first2.__ctz_) - { - return __equal_aligned(__first1, __last1, __first2); - } - return __equal_unaligned(__first1, __last1, __first2); + if (__first1.__ctz_ == __first2.__ctz_) + return __equal_aligned(__first1, __last1, __first2); + return __equal_unaligned(__first1, __last1, __first2); } -template +template class __bit_iterator { public: - typedef typename _Cp::difference_type difference_type; - typedef bool value_type; - typedef __bit_iterator pointer; - typedef typename conditional<_IsConst, __bit_const_reference<_Cp>, __bit_reference<_Cp>>::type reference; - typedef random_access_iterator_tag iterator_category; + typedef typename _Cp::difference_type difference_type; + typedef bool value_type; + typedef __bit_iterator pointer; + typedef typename conditional<_IsConst, __bit_const_reference<_Cp>, __bit_reference<_Cp> >::type reference; + typedef random_access_iterator_tag iterator_category; private: - typedef typename _Cp::__storage_type __storage_type; - typedef typename conditional<_IsConst, typename _Cp::__const_storage_pointer, typename _Cp::__storage_pointer>::type - __storage_pointer; - static const unsigned __bits_per_word = _Cp::__bits_per_word; + typedef typename _Cp::__storage_type __storage_type; + typedef typename conditional<_IsConst, typename _Cp::__const_storage_pointer, + typename _Cp::__storage_pointer>::type __storage_pointer; + static const unsigned __bits_per_word = _Cp::__bits_per_word; - __storage_pointer __seg_; - unsigned __ctz_; + __storage_pointer __seg_; + unsigned __ctz_; public: - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator() noexcept + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator() noexcept #if _CCCL_STD_VER > 2011 - : __seg_(nullptr) - , __ctz_(0) + : __seg_(nullptr), __ctz_(0) #endif - {} - // avoid re-declaring a copy constructor for the non-const version. - using __type_for_copy_to_const = _If<_IsConst, __bit_iterator<_Cp, false>, struct __private_nat>; - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator(const __type_for_copy_to_const& __it) noexcept - : __seg_(__it.__seg_) - , __ctz_(__it.__ctz_) - {} - - _LIBCUDACXX_INLINE_VISIBILITY reference operator*() const noexcept - { - return reference(__seg_, __storage_type(1) << __ctz_); - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator++() - { - if (__ctz_ != __bits_per_word - 1) + {} + // avoid re-declaring a copy constructor for the non-const version. 
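// A minimal sketch of the idea, using hypothetical names (__iter, __nat) that are
// not part of this header: when _IsConst is false the converting constructor's
// parameter collapses to a private dummy type, so the implicitly-declared copy
// constructor stays in place; when _IsConst is true it accepts the mutable iterator.
//
//   template <bool _IsConst>
//   struct __iter
//   {
//     using __from_mutable = _If<_IsConst, __iter<false>, struct __nat>;
//     __iter() = default;
//     __iter(const __from_mutable&) {}  // only meaningful for __iter<true>
//   };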
+ using __type_for_copy_to_const = + _If<_IsConst, __bit_iterator<_Cp, false>, struct __private_nat>; + + _LIBCUDACXX_INLINE_VISIBILITY + __bit_iterator(const __type_for_copy_to_const& __it) noexcept + : __seg_(__it.__seg_), __ctz_(__it.__ctz_) {} + + _LIBCUDACXX_INLINE_VISIBILITY reference operator*() const noexcept + {return reference(__seg_, __storage_type(1) << __ctz_);} + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator++() + { + if (__ctz_ != __bits_per_word-1) + ++__ctz_; + else + { + __ctz_ = 0; + ++__seg_; + } + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator++(int) { - ++__ctz_; + __bit_iterator __tmp = *this; + ++(*this); + return __tmp; } - else + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator--() { - __ctz_ = 0; - ++__seg_; + if (__ctz_ != 0) + --__ctz_; + else + { + __ctz_ = __bits_per_word - 1; + --__seg_; + } + return *this; } - return *this; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator++(int) - { - __bit_iterator __tmp = *this; - ++(*this); - return __tmp; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator--() - { - if (__ctz_ != 0) + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator--(int) { - --__ctz_; + __bit_iterator __tmp = *this; + --(*this); + return __tmp; } - else + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator+=(difference_type __n) + { + if (__n >= 0) + __seg_ += (__n + __ctz_) / __bits_per_word; + else + __seg_ += static_cast(__n - __bits_per_word + __ctz_ + 1) + / static_cast(__bits_per_word); + __n &= (__bits_per_word - 1); + __ctz_ = static_cast((__n + __ctz_) % __bits_per_word); + return *this; + } + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator-=(difference_type __n) { - __ctz_ = __bits_per_word - 1; - --__seg_; + return *this += -__n; } - return *this; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator--(int) - { - __bit_iterator __tmp = *this; - --(*this); - return __tmp; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator+=(difference_type __n) - { - if (__n >= 0) + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator+(difference_type __n) const { - __seg_ += (__n + __ctz_) / __bits_per_word; + __bit_iterator __t(*this); + __t += __n; + return __t; } - else + + _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator-(difference_type __n) const { - __seg_ += static_cast(__n - __bits_per_word + __ctz_ + 1) - / static_cast(__bits_per_word); + __bit_iterator __t(*this); + __t -= __n; + return __t; } - __n &= (__bits_per_word - 1); - __ctz_ = static_cast((__n + __ctz_) % __bits_per_word); - return *this; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator& operator-=(difference_type __n) - { - return *this += -__n; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator+(difference_type __n) const - { - __bit_iterator __t(*this); - __t += __n; - return __t; - } - - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator operator-(difference_type __n) const - { - __bit_iterator __t(*this); - __t -= __n; - return __t; - } - - _LIBCUDACXX_INLINE_VISIBILITY friend __bit_iterator operator+(difference_type __n, const __bit_iterator& __it) - { - return __it + __n; - } - - _LIBCUDACXX_INLINE_VISIBILITY friend difference_type operator-(const __bit_iterator& __x, const __bit_iterator& __y) - { - return (__x.__seg_ - __y.__seg_) * __bits_per_word + __x.__ctz_ - __y.__ctz_; - } - - _LIBCUDACXX_INLINE_VISIBILITY reference operator[](difference_type __n) const - { - return *(*this + __n); - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool 
operator==(const __bit_iterator& __x, const __bit_iterator& __y) - { - return __x.__seg_ == __y.__seg_ && __x.__ctz_ == __y.__ctz_; - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool operator!=(const __bit_iterator& __x, const __bit_iterator& __y) - { - return !(__x == __y); - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<(const __bit_iterator& __x, const __bit_iterator& __y) - { - return __x.__seg_ < __y.__seg_ || (__x.__seg_ == __y.__seg_ && __x.__ctz_ < __y.__ctz_); - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>(const __bit_iterator& __x, const __bit_iterator& __y) - { - return __y < __x; - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<=(const __bit_iterator& __x, const __bit_iterator& __y) - { - return !(__y < __x); - } - - _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>=(const __bit_iterator& __x, const __bit_iterator& __y) - { - return !(__x < __y); - } + + _LIBCUDACXX_INLINE_VISIBILITY + friend __bit_iterator operator+(difference_type __n, const __bit_iterator& __it) {return __it + __n;} + + _LIBCUDACXX_INLINE_VISIBILITY + friend difference_type operator-(const __bit_iterator& __x, const __bit_iterator& __y) + {return (__x.__seg_ - __y.__seg_) * __bits_per_word + __x.__ctz_ - __y.__ctz_;} + + _LIBCUDACXX_INLINE_VISIBILITY reference operator[](difference_type __n) const {return *(*this + __n);} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator==(const __bit_iterator& __x, const __bit_iterator& __y) + {return __x.__seg_ == __y.__seg_ && __x.__ctz_ == __y.__ctz_;} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator!=(const __bit_iterator& __x, const __bit_iterator& __y) + {return !(__x == __y);} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<(const __bit_iterator& __x, const __bit_iterator& __y) + {return __x.__seg_ < __y.__seg_ || (__x.__seg_ == __y.__seg_ && __x.__ctz_ < __y.__ctz_);} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>(const __bit_iterator& __x, const __bit_iterator& __y) + {return __y < __x;} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator<=(const __bit_iterator& __x, const __bit_iterator& __y) + {return !(__y < __x);} + + _LIBCUDACXX_INLINE_VISIBILITY friend bool operator>=(const __bit_iterator& __x, const __bit_iterator& __y) + {return !(__x < __y);} private: - _LIBCUDACXX_INLINE_VISIBILITY __bit_iterator(__storage_pointer __s, unsigned __ctz) noexcept - : __seg_(__s) - , __ctz_(__ctz) - {} - - friend typename _Cp::__self; - - friend class __bit_reference<_Cp>; - friend class __bit_const_reference<_Cp>; - friend class __bit_iterator<_Cp, true>; - template <class _Dp> - friend struct __bit_array; - template <class _Dp> - friend void __fill_n_false(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); - template <class _Dp> - friend void __fill_n_true(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, false> __copy_aligned( - __bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, false> __copy_unaligned( - __bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, false> - copy(__bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, false> __copy_backward_aligned( - __bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, 
false> __copy_backward_unaligned( - __bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, false> - copy_backward(__bit_iterator<_Dp, _IC> __first, __bit_iterator<_Dp, _IC> __last, __bit_iterator<_Dp, false> __result); - template <class __C1, class __C2> - friend __bit_iterator<__C2, false> - __swap_ranges_aligned(__bit_iterator<__C1, false>, __bit_iterator<__C1, false>, __bit_iterator<__C2, false>); - template <class __C1, class __C2> - friend __bit_iterator<__C2, false> - __swap_ranges_unaligned(__bit_iterator<__C1, false>, __bit_iterator<__C1, false>, __bit_iterator<__C2, false>); - template <class __C1, class __C2> - friend __bit_iterator<__C2, false> - swap_ranges(__bit_iterator<__C1, false>, __bit_iterator<__C1, false>, __bit_iterator<__C2, false>); - template <class _Dp> - friend __bit_iterator<_Dp, false> - rotate(__bit_iterator<_Dp, false>, __bit_iterator<_Dp, false>, __bit_iterator<_Dp, false>); - template <class _Dp, bool _IC1, bool _IC2> - friend bool __equal_aligned(__bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC2>); - template <class _Dp, bool _IC1, bool _IC2> - friend bool __equal_unaligned(__bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC2>); - template <class _Dp, bool _IC1, bool _IC2> - friend bool equal(__bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC1>, __bit_iterator<_Dp, _IC2>); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, _IC> __find_bool_true(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); - template <class _Dp, bool _IC> - friend __bit_iterator<_Dp, _IC> __find_bool_false(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); - template <class _Dp, bool _IC> - friend typename __bit_iterator<_Dp, _IC>::difference_type - __count_bool_true(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); - template <class _Dp, bool _IC> - friend typename __bit_iterator<_Dp, _IC>::difference_type - __count_bool_false(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); + _LIBCUDACXX_INLINE_VISIBILITY + __bit_iterator(__storage_pointer __s, unsigned __ctz) noexcept + : __seg_(__s), __ctz_(__ctz) {} + + friend typename _Cp::__self; + + friend class __bit_reference<_Cp>; + friend class __bit_const_reference<_Cp>; + friend class __bit_iterator<_Cp, true>; + template <class _Dp> friend struct __bit_array; + template <class _Dp> friend void __fill_n_false(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); + template <class _Dp> friend void __fill_n_true(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_aligned(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_unaligned(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> copy(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_backward_aligned(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> __copy_backward_unaligned(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class _Dp, bool _IC> friend __bit_iterator<_Dp, false> copy_backward(__bit_iterator<_Dp, _IC> __first, + __bit_iterator<_Dp, _IC> __last, + __bit_iterator<_Dp, false> __result); + template <class __C1, class __C2> friend __bit_iterator<__C2, false> __swap_ranges_aligned(__bit_iterator<__C1, false>, + __bit_iterator<__C1, false>, + __bit_iterator<__C2, false>); + template <class __C1, class __C2> friend 
__bit_iterator<__C2, false> __swap_ranges_unaligned(__bit_iterator<__C1, false>, + __bit_iterator<__C1, false>, + __bit_iterator<__C2, false>); + template friend __bit_iterator<__C2, false> swap_ranges(__bit_iterator<__C1, false>, + __bit_iterator<__C1, false>, + __bit_iterator<__C2, false>); + template friend __bit_iterator<_Dp, false> rotate(__bit_iterator<_Dp, false>, + __bit_iterator<_Dp, false>, + __bit_iterator<_Dp, false>); + template friend bool __equal_aligned(__bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC2>); + template friend bool __equal_unaligned(__bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC2>); + template friend bool equal(__bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC1>, + __bit_iterator<_Dp, _IC2>); + template friend __bit_iterator<_Dp, _IC> __find_bool_true(__bit_iterator<_Dp, _IC>, + typename _Dp::size_type); + template friend __bit_iterator<_Dp, _IC> __find_bool_false(__bit_iterator<_Dp, _IC>, + typename _Dp::size_type); + template friend typename __bit_iterator<_Dp, _IC>::difference_type + __count_bool_true(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); + template friend typename __bit_iterator<_Dp, _IC>::difference_type + __count_bool_false(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); }; _LIBCUDACXX_END_NAMESPACE_STD _LIBCUDACXX_POP_MACROS -#endif // _LIBCUDACXX___BIT_REFERENCE +#endif // _LIBCUDACXX___BIT_REFERENCE diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__config b/libcudacxx/include/cuda/std/detail/libcxx/include/__config index 274d7e020b4..b7e8bcc3118 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__config +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__config @@ -43,13 +43,13 @@ #endif #if defined(_CCCL_COMPILER_MSVC) -# if _MSC_VER < 1917 -# define _LIBCUDACXX_COMPILER_MSVC_2017 -# elif _MSC_VER < 1930 -# define _LIBCUDACXX_COMPILER_MSVC_2019 -# else -# define _LIBCUDACXX_COMPILER_MSVC_2022 -# endif +#if _MSC_VER < 1917 +#define _LIBCUDACXX_COMPILER_MSVC_2017 +#elif _MSC_VER < 1930 +#define _LIBCUDACXX_COMPILER_MSVC_2019 +#else +#define _LIBCUDACXX_COMPILER_MSVC_2022 +#endif #endif // defined(_LIBCUDACXX_COMPILER_MSVC) #if defined(_CCCL_CUDA_COMPILER_NVCC) @@ -80,368 +80,372 @@ // __config may be included in `extern "C"` contexts, switch back to include extern "C++" { -# include +#include } -# ifdef __GNUC__ -# define _GNUC_VER (__GNUC__ * 100 + __GNUC_MINOR__) -# else -# define _GNUC_VER 0 -# endif +#ifdef __GNUC__ +# define _GNUC_VER (__GNUC__ * 100 + __GNUC_MINOR__) +#else +# define _GNUC_VER 0 +#endif -# define _LIBCUDACXX_VERSION 10000 +#define _LIBCUDACXX_VERSION 10000 -# ifndef _LIBCUDACXX_ABI_VERSION -# define _LIBCUDACXX_ABI_VERSION 1 -# endif +#ifndef _LIBCUDACXX_ABI_VERSION +# define _LIBCUDACXX_ABI_VERSION 1 +#endif -# define _LIBCUDACXX_STD_VER _CCCL_STD_VER +#define _LIBCUDACXX_STD_VER _CCCL_STD_VER -# if _CCCL_STD_VER < 2011 -# error libcu++ requires C++11 or later -# endif +#if _CCCL_STD_VER < 2011 +# error libcu++ requires C++11 or later +#endif -# if (defined(_CCCL_COMPILER_NVHPC) && defined(__linux__)) || defined(_CCCL_COMPILER_NVRTC) -# define __ELF__ -# endif +#if (defined(_CCCL_COMPILER_NVHPC) && defined(__linux__)) \ + || defined(_CCCL_COMPILER_NVRTC) + #define __ELF__ +#endif -# if defined(__ELF__) -# define _LIBCUDACXX_OBJECT_FORMAT_ELF 1 -# elif defined(__MACH__) -# define _LIBCUDACXX_OBJECT_FORMAT_MACHO 1 -# elif defined(_WIN32) -# define _LIBCUDACXX_OBJECT_FORMAT_COFF 1 -# elif defined(__wasm__) -# 
define _LIBCUDACXX_OBJECT_FORMAT_WASM 1 -# else -# error Unknown object file format -# endif +#if defined(__ELF__) +# define _LIBCUDACXX_OBJECT_FORMAT_ELF 1 +#elif defined(__MACH__) +# define _LIBCUDACXX_OBJECT_FORMAT_MACHO 1 +#elif defined(_WIN32) +# define _LIBCUDACXX_OBJECT_FORMAT_COFF 1 +#elif defined(__wasm__) +# define _LIBCUDACXX_OBJECT_FORMAT_WASM 1 +#else +# error Unknown object file format +#endif -# if defined(_LIBCUDACXX_ABI_UNSTABLE) || _LIBCUDACXX_ABI_VERSION >= 2 || defined(__cuda_std__) +#if defined(_LIBCUDACXX_ABI_UNSTABLE) || _LIBCUDACXX_ABI_VERSION >= 2 || defined(__cuda_std__) // Change short string representation so that string data starts at offset 0, // improving its alignment in some cases. -# define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT +# define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT // Fix deque iterator type in order to support incomplete types. -# define _LIBCUDACXX_ABI_INCOMPLETE_TYPES_IN_DEQUE +# define _LIBCUDACXX_ABI_INCOMPLETE_TYPES_IN_DEQUE // Fix undefined behavior in how std::list stores its linked nodes. -# define _LIBCUDACXX_ABI_LIST_REMOVE_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_LIST_REMOVE_NODE_POINTER_UB // Fix undefined behavior in how __tree stores its end and parent nodes. -# define _LIBCUDACXX_ABI_TREE_REMOVE_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_TREE_REMOVE_NODE_POINTER_UB // Fix undefined behavior in how __hash_table stores its pointer types. -# define _LIBCUDACXX_ABI_FIX_UNORDERED_NODE_POINTER_UB -# define _LIBCUDACXX_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB -# define _LIBCUDACXX_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE +# define _LIBCUDACXX_ABI_FIX_UNORDERED_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB +# define _LIBCUDACXX_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE // Don't use a nullptr_t simulation type in C++03 instead using C++11 nullptr // provided under the alternate keyword __nullptr, which changes the mangling // of nullptr_t. This option is ABI incompatible with GCC in C++03 mode. -# define _LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR +# define _LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR // Define the `pointer_safety` enum as a C++11 strongly typed enumeration // instead of as a class simulating an enum. If this option is enabled // `pointer_safety` and `get_pointer_safety()` will no longer be available // in C++03. -# define _LIBCUDACXX_ABI_POINTER_SAFETY_ENUM_TYPE +# define _LIBCUDACXX_ABI_POINTER_SAFETY_ENUM_TYPE // Define a key function for `bad_function_call` in the library, to centralize // its vtable and typeinfo to libc++ rather than having all other libraries // using that class define their own copies. -# define _LIBCUDACXX_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION +# define _LIBCUDACXX_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION // Enable optimized version of __do_get_(un)signed which avoids redundant copies. -# define _LIBCUDACXX_ABI_OPTIMIZED_LOCALE_NUM_GET +# define _LIBCUDACXX_ABI_OPTIMIZED_LOCALE_NUM_GET // Use the smallest possible integer type to represent the index of the variant. // Previously libc++ used "unsigned int" exclusively. -# define _LIBCUDACXX_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION +# define _LIBCUDACXX_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION // Unstable attempt to provide a more optimized std::function -# define _LIBCUDACXX_ABI_OPTIMIZED_FUNCTION +# define _LIBCUDACXX_ABI_OPTIMIZED_FUNCTION // All the regex constants must be distinct and nonzero. 
-# define _LIBCUDACXX_ABI_REGEX_CONSTANTS_NONZERO -# elif _LIBCUDACXX_ABI_VERSION == 1 -# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +# define _LIBCUDACXX_ABI_REGEX_CONSTANTS_NONZERO +#elif _LIBCUDACXX_ABI_VERSION == 1 +# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) // Enable compiling copies of now inline methods into the dylib to support // applications compiled against older libraries. This is unnecessary with // COFF dllexport semantics, since dllexport forces a non-inline definition // of inline functions to be emitted anyway. Our own non-inline copy would // conflict with the dllexport-emitted copy, so we disable it. -# define _LIBCUDACXX_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS -# endif +# define _LIBCUDACXX_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS # endif +#endif -# ifndef __has_attribute -# define __has_attribute(__x) 0 -# endif +#ifndef __has_attribute +#define __has_attribute(__x) 0 +#endif -# ifndef __has_builtin -# define __has_builtin(__x) 0 -# endif +#ifndef __has_builtin +#define __has_builtin(__x) 0 +#endif -# ifndef __has_extension -# define __has_extension(__x) 0 -# endif +#ifndef __has_extension +#define __has_extension(__x) 0 +#endif -# ifndef __has_feature -# define __has_feature(__x) 0 -# endif +#ifndef __has_feature +#define __has_feature(__x) 0 +#endif -# ifndef __has_cpp_attribute -# define __has_cpp_attribute(__x) 0 -# endif +#ifndef __has_cpp_attribute +#define __has_cpp_attribute(__x) 0 +#endif // '__is_identifier' returns '0' if '__x' is a reserved identifier provided by // the compiler and '1' otherwise. -# ifndef __is_identifier -# define __is_identifier(__x) 1 -# endif +#ifndef __is_identifier +#define __is_identifier(__x) 1 +#endif -# ifndef __has_declspec_attribute -# define __has_declspec_attribute(__x) 0 -# endif +#ifndef __has_declspec_attribute +#define __has_declspec_attribute(__x) 0 +#endif -# define __has_keyword(__x) !(__is_identifier(__x)) +#define __has_keyword(__x) !(__is_identifier(__x)) -# ifndef __has_include -# define __has_include(...) 0 -# endif +#ifndef __has_include +#define __has_include(...) 0 +#endif -# if !defined(_CCCL_CUDA_COMPILER_NVCC) && !defined(_CCCL_COMPILER_NVRTC) +#if !defined(_CCCL_CUDA_COMPILER_NVCC) && !defined(_CCCL_COMPILER_NVRTC) // If NVCC is not being used can safely use `long double` without warnings -# define _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE +# define _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE // NVCC does not have a way of silencing non '_' prefixed UDLs -# define _LIBCUDACXX_HAS_STL_LITERALS -# endif +# define _LIBCUDACXX_HAS_STL_LITERALS +#endif -# if defined(_CCCL_COMPILER_GCC) && __cplusplus < 201103L -# error "libc++ does not support using GCC with C++03. Please enable C++11" -# endif +#if defined(_CCCL_COMPILER_GCC) && __cplusplus < 201103L +#error "libc++ does not support using GCC with C++03. Please enable C++11" +#endif // FIXME: ABI detection should be done via compiler builtin macros. This // is just a placeholder until Clang implements such macros. For now assume // that Windows compilers pretending to be MSVC++ target the Microsoft ABI, // and allow the user to explicitly specify the ABI to handle cases where this // heuristic falls short. 
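// A minimal usage sketch (an assumption, not taken from this patch): a build that
// needs the Itanium ABI on a Windows/MSVC-like toolchain can opt in by defining the
// force macro before any libcu++ header is included, e.g.
//   #define _LIBCUDACXX_ABI_FORCE_ITANIUM
//   #include <cuda/std/version>
// Defining both _LIBCUDACXX_ABI_FORCE_ITANIUM and _LIBCUDACXX_ABI_FORCE_MICROSOFT
// is rejected by the #error branch below.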
-# if defined(_LIBCUDACXX_ABI_FORCE_ITANIUM) && defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT) -# error "Only one of _LIBCUDACXX_ABI_FORCE_ITANIUM and _LIBCUDACXX_ABI_FORCE_MICROSOFT can be defined" -# elif defined(_LIBCUDACXX_ABI_FORCE_ITANIUM) -# define _LIBCUDACXX_ABI_ITANIUM -# elif defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT) +#if defined(_LIBCUDACXX_ABI_FORCE_ITANIUM) && defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT) +# error "Only one of _LIBCUDACXX_ABI_FORCE_ITANIUM and _LIBCUDACXX_ABI_FORCE_MICROSOFT can be defined" +#elif defined(_LIBCUDACXX_ABI_FORCE_ITANIUM) +# define _LIBCUDACXX_ABI_ITANIUM +#elif defined(_LIBCUDACXX_ABI_FORCE_MICROSOFT) +# define _LIBCUDACXX_ABI_MICROSOFT +#else +# if defined(_WIN32) && defined(_CCCL_COMPILER_MSVC) # define _LIBCUDACXX_ABI_MICROSOFT # else -# if defined(_WIN32) && defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_ABI_MICROSOFT -# else -# define _LIBCUDACXX_ABI_ITANIUM -# endif +# define _LIBCUDACXX_ABI_ITANIUM # endif +#endif -# if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME) -# define _LIBCUDACXX_ABI_VCRUNTIME -# endif +#if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME) +# define _LIBCUDACXX_ABI_VCRUNTIME +#endif // Need to detect which libc we're using if we're on Linux. -# if defined(__linux__) -# include -# if defined(__GLIBC_PREREQ) -# define _LIBCUDACXX_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) -# else -# define _LIBCUDACXX_GLIBC_PREREQ(a, b) 0 -# endif // defined(__GLIBC_PREREQ) -# endif // defined(__linux__) - -# ifdef __LITTLE_ENDIAN__ -# if __LITTLE_ENDIAN__ -# define _LIBCUDACXX_LITTLE_ENDIAN -# endif // __LITTLE_ENDIAN__ -# endif // __LITTLE_ENDIAN__ - -# ifdef __BIG_ENDIAN__ -# if __BIG_ENDIAN__ -# define _LIBCUDACXX_BIG_ENDIAN -# endif // __BIG_ENDIAN__ -# endif // __BIG_ENDIAN__ - -# ifdef __BYTE_ORDER__ -# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -# define _LIBCUDACXX_LITTLE_ENDIAN -# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -# define _LIBCUDACXX_BIG_ENDIAN -# endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -# endif // __BYTE_ORDER__ - -# ifdef __FreeBSD__ -# include -# if _BYTE_ORDER == _LITTLE_ENDIAN -# define _LIBCUDACXX_LITTLE_ENDIAN -# else // _BYTE_ORDER == _LITTLE_ENDIAN -# define _LIBCUDACXX_BIG_ENDIAN -# endif // _BYTE_ORDER == _LITTLE_ENDIAN -# ifndef __LONG_LONG_SUPPORTED -# define _LIBCUDACXX_HAS_NO_LONG_LONG -# endif // __LONG_LONG_SUPPORTED -# endif // __FreeBSD__ - -# ifdef __NetBSD__ -# include -# if _BYTE_ORDER == _LITTLE_ENDIAN -# define _LIBCUDACXX_LITTLE_ENDIAN -# else // _BYTE_ORDER == _LITTLE_ENDIAN -# define _LIBCUDACXX_BIG_ENDIAN -# endif // _BYTE_ORDER == _LITTLE_ENDIAN -# define _LIBCUDACXX_HAS_QUICK_EXIT -# endif // __NetBSD__ +#if defined(__linux__) +# include +# if defined(__GLIBC_PREREQ) +# define _LIBCUDACXX_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) +# else +# define _LIBCUDACXX_GLIBC_PREREQ(a, b) 0 +# endif // defined(__GLIBC_PREREQ) +#endif // defined(__linux__) + +#ifdef __LITTLE_ENDIAN__ +# if __LITTLE_ENDIAN__ +# define _LIBCUDACXX_LITTLE_ENDIAN +# endif // __LITTLE_ENDIAN__ +#endif // __LITTLE_ENDIAN__ -# if defined(_WIN32) -# define _LIBCUDACXX_WIN32API +#ifdef __BIG_ENDIAN__ +# if __BIG_ENDIAN__ +# define _LIBCUDACXX_BIG_ENDIAN +# endif // __BIG_ENDIAN__ +#endif // __BIG_ENDIAN__ + +#ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define _LIBCUDACXX_LITTLE_ENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define _LIBCUDACXX_BIG_ENDIAN +# endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#endif // __BYTE_ORDER__ + 
+#ifdef __FreeBSD__ +# include +# if _BYTE_ORDER == _LITTLE_ENDIAN # define _LIBCUDACXX_LITTLE_ENDIAN -# define _LIBCUDACXX_SHORT_WCHAR 1 +# else // _BYTE_ORDER == _LITTLE_ENDIAN +# define _LIBCUDACXX_BIG_ENDIAN +# endif // _BYTE_ORDER == _LITTLE_ENDIAN +# ifndef __LONG_LONG_SUPPORTED +# define _LIBCUDACXX_HAS_NO_LONG_LONG +# endif // __LONG_LONG_SUPPORTED +#endif // __FreeBSD__ + +#ifdef __NetBSD__ +# include +# if _BYTE_ORDER == _LITTLE_ENDIAN +# define _LIBCUDACXX_LITTLE_ENDIAN +# else // _BYTE_ORDER == _LITTLE_ENDIAN +# define _LIBCUDACXX_BIG_ENDIAN +# endif // _BYTE_ORDER == _LITTLE_ENDIAN +# define _LIBCUDACXX_HAS_QUICK_EXIT +#endif // __NetBSD__ + +#if defined(_WIN32) +# define _LIBCUDACXX_WIN32API +# define _LIBCUDACXX_LITTLE_ENDIAN +# define _LIBCUDACXX_SHORT_WCHAR 1 // Both MinGW and native MSVC provide a "MSVC"-like environment -# define _LIBCUDACXX_MSVCRT_LIKE +# define _LIBCUDACXX_MSVCRT_LIKE // If mingw not explicitly detected, assume using MS C runtime only if // a MS compatibility version is specified. -# if defined(_CCCL_COMPILER_MSVC) && !defined(__MINGW32__) -# define _LIBCUDACXX_MSVCRT // Using Microsoft's C Runtime library -# endif -# if (defined(_M_AMD64) || defined(__x86_64__)) || (defined(_M_ARM) || defined(__arm__)) -# define _LIBCUDACXX_HAS_BITSCAN64 -# endif -# define _LIBCUDACXX_HAS_OPEN_WITH_WCHAR -# if defined(_LIBCUDACXX_MSVCRT) -# define _LIBCUDACXX_HAS_QUICK_EXIT -# endif +# if defined(_CCCL_COMPILER_MSVC) && !defined(__MINGW32__) +# define _LIBCUDACXX_MSVCRT // Using Microsoft's C Runtime library +# endif +# if (defined(_M_AMD64) || defined(__x86_64__)) || (defined(_M_ARM) || defined(__arm__)) +# define _LIBCUDACXX_HAS_BITSCAN64 +# endif +# define _LIBCUDACXX_HAS_OPEN_WITH_WCHAR +# if defined(_LIBCUDACXX_MSVCRT) +# define _LIBCUDACXX_HAS_QUICK_EXIT +# endif // Some CRT APIs are unavailable to store apps -# if defined(WINAPI_FAMILY) -# include -# if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) \ - && (!defined(WINAPI_PARTITION_SYSTEM) || !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_SYSTEM)) -# define _LIBCUDACXX_WINDOWS_STORE_APP -# endif +# if defined(WINAPI_FAMILY) +# include +# if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && \ + (!defined(WINAPI_PARTITION_SYSTEM) || \ + !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_SYSTEM)) +# define _LIBCUDACXX_WINDOWS_STORE_APP # endif -# endif // defined(_WIN32) +# endif +#endif // defined(_WIN32) -# ifdef __sun__ -# include -# ifdef _LITTLE_ENDIAN -# define _LIBCUDACXX_LITTLE_ENDIAN -# else -# define _LIBCUDACXX_BIG_ENDIAN -# endif -# endif // __sun__ - -# if defined(__CloudABI__) -// Certain architectures provide arc4random(). Prefer using -// arc4random() over /dev/{u,}random to make it possible to obtain -// random data even when using sandboxing mechanisms such as chroots, -// Capsicum, etc. -# define _LIBCUDACXX_USING_ARC4_RANDOM -# elif defined(__Fuchsia__) || defined(__wasi__) -# define _LIBCUDACXX_USING_GETENTROPY -# elif defined(__native_client__) -// NaCl's sandbox (which PNaCl also runs in) doesn't allow filesystem access, -// including accesses to the special files under /dev. C++11's -// std::random_device is instead exposed through a NaCl syscall. 
-# define _LIBCUDACXX_USING_NACL_RANDOM -# elif defined(_LIBCUDACXX_WIN32API) -# define _LIBCUDACXX_USING_WIN32_RANDOM +#ifdef __sun__ +# include +# ifdef _LITTLE_ENDIAN +# define _LIBCUDACXX_LITTLE_ENDIAN # else -# define _LIBCUDACXX_USING_DEV_RANDOM -# endif +# define _LIBCUDACXX_BIG_ENDIAN +# endif +#endif // __sun__ + +#if defined(__CloudABI__) + // Certain architectures provide arc4random(). Prefer using + // arc4random() over /dev/{u,}random to make it possible to obtain + // random data even when using sandboxing mechanisms such as chroots, + // Capsicum, etc. +# define _LIBCUDACXX_USING_ARC4_RANDOM +#elif defined(__Fuchsia__) || defined(__wasi__) +# define _LIBCUDACXX_USING_GETENTROPY +#elif defined(__native_client__) + // NaCl's sandbox (which PNaCl also runs in) doesn't allow filesystem access, + // including accesses to the special files under /dev. C++11's + // std::random_device is instead exposed through a NaCl syscall. +# define _LIBCUDACXX_USING_NACL_RANDOM +#elif defined(_LIBCUDACXX_WIN32API) +# define _LIBCUDACXX_USING_WIN32_RANDOM +#else +# define _LIBCUDACXX_USING_DEV_RANDOM +#endif -# ifndef _LIBCUDACXX_LITTLE_ENDIAN -# if defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_LITTLE_ENDIAN -# endif -# endif // _LIBCUDACXX_LITTLE_ENDIAN - -# if !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN) -# include -# if __BYTE_ORDER == __LITTLE_ENDIAN -# define _LIBCUDACXX_LITTLE_ENDIAN -# elif __BYTE_ORDER == __BIG_ENDIAN -# define _LIBCUDACXX_BIG_ENDIAN -# else // __BYTE_ORDER == __BIG_ENDIAN -# error unable to determine endian -# endif -# endif // !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN) +#ifndef _LIBCUDACXX_LITTLE_ENDIAN +#if defined(_CCCL_COMPILER_NVRTC) +# define _LIBCUDACXX_LITTLE_ENDIAN +#endif +#endif // _LIBCUDACXX_LITTLE_ENDIAN -# if __has_attribute(__no_sanitize__) && !defined(_CCCL_COMPILER_GCC) -# define _LIBCUDACXX_NO_CFI __attribute__((__no_sanitize__("cfi"))) -# else -# define _LIBCUDACXX_NO_CFI +#if !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN) +# include +# if __BYTE_ORDER == __LITTLE_ENDIAN +# define _LIBCUDACXX_LITTLE_ENDIAN +# elif __BYTE_ORDER == __BIG_ENDIAN +# define _LIBCUDACXX_BIG_ENDIAN +# else // __BYTE_ORDER == __BIG_ENDIAN +# error unable to determine endian # endif +#endif // !defined(_LIBCUDACXX_LITTLE_ENDIAN) && !defined(_LIBCUDACXX_BIG_ENDIAN) -# if (defined(__ISO_C_VISIBLE) && __ISO_C_VISIBLE >= 2011) || __cplusplus >= 201103L -# if defined(__FreeBSD__) -# define _LIBCUDACXX_HAS_QUICK_EXIT -# define _LIBCUDACXX_HAS_C11_FEATURES -# elif defined(__Fuchsia__) || defined(__wasi__) -# define _LIBCUDACXX_HAS_QUICK_EXIT -# define _LIBCUDACXX_HAS_TIMESPEC_GET -# define _LIBCUDACXX_HAS_C11_FEATURES -# elif defined(__linux__) -# if !defined(_LIBCUDACXX_HAS_MUSL_LIBC) -# if _LIBCUDACXX_GLIBC_PREREQ(2, 15) || defined(__BIONIC__) -# define _LIBCUDACXX_HAS_QUICK_EXIT -# endif -# if _LIBCUDACXX_GLIBC_PREREQ(2, 17) -# define _LIBCUDACXX_HAS_C11_FEATURES -# define _LIBCUDACXX_HAS_TIMESPEC_GET -# endif -# else // defined(_LIBCUDACXX_HAS_MUSL_LIBC) +#if __has_attribute(__no_sanitize__) && !defined(_CCCL_COMPILER_GCC) +# define _LIBCUDACXX_NO_CFI __attribute__((__no_sanitize__("cfi"))) +#else +# define _LIBCUDACXX_NO_CFI +#endif + +#if (defined(__ISO_C_VISIBLE) && __ISO_C_VISIBLE >= 2011) || __cplusplus >= 201103L +# if defined(__FreeBSD__) +# define _LIBCUDACXX_HAS_QUICK_EXIT +# define _LIBCUDACXX_HAS_C11_FEATURES +# elif defined(__Fuchsia__) || defined(__wasi__) +# define 
_LIBCUDACXX_HAS_QUICK_EXIT +# define _LIBCUDACXX_HAS_TIMESPEC_GET +# define _LIBCUDACXX_HAS_C11_FEATURES +# elif defined(__linux__) +# if !defined(_LIBCUDACXX_HAS_MUSL_LIBC) +# if _LIBCUDACXX_GLIBC_PREREQ(2, 15) || defined(__BIONIC__) # define _LIBCUDACXX_HAS_QUICK_EXIT -# define _LIBCUDACXX_HAS_TIMESPEC_GET +# endif +# if _LIBCUDACXX_GLIBC_PREREQ(2, 17) # define _LIBCUDACXX_HAS_C11_FEATURES +# define _LIBCUDACXX_HAS_TIMESPEC_GET # endif -# endif // __linux__ -# endif - -# if defined(_CCCL_COMPILER_NVRTC) -# define __alignof(x) alignof(x) -# endif // _CCCL_COMPILER_NVRTC +# else // defined(_LIBCUDACXX_HAS_MUSL_LIBC) +# define _LIBCUDACXX_HAS_QUICK_EXIT +# define _LIBCUDACXX_HAS_TIMESPEC_GET +# define _LIBCUDACXX_HAS_C11_FEATURES +# endif +# endif // __linux__ +#endif -# if defined(_CCCL_COMPILER_MSVC) -# define __alignof__ __alignof -# endif +#if defined(_CCCL_COMPILER_NVRTC) +# define __alignof(x) alignof(x) +#endif // _CCCL_COMPILER_NVRTC -# define _LIBCUDACXX_ALIGNOF(_Tp) alignof(_Tp) -# define _LIBCUDACXX_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) +#if defined(_CCCL_COMPILER_MSVC) +# define __alignof__ __alignof +#endif -# if defined(_CCCL_COMPILER_MSVC) -# define _CCCL_ALIGNAS_TYPE(x) alignas(x) -# define _CCCL_ALIGNAS(x) __declspec(align(x)) -# elif __has_feature(cxx_alignas) -# define _CCCL_ALIGNAS_TYPE(x) alignas(x) -# define _CCCL_ALIGNAS(x) alignas(x) -# else -# define _CCCL_ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCUDACXX_ALIGNOF(x)))) -# define _CCCL_ALIGNAS(x) __attribute__((__aligned__(x))) -# endif // !_CCCL_COMPILER_MSVC && !__has_feature(cxx_alignas) +#define _LIBCUDACXX_ALIGNOF(_Tp) alignof(_Tp) +#define _LIBCUDACXX_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) -# define _LIBCUDACXX_TOSTRING2(_STR) #_STR -# define _LIBCUDACXX_TOSTRING(_STR) _LIBCUDACXX_TOSTRING2(_STR) +#if defined(_CCCL_COMPILER_MSVC) +# define _CCCL_ALIGNAS_TYPE(x) alignas(x) +# define _CCCL_ALIGNAS(x) __declspec(align(x)) +#elif __has_feature(cxx_alignas) +# define _CCCL_ALIGNAS_TYPE(x) alignas(x) +# define _CCCL_ALIGNAS(x) alignas(x) +#else +# define _CCCL_ALIGNAS_TYPE(x) __attribute__((__aligned__(_LIBCUDACXX_ALIGNOF(x)))) +# define _CCCL_ALIGNAS(x) __attribute__((__aligned__(x))) +#endif // !_CCCL_COMPILER_MSVC && !__has_feature(cxx_alignas) + +#define _LIBCUDACXX_TOSTRING2(_STR) #_STR +#define _LIBCUDACXX_TOSTRING(_STR) _LIBCUDACXX_TOSTRING2(_STR) // This is wrapped in __CUDA_ARCH__ to prevent error: "ignoring '#pragma unroll' // [-Werror=unknown-pragmas]" -# if defined(__CUDA_ARCH__) -# if defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_PRAGMA_UNROLL(_N) __pragma(_LIBCUDACXX_TOSTRING(unroll _N)) -# else // ^^^ _CCCL_COMPILER_MSVC ^^^ / vvv !_CCCL_COMPILER_MSVC vvv -# define _LIBCUDACXX_PRAGMA_UNROLL(_N) _Pragma(_LIBCUDACXX_TOSTRING(unroll _N)) -# endif // !_CCCL_COMPILER_MSVC -# else // ^^^ __CUDA_ARCH__ ^^^ / vvv !__CUDA_ARCH__ vvv -# define _LIBCUDACXX_PRAGMA_UNROLL(_N) -# endif // !__CUDA_ARCH__ +#if defined(__CUDA_ARCH__) +#if defined(_CCCL_COMPILER_MSVC) +# define _LIBCUDACXX_PRAGMA_UNROLL(_N) __pragma(_LIBCUDACXX_TOSTRING(unroll _N)) +#else // ^^^ _CCCL_COMPILER_MSVC ^^^ / vvv !_CCCL_COMPILER_MSVC vvv +# define _LIBCUDACXX_PRAGMA_UNROLL(_N) _Pragma(_LIBCUDACXX_TOSTRING(unroll _N)) +#endif // !_CCCL_COMPILER_MSVC +#else // ^^^ __CUDA_ARCH__ ^^^ / vvv !__CUDA_ARCH__ vvv +# define _LIBCUDACXX_PRAGMA_UNROLL(_N) +#endif // !__CUDA_ARCH__ -# if defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_ALWAYS_INLINE __forceinline -# else -# define _LIBCUDACXX_ALWAYS_INLINE __attribute__((__always_inline__)) -# 
endif // !_CCCL_COMPILER_MSVC +#if defined(_CCCL_COMPILER_MSVC) +#define _LIBCUDACXX_ALWAYS_INLINE __forceinline +#else +#define _LIBCUDACXX_ALWAYS_INLINE __attribute__ ((__always_inline__)) +#endif // !_CCCL_COMPILER_MSVC -# if defined(__cuda_std__) -# define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(size, ptr) (size <= 8) -# elif defined(_CCCL_COMPILER_CLANG) || defined(_CCCL_COMPILER_GCC) -# define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(...) __atomic_always_lock_free(__VA_ARGS__) -# endif // __cuda_std__ +#if defined(__cuda_std__) +#define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(size, ptr) (size <= 8) +#elif defined(_CCCL_COMPILER_CLANG) || defined(_CCCL_COMPILER_GCC) +#define _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(...) __atomic_always_lock_free(__VA_ARGS__) +#endif // __cuda_std__ // https://bugs.llvm.org/show_bug.cgi?id=44517 -# define __check_builtin(__x) (__has_builtin(__##__x) || __has_keyword(__##__x) || __has_feature(__x)) +#define __check_builtin(__x) (__has_builtin(__##__x) || \ + __has_keyword(__##__x) || \ + __has_feature(__x)) // We work around old clang versions (before clang-10) not supporting __has_builtin via __check_builtin // We work around old intel versions (before 2021.3) not supporting __has_builtin via __check_builtin @@ -449,422 +453,486 @@ extern "C++" { // MSVC needs manual handling, has no real way of checking builtins so all is manual // GCC needs manual handling, before gcc-10 as that finally supports __has_builtin -# if __check_builtin(array_rank) -# define _LIBCUDACXX_ARRAY_RANK(...) __array_rank(__VA_ARGS__) -# endif // __check_builtin(array_rank) +#if __check_builtin(array_rank) +#define _LIBCUDACXX_ARRAY_RANK(...) __array_rank(__VA_ARGS__) +#endif // __check_builtin(array_rank) // nvhpc has a bug where it supports __builtin_addressof but does not mark it via __check_builtin -# if __check_builtin(builtin_addressof) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVHPC) -# define _LIBCUDACXX_ADDRESSOF(...) __builtin_addressof(__VA_ARGS__) -# endif // __check_builtin(builtin_addressof) - -# if __check_builtin(builtin_bit_cast) || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1925) -# define _LIBCUDACXX_BIT_CAST(...) __builtin_bit_cast(__VA_ARGS__) -# endif // __check_builtin(builtin_bit_cast) - -# if __check_builtin(builtin_is_constant_evaluated) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 900) \ - || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1924 && !defined(_CCCL_CUDACC_BELOW_11_3)) -# define _LIBCUDACXX_IS_CONSTANT_EVALUATED(...) __builtin_is_constant_evaluated(__VA_ARGS__) -# endif // __check_builtin(builtin_is_constant_evaluated) +#if __check_builtin(builtin_addressof) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVHPC) +#define _LIBCUDACXX_ADDRESSOF(...) __builtin_addressof(__VA_ARGS__) +#endif // __check_builtin(builtin_addressof) + +#if __check_builtin(builtin_bit_cast) \ + || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1925) +#define _LIBCUDACXX_BIT_CAST(...) __builtin_bit_cast(__VA_ARGS__) +#endif // __check_builtin(builtin_bit_cast) + +#if __check_builtin(builtin_is_constant_evaluated) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 900) \ + || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1924 && !defined(_CCCL_CUDACC_BELOW_11_3)) +#define _LIBCUDACXX_IS_CONSTANT_EVALUATED(...) 
__builtin_is_constant_evaluated(__VA_ARGS__) +#endif // __check_builtin(builtin_is_constant_evaluated) // NVCC and NVRTC in C++11 mode freaks out about `__builtin_is_constant_evaluated`. -# if _CCCL_STD_VER < 2014 \ - && (defined(_CCCL_CUDA_COMPILER_NVCC) || defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_COMPILER_NVHPC)) -# undef _LIBCUDACXX_IS_CONSTANT_EVALUATED -# endif // _CCCL_STD_VER < 2014 && _CCCL_CUDA_COMPILER_NVCC - -# if __check_builtin(builtin_launder) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) -# define _LIBCUDACXX_LAUNDER(...) __builtin_launder(__VA_ARGS__) -# endif // __check_builtin(builtin_launder) +#if _CCCL_STD_VER < 2014 \ + && (defined(_CCCL_CUDA_COMPILER_NVCC) \ + || defined(_CCCL_COMPILER_NVRTC) \ + || defined(_CCCL_COMPILER_NVHPC)) +#undef _LIBCUDACXX_IS_CONSTANT_EVALUATED +#endif // _CCCL_STD_VER < 2014 && _CCCL_CUDA_COMPILER_NVCC + +#if __check_builtin(builtin_launder) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) +#define _LIBCUDACXX_LAUNDER(...) __builtin_launder(__VA_ARGS__) +#endif // __check_builtin(builtin_launder) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(decay) -# define _LIBCUDACXX_DECAY(...) __decay(__VA_ARGS__) -# endif // __check_builtin(decay) - -# if __check_builtin(has_nothrow_assign) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__) -# endif // __check_builtin(has_nothrow_assign) - -# if __check_builtin(has_nothrow_constructor) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__) -# endif // __check_builtin(has_nothrow_constructor) - -# if __check_builtin(has_nothrow_copy) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__) -# endif // __check_builtin(has_nothrow_copy) - -# if __check_builtin(has_trivial_constructor) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__) -# endif // __check_builtin(has_trivial_constructor) - -# if __check_builtin(has_trivial_destructor) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...) __has_trivial_destructor(__VA_ARGS__) -# endif // __check_builtin(has_trivial_destructor) - -# if __check_builtin(has_unique_object_representations) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) -# define _LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS(...) __has_unique_object_representations(__VA_ARGS__) -# endif // __check_builtin(has_unique_object_representations) - -# if __check_builtin(has_virtual_destructor) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) 
__has_virtual_destructor(__VA_ARGS__) -# endif // __check_builtin(has_virtual_destructor) - -# if __check_builtin(is_aggregate) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) \ - || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1914) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) -# endif // __check_builtin(is_aggregate) - -# if __check_builtin(is_array) -# define _LIBCUDACXX_IS_ARRAY(...) __is_array(__VA_ARGS__) -# endif // __check_builtin(is_array) +#if 0 // __check_builtin(decay) +#define _LIBCUDACXX_DECAY(...) __decay(__VA_ARGS__) +#endif // __check_builtin(decay) + +#if __check_builtin(has_nothrow_assign) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_NOTHROW_ASSIGN(...) __has_nothrow_assign(__VA_ARGS__) +#endif // __check_builtin(has_nothrow_assign) + +#if __check_builtin(has_nothrow_constructor) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_NOTHROW_CONSTRUCTOR(...) __has_nothrow_constructor(__VA_ARGS__) +#endif // __check_builtin(has_nothrow_constructor) + +#if __check_builtin(has_nothrow_copy) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_NOTHROW_COPY(...) __has_nothrow_copy(__VA_ARGS__) +#endif // __check_builtin(has_nothrow_copy) + +#if __check_builtin(has_trivial_constructor) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_TRIVIAL_CONSTRUCTOR(...) __has_trivial_constructor(__VA_ARGS__) +#endif // __check_builtin(has_trivial_constructor) + +#if __check_builtin(has_trivial_destructor) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_TRIVIAL_DESTRUCTOR(...) __has_trivial_destructor(__VA_ARGS__) +#endif // __check_builtin(has_trivial_destructor) + +#if __check_builtin(has_unique_object_representations) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) +#define _LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS(...) __has_unique_object_representations(__VA_ARGS__) +#endif // __check_builtin(has_unique_object_representations) + +#if __check_builtin(has_virtual_destructor) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(...) __has_virtual_destructor(__VA_ARGS__) +#endif // __check_builtin(has_virtual_destructor) + +#if __check_builtin(is_aggregate) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 700) \ + || (defined(_CCCL_COMPILER_MSVC) && _MSC_VER > 1914) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_AGGREGATE(...) __is_aggregate(__VA_ARGS__) +#endif // __check_builtin(is_aggregate) + +#if __check_builtin(is_array) +#define _LIBCUDACXX_IS_ARRAY(...) __is_array(__VA_ARGS__) +#endif // __check_builtin(is_array) // TODO: Clang incorrectly reports that __is_array is true for T[0]. // Re-enable the branch once https://llvm.org/PR54705 is fixed. 
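// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] Every _LIBCUDACXX_* trait
// macro in this region is defined only when the matching compiler builtin is
// known to work, so the type-traits headers can branch between the builtin and
// a portable fallback. Sketch of that pattern for a hypothetical
// __example_is_array (the real trait lives in <cuda/std/type_traits>):
#include <cuda/std/cstddef>
#include <cuda/std/type_traits>

#if defined(_LIBCUDACXX_IS_ARRAY) && !defined(_LIBCUDACXX_USE_IS_ARRAY_FALLBACK)
template <class _Tp>
struct __example_is_array
    : cuda::std::integral_constant<bool, _LIBCUDACXX_IS_ARRAY(_Tp)> // compiler builtin
{};
#else // portable fallback: partial specializations catch array types
template <class _Tp>
struct __example_is_array : cuda::std::false_type
{};
template <class _Tp>
struct __example_is_array<_Tp[]> : cuda::std::true_type
{};
template <class _Tp, cuda::std::size_t _Np>
struct __example_is_array<_Tp[_Np]> : cuda::std::true_type
{};
#endif
// ---------------------------------------------------------------------------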
-# ifndef _LIBCUDACXX_USE_IS_ARRAY_FALLBACK -# if defined(_CCCL_COMPILER_CLANG) -# define _LIBCUDACXX_USE_IS_ARRAY_FALLBACK -# endif // _CCCL_COMPILER_CLANG -# endif // !_LIBCUDACXX_USE_IS_ARRAY_FALLBACK - -# if __check_builtin(is_assignable) || defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_IS_ASSIGNABLE(...) __is_assignable(__VA_ARGS__) -# endif // __check_builtin(is_assignable) - -# if __check_builtin(is_base_of) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) -# endif // __check_builtin(is_base_of) - -# if __check_builtin(is_class) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) -# endif // __check_builtin(is_class) - -# if __check_builtin(is_constructible) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 800) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__) -# endif // __check_builtin(is_constructible) - -# if __check_builtin(is_convertible_to) || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_CONVERTIBLE_TO(...) __is_convertible_to(__VA_ARGS__) -# endif // __check_builtin(is_convertible_to) - -# if __check_builtin(is_destructible) || defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_IS_DESTRUCTIBLE(...) __is_destructible(__VA_ARGS__) -# endif // __check_builtin(is_destructible) - -# if __check_builtin(is_empty) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) -# endif // __check_builtin(is_empty) - -# if __check_builtin(is_enum) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) -# endif // __check_builtin(is_enum) - -# if __check_builtin(is_final) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_FINAL(...) __is_final(__VA_ARGS__) -# endif // __check_builtin(is_final) - -# if __check_builtin(is_function) && !defined(_CCCL_CUDA_COMPILER_NVCC) -# define _LIBCUDACXX_IS_FUNCTION(...) __is_function(__VA_ARGS__) -# endif // __check_builtin(is_function) - -# if __check_builtin(is_literal_type) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 406) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_LITERAL(...) __is_literal_type(__VA_ARGS__) -# endif // __check_builtin(is_literal_type) - -# if __check_builtin(is_lvalue_reference) -# define _LIBCUDACXX_IS_LVALUE_REFERENCE(...) __is_lvalue_reference(__VA_ARGS__) -# endif // __check_builtin(is_lvalue_reference) - -# ifndef _LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK -# if defined(_CCCL_CUDACC_BELOW_11_3) -# define _LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK -# endif // nvcc < 11.3 -# endif // !_LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK - -# if __check_builtin(is_nothrow_assignable) || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(...) 
__is_nothrow_assignable(__VA_ARGS__) -# endif // __check_builtin(is_nothrow_assignable) - -# if __check_builtin(is_nothrow_constructible) || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_NOTHROW_CONSTRUCTIBLE(...) __is_nothrow_constructible(__VA_ARGS__) -# endif // __check_builtin(is_nothrow_constructible) - -# if __check_builtin(is_nothrow_destructible) || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_NOTHROW_DESTRUCTIBLE(...) __is_nothrow_destructible(__VA_ARGS__) -# endif // __check_builtin(is_nothrow_destructible) - -# if __check_builtin(is_object) -# define _LIBCUDACXX_IS_OBJECT(...) __is_object(__VA_ARGS__) -# endif // __check_builtin(is_object) - -# ifndef _LIBCUDACXX_USE_IS_OBJECT_FALLBACK -# if defined(_CCCL_CUDACC_BELOW_11_3) -# define _LIBCUDACXX_USE_IS_OBJECT_FALLBACK -# endif // nvcc < 11.3 -# endif // !_LIBCUDACXX_USE_IS_OBJECT_FALLBACK - -# if __check_builtin(is_pod) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) -# endif // __check_builtin(is_pod) +#ifndef _LIBCUDACXX_USE_IS_ARRAY_FALLBACK +#if defined(_CCCL_COMPILER_CLANG) +#define _LIBCUDACXX_USE_IS_ARRAY_FALLBACK +#endif // _CCCL_COMPILER_CLANG +#endif // !_LIBCUDACXX_USE_IS_ARRAY_FALLBACK + +#if __check_builtin(is_assignable) \ + || defined(_CCCL_COMPILER_MSVC) +#define _LIBCUDACXX_IS_ASSIGNABLE(...) __is_assignable(__VA_ARGS__) +#endif // __check_builtin(is_assignable) + +#if __check_builtin(is_base_of) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_BASE_OF(...) __is_base_of(__VA_ARGS__) +#endif // __check_builtin(is_base_of) + +#if __check_builtin(is_class) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_CLASS(...) __is_class(__VA_ARGS__) +#endif // __check_builtin(is_class) + +#if __check_builtin(is_constructible) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 800) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_CONSTRUCTIBLE(...) __is_constructible(__VA_ARGS__) +#endif // __check_builtin(is_constructible) + +#if __check_builtin(is_convertible_to) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_CONVERTIBLE_TO(...) __is_convertible_to(__VA_ARGS__) +#endif // __check_builtin(is_convertible_to) + +#if __check_builtin(is_destructible) \ + || defined(_CCCL_COMPILER_MSVC) +#define _LIBCUDACXX_IS_DESTRUCTIBLE(...) __is_destructible(__VA_ARGS__) +#endif // __check_builtin(is_destructible) + +#if __check_builtin(is_empty) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_EMPTY(...) __is_empty(__VA_ARGS__) +#endif // __check_builtin(is_empty) + +#if __check_builtin(is_enum) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_ENUM(...) __is_enum(__VA_ARGS__) +#endif // __check_builtin(is_enum) + +#if __check_builtin(is_final) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_FINAL(...) 
__is_final(__VA_ARGS__) +#endif // __check_builtin(is_final) + +#if __check_builtin(is_function) \ + && !defined(_CCCL_CUDA_COMPILER_NVCC) +#define _LIBCUDACXX_IS_FUNCTION(...) __is_function(__VA_ARGS__) +#endif // __check_builtin(is_function) + +#if __check_builtin(is_literal_type) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 406) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_LITERAL(...) __is_literal_type(__VA_ARGS__) +#endif // __check_builtin(is_literal_type) + +#if __check_builtin(is_lvalue_reference) +#define _LIBCUDACXX_IS_LVALUE_REFERENCE(...) __is_lvalue_reference(__VA_ARGS__) +#endif // __check_builtin(is_lvalue_reference) + +#ifndef _LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK +#if defined(_CCCL_CUDACC_BELOW_11_3) +#define _LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK +#endif // nvcc < 11.3 +#endif // !_LIBCUDACXX_USE_IS_LVALUE_REFERENCE_FALLBACK + +#if __check_builtin(is_nothrow_assignable) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(...) __is_nothrow_assignable(__VA_ARGS__) +#endif // __check_builtin(is_nothrow_assignable) + +#if __check_builtin(is_nothrow_constructible) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_NOTHROW_CONSTRUCTIBLE(...) __is_nothrow_constructible(__VA_ARGS__) +#endif // __check_builtin(is_nothrow_constructible) + +#if __check_builtin(is_nothrow_destructible) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_NOTHROW_DESTRUCTIBLE(...) __is_nothrow_destructible(__VA_ARGS__) +#endif // __check_builtin(is_nothrow_destructible) + +#if __check_builtin(is_object) +#define _LIBCUDACXX_IS_OBJECT(...) __is_object(__VA_ARGS__) +#endif // __check_builtin(is_object) + +#ifndef _LIBCUDACXX_USE_IS_OBJECT_FALLBACK +#if defined(_CCCL_CUDACC_BELOW_11_3) +#define _LIBCUDACXX_USE_IS_OBJECT_FALLBACK +#endif // nvcc < 11.3 +#endif // !_LIBCUDACXX_USE_IS_OBJECT_FALLBACK + +#if __check_builtin(is_pod) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_POD(...) __is_pod(__VA_ARGS__) +#endif // __check_builtin(is_pod) // libstdc++ defines this as a function, breaking functionality -# if 0 // __check_builtin(is_pointer) -# define _LIBCUDACXX_IS_POINTER(...) __is_pointer(__VA_ARGS__) -# endif // __check_builtin(is_pointer) +#if 0 // __check_builtin(is_pointer) +#define _LIBCUDACXX_IS_POINTER(...) __is_pointer(__VA_ARGS__) +#endif // __check_builtin(is_pointer) -# if __check_builtin(is_polymorphic) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) -# endif // __check_builtin(is_polymorphic) +#if __check_builtin(is_polymorphic) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_POLYMORPHIC(...) __is_polymorphic(__VA_ARGS__) +#endif // __check_builtin(is_polymorphic) -# if __check_builtin(is_reference) -# define _LIBCUDACXX_IS_REFERENCE(...) __is_reference(__VA_ARGS__) -# endif // __check_builtin(is_reference) +#if __check_builtin(is_reference) +#define _LIBCUDACXX_IS_REFERENCE(...) 
__is_reference(__VA_ARGS__) +#endif // __check_builtin(is_reference) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(is_referenceable) -# define _LIBCUDACXX_IS_REFERENCEABLE(...) __is_referenceable(__VA_ARGS__) -# endif // __check_builtin(is_referenceable) +#if 0 // __check_builtin(is_referenceable) +#define _LIBCUDACXX_IS_REFERENCEABLE(...) __is_referenceable(__VA_ARGS__) +#endif // __check_builtin(is_referenceable) -# if __check_builtin(is_rvalue_reference) -# define _LIBCUDACXX_IS_RVALUE_REFERENCE(...) __is_rvalue_reference(__VA_ARGS__) -# endif // __check_builtin(is_rvalue_reference) +#if __check_builtin(is_rvalue_reference) +#define _LIBCUDACXX_IS_RVALUE_REFERENCE(...) __is_rvalue_reference(__VA_ARGS__) +#endif // __check_builtin(is_rvalue_reference) -# if __check_builtin(is_same) && !defined(_CCCL_CUDA_COMPILER_NVCC) -# define _LIBCUDACXX_IS_SAME(...) __is_same(__VA_ARGS__) -# endif // __check_builtin(is_same) +#if __check_builtin(is_same) && !defined(_CCCL_CUDA_COMPILER_NVCC) +#define _LIBCUDACXX_IS_SAME(...) __is_same(__VA_ARGS__) +#endif // __check_builtin(is_same) // libstdc++ defines this as a function, breaking functionality -# if 0 // __check_builtin(is_scalar) -# define _LIBCUDACXX_IS_SCALAR(...) __is_scalar(__VA_ARGS__) -# endif // __check_builtin(is_scalar) +#if 0 // __check_builtin(is_scalar) +#define _LIBCUDACXX_IS_SCALAR(...) __is_scalar(__VA_ARGS__) +#endif // __check_builtin(is_scalar) // libstdc++ defines this as a function, breaking functionality -# if 0 // __check_builtin(is_signed) -# define _LIBCUDACXX_IS_SIGNED(...) __is_signed(__VA_ARGS__) -# endif // __check_builtin(is_signed) - -# if __check_builtin(is_standard_layout) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) __is_standard_layout(__VA_ARGS__) -# endif // __check_builtin(is_standard_layout) - -# if __check_builtin(is_trivial) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 405) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) -# endif // __check_builtin(is_trivial) - -# if __check_builtin(is_trivially_assignable) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) -# endif // __check_builtin(is_trivially_assignable) - -# if __check_builtin(is_trivially_constructible) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) __is_trivially_constructible(__VA_ARGS__) -# endif // __check_builtin(is_trivially_constructible) - -# if __check_builtin(is_trivially_copyable) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) -# endif // __check_builtin(is_trivially_copyable) - -# if __check_builtin(is_trivially_destructible) || defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_IS_TRIVIALLY_DESTRUCTIBLE(...) 
__is_trivially_destructible(__VA_ARGS__) -# endif // __check_builtin(is_trivially_destructible) - -# if __check_builtin(is_union) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) || defined(_CCCL_COMPILER_MSVC) \ - || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) -# endif // __check_builtin(is_union) - -# if __check_builtin(is_unsigned) -# define _LIBCUDACXX_IS_UNSIGNED(...) __is_unsigned(__VA_ARGS__) -# endif // __check_builtin(is_unsigned) - -# ifndef _LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK -# if defined(_CCCL_CUDACC_BELOW_11_3) -# define _LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK -# endif // nvcc < 11.3 -# endif // !_LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK +#if 0 // __check_builtin(is_signed) +#define _LIBCUDACXX_IS_SIGNED(...) __is_signed(__VA_ARGS__) +#endif // __check_builtin(is_signed) + +#if __check_builtin(is_standard_layout) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_STANDARD_LAYOUT(...) __is_standard_layout(__VA_ARGS__) +#endif // __check_builtin(is_standard_layout) + +#if __check_builtin(is_trivial) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 405) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_TRIVIAL(...) __is_trivial(__VA_ARGS__) +#endif // __check_builtin(is_trivial) + +#if __check_builtin(is_trivially_assignable) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_TRIVIALLY_ASSIGNABLE(...) __is_trivially_assignable(__VA_ARGS__) +#endif // __check_builtin(is_trivially_assignable) + +#if __check_builtin(is_trivially_constructible) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_TRIVIALLY_CONSTRUCTIBLE(...) __is_trivially_constructible(__VA_ARGS__) +#endif // __check_builtin(is_trivially_constructible) + +#if __check_builtin(is_trivially_copyable) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 501) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_TRIVIALLY_COPYABLE(...) __is_trivially_copyable(__VA_ARGS__) +#endif // __check_builtin(is_trivially_copyable) + +#if __check_builtin(is_trivially_destructible) \ + || defined(_CCCL_COMPILER_MSVC) +#define _LIBCUDACXX_IS_TRIVIALLY_DESTRUCTIBLE(...) __is_trivially_destructible(__VA_ARGS__) +#endif // __check_builtin(is_trivially_destructible) + +#if __check_builtin(is_union) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 403) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_IS_UNION(...) __is_union(__VA_ARGS__) +#endif // __check_builtin(is_union) + +#if __check_builtin(is_unsigned) +#define _LIBCUDACXX_IS_UNSIGNED(...) __is_unsigned(__VA_ARGS__) +#endif // __check_builtin(is_unsigned) + +#ifndef _LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK +#if defined(_CCCL_CUDACC_BELOW_11_3) +#define _LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK +#endif // nvcc < 11.3 +#endif // !_LIBCUDACXX_USE_IS_UNSIGNED_FALLBACK // libstdc++ defines this as a function, breaking functionality -# if 0 // __check_builtin(is_void) -# define _LIBCUDACXX_IS_VOID(...) __is_void(__VA_ARGS__) -# endif // __check_builtin(is_void) +#if 0 // __check_builtin(is_void) +#define _LIBCUDACXX_IS_VOID(...) 
__is_void(__VA_ARGS__) +#endif // __check_builtin(is_void) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(make_signed) -# define _LIBCUDACXX_MAKE_SIGNED(...) __make_signed(__VA_ARGS__) -# endif // __check_builtin(make_signed) +#if 0 // __check_builtin(make_signed) +#define _LIBCUDACXX_MAKE_SIGNED(...) __make_signed(__VA_ARGS__) +#endif // __check_builtin(make_signed) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(make_unsigned) -# define _LIBCUDACXX_MAKE_UNSIGNED(...) __make_unsigned(__VA_ARGS__) -# endif // __check_builtin(make_unsigned) +#if 0 // __check_builtin(make_unsigned) +#define _LIBCUDACXX_MAKE_UNSIGNED(...) __make_unsigned(__VA_ARGS__) +#endif // __check_builtin(make_unsigned) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_all_extents) -# define _LIBCUDACXX_REMOVE_ALL_EXTENTS(...) __remove_all_extents(__VA_ARGS__) -# endif // __check_builtin(remove_all_extents) +#if 0 // __check_builtin(remove_all_extents) +#define _LIBCUDACXX_REMOVE_ALL_EXTENTS(...) __remove_all_extents(__VA_ARGS__) +#endif // __check_builtin(remove_all_extents) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_const) -# define _LIBCUDACXX_REMOVE_CONST(...) __remove_const(__VA_ARGS__) -# endif // __check_builtin(remove_const) +#if 0 // __check_builtin(remove_const) +#define _LIBCUDACXX_REMOVE_CONST(...) __remove_const(__VA_ARGS__) +#endif // __check_builtin(remove_const) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_cv) -# define _LIBCUDACXX_REMOVE_CV(...) __remove_cv(__VA_ARGS__) -# endif // __check_builtin(remove_cv) +#if 0 // __check_builtin(remove_cv) +#define _LIBCUDACXX_REMOVE_CV(...) __remove_cv(__VA_ARGS__) +#endif // __check_builtin(remove_cv) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_cvref) -# define _LIBCUDACXX_REMOVE_CVREF(...) __remove_cvref(__VA_ARGS__) -# endif // __check_builtin(remove_cvref) +#if 0 // __check_builtin(remove_cvref) +#define _LIBCUDACXX_REMOVE_CVREF(...) __remove_cvref(__VA_ARGS__) +#endif // __check_builtin(remove_cvref) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_extent) -# define _LIBCUDACXX_REMOVE_EXTENT(...) __remove_extent(__VA_ARGS__) -# endif // __check_builtin(remove_extent) +#if 0 // __check_builtin(remove_extent) +#define _LIBCUDACXX_REMOVE_EXTENT(...) __remove_extent(__VA_ARGS__) +#endif // __check_builtin(remove_extent) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_pointer) -# define _LIBCUDACXX_REMOVE_POINTER(...) __remove_pointer(__VA_ARGS__) -# endif // __check_builtin(remove_pointer) +#if 0 // __check_builtin(remove_pointer) +#define _LIBCUDACXX_REMOVE_POINTER(...) __remove_pointer(__VA_ARGS__) +#endif // __check_builtin(remove_pointer) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_reference_t) -# define _LIBCUDACXX_REMOVE_REFERENCE_T(...) __remove_reference_t(__VA_ARGS__) -# endif // __check_builtin(remove_reference_t) +#if 0 // __check_builtin(remove_reference_t) +#define _LIBCUDACXX_REMOVE_REFERENCE_T(...) __remove_reference_t(__VA_ARGS__) +#endif // __check_builtin(remove_reference_t) // Disabled due to libstdc++ conflict -# if 0 // __check_builtin(remove_volatile) -# define _LIBCUDACXX_REMOVE_VOLATILE(...) __remove_volatile(__VA_ARGS__) -# endif // __check_builtin(remove_volatile) +#if 0 // __check_builtin(remove_volatile) +#define _LIBCUDACXX_REMOVE_VOLATILE(...) 
__remove_volatile(__VA_ARGS__) +#endif // __check_builtin(remove_volatile) -# if __check_builtin(underlying_type) || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) \ - || defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__) -# endif // __check_builtin(underlying_type) +#if __check_builtin(underlying_type) \ + || (defined(_CCCL_COMPILER_GCC) && _GNUC_VER >= 407) \ + || defined(_CCCL_COMPILER_MSVC) \ + || defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_UNDERLYING_TYPE(...) __underlying_type(__VA_ARGS__) +#endif // __check_builtin(underlying_type) -# if defined(_CCCL_COMPILER_CLANG) +#if defined(_CCCL_COMPILER_CLANG) // _LIBCUDACXX_ALTERNATE_STRING_LAYOUT is an old name for // _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT left here for backward compatibility. -# if defined(_LIBCUDACXX_ALTERNATE_STRING_LAYOUT) -# define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT -# endif +#if defined(_LIBCUDACXX_ALTERNATE_STRING_LAYOUT) +#define _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT +#endif -# if __cplusplus < 201103L +#if __cplusplus < 201103L typedef __char16_t char16_t; typedef __char32_t char32_t; -# endif +#endif -# if !(__has_feature(cxx_strong_enums)) -# define _LIBCUDACXX_HAS_NO_STRONG_ENUMS -# endif +#if !(__has_feature(cxx_strong_enums)) +#define _LIBCUDACXX_HAS_NO_STRONG_ENUMS +#endif -# if !(__has_feature(cxx_lambdas)) -# define _LIBCUDACXX_HAS_NO_LAMBDAS -# endif +#if !(__has_feature(cxx_lambdas)) +#define _LIBCUDACXX_HAS_NO_LAMBDAS +#endif -# if !(__has_feature(cxx_nullptr)) -# if (__has_extension(cxx_nullptr) || __has_keyword(__nullptr)) \ - && defined(_LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR) -# define nullptr __nullptr -# else -# define _LIBCUDACXX_HAS_NO_NULLPTR -# endif -# endif +#if !(__has_feature(cxx_nullptr)) +# if (__has_extension(cxx_nullptr) || __has_keyword(__nullptr)) && defined(_LIBCUDACXX_ABI_ALWAYS_USE_CXX11_NULLPTR) +# define nullptr __nullptr +# else +# define _LIBCUDACXX_HAS_NO_NULLPTR +# endif +#endif -# if !(__has_feature(cxx_rvalue_references)) -# define _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES -# endif +#if !(__has_feature(cxx_rvalue_references)) +#define _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES +#endif -# if !(__has_feature(cxx_auto_type)) -# define _LIBCUDACXX_HAS_NO_AUTO_TYPE -# endif +#if !(__has_feature(cxx_auto_type)) +#define _LIBCUDACXX_HAS_NO_AUTO_TYPE +#endif -# if !(__has_feature(cxx_variadic_templates)) -# define _LIBCUDACXX_HAS_NO_VARIADICS -# endif +#if !(__has_feature(cxx_variadic_templates)) +#define _LIBCUDACXX_HAS_NO_VARIADICS +#endif -# if !(__has_feature(cxx_generalized_initializers)) -# define _LIBCUDACXX_HAS_NO_GENERALIZED_INITIALIZERS -# endif +#if !(__has_feature(cxx_generalized_initializers)) +#define _LIBCUDACXX_HAS_NO_GENERALIZED_INITIALIZERS +#endif // Objective-C++ features (opt-in) -# if __has_feature(objc_arc) -# define _LIBCUDACXX_HAS_OBJC_ARC -# endif +#if __has_feature(objc_arc) +#define _LIBCUDACXX_HAS_OBJC_ARC +#endif -# if __has_feature(objc_arc_weak) -# define _LIBCUDACXX_HAS_OBJC_ARC_WEAK -# endif +#if __has_feature(objc_arc_weak) +#define _LIBCUDACXX_HAS_OBJC_ARC_WEAK +#endif -# if !(__has_feature(cxx_variable_templates)) -# define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES -# endif +#if !(__has_feature(cxx_variable_templates)) +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif -# if !(__has_feature(cxx_noexcept)) -# define _LIBCUDACXX_HAS_NO_NOEXCEPT -# endif +#if !(__has_feature(cxx_noexcept)) +#define _LIBCUDACXX_HAS_NO_NOEXCEPT +#endif // Allow for 
build-time disabling of unsigned integer sanitization -# if !defined(_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK) && __has_attribute(no_sanitize) -# define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK \ - __attribute__((__no_sanitize__("unsigned-integer-overflow"))) -# endif +#if !defined(_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK) && __has_attribute(no_sanitize) +#define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK __attribute__((__no_sanitize__("unsigned-integer-overflow"))) +#endif -# define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__ +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__ -# elif defined(_CCCL_COMPILER_GCC) +#elif defined(_CCCL_COMPILER_GCC) -# ifndef _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK +#ifndef _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK // FIXME: GCC 8.0 supports this trait, but it has a bug. // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91592 // https://godbolt.org/z/IljfIw -# define _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK -# endif // _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK +#define _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK +#endif // _LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK // GCC 5 supports variable templates -# if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L -# define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES -# endif +#if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif -# if _GNUC_VER < 600 -# define _LIBCUDACXX_GCC_MATH_IN_STD -# endif +#if _GNUC_VER < 600 +#define _LIBCUDACXX_GCC_MATH_IN_STD +#endif // NVCC cannot properly handle some deductions occuring within NOEXCEPT // C++17 mode causes reference instatiation errors in tuple -# if (_GNUC_VER >= 702 && _GNUC_VER <= 805) -# if defined(_CCCL_CUDA_COMPILER_NVCC) && _CCCL_STD_VER == 2017 -# define _LIBCUDACXX_NO_TUPLE_NOEXCEPT -# endif -# endif +#if (_GNUC_VER >= 702 && _GNUC_VER <= 805) +#if defined(_CCCL_CUDA_COMPILER_NVCC) && _CCCL_STD_VER == 2017 +#define _LIBCUDACXX_NO_TUPLE_NOEXCEPT +#endif +#endif -# define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__ +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING __extension__ -# elif defined(_CCCL_COMPILER_MSVC) +#elif defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_WARNING(x) __pragma(message(__FILE__ "(" _LIBCUDACXX_TOSTRING(__LINE__) ") : warning note: " x)) +#define _LIBCUDACXX_WARNING(x) __pragma(message(__FILE__ "(" _LIBCUDACXX_TOSTRING(__LINE__) ") : warning note: " x)) // https://github.com/microsoft/STL/blob/master/stl/inc/yvals_core.h#L353 // warning C4100: 'quack': unreferenced formal parameter @@ -878,125 +946,143 @@ typedef __char32_t char32_t; // warning C4668: 'meow' is not defined as a preprocessor macro, replacing with '0' for '#if/#elif' // warning C4800: 'boo': forcing value to bool 'true' or 'false' (performance warning) // warning C4996: 'meow': was declared deprecated -# define _LIBCUDACXX_MSVC_DISABLED_WARNINGS 4100 4127 4180 4197 4296 4324 4455 4503 4522 4668 4800 4996 /**/ - -# if _MSC_VER < 1900 -# error "MSVC versions prior to Visual Studio 2015 are not supported" -# endif +#define _LIBCUDACXX_MSVC_DISABLED_WARNINGS \ + 4100 \ + 4127 \ + 4180 \ + 4197 \ + 4296 \ + 4324 \ + 4455 \ + 4503 \ + 4522 \ + 4668 \ + 4800 \ + 4996 \ + /**/ + +#if _MSC_VER < 1900 +#error "MSVC versions prior to Visual Studio 2015 are not supported" +#endif // MSVC implemented P0030R1 in 15.7, only available under C++17 -# if _MSC_VER < 1914 -# define _LIBCUDACXX_NO_HOST_CPP17_HYPOT -# endif +#if _MSC_VER < 1914 +#define 
_LIBCUDACXX_NO_HOST_CPP17_HYPOT +#endif -# if _MSC_VER < 1920 -# define _LIBCUDACXX_HAS_NO_NOEXCEPT_SFINAE -# define _LIBCUDACXX_HAS_NO_LOGICAL_METAFUNCTION_ALIASES -# endif +#if _MSC_VER < 1920 +#define _LIBCUDACXX_HAS_NO_NOEXCEPT_SFINAE +#define _LIBCUDACXX_HAS_NO_LOGICAL_METAFUNCTION_ALIASES +#endif // MSVC exposed __iso_volatile intrinsics beginning on 1924 for x86 -# if _MSC_VER < 1924 -# define _LIBCUDACXX_MSVC_HAS_NO_ISO_INTRIN -# endif +#if _MSC_VER < 1924 + #define _LIBCUDACXX_MSVC_HAS_NO_ISO_INTRIN +#endif -# if _CCCL_STD_VER < 2014 -# define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES -# endif +#if _CCCL_STD_VER < 2014 +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif -# define _LIBCUDACXX_WEAK +#define _LIBCUDACXX_WEAK -# define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION +#define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION -# define _LIBCUDACXX_DISABLE_EXTENSION_WARNING +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING -# elif defined(_CCCL_COMPILER_IBM) +#elif defined(_CCCL_COMPILER_IBM) -# define _ATTRIBUTE(x) __attribute__((x)) +#define _ATTRIBUTE(x) __attribute__((x)) -# define _LIBCUDACXX_HAS_NO_UNICODE_CHARS -# define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#define _LIBCUDACXX_HAS_NO_UNICODE_CHARS +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES -# if defined(_AIX) -# define __MULTILOCALE_API -# endif +#if defined(_AIX) +#define __MULTILOCALE_API +#endif -# define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION +#define _LIBCUDACXX_HAS_NO_VECTOR_EXTENSION -# elif defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_COMPILER_NVHPC) +#elif defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_COMPILER_NVHPC) -# if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L -# define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES -# endif +#if !defined(__cpp_variable_templates) || __cpp_variable_templates < 201304L +#define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES +#endif -# define _LIBCUDACXX_DISABLE_EXTENSION_WARNING +#define _LIBCUDACXX_DISABLE_EXTENSION_WARNING -# endif // _CCCL_COMPILER_[CLANG|GCC|MSVC|IBM|NVRTC] +#endif // _CCCL_COMPILER_[CLANG|GCC|MSVC|IBM|NVRTC] -# if defined(_CCCL_COMPILER_NVHPC) && !defined(__cuda_std__) +#if defined(_CCCL_COMPILER_NVHPC) && !defined(__cuda_std__) // Forcefully disable visibility controls when used as the standard library with NVC++. // TODO: reevaluate. 
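// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] Several of the branches
// above decide whether the compiler can be trusted with C++14 variable
// templates and, if not, define _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES.
// Downstream headers then guard every *_v shortcut on that macro;
// __example_is_void_v is an illustrative name, not a libcu++ symbol:
#include <cuda/std/type_traits>

#ifndef _LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES
template <class _Tp>
constexpr bool __example_is_void_v = cuda::std::is_void<_Tp>::value; // C++14 shortcut
#endif // otherwise only the class-template form of the trait is exposed
// ---------------------------------------------------------------------------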
-# define _LIBCUDACXX_HIDE_FROM_ABI -# ifndef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE -# define _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE -# endif -# endif +#define _LIBCUDACXX_HIDE_FROM_ABI +#ifndef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#define _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#endif +#endif -# ifndef _LIBCUDACXX_FREESTANDING -# if defined(__cuda_std__) || !defined(__STDC_HOSTED__) -# define _LIBCUDACXX_FREESTANDING -# endif -# endif // !_LIBCUDACXX_FREESTANDING +#ifndef _LIBCUDACXX_FREESTANDING +#if defined(__cuda_std__) \ + || !defined(__STDC_HOSTED__) +# define _LIBCUDACXX_FREESTANDING +#endif +#endif // !_LIBCUDACXX_FREESTANDING -# ifndef _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_COMPILER_NVHPC) && !defined(__cuda_std__)) -# define _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS -# endif -# endif // _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS +#ifndef _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_COMPILER_NVHPC) && !defined(__cuda_std__)) +# define _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS +#endif +#endif // _LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS -# ifndef _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT -# endif -# endif // _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT +#ifndef _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT +#endif +#endif // _LIBCUDACXX_HAS_CUDA_ATOMIC_EXT -# ifndef _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP -# endif -# endif // _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP - -# ifndef _LIBCUDACXX_HAS_NO_ASAN -# if defined(_CCCL_COMPILER_GCC) -# if !defined(__SANITIZE_ADDRESS__) -# define _LIBCUDACXX_HAS_NO_ASAN -# endif // !__SANITIZE_ADDRESS__ -# elif defined(_CCCL_COMPILER_CLANG) -# if !__has_feature(address_sanitizer) -# define _LIBCUDACXX_HAS_NO_ASAN -# endif // !__has_feature(address_sanitizer) -# else -# define _LIBCUDACXX_HAS_NO_ASAN -# endif // _CCCL_COMPILER[MSVC|IBM|NVHPC|NVRTC] -# endif // _LIBCUDACXX_HAS_NO_ASAN - -# ifndef _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS -# if defined(__cuda_std__) || (defined(_CCCL_COMPILER_CLANG) && _LIBCUDACXX_CLANG_VER < 800) -# define _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS -# endif // __cuda_std__ -# endif // _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS - -# ifndef _LIBCUDACXX_HAS_NO_INT128 -# if defined(_CCCL_COMPILER_MSVC) || (defined(_CCCL_COMPILER_NVRTC) && !defined(__CUDACC_RTC_INT128__)) \ - || (defined(_CCCL_CUDA_COMPILER_NVCC) && (_CCCL_CUDACC_VER < 1105000)) || !defined(__SIZEOF_INT128__) -# define _LIBCUDACXX_HAS_NO_INT128 -# endif -# endif // !_LIBCUDACXX_HAS_NO_INT128 +#ifndef _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP +#endif +#endif // _LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP -# ifndef _LIBCUDACXX_HAS_NO_LONG_DOUBLE -# if defined(_CCCL_CUDACC) -# define _LIBCUDACXX_HAS_NO_LONG_DOUBLE -# endif -# endif // _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#ifndef _LIBCUDACXX_HAS_NO_ASAN +#if defined(_CCCL_COMPILER_GCC) +# if !defined(__SANITIZE_ADDRESS__) +# define _LIBCUDACXX_HAS_NO_ASAN +# endif // !__SANITIZE_ADDRESS__ +#elif defined(_CCCL_COMPILER_CLANG) +# if !__has_feature(address_sanitizer) +# define _LIBCUDACXX_HAS_NO_ASAN +# endif // !__has_feature(address_sanitizer) +#else +# define _LIBCUDACXX_HAS_NO_ASAN +#endif // _CCCL_COMPILER[MSVC|IBM|NVHPC|NVRTC] +#endif // _LIBCUDACXX_HAS_NO_ASAN + +#ifndef 
_LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS +#if defined(__cuda_std__) \ + || (defined(_CCCL_COMPILER_CLANG) && _LIBCUDACXX_CLANG_VER < 800) +# define _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS +#endif // __cuda_std__ +#endif // _LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS + +#ifndef _LIBCUDACXX_HAS_NO_INT128 +#if defined(_CCCL_COMPILER_MSVC) \ + || (defined(_CCCL_COMPILER_NVRTC) && !defined(__CUDACC_RTC_INT128__)) \ + || (defined(_CCCL_CUDA_COMPILER_NVCC) && (_CCCL_CUDACC_VER < 1105000)) \ + || !defined(__SIZEOF_INT128__) +# define _LIBCUDACXX_HAS_NO_INT128 +#endif +#endif // !_LIBCUDACXX_HAS_NO_INT128 + +#ifndef _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#if defined(_CCCL_CUDACC) +# define _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#endif +#endif // _LIBCUDACXX_HAS_NO_LONG_DOUBLE # ifndef _LIBCUDACXX_HAS_NVFP16 # if __has_include() \ @@ -1017,42 +1103,43 @@ typedef __char32_t char32_t; # endif # endif // !_LIBCUDACXX_HAS_NVBF16 -# ifndef _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK -# endif -# endif // _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK +#ifndef _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK +#endif +#endif // _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK -# ifndef _LIBCUDACXX_HAS_NO_PLATFORM_WAIT -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_NO_PLATFORM_WAIT -# endif -# endif // _LIBCUDACXX_HAS_NO_PLATFORM_WAIT +#ifndef _LIBCUDACXX_HAS_NO_PLATFORM_WAIT +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_NO_PLATFORM_WAIT +#endif +#endif // _LIBCUDACXX_HAS_NO_PLATFORM_WAIT -# ifndef _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO -# if (defined(_CCCL_COMPILER_MSVC) && _MSC_VER < 1920) || defined(_CCCL_COMPILER_NVRTC) \ - || defined(_CCCL_COMPILER_IBM) -# define _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO -# endif -# endif // _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO +#ifndef _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO +#if (defined(_CCCL_COMPILER_MSVC) && _MSC_VER < 1920) \ + || defined(_CCCL_COMPILER_NVRTC) \ + || defined(_CCCL_COMPILER_IBM) +#define _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO +#endif +#endif // _LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE -# endif -# endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#endif +#endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE -# ifndef _LIBCUDACXX_HAS_NO_TREE_BARRIER -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_NO_TREE_BARRIER -# endif -# endif // _LIBCUDACXX_HAS_NO_TREE_BARRIER +#ifndef _LIBCUDACXX_HAS_NO_TREE_BARRIER +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_NO_TREE_BARRIER +#endif +#endif // _LIBCUDACXX_HAS_NO_TREE_BARRIER -# ifndef _LIBCUDACXX_HAS_NO_WCHAR_H -# if defined(__cuda_std__) -# define _LIBCUDACXX_HAS_NO_WCHAR_H -# endif -# endif // _LIBCUDACXX_HAS_NO_WCHAR_H +#ifndef _LIBCUDACXX_HAS_NO_WCHAR_H +#if defined(__cuda_std__) +# define _LIBCUDACXX_HAS_NO_WCHAR_H +#endif +#endif // _LIBCUDACXX_HAS_NO_WCHAR_H # ifndef _LIBCUDACXX_NO_EXCEPTIONS # if !defined(LIBCUDACXX_ENABLE_EXCEPTIONS) || (defined(_CCCL_COMPILER_MSVC) && _HAS_EXCEPTIONS == 0) \ @@ -1063,508 +1150,405 @@ typedef __char32_t char32_t; // Try to find out if RTTI is disabled. // g++ and cl.exe have RTTI on by default and define a macro when it is. 
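// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] _LIBCUDACXX_NO_EXCEPTIONS
// (detected just above) and _LIBCUDACXX_NO_RTTI (detected just below) let the
// headers degrade gracefully when -fno-exceptions / -fno-rtti are in effect or
// when code is compiled for the device. A hypothetical helper that has to work
// in both modes:
template <class _Fn>
void __example_invoke(_Fn __fn)
{
#ifndef _LIBCUDACXX_NO_EXCEPTIONS
  try
  {
    __fn();
  }
  catch (...)
  {
    throw; // translate or rethrow as the caller requires
  }
#else // exceptions unavailable: any failure has to be reported without throwing
  __fn();
#endif
}
// ---------------------------------------------------------------------------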
-# ifndef _LIBCUDACXX_NO_RTTI -# if defined(__cuda_std__) || (defined(_CCCL_COMPILER_CLANG) && !(__has_feature(cxx_rtti))) \ - || (defined(_CCCL_COMPILER_GCC) && !defined(__GXX_RTTI)) || (defined(_CCCL_COMPILER_MSVC) && !defined(_CPPRTTI)) -# define _LIBCUDACXX_NO_RTTI -# endif -# endif // !_LIBCUDACXX_NO_RTTI - -# ifndef _LIBCUDACXX_NODEBUG_TYPE -# if defined(__cuda_std__) -# define _LIBCUDACXX_NODEBUG_TYPE -# elif __has_attribute(__nodebug__) && (defined(_CCCL_COMPILER_CLANG) && _LIBCUDACXX_CLANG_VER >= 1210) -# define _LIBCUDACXX_NODEBUG_TYPE __attribute__((nodebug)) -# else -# define _LIBCUDACXX_NODEBUG_TYPE -# endif -# endif // !_LIBCUDACXX_NODEBUG_TYPE +#ifndef _LIBCUDACXX_NO_RTTI +#if defined(__cuda_std__) \ + || (defined(_CCCL_COMPILER_CLANG) && !(__has_feature(cxx_rtti))) \ + || (defined(_CCCL_COMPILER_GCC) && !defined(__GXX_RTTI)) \ + || (defined(_CCCL_COMPILER_MSVC) && !defined(_CPPRTTI)) +# define _LIBCUDACXX_NO_RTTI +#endif +#endif // !_LIBCUDACXX_NO_RTTI + +#ifndef _LIBCUDACXX_NODEBUG_TYPE +#if defined(__cuda_std__) +# define _LIBCUDACXX_NODEBUG_TYPE +#elif __has_attribute(__nodebug__) \ + && (defined(_CCCL_COMPILER_CLANG) && _LIBCUDACXX_CLANG_VER >= 1210) +# define _LIBCUDACXX_NODEBUG_TYPE __attribute__((nodebug)) +#else +# define _LIBCUDACXX_NODEBUG_TYPE +#endif +#endif // !_LIBCUDACXX_NODEBUG_TYPE -# if defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +#if defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) -# ifdef _DLL -# define _LIBCUDACXX_CRT_FUNC __declspec(dllimport) -# else -# define _LIBCUDACXX_CRT_FUNC -# endif +#ifdef _DLL +# define _LIBCUDACXX_CRT_FUNC __declspec(dllimport) +#else +# define _LIBCUDACXX_CRT_FUNC +#endif -# if defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS -# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS -# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS -# define _LIBCUDACXX_EXPORTED_FROM_ABI -# elif defined(_LIBCUDACXX_BUILDING_LIBRARY) -# define _LIBCUDACXX_DLL_VIS __declspec(dllexport) -# if defined(__MINGW32__) -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS -# else -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS -# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS _LIBCUDACXX_DLL_VIS -# endif -# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllexport) -# else -# define _LIBCUDACXX_DLL_VIS __declspec(dllimport) -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS -# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS -# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllimport) -# endif +#if defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI +#elif defined(_LIBCUDACXX_BUILDING_LIBRARY) +# define _LIBCUDACXX_DLL_VIS __declspec(dllexport) +# if defined(__MINGW32__) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# else +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS _LIBCUDACXX_DLL_VIS +# endif +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllexport) +#else +# define _LIBCUDACXX_DLL_VIS 
__declspec(dllimport) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _LIBCUDACXX_DLL_VIS +# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +# define _LIBCUDACXX_EXPORTED_FROM_ABI __declspec(dllimport) +#endif -# define _LIBCUDACXX_TYPE_VIS _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_EXCEPTION_ABI _LIBCUDACXX_DLL_VIS -# define _LIBCUDACXX_HIDDEN -# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS -# define _LIBCUDACXX_TEMPLATE_VIS -# define _LIBCUDACXX_ENUM_VIS +#define _LIBCUDACXX_TYPE_VIS _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_EXCEPTION_ABI _LIBCUDACXX_DLL_VIS +#define _LIBCUDACXX_HIDDEN +#define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS +#define _LIBCUDACXX_TEMPLATE_VIS +#define _LIBCUDACXX_ENUM_VIS -# endif // defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +#endif // defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) -# ifndef _LIBCUDACXX_HIDDEN -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_HIDDEN __attribute__((__visibility__("hidden"))) -# else -# define _LIBCUDACXX_HIDDEN -# endif +#ifndef _LIBCUDACXX_HIDDEN +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_HIDDEN __attribute__ ((__visibility__("hidden"))) +# else +# define _LIBCUDACXX_HIDDEN # endif +#endif -# ifndef _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +#ifndef _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) // The inline should be removed once PR32114 is resolved -# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS inline _LIBCUDACXX_HIDDEN -# else -# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS -# endif +# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS inline _LIBCUDACXX_HIDDEN +# else +# define _LIBCUDACXX_METHOD_TEMPLATE_IMPLICIT_INSTANTIATION_VIS # endif +#endif -# ifndef _LIBCUDACXX_FUNC_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_FUNC_VIS _CCCL_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_FUNC_VIS -# endif +#ifndef _LIBCUDACXX_FUNC_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_FUNC_VIS _CCCL_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_FUNC_VIS # endif +#endif -# ifndef _LIBCUDACXX_TYPE_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_TYPE_VIS _CCCL_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_TYPE_VIS -# endif +#ifndef _LIBCUDACXX_TYPE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_TYPE_VIS _CCCL_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_TYPE_VIS # endif +#endif -# ifndef _LIBCUDACXX_TEMPLATE_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# if __has_attribute(__type_visibility__) -# define _LIBCUDACXX_TEMPLATE_VIS _CCCL_TYPE_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_TEMPLATE_VIS _CCCL_VISIBILITY_DEFAULT -# endif +#ifndef _LIBCUDACXX_TEMPLATE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# if __has_attribute(__type_visibility__) +# define _LIBCUDACXX_TEMPLATE_VIS _CCCL_TYPE_VISIBILITY_DEFAULT # else -# define _LIBCUDACXX_TEMPLATE_VIS +# define _LIBCUDACXX_TEMPLATE_VIS _CCCL_VISIBILITY_DEFAULT # endif +# else +# define _LIBCUDACXX_TEMPLATE_VIS # endif +#endif -# ifndef _LIBCUDACXX_EXPORTED_FROM_ABI -# if 
!defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_EXPORTED_FROM_ABI _CCCL_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_EXPORTED_FROM_ABI -# endif +#ifndef _LIBCUDACXX_EXPORTED_FROM_ABI +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_EXPORTED_FROM_ABI _CCCL_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_EXPORTED_FROM_ABI # endif +#endif -# ifndef _LIBCUDACXX_OVERRIDABLE_FUNC_VIS -# define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_FUNC_VIS -# endif +#ifndef _LIBCUDACXX_OVERRIDABLE_FUNC_VIS +#define _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_FUNC_VIS +#endif -# ifndef _LIBCUDACXX_EXCEPTION_ABI -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_EXCEPTION_ABI _CCCL_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_EXCEPTION_ABI -# endif +#ifndef _LIBCUDACXX_EXCEPTION_ABI +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_EXCEPTION_ABI _CCCL_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_EXCEPTION_ABI # endif +#endif -# ifndef _LIBCUDACXX_ENUM_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) -# define _LIBCUDACXX_ENUM_VIS _CCCL_TYPE_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_ENUM_VIS -# endif +#ifndef _LIBCUDACXX_ENUM_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) +# define _LIBCUDACXX_ENUM_VIS _CCCL_TYPE_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_ENUM_VIS # endif +#endif -# ifndef _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS -# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) && __has_attribute(__type_visibility__) -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _CCCL_VISIBILITY_DEFAULT -# else -# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS -# endif +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS +# if !defined(_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS) && __has_attribute(__type_visibility__) +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS _CCCL_VISIBILITY_DEFAULT +# else +# define _LIBCUDACXX_EXTERN_TEMPLATE_TYPE_VIS # endif +#endif -# ifndef _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS -# define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS -# endif +#ifndef _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +#define _LIBCUDACXX_CLASS_TEMPLATE_INSTANTIATION_VIS +#endif -# if __has_attribute(internal_linkage) -# define _LIBCUDACXX_INTERNAL_LINKAGE __attribute__((internal_linkage)) -# else -# define _LIBCUDACXX_INTERNAL_LINKAGE _LIBCUDACXX_ALWAYS_INLINE -# endif +#if __has_attribute(internal_linkage) +# define _LIBCUDACXX_INTERNAL_LINKAGE __attribute__ ((internal_linkage)) +#else +# define _LIBCUDACXX_INTERNAL_LINKAGE _LIBCUDACXX_ALWAYS_INLINE +#endif -# if __has_attribute(exclude_from_explicit_instantiation) -# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION __attribute__((__exclude_from_explicit_instantiation__)) -# else -// Try to approximate the effect of exclude_from_explicit_instantiation -// (which is that entities are not assumed to be provided by explicit -// template instantiations in the dylib) by always inlining those entities. -# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCUDACXX_ALWAYS_INLINE -# endif +#if __has_attribute(exclude_from_explicit_instantiation) +# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION __attribute__ ((__exclude_from_explicit_instantiation__)) +#else + // Try to approximate the effect of exclude_from_explicit_instantiation + // (which is that entities are not assumed to be provided by explicit + // template instantiations in the dylib) by always inlining those entities. 
+# define _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCUDACXX_ALWAYS_INLINE +#endif -# ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU -# ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT -# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 0 -# else -# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 1 -# endif +#ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU +# ifndef _LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT +# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 0 +# else +# define _LIBCUDACXX_HIDE_FROM_ABI_PER_TU 1 # endif +#endif -# ifndef _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT -# ifdef _LIBCUDACXX_OBJECT_FORMAT_COFF // Windows binaries can't merge typeinfos. -# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 0 -# else +#ifndef _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT +# ifdef _LIBCUDACXX_OBJECT_FORMAT_COFF // Windows binaries can't merge typeinfos. +# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 0 +#else // TODO: This isn't strictly correct on ELF platforms due to llvm.org/PR37398 // And we should consider defaulting to OFF. -# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 1 -# endif -# endif +# define _LIBCUDACXX_HAS_MERGED_TYPEINFO_NAMES_DEFAULT 1 +#endif +#endif -# ifndef _LIBCUDACXX_HIDE_FROM_ABI -# if _LIBCUDACXX_HIDE_FROM_ABI_PER_TU -# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_INTERNAL_LINKAGE -# else -# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION -# endif +#ifndef _LIBCUDACXX_HIDE_FROM_ABI +# if _LIBCUDACXX_HIDE_FROM_ABI_PER_TU +# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_INTERNAL_LINKAGE +# else +# define _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_HIDDEN _LIBCUDACXX_EXCLUDE_FROM_EXPLICIT_INSTANTIATION # endif +#endif -# ifdef _LIBCUDACXX_BUILDING_LIBRARY -# if _LIBCUDACXX_ABI_VERSION > 1 -# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 _LIBCUDACXX_HIDE_FROM_ABI -# else -# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 -# endif -# else +#ifdef _LIBCUDACXX_BUILDING_LIBRARY +# if _LIBCUDACXX_ABI_VERSION > 1 # define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 _LIBCUDACXX_HIDE_FROM_ABI +# else +# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 # endif +#else +# define _LIBCUDACXX_HIDE_FROM_ABI_AFTER_V1 _LIBCUDACXX_HIDE_FROM_ABI +#endif // Just so we can migrate to the new macros gradually. 
-# ifdef __cuda_std__ -# define _LIBCUDACXX_INLINE_VISIBILITY _CCCL_HOST_DEVICE -# else -# define _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_HIDE_FROM_ABI -# endif // __cuda_std__ +#ifdef __cuda_std__ +# define _LIBCUDACXX_INLINE_VISIBILITY _CCCL_HOST_DEVICE +#else +# define _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_HIDE_FROM_ABI +#endif // __cuda_std__ + +#define _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X,_LIBCUDACXX_Y) _LIBCUDACXX_X##_LIBCUDACXX_Y +#define _LIBCUDACXX_CONCAT(_LIBCUDACXX_X,_LIBCUDACXX_Y) _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X,_LIBCUDACXX_Y) + +#ifndef _LIBCUDACXX_ABI_NAMESPACE +#ifdef __cuda_std__ +# define _LIBCUDACXX_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__,_LIBCUDACXX_CUDA_ABI_VERSION) +#else +# define _LIBCUDACXX_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__,_LIBCUDACXX_ABI_VERSION) +#endif // __cuda_std__ +#endif // _LIBCUDACXX_ABI_NAMESPACE + +#ifdef __cuda_std__ +# define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace cuda { namespace std { +# define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION } } +# define _CUDA_VSTD_NOVERSION ::cuda::std +# define _CUDA_VSTD ::cuda::std::_LIBCUDACXX_ABI_NAMESPACE +# define _CUDA_VRANGES ::cuda::std::ranges::_LIBCUDACXX_ABI_NAMESPACE +# define _CUDA_VIEWS ::cuda::std::ranges::views::_LIBCUDACXX_CUDA_ABI_NAMESPACE +# define _CUDA_VMR ::cuda::mr::_LIBCUDACXX_ABI_NAMESPACE +# define _CUDA_VPTX ::cuda::ptx::_LIBCUDACXX_ABI_NAMESPACE +#else +# define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace std { +# define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION } +# define _CUDA_VSTD_NOVERSION ::std +# define _CUDA_VSTD ::std::_LIBCUDACXX_ABI_NAMESPACE +# define _CUDA_VRANGES ::std::ranges::_LIBCUDACXX_ABI_NAMESPACE +# define _CUDA_VIEWS ::std::ranges::views::_LIBCUDACXX_CUDA_ABI_NAMESPACE +#endif -# define _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X, _LIBCUDACXX_Y) _LIBCUDACXX_X##_LIBCUDACXX_Y -# define _LIBCUDACXX_CONCAT(_LIBCUDACXX_X, _LIBCUDACXX_Y) _LIBCUDACXX_CONCAT1(_LIBCUDACXX_X, _LIBCUDACXX_Y) +#ifdef __cuda_std__ +#define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA namespace cuda { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_CUDA } } +#define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_MR namespace cuda { namespace mr { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_CUDA_MR } } } +#define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE namespace cuda { namespace device { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE } } } +#define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_PTX namespace cuda { namespace ptx { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_CUDA_PTX } } } +#define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL namespace cuda { namespace device { namespace experimental { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL } } } } +#endif -# ifndef _LIBCUDACXX_ABI_NAMESPACE -# ifdef __cuda_std__ -# define _LIBCUDACXX_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__, _LIBCUDACXX_CUDA_ABI_VERSION) -# else -# define _LIBCUDACXX_ABI_NAMESPACE _LIBCUDACXX_CONCAT(__, _LIBCUDACXX_ABI_VERSION) -# endif // __cuda_std__ -# endif // _LIBCUDACXX_ABI_NAMESPACE +// Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect. 
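// Usage sketch (illustrative; __example_abs is a hypothetical name, and this
// assumes __cuda_std__, where _LIBCUDACXX_BEGIN_NAMESPACE_CUDA is defined
// above): code wrapped in the namespace macros lands in an inline ABI
// namespace, so callers can spell it without the version,
// e.g. ::cuda::__example_abs(-1).
_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
_LIBCUDACXX_INLINE_VISIBILITY constexpr int __example_abs(int __x)
{
  return __x < 0 ? -__x : __x;
}
_LIBCUDACXX_END_NAMESPACE_CUDA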
+#define _LIBCUDACXX_BEGIN_NAMESPACE_STD _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_STD } _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION -# ifdef __cuda_std__ -# define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION \ - namespace cuda \ - { \ - namespace std \ - { -# define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION \ - } \ - } -# define _CUDA_VSTD_NOVERSION ::cuda::std -# define _CUDA_VSTD ::cuda::std::_LIBCUDACXX_ABI_NAMESPACE -# define _CUDA_VRANGES ::cuda::std::ranges::_LIBCUDACXX_ABI_NAMESPACE -# define _CUDA_VIEWS ::cuda::std::ranges::views::_LIBCUDACXX_CUDA_ABI_NAMESPACE -# define _CUDA_VMR ::cuda::mr::_LIBCUDACXX_ABI_NAMESPACE -# define _CUDA_VPTX ::cuda::ptx::_LIBCUDACXX_ABI_NAMESPACE -# else -# define _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION \ - namespace std \ - { -# define _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION } -# define _CUDA_VSTD_NOVERSION ::std -# define _CUDA_VSTD ::std::_LIBCUDACXX_ABI_NAMESPACE -# define _CUDA_VRANGES ::std::ranges::_LIBCUDACXX_ABI_NAMESPACE -# define _CUDA_VIEWS ::std::ranges::views::_LIBCUDACXX_CUDA_ABI_NAMESPACE -# endif +#ifndef __cuda_std__ +_LIBCUDACXX_BEGIN_NAMESPACE_STD _LIBCUDACXX_END_NAMESPACE_STD +#endif -# ifdef __cuda_std__ -# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA \ - namespace cuda \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_CUDA \ - } \ - } -# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_MR \ - namespace cuda \ - { \ - namespace mr \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_CUDA_MR \ - } \ - } \ - } -# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE \ - namespace cuda \ - { \ - namespace device \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE \ - } \ - } \ - } -# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_PTX \ - namespace cuda \ - { \ - namespace ptx \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_CUDA_PTX \ - } \ - } \ - } -# define _LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL \ - namespace cuda \ - { \ - namespace device \ - { \ - namespace experimental \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL \ - } \ - } \ - } \ - } -# endif +#define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace ranges { inline namespace _LIBCUDACXX_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_RANGES } } _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION -// Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect. 
-# define _LIBCUDACXX_BEGIN_NAMESPACE_STD \ - _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_STD \ - } \ - _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION - -# ifndef __cuda_std__ -_LIBCUDACXX_BEGIN_NAMESPACE_STD -_LIBCUDACXX_END_NAMESPACE_STD -# endif +#if !defined(__cuda_std__) +_LIBCUDACXX_BEGIN_NAMESPACE_RANGES _LIBCUDACXX_END_NAMESPACE_RANGES +#endif -# define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES \ - _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION \ - namespace ranges \ - { \ - inline namespace _LIBCUDACXX_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_RANGES \ - } \ - } \ - _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION - -# if !defined(__cuda_std__) -_LIBCUDACXX_BEGIN_NAMESPACE_RANGES -_LIBCUDACXX_END_NAMESPACE_RANGES -# endif +#define _LIBCUDACXX_BEGIN_NAMESPACE_VIEWS _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION namespace ranges { namespace views { inline namespace _LIBCUDACXX_CUDA_ABI_NAMESPACE { +#define _LIBCUDACXX_END_NAMESPACE_VIEWS } } } _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION +#if !defined(__cuda_std__) +_LIBCUDACXX_BEGIN_NAMESPACE_VIEWS _LIBCUDACXX_END_NAMESPACE_VIEWS +#endif -# define _LIBCUDACXX_BEGIN_NAMESPACE_VIEWS \ - _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION \ - namespace ranges \ - { \ - namespace views \ - { \ - inline namespace _LIBCUDACXX_CUDA_ABI_NAMESPACE \ - { -# define _LIBCUDACXX_END_NAMESPACE_VIEWS \ - } \ - } \ - } \ - _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION -# if !defined(__cuda_std__) -_LIBCUDACXX_BEGIN_NAMESPACE_VIEWS -_LIBCUDACXX_END_NAMESPACE_VIEWS -# endif +#if _CCCL_STD_VER > 2017 +#define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI inline namespace __cxx20 { +#else +#define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI inline namespace __cxx17 { +#endif +#define _LIBCUDACXX_END_NAMESPACE_RANGES_ABI } -# if _CCCL_STD_VER > 2017 -# define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI \ - inline namespace __cxx20 \ - { -# else -# define _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI \ - inline namespace __cxx17 \ - { -# endif -# define _LIBCUDACXX_END_NAMESPACE_RANGES_ABI } - -# define _LIBCUDACXX_BEGIN_NAMESPACE_CPO(_CPO) \ - namespace _CPO \ - { \ - _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI -# define _LIBCUDACXX_END_NAMESPACE_CPO \ - } \ - } - -# if _CCCL_STD_VER >= 2017 -# define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ - _LIBCUDACXX_BEGIN_NAMESPACE_STD \ - inline namespace __fs \ - { \ - namespace filesystem \ - { -# else -# define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ - _LIBCUDACXX_BEGIN_NAMESPACE_STD \ - namespace __fs \ - { \ - namespace filesystem \ - { -# endif +#define _LIBCUDACXX_BEGIN_NAMESPACE_CPO(_CPO) namespace _CPO { _LIBCUDACXX_BEGIN_NAMESPACE_RANGES_ABI +#define _LIBCUDACXX_END_NAMESPACE_CPO } } -# define _LIBCUDACXX_END_NAMESPACE_FILESYSTEM \ - _LIBCUDACXX_END_NAMESPACE_STD \ - } \ - } +#if _CCCL_STD_VER >= 2017 +#define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_BEGIN_NAMESPACE_STD inline namespace __fs { namespace filesystem { +#else +#define _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_BEGIN_NAMESPACE_STD namespace __fs { namespace filesystem { +#endif -# define _CUDA_VSTD_FS _CUDA_VSTD::__fs::filesystem +#define _LIBCUDACXX_END_NAMESPACE_FILESYSTEM \ + _LIBCUDACXX_END_NAMESPACE_STD } } -# ifndef _LIBCUDACXX_PREFERRED_OVERLOAD -# if __has_attribute(__enable_if__) -# define _LIBCUDACXX_PREFERRED_OVERLOAD __attribute__((__enable_if__(true, ""))) -# endif +#define _CUDA_VSTD_FS _CUDA_VSTD::__fs::filesystem + +#ifndef _LIBCUDACXX_PREFERRED_OVERLOAD +# if 
__has_attribute(__enable_if__) +# define _LIBCUDACXX_PREFERRED_OVERLOAD __attribute__ ((__enable_if__(true, ""))) # endif +#endif -# ifdef _LIBCUDACXX_HAS_NO_UNICODE_CHARS +#ifdef _LIBCUDACXX_HAS_NO_UNICODE_CHARS typedef unsigned short char16_t; -typedef unsigned int char32_t; -# endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS - -# if defined(_CCCL_COMPILER_GCC) || defined(_CCCL_COMPILER_CLANG) -# define _LIBCUDACXX_NOALIAS __attribute__((__malloc__)) -# else -# define _LIBCUDACXX_NOALIAS -# endif +typedef unsigned int char32_t; +#endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS + +#if defined(_CCCL_COMPILER_GCC) \ + || defined(_CCCL_COMPILER_CLANG) +# define _LIBCUDACXX_NOALIAS __attribute__((__malloc__)) +#else +# define _LIBCUDACXX_NOALIAS +#endif -# if __has_feature(cxx_explicit_conversions) || defined(_CCCL_COMPILER_IBM) || defined(_CCCL_COMPILER_GCC) \ - || defined(_CCCL_COMPILER_CLANG) -# define _LIBCUDACXX_EXPLICIT explicit -# else -# define _LIBCUDACXX_EXPLICIT -# endif +#if __has_feature(cxx_explicit_conversions) \ + || defined(_CCCL_COMPILER_IBM) \ + || defined(_CCCL_COMPILER_GCC) \ + || defined(_CCCL_COMPILER_CLANG) +# define _LIBCUDACXX_EXPLICIT explicit +#else +# define _LIBCUDACXX_EXPLICIT +#endif -# if !__has_builtin(__builtin_operator_new) || !__has_builtin(__builtin_operator_delete) -# define _LIBCUDACXX_HAS_NO_BUILTIN_OPERATOR_NEW_DELETE -# endif +#if !__has_builtin(__builtin_operator_new) || !__has_builtin(__builtin_operator_delete) +#define _LIBCUDACXX_HAS_NO_BUILTIN_OPERATOR_NEW_DELETE +#endif -# ifdef _LIBCUDACXX_HAS_NO_STRONG_ENUMS -# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) \ - struct _LIBCUDACXX_TYPE_VIS x \ - { \ - enum __lx -# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) \ - __lx __v_; \ - _LIBCUDACXX_INLINE_VISIBILITY x(__lx __v) \ - : __v_(__v) \ - {} \ - _LIBCUDACXX_INLINE_VISIBILITY explicit x(int __v) \ - : __v_(static_cast<__lx>(__v)) \ - {} \ - _LIBCUDACXX_INLINE_VISIBILITY operator int() const \ - { \ - return __v_; \ - } \ - } \ - ; -# else // _LIBCUDACXX_HAS_NO_STRONG_ENUMS -# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) enum class _LIBCUDACXX_ENUM_VIS x -# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) -# endif // _LIBCUDACXX_HAS_NO_STRONG_ENUMS - -# ifdef _LIBCUDACXX_DEBUG -# if _LIBCUDACXX_DEBUG == 0 -# define _LIBCUDACXX_DEBUG_LEVEL 1 -# elif _LIBCUDACXX_DEBUG == 1 -# define _LIBCUDACXX_DEBUG_LEVEL 2 -# else -# error Supported values for _LIBCUDACXX_DEBUG are 0 and 1 -# endif -# if !defined(_LIBCUDACXX_BUILDING_LIBRARY) -# define _LIBCUDACXX_EXTERN_TEMPLATE(...) -# endif +#ifdef _LIBCUDACXX_HAS_NO_STRONG_ENUMS +# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) struct _LIBCUDACXX_TYPE_VIS x { enum __lx +# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) \ + __lx __v_; \ + _LIBCUDACXX_INLINE_VISIBILITY x(__lx __v) : __v_(__v) {} \ + _LIBCUDACXX_INLINE_VISIBILITY explicit x(int __v) : __v_(static_cast<__lx>(__v)) {} \ + _LIBCUDACXX_INLINE_VISIBILITY operator int() const {return __v_;} \ + }; +#else // _LIBCUDACXX_HAS_NO_STRONG_ENUMS +# define _LIBCUDACXX_DECLARE_STRONG_ENUM(x) enum class _LIBCUDACXX_ENUM_VIS x +# define _LIBCUDACXX_DECLARE_STRONG_ENUM_EPILOG(x) +#endif // _LIBCUDACXX_HAS_NO_STRONG_ENUMS + +#ifdef _LIBCUDACXX_DEBUG +# if _LIBCUDACXX_DEBUG == 0 +# define _LIBCUDACXX_DEBUG_LEVEL 1 +# elif _LIBCUDACXX_DEBUG == 1 +# define _LIBCUDACXX_DEBUG_LEVEL 2 +# else +# error Supported values for _LIBCUDACXX_DEBUG are 0 and 1 # endif - -# ifdef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +# if !defined(_LIBCUDACXX_BUILDING_LIBRARY) # define _LIBCUDACXX_EXTERN_TEMPLATE(...) 
-# define _LIBCUDACXX_EXTERN_TEMPLATE2(...) # endif +#endif -# ifndef _LIBCUDACXX_EXTERN_TEMPLATE -# define _LIBCUDACXX_EXTERN_TEMPLATE(...) extern template __VA_ARGS__; -# endif +#ifdef _LIBCUDACXX_DISABLE_EXTERN_TEMPLATE +#define _LIBCUDACXX_EXTERN_TEMPLATE(...) +#define _LIBCUDACXX_EXTERN_TEMPLATE2(...) +#endif -# ifndef _LIBCUDACXX_EXTERN_TEMPLATE2 -# define _LIBCUDACXX_EXTERN_TEMPLATE2(...) extern template __VA_ARGS__; -# endif +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE +#define _LIBCUDACXX_EXTERN_TEMPLATE(...) extern template __VA_ARGS__; +#endif -# if defined(__APPLE__) || defined(__FreeBSD__) || defined(_LIBCUDACXX_MSVCRT_LIKE) || defined(__sun__) \ - || defined(__NetBSD__) || defined(__CloudABI__) -# define _LIBCUDACXX_LOCALE__L_EXTENSIONS 1 -# endif +#ifndef _LIBCUDACXX_EXTERN_TEMPLATE2 +#define _LIBCUDACXX_EXTERN_TEMPLATE2(...) extern template __VA_ARGS__; +#endif + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(_LIBCUDACXX_MSVCRT_LIKE) || \ + defined(__sun__) || defined(__NetBSD__) || defined(__CloudABI__) +#define _LIBCUDACXX_LOCALE__L_EXTENSIONS 1 +#endif -# if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) // Most unix variants have catopen. These are the specific ones that don't. -# if !defined(__BIONIC__) && !defined(_NEWLIB_VERSION) -# define _LIBCUDACXX_HAS_CATOPEN 1 -# endif +# if !defined(__BIONIC__) && !defined(_NEWLIB_VERSION) +# define _LIBCUDACXX_HAS_CATOPEN 1 # endif +#endif -# ifdef __FreeBSD__ -# define _DECLARE_C99_LDBL_MATH 1 -# endif +#ifdef __FreeBSD__ +#define _DECLARE_C99_LDBL_MATH 1 +#endif -# if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME) -# define _LIBCUDACXX_DEFER_NEW_TO_VCRUNTIME -# endif +#if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_NO_VCRUNTIME) +# define _LIBCUDACXX_DEFER_NEW_TO_VCRUNTIME +#endif // If we are getting operator new from the MSVC CRT, then allocation overloads // for align_val_t were added in 19.12, aka VS 2017 version 15.3. -# if defined(_LIBCUDACXX_MSVCRT) && defined(_CCCL_COMPILER_MSVC) && _MSC_VER < 1912 -# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION -# elif defined(_LIBCUDACXX_ABI_VCRUNTIME) && !defined(__cpp_aligned_new) -// We're deferring to Microsoft's STL to provide aligned new et al. We don't -// have it unless the language feature test macro is defined. -# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION -# endif +#if defined(_LIBCUDACXX_MSVCRT) && defined(_CCCL_COMPILER_MSVC) && _MSC_VER < 1912 +# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION +#elif defined(_LIBCUDACXX_ABI_VCRUNTIME) && !defined(__cpp_aligned_new) + // We're deferring to Microsoft's STL to provide aligned new et al. We don't + // have it unless the language feature test macro is defined. 
+# define _LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION +#endif -# if defined(__APPLE__) -# if !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) -# define __MAC_OS_X_VERSION_MIN_REQUIRED __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ -# endif -# endif // defined(__APPLE__) +#if defined(__APPLE__) +# if !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ + defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) +# define __MAC_OS_X_VERSION_MIN_REQUIRED __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ +# endif +#endif // defined(__APPLE__) # if !defined(_LIBCUDACXX_HAS_NO_ALIGNED_ALLOCATION) \ && (defined(_LIBCUDACXX_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) \ @@ -1590,138 +1574,151 @@ typedef unsigned int char32_t; // Deprecations warnings are always enabled, except when users explicitly opt-out // by defining _LIBCUDACXX_DISABLE_DEPRECATION_WARNINGS. // NVCC 11.1 and 11.2 are broken with the deprecated attribute, so disable it -# if !defined(_LIBCUDACXX_DISABLE_DEPRECATION_WARNINGS) && !defined(_CCCL_CUDACC_BELOW_11_3) -# if __has_attribute(deprecated) -# define _LIBCUDACXX_DEPRECATED __attribute__((deprecated)) -# elif _CCCL_STD_VER > 2011 -# define _LIBCUDACXX_DEPRECATED [[deprecated]] -# else -# define _LIBCUDACXX_DEPRECATED -# endif +#if !defined(_LIBCUDACXX_DISABLE_DEPRECATION_WARNINGS) \ + && !defined(_CCCL_CUDACC_BELOW_11_3) +# if __has_attribute(deprecated) +# define _LIBCUDACXX_DEPRECATED __attribute__ ((deprecated)) +# elif _CCCL_STD_VER > 2011 +# define _LIBCUDACXX_DEPRECATED [[deprecated]] # else # define _LIBCUDACXX_DEPRECATED # endif +#else +# define _LIBCUDACXX_DEPRECATED +#endif -# define _LIBCUDACXX_DEPRECATED_IN_CXX11 _LIBCUDACXX_DEPRECATED +#define _LIBCUDACXX_DEPRECATED_IN_CXX11 _LIBCUDACXX_DEPRECATED -# if _CCCL_STD_VER >= 2014 -# define _LIBCUDACXX_DEPRECATED_IN_CXX14 _LIBCUDACXX_DEPRECATED -# else -# define _LIBCUDACXX_DEPRECATED_IN_CXX14 -# endif +#if _CCCL_STD_VER >= 2014 +# define _LIBCUDACXX_DEPRECATED_IN_CXX14 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX14 +#endif -# if _CCCL_STD_VER >= 2017 -# define _LIBCUDACXX_DEPRECATED_IN_CXX17 _LIBCUDACXX_DEPRECATED -# else -# define _LIBCUDACXX_DEPRECATED_IN_CXX17 -# endif +#if _CCCL_STD_VER >= 2017 +# define _LIBCUDACXX_DEPRECATED_IN_CXX17 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX17 +#endif -# if _CCCL_STD_VER >= 2020 -# define _LIBCUDACXX_DEPRECATED_IN_CXX20 _LIBCUDACXX_DEPRECATED -# else -# define _LIBCUDACXX_DEPRECATED_IN_CXX20 -# endif +#if _CCCL_STD_VER >= 2020 +# define _LIBCUDACXX_DEPRECATED_IN_CXX20 _LIBCUDACXX_DEPRECATED +#else +# define _LIBCUDACXX_DEPRECATED_IN_CXX20 +#endif -# if _CCCL_STD_VER <= 2011 -# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 -# else -# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 explicit -# endif +#if _CCCL_STD_VER <= 2011 +# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 +#else +# define _LIBCUDACXX_EXPLICIT_AFTER_CXX11 explicit +#endif -# if _CCCL_STD_VER > 2014 && defined(__cpp_inline_variables) && (__cpp_inline_variables >= 201606L) -# define _LIBCUDACXX_INLINE_VAR inline -# else -# define _LIBCUDACXX_INLINE_VAR -# endif +#if _CCCL_STD_VER > 2014 && defined(__cpp_inline_variables) && (__cpp_inline_variables >= 201606L) +# define _LIBCUDACXX_INLINE_VAR inline +#else +# define _LIBCUDACXX_INLINE_VAR +#endif -# ifdef _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES -# define _LIBCUDACXX_EXPLICIT_MOVE(x) _CUDA_VSTD::move(x) -# else -# define _LIBCUDACXX_EXPLICIT_MOVE(x) (x) -# endif +#ifdef _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES 
+# define _LIBCUDACXX_EXPLICIT_MOVE(x) _CUDA_VSTD::move(x) +#else +# define _LIBCUDACXX_EXPLICIT_MOVE(x) (x) +#endif -# if __has_attribute(no_destroy) -# define _LIBCUDACXX_NO_DESTROY __attribute__((__no_destroy__)) -# else -# define _LIBCUDACXX_NO_DESTROY -# endif +#if __has_attribute(no_destroy) +# define _LIBCUDACXX_NO_DESTROY __attribute__((__no_destroy__)) +#else +# define _LIBCUDACXX_NO_DESTROY +#endif -# ifndef _LIBCUDACXX_HAS_NO_ASAN -extern "C" _LIBCUDACXX_FUNC_VIS void -__sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); -# endif +#ifndef _LIBCUDACXX_HAS_NO_ASAN +extern "C" _LIBCUDACXX_FUNC_VIS void __sanitizer_annotate_contiguous_container( + const void *, const void *, const void *, const void *); +#endif -# ifndef _LIBCUDACXX_WEAK -# define _LIBCUDACXX_WEAK __attribute__((__weak__)) -# endif +#ifndef _LIBCUDACXX_WEAK +#define _LIBCUDACXX_WEAK __attribute__((__weak__)) +#endif // Redefine some macros for internal use -# if defined(__cuda_std__) -# undef _LIBCUDACXX_FUNC_VIS -# define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_INLINE_VISIBILITY -# undef _LIBCUDACXX_TYPE_VIS -# define _LIBCUDACXX_TYPE_VIS -# endif // __cuda_std__ +#if defined(__cuda_std__) +# undef _LIBCUDACXX_FUNC_VIS +# define _LIBCUDACXX_FUNC_VIS _LIBCUDACXX_INLINE_VISIBILITY +# undef _LIBCUDACXX_TYPE_VIS +# define _LIBCUDACXX_TYPE_VIS +#endif // __cuda_std__ // Thread API -# ifndef _LIBCUDACXX_HAS_THREAD_API_EXTERNAL -# if defined(_CCCL_COMPILER_NVRTC) || defined(__EMSCRIPTEN__) -# define _LIBCUDACXX_HAS_THREAD_API_EXTERNAL -# endif -# endif // _LIBCUDACXX_HAS_THREAD_API_EXTERNAL - -# ifndef _LIBCUDACXX_HAS_THREAD_API_CUDA -# if defined(__cuda_std__) && (defined(__CUDA_ARCH__) || defined(__EMSCRIPTEN__)) -# define _LIBCUDACXX_HAS_THREAD_API_CUDA -# endif // __cuda_std__ -# endif // _LIBCUDACXX_HAS_THREAD_API_CUDA - -# ifndef _LIBCUDACXX_HAS_THREAD_API_WIN32 -# if defined(_CCCL_COMPILER_MSVC) && !defined(_LIBCUDACXX_HAS_THREAD_API_CUDA) -# define _LIBCUDACXX_HAS_THREAD_API_WIN32 -# endif -# endif // _LIBCUDACXX_HAS_THREAD_API_WIN32 - -# if !defined(_LIBCUDACXX_HAS_NO_THREADS) && !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) \ - && !defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) && !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) -# if defined(__FreeBSD__) || defined(__Fuchsia__) || defined(__wasi__) || defined(__NetBSD__) || defined(__linux__) \ - || defined(__GNU__) || defined(__APPLE__) || defined(__CloudABI__) || defined(__sun__) \ - || (defined(__MINGW32__) && __has_include()) -# define _LIBCUDACXX_HAS_THREAD_API_PTHREAD -# elif defined(_LIBCUDACXX_WIN32API) -# define _LIBCUDACXX_HAS_THREAD_API_WIN32 -# else -# define _LIBCUDACXX_UNSUPPORTED_THREAD_API -# endif // _LIBCUDACXX_HAS_THREAD_API -# endif // _LIBCUDACXX_HAS_NO_THREADS - -# if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) -# if defined(__ANDROID__) && __ANDROID_API__ >= 30 -# define _LIBCUDACXX_HAS_COND_CLOCKWAIT -# elif defined(_LIBCUDACXX_GLIBC_PREREQ) -# if _LIBCUDACXX_GLIBC_PREREQ(2, 30) -# define _LIBCUDACXX_HAS_COND_CLOCKWAIT -# endif -# endif -# endif +#ifndef _LIBCUDACXX_HAS_THREAD_API_EXTERNAL +#if defined(_CCCL_COMPILER_NVRTC) \ + || defined(__EMSCRIPTEN__) +# define _LIBCUDACXX_HAS_THREAD_API_EXTERNAL +#endif +#endif // _LIBCUDACXX_HAS_THREAD_API_EXTERNAL + +#ifndef _LIBCUDACXX_HAS_THREAD_API_CUDA +#if defined(__cuda_std__) \ + && (defined(__CUDA_ARCH__) || defined(__EMSCRIPTEN__)) +# define _LIBCUDACXX_HAS_THREAD_API_CUDA +#endif // __cuda_std__ +#endif // _LIBCUDACXX_HAS_THREAD_API_CUDA + +#ifndef 
_LIBCUDACXX_HAS_THREAD_API_WIN32 +#if defined(_CCCL_COMPILER_MSVC) \ + && !defined(_LIBCUDACXX_HAS_THREAD_API_CUDA) +# define _LIBCUDACXX_HAS_THREAD_API_WIN32 +#endif +#endif // _LIBCUDACXX_HAS_THREAD_API_WIN32 + +#if !defined(_LIBCUDACXX_HAS_NO_THREADS) \ + && !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) \ + && !defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) \ + && !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +# if defined(__FreeBSD__) || \ + defined(__Fuchsia__) || \ + defined(__wasi__) || \ + defined(__NetBSD__) || \ + defined(__linux__) || \ + defined(__GNU__) || \ + defined(__APPLE__) || \ + defined(__CloudABI__) || \ + defined(__sun__) || \ + (defined(__MINGW32__) && __has_include()) +# define _LIBCUDACXX_HAS_THREAD_API_PTHREAD +# elif defined(_LIBCUDACXX_WIN32API) +# define _LIBCUDACXX_HAS_THREAD_API_WIN32 +# else +# define _LIBCUDACXX_UNSUPPORTED_THREAD_API +# endif // _LIBCUDACXX_HAS_THREAD_API +#endif // _LIBCUDACXX_HAS_NO_THREADS + +#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +#if defined(__ANDROID__) && __ANDROID_API__ >= 30 +#define _LIBCUDACXX_HAS_COND_CLOCKWAIT +#elif defined(_LIBCUDACXX_GLIBC_PREREQ) +#if _LIBCUDACXX_GLIBC_PREREQ(2, 30) +#define _LIBCUDACXX_HAS_COND_CLOCKWAIT +#endif +#endif +#endif -# if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) -# error _LIBCUDACXX_HAS_THREAD_API_PTHREAD may only be defined when \ +#if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +#error _LIBCUDACXX_HAS_THREAD_API_PTHREAD may only be defined when \ _LIBCUDACXX_HAS_NO_THREADS is not defined. -# endif +#endif -# if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) -# error _LIBCUDACXX_HAS_THREAD_API_EXTERNAL may not be defined when \ +#if defined(_LIBCUDACXX_HAS_NO_THREADS) && defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +#error _LIBCUDACXX_HAS_THREAD_API_EXTERNAL may not be defined when \ _LIBCUDACXX_HAS_NO_THREADS is defined. -# endif +#endif -# if defined(__STDCPP_THREADS__) && defined(_LIBCUDACXX_HAS_NO_THREADS) -# error _LIBCUDACXX_HAS_NO_THREADS cannot be set when __STDCPP_THREADS__ is set. -# endif +#if defined(__STDCPP_THREADS__) && defined(_LIBCUDACXX_HAS_NO_THREADS) +#error _LIBCUDACXX_HAS_NO_THREADS cannot be set when __STDCPP_THREADS__ is set. +#endif -# if !defined(_LIBCUDACXX_HAS_NO_THREADS) && !defined(__STDCPP_THREADS__) -# define __STDCPP_THREADS__ 1 -# endif +#if !defined(_LIBCUDACXX_HAS_NO_THREADS) && !defined(__STDCPP_THREADS__) +#define __STDCPP_THREADS__ 1 +#endif // The glibc and Bionic implementation of pthreads implements // pthread_mutex_destroy as nop for regular mutexes. Additionally, Win32 @@ -1733,9 +1730,10 @@ __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, // // TODO(EricWF): Enable this optimization on Bionic after speaking to their // respective stakeholders. -# if (defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && defined(__GLIBC__)) || defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) -# define _LIBCUDACXX_HAS_TRIVIAL_MUTEX_DESTRUCTION -# endif +#if (defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && defined(__GLIBC__)) \ + || defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# define _LIBCUDACXX_HAS_TRIVIAL_MUTEX_DESTRUCTION +#endif // Destroying a condvar is a nop on Windows. // @@ -1745,121 +1743,123 @@ __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, // // TODO(EricWF): This is potentially true for some pthread implementations // as well. 
-# if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) -# define _LIBCUDACXX_HAS_TRIVIAL_CONDVAR_DESTRUCTION -# endif +#if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# define _LIBCUDACXX_HAS_TRIVIAL_CONDVAR_DESTRUCTION +#endif // Systems that use capability-based security (FreeBSD with Capsicum, // Nuxi CloudABI) may only provide local filesystem access (using *at()). // Functions like open(), rename(), unlink() and stat() should not be // used, as they attempt to access the global filesystem namespace. -# ifdef __CloudABI__ -# define _LIBCUDACXX_HAS_NO_GLOBAL_FILESYSTEM_NAMESPACE -# endif +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_GLOBAL_FILESYSTEM_NAMESPACE +#endif // CloudABI is intended for running networked services. Processes do not // have standard input and output channels. -# ifdef __CloudABI__ -# define _LIBCUDACXX_HAS_NO_STDIN -# define _LIBCUDACXX_HAS_NO_STDOUT -# endif +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_STDIN +#define _LIBCUDACXX_HAS_NO_STDOUT +#endif // Some systems do not provide gets() in their C library, for security reasons. -# ifndef _LIBCUDACXX_C_HAS_NO_GETS -# if defined(_LIBCUDACXX_MSVCRT) || (defined(__FreeBSD__) && __FreeBSD__ >= 13) -# define _LIBCUDACXX_C_HAS_NO_GETS -# endif +#ifndef _LIBCUDACXX_C_HAS_NO_GETS +# if defined(_LIBCUDACXX_MSVCRT) || (defined(__FreeBSD__) && __FreeBSD__ >= 13) +# define _LIBCUDACXX_C_HAS_NO_GETS # endif +#endif -# if defined(__BIONIC__) || defined(__CloudABI__) || defined(__Fuchsia__) || defined(__wasi__) \ - || defined(_LIBCUDACXX_HAS_MUSL_LIBC) -# define _LIBCUDACXX_PROVIDES_DEFAULT_RUNE_TABLE -# endif +#if defined(__BIONIC__) || defined(__CloudABI__) || \ + defined(__Fuchsia__) || defined(__wasi__) || defined(_LIBCUDACXX_HAS_MUSL_LIBC) +#define _LIBCUDACXX_PROVIDES_DEFAULT_RUNE_TABLE +#endif // Thread-unsafe functions such as strtok() and localtime() // are not available. -# ifdef __CloudABI__ -# define _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS -# endif +#ifdef __CloudABI__ +#define _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS +#endif // TODO: Support C11 Atomics? 
// #if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic) // # define _LIBCUDACXX_HAS_C_ATOMIC_IMP -# if defined(_CCCL_COMPILER_ICC) -# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP -# elif defined(_CCCL_COMPILER_CLANG) -# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP -# elif defined(_CCCL_COMPILER_GCC) -# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP -# elif defined(_CCCL_COMPILER_NVHPC) -# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP -# elif defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_HAS_MSVC_ATOMIC_IMPL -# endif +#if defined(_CCCL_COMPILER_ICC) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_CCCL_COMPILER_CLANG) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_CCCL_COMPILER_GCC) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_CCCL_COMPILER_NVHPC) +# define _LIBCUDACXX_HAS_GCC_ATOMIC_IMP +#elif defined(_CCCL_COMPILER_MSVC) +# define _LIBCUDACXX_HAS_MSVC_ATOMIC_IMPL +#endif // CUDA Atomics supersede host atomics in order to insert the host/device dispatch layer -# if defined(_CCCL_CUDA_COMPILER_NVCC) || defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_COMPILER_NVHPC) \ - || defined(_CCCL_CUDACC) -# define _LIBCUDACXX_HAS_CUDA_ATOMIC_IMPL -# endif +#if defined(_CCCL_CUDA_COMPILER_NVCC) || defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_COMPILER_NVHPC) || defined(_CCCL_CUDACC) +# define _LIBCUDACXX_HAS_CUDA_ATOMIC_IMPL +#endif -# if (!defined(_LIBCUDACXX_HAS_C_ATOMIC_IMP) && !defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP) \ - && !defined(_LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP)) \ - || defined(_LIBCUDACXX_HAS_NO_THREADS) -# define _LIBCUDACXX_HAS_NO_ATOMIC_HEADER -# else -# ifdef __cuda_std__ -# undef _LIBCUDACXX_ATOMIC_FLAG_TYPE -# define _LIBCUDACXX_ATOMIC_FLAG_TYPE int -# endif -# ifndef _LIBCUDACXX_ATOMIC_FLAG_TYPE -# define _LIBCUDACXX_ATOMIC_FLAG_TYPE bool -# endif -# ifdef _LIBCUDACXX_FREESTANDING -# define _LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS -# endif +#if (!defined(_LIBCUDACXX_HAS_C_ATOMIC_IMP) && \ + !defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP) && \ + !defined(_LIBCUDACXX_HAS_EXTERNAL_ATOMIC_IMP)) \ + || defined(_LIBCUDACXX_HAS_NO_THREADS) +# define _LIBCUDACXX_HAS_NO_ATOMIC_HEADER +#else +# ifdef __cuda_std__ +# undef _LIBCUDACXX_ATOMIC_FLAG_TYPE +# define _LIBCUDACXX_ATOMIC_FLAG_TYPE int # endif - -# ifndef _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK -# define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +# ifndef _LIBCUDACXX_ATOMIC_FLAG_TYPE +# define _LIBCUDACXX_ATOMIC_FLAG_TYPE bool +# endif +# ifdef _LIBCUDACXX_FREESTANDING +# define _LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS # endif +#endif + +#ifndef _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +#define _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +#endif -# if defined(_LIBCUDACXX_ENABLE_THREAD_SAFETY_ANNOTATIONS) -# if defined(_CCCL_COMPILER_CLANG) && __has_attribute(acquire_capability) +#if defined(_LIBCUDACXX_ENABLE_THREAD_SAFETY_ANNOTATIONS) +# if defined(_CCCL_COMPILER_CLANG) && __has_attribute(acquire_capability) // Work around the attribute handling in clang. When both __declspec and // __attribute__ are present, the processing goes awry preventing the definition // of the types. 
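// Usage sketch (illustrative; __example_load_relaxed is hypothetical): the
// atomic headers select a backend through the macros chosen above, roughly
// along these lines for the GCC/Clang builtin path.
template <class _Tp>
_Tp __example_load_relaxed(_Tp* __ptr)
{
#if defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP)
  return __atomic_load_n(__ptr, __ATOMIC_RELAXED); // GCC/Clang atomic builtin
#else
  return *__ptr; // placeholder: the MSVC/CUDA backends use their own intrinsics
#endif
}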
-# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) -# define _LIBCUDACXX_HAS_THREAD_SAFETY_ANNOTATIONS -# endif +# if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF) +# define _LIBCUDACXX_HAS_THREAD_SAFETY_ANNOTATIONS # endif # endif +#endif -# if __has_attribute(require_constant_initialization) -# define _LIBCUDACXX_SAFE_STATIC __attribute__((__require_constant_initialization__)) -# else -# define _LIBCUDACXX_SAFE_STATIC -# endif +#if __has_attribute(require_constant_initialization) +# define _LIBCUDACXX_SAFE_STATIC __attribute__((__require_constant_initialization__)) +#else +# define _LIBCUDACXX_SAFE_STATIC +#endif -# if !defined(_LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS) -# if defined(_LIBCUDACXX_MSVCRT) || defined(_NEWLIB_VERSION) -# define _LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS -# endif +#if !defined(_LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS) +# if defined(_LIBCUDACXX_MSVCRT) || defined(_NEWLIB_VERSION) +# define _LIBCUDACXX_HAS_NO_OFF_T_FUNCTIONS # endif +#endif -# if __has_attribute(diagnose_if) && !defined(_LIBCUDACXX_DISABLE_ADDITIONAL_DIAGNOSTICS) -# define _LIBCUDACXX_DIAGNOSE_WARNING(...) __attribute__((diagnose_if(__VA_ARGS__, "warning"))) -# define _LIBCUDACXX_DIAGNOSE_ERROR(...) __attribute__((diagnose_if(__VA_ARGS__, "error"))) -# else -# define _LIBCUDACXX_DIAGNOSE_WARNING(...) -# define _LIBCUDACXX_DIAGNOSE_ERROR(...) -# endif +#if __has_attribute(diagnose_if) && !defined(_LIBCUDACXX_DISABLE_ADDITIONAL_DIAGNOSTICS) +# define _LIBCUDACXX_DIAGNOSE_WARNING(...) \ + __attribute__((diagnose_if(__VA_ARGS__, "warning"))) +# define _LIBCUDACXX_DIAGNOSE_ERROR(...) \ + __attribute__((diagnose_if(__VA_ARGS__, "error"))) +#else +# define _LIBCUDACXX_DIAGNOSE_WARNING(...) +# define _LIBCUDACXX_DIAGNOSE_ERROR(...) +#endif -# if __has_attribute(__nodebug__) -# define _LIBCUDACXX_NODEBUG __attribute__((__nodebug__)) -# else -# define _LIBCUDACXX_NODEBUG -# endif +#if __has_attribute(__nodebug__) +#define _LIBCUDACXX_NODEBUG __attribute__((__nodebug__)) +#else +#define _LIBCUDACXX_NODEBUG +#endif # if __has_attribute(__preferred_name__) # define _LIBCUDACXX_PREFERRED_NAME(x) __attribute__((__preferred_name__(x))) @@ -1867,46 +1867,47 @@ __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, # define _LIBCUDACXX_PREFERRED_NAME(x) # endif -# if defined(_LIBCUDACXX_ABI_MICROSOFT) && (defined(_CCCL_COMPILER_MSVC) || __has_declspec_attribute(empty_bases)) -# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES __declspec(empty_bases) -# else -# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES -# endif +#if defined(_LIBCUDACXX_ABI_MICROSOFT) && \ + (defined(_CCCL_COMPILER_MSVC) || __has_declspec_attribute(empty_bases)) +# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES __declspec(empty_bases) +#else +# define _LIBCUDACXX_DECLSPEC_EMPTY_BASES +#endif -# if defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES) -# define _LIBCUDACXX_ENABLE_CXX17_REMOVED_AUTO_PTR -# define _LIBCUDACXX_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS -# define _LIBCUDACXX_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE -# define _LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS -# endif // _LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES +#if defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES) +#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_AUTO_PTR +#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS +#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE +#define _LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS +#endif // _LIBCUDACXX_ENABLE_CXX17_REMOVED_FEATURES -# if !defined(__cpp_deduction_guides) || __cpp_deduction_guides < 201611 -# define _LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES -# 
endif +#if !defined(__cpp_deduction_guides) || __cpp_deduction_guides < 201611 +#define _LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES +#endif -# if !defined(__cpp_coroutines) || __cpp_coroutines < 201703L -# define _LIBCUDACXX_HAS_NO_COROUTINES -# endif +#if !defined(__cpp_coroutines) || __cpp_coroutines < 201703L +#define _LIBCUDACXX_HAS_NO_COROUTINES +#endif // We need `is_constant_evaluated` for clang and gcc. MSVC also needs extensive rework -# if !defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) -# define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# elif defined(_CCCL_COMPILER_NVRTC) -# define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# elif defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# elif defined(_CCCL_CUDACC_BELOW_11_8) -# define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# elif defined(_CCCL_CUDA_COMPILER_CLANG) -# define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# endif +#if !defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS +#elif defined(_CCCL_COMPILER_NVRTC) +#define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS +#elif defined(_CCCL_COMPILER_MSVC) +#define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS +#elif defined(_CCCL_CUDACC_BELOW_11_8) +#define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS +#elif defined(_CCCL_CUDA_COMPILER_CLANG) +#define _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS +#endif // FIXME: Correct this macro when either (A) a feature test macro for the // spaceship operator is provided, or (B) a compiler provides a complete // implementation. -# define _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR +#define _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR -# define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +#define _LIBCUDACXX_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS // The stream API was dropped and re-added in the dylib shipped on macOS // and iOS. We can only assume the dylib to provide these definitions for @@ -1915,114 +1916,123 @@ __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, // declarations for streams exist conditionally to this; if we provide // an explicit instantiation declaration and we try to deploy to a dylib // that does not provide those symbols, we'll get a load-time error. -# if !defined(_LIBCUDACXX_BUILDING_LIBRARY) \ - && ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090) \ - || (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) \ - && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 70000)) -# define _LIBCUDACXX_DO_NOT_ASSUME_STREAMS_EXPLICIT_INSTANTIATION_IN_DYLIB -# endif +#if !defined(_LIBCUDACXX_BUILDING_LIBRARY) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 70000)) +# define _LIBCUDACXX_DO_NOT_ASSUME_STREAMS_EXPLICIT_INSTANTIATION_IN_DYLIB +#endif -# if defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) -# define _LIBCUDACXX_PUSH_MACROS -# define _LIBCUDACXX_POP_MACROS +#if defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) +# define _LIBCUDACXX_PUSH_MACROS +# define _LIBCUDACXX_POP_MACROS +#else + // Don't warn about macro conflicts when we can restore them at the + // end of the header. 
+# ifndef _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS +# define _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS +# endif +# if defined(_CCCL_COMPILER_MSVC) +# define _LIBCUDACXX_PUSH_MACROS \ + __pragma(push_macro("min")) \ + __pragma(push_macro("max")) +# define _LIBCUDACXX_POP_MACROS \ + __pragma(pop_macro("min")) \ + __pragma(pop_macro("max")) # else -// Don't warn about macro conflicts when we can restore them at the -// end of the header. -# ifndef _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS -# define _LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS -# endif -# if defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_PUSH_MACROS __pragma(push_macro("min")) __pragma(push_macro("max")) -# define _LIBCUDACXX_POP_MACROS __pragma(pop_macro("min")) __pragma(pop_macro("max")) +# define _LIBCUDACXX_PUSH_MACROS \ + _Pragma("push_macro(\"min\")") \ + _Pragma("push_macro(\"max\")") +# define _LIBCUDACXX_POP_MACROS \ + _Pragma("pop_macro(\"min\")") \ + _Pragma("pop_macro(\"max\")") +# endif +#endif // defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) + +#if !defined(_LIBCUDACXX_NO_AUTO_LINK) && !defined(__cuda_std__) +# if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) +# if defined(_DLL) +# pragma comment(lib, "c++.lib") # else -# define _LIBCUDACXX_PUSH_MACROS _Pragma("push_macro(\"min\")") _Pragma("push_macro(\"max\")") -# define _LIBCUDACXX_POP_MACROS _Pragma("pop_macro(\"min\")") _Pragma("pop_macro(\"max\")") +# pragma comment(lib, "libc++.lib") # endif -# endif // defined(_LIBCUDACXX_HAS_NO_PRAGMA_PUSH_POP_MACRO) - -# if !defined(_LIBCUDACXX_NO_AUTO_LINK) && !defined(__cuda_std__) -# if defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) -# if defined(_DLL) -# pragma comment(lib, "c++.lib") -# else -# pragma comment(lib, "libc++.lib") -# endif -# endif // defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) -# endif // !defined(_LIBCUDACXX_NO_AUTO_LINK) +# endif // defined(_LIBCUDACXX_ABI_MICROSOFT) && !defined(_LIBCUDACXX_BUILDING_LIBRARY) +#endif // !defined(_LIBCUDACXX_NO_AUTO_LINK) -# define _LIBCUDACXX_UNUSED_VAR(x) ((void) (x)) +#define _LIBCUDACXX_UNUSED_VAR(x) ((void)(x)) // Configures the fopen close-on-exec mode character, if any. This string will // be appended to any mode string used by fstream for fopen/fdopen. // // Not all platforms support this, but it helps avoid fd-leaks on platforms that // do. -# if defined(__BIONIC__) -# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE "e" -# else -# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE -# endif +#if defined(__BIONIC__) +# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE "e" +#else +# define _LIBCUDACXX_FOPEN_CLOEXEC_MODE +#endif # if __has_attribute(__format__) // The attribute uses 1-based indices for ordinary and static member functions. // The attribute uses 2-based indices for non-static member functions. 
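// Usage sketch (illustrative): a header that uses names such as `min` or
// `max` as identifiers brackets its contents with the macros defined above,
// so any user-defined `min`/`max` macros are saved and then restored.
_LIBCUDACXX_PUSH_MACROS
// ... header contents that may use min/max as ordinary identifiers ...
_LIBCUDACXX_POP_MACROS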
-# define _LIBCUDACXX_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) \ +# define _LIBCUDACXX_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) \ __attribute__((__format__(archetype, format_string_index, first_format_arg_index))) # else # define _LIBCUDACXX_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) /* nothing */ # endif -# ifndef _LIBCUDACXX_SYS_CLOCK_DURATION -# if defined(__cuda_std__) -# define _LIBCUDACXX_SYS_CLOCK_DURATION nanoseconds -# else -# define _LIBCUDACXX_SYS_CLOCK_DURATION microseconds -# endif -# endif // _LIBCUDACXX_SYS_CLOCK_DURATION +#ifndef _LIBCUDACXX_SYS_CLOCK_DURATION +#if defined(__cuda_std__) +# define _LIBCUDACXX_SYS_CLOCK_DURATION nanoseconds +#else +# define _LIBCUDACXX_SYS_CLOCK_DURATION microseconds +#endif +#endif // _LIBCUDACXX_SYS_CLOCK_DURATION // There are a handful of public standard library types that are intended to // support CTAD but don't need any explicit deduction guides to do so. This // macro is used to mark them as such, which suppresses the // '-Wctad-maybe-unsupported' compiler warning when CTAD is used in user code // with these classes. -# if (!defined(_CCCL_COMPILER_GCC) || __GNUC__ > 6) && _CCCL_STD_VER >= 2017 -# define _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(_ClassName) \ - template \ - _ClassName(typename _Tag::__allow_ctad...)->_ClassName<_Tag...> -# else -# define _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(_ClassName) static_assert(true, "") -# endif +#if (!defined(_CCCL_COMPILER_GCC) || __GNUC__ > 6) \ + && _CCCL_STD_VER >= 2017 +# define _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(_ClassName) \ + template \ + _ClassName(typename _Tag::__allow_ctad...) -> _ClassName<_Tag...> +#else +# define _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(_ClassName) static_assert(true, "") +#endif -# if (defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ <= 11) \ - && (defined(__CUDACC_VER_MINOR__) && __CUDACC_VER_MINOR__ <= 2) -# define _LIBCUDACXX_CONSTEXPR_GLOBAL const -# else -# define _LIBCUDACXX_CONSTEXPR_GLOBAL constexpr -# endif +#if (defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ <= 11) \ + && (defined(__CUDACC_VER_MINOR__) && __CUDACC_VER_MINOR__ <= 2) +# define _LIBCUDACXX_CONSTEXPR_GLOBAL const +#else +# define _LIBCUDACXX_CONSTEXPR_GLOBAL constexpr +#endif -# if defined(__CUDA_ARCH__) -# define _LIBCUDACXX_CPO_ACCESSIBILITY _CCCL_DEVICE _LIBCUDACXX_CONSTEXPR_GLOBAL -# else -# define _LIBCUDACXX_CPO_ACCESSIBILITY _LIBCUDACXX_INLINE_VAR constexpr -# endif +#if defined(__CUDA_ARCH__) +# define _LIBCUDACXX_CPO_ACCESSIBILITY _CCCL_DEVICE _LIBCUDACXX_CONSTEXPR_GLOBAL +#else +# define _LIBCUDACXX_CPO_ACCESSIBILITY _LIBCUDACXX_INLINE_VAR constexpr +#endif -# if _CCCL_STD_VER > 2014 -# define _LIBCUDACXX_TRAIT(__TRAIT, ...) __TRAIT##_v<__VA_ARGS__> -# else -# define _LIBCUDACXX_TRAIT(__TRAIT, ...) __TRAIT<__VA_ARGS__>::value -# endif +#if _CCCL_STD_VER > 2014 +# define _LIBCUDACXX_TRAIT(__TRAIT, ...) __TRAIT##_v<__VA_ARGS__> +#else +# define _LIBCUDACXX_TRAIT(__TRAIT, ...) __TRAIT<__VA_ARGS__>::value +#endif // Older nvcc do not handle the constraint of `construct_at` in earlier std modes // So to preserve our performance optimization we default to the unconstrained // `__construct_at` and only in C++20 use `construct_at` -# if _CCCL_STD_VER > 2017 -# define _LIBCUDACXX_CONSTRUCT_AT(_LOCATION, ...) \ - _CUDA_VSTD::construct_at(_CUDA_VSTD::addressof(_LOCATION), __VA_ARGS__) -# else -# define _LIBCUDACXX_CONSTRUCT_AT(_LOCATION, ...) 
\ - _CUDA_VSTD::__construct_at(_CUDA_VSTD::addressof(_LOCATION), __VA_ARGS__) -# endif +#if _CCCL_STD_VER > 2017 +# define _LIBCUDACXX_CONSTRUCT_AT(_LOCATION, ...) \ + _CUDA_VSTD::construct_at(_CUDA_VSTD::addressof(_LOCATION), __VA_ARGS__) +#else +# define _LIBCUDACXX_CONSTRUCT_AT(_LOCATION, ...) \ + _CUDA_VSTD::__construct_at(_CUDA_VSTD::addressof(_LOCATION), __VA_ARGS__) +#endif // We can only expose constexpr allocations if the compiler supports it # if defined(__cpp_constexpr_dynamic_alloc) && defined(__cpp_lib_constexpr_dynamic_alloc) && _CCCL_STD_VER >= 2020 \ @@ -2051,7 +2061,7 @@ __sanitizer_annotate_contiguous_container(const void*, const void*, const void*, constexpr __class() noexcept = default; # endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -# define _LIBCUDACXX_HAS_NO_INCOMPLETE_RANGES +#define _LIBCUDACXX_HAS_NO_INCOMPLETE_RANGES #endif // __cplusplus diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__debug b/libcudacxx/include/cuda/std/detail/libcxx/include/__debug index afbc14b6373..8e207184c81 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__debug +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__debug @@ -21,22 +21,22 @@ # pragma system_header #endif // no system header +#include #include #include -#include #if defined(_LIBCUDACXX_ENABLE_DEBUG_MODE) && !defined(_LIBCUDACXX_DEBUG_RANDOMIZE_UNSPECIFIED_STABILITY) -# define _LIBCUDACXX_DEBUG_RANDOMIZE_UNSPECIFIED_STABILITY +# define _LIBCUDACXX_DEBUG_RANDOMIZE_UNSPECIFIED_STABILITY #endif #if defined(_LIBCUDACXX_ENABLE_DEBUG_MODE) && !defined(_LIBCUDACXX_DEBUG_ITERATOR_BOUNDS_CHECKING) -# define _LIBCUDACXX_DEBUG_ITERATOR_BOUNDS_CHECKING +# define _LIBCUDACXX_DEBUG_ITERATOR_BOUNDS_CHECKING #endif #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE -# define _LIBCUDACXX_DEBUG_ASSERT(x, m) _LIBCUDACXX_ASSERT(::std::__libcpp_is_constant_evaluated() || (x), m) +# define _LIBCUDACXX_DEBUG_ASSERT(x, m) _LIBCUDACXX_ASSERT(::std::__libcpp_is_constant_evaluated() || (x), m) #else -# define _LIBCUDACXX_DEBUG_ASSERT(x, m) ((void) 0) +# define _LIBCUDACXX_DEBUG_ASSERT(x, m) ((void)0) #endif #if defined(_LIBCUDACXX_ENABLE_DEBUG_MODE) || defined(_LIBCUDACXX_BUILDING_LIBRARY) @@ -47,165 +47,164 @@ struct _LIBCUDACXX_TYPE_VIS __c_node; struct _LIBCUDACXX_TYPE_VIS __i_node { - void* __i_; - __i_node* __next_; - __c_node* __c_; - - __i_node(const __i_node&) = delete; - __i_node& operator=(const __i_node&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY __i_node(void* __i, __i_node* __next, __c_node* __c) - : __i_(__i) - , __next_(__next) - , __c_(__c) - {} - ~__i_node(); + void* __i_; + __i_node* __next_; + __c_node* __c_; + + __i_node(const __i_node&) = delete; + __i_node& operator=(const __i_node&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + __i_node(void* __i, __i_node* __next, __c_node* __c) + : __i_(__i), __next_(__next), __c_(__c) {} + ~__i_node(); }; struct _LIBCUDACXX_TYPE_VIS __c_node { - void* __c_; - __c_node* __next_; - __i_node** beg_; - __i_node** end_; - __i_node** cap_; - - __c_node(const __c_node&) = delete; - __c_node& operator=(const __c_node&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY explicit __c_node(void* __c, __c_node* __next) - : __c_(__c) - , __next_(__next) - , beg_(nullptr) - , end_(nullptr) - , cap_(nullptr) - {} - virtual ~__c_node(); - - virtual bool __dereferenceable(const void*) const = 0; - virtual bool __decrementable(const void*) const = 0; - virtual bool __addable(const void*, ptrdiff_t) const = 0; - virtual bool __subscriptable(const void*, ptrdiff_t) const = 0; - - void 
__add(__i_node* __i); - _LIBCUDACXX_HIDDEN void __remove(__i_node* __i); + void* __c_; + __c_node* __next_; + __i_node** beg_; + __i_node** end_; + __i_node** cap_; + + __c_node(const __c_node&) = delete; + __c_node& operator=(const __c_node&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + explicit __c_node(void* __c, __c_node* __next) + : __c_(__c), __next_(__next), beg_(nullptr), end_(nullptr), cap_(nullptr) {} + virtual ~__c_node(); + + virtual bool __dereferenceable(const void*) const = 0; + virtual bool __decrementable(const void*) const = 0; + virtual bool __addable(const void*, ptrdiff_t) const = 0; + virtual bool __subscriptable(const void*, ptrdiff_t) const = 0; + + void __add(__i_node* __i); + _LIBCUDACXX_HIDDEN void __remove(__i_node* __i); }; template -struct _C_node : public __c_node +struct _C_node + : public __c_node { - explicit _C_node(void* __c, __c_node* __n) - : __c_node(__c, __n) - {} - - bool __dereferenceable(const void*) const override; - bool __decrementable(const void*) const override; - bool __addable(const void*, ptrdiff_t) const override; - bool __subscriptable(const void*, ptrdiff_t) const override; + explicit _C_node(void* __c, __c_node* __n) + : __c_node(__c, __n) {} + + bool __dereferenceable(const void*) const override; + bool __decrementable(const void*) const override; + bool __addable(const void*, ptrdiff_t) const override; + bool __subscriptable(const void*, ptrdiff_t) const override; }; template -inline bool _C_node<_Cont>::__dereferenceable(const void* __i) const +inline bool +_C_node<_Cont>::__dereferenceable(const void* __i) const { - typedef typename _Cont::const_iterator iterator; - const iterator* __j = static_cast(__i); - _Cont* _Cp = static_cast<_Cont*>(__c_); - return _Cp->__dereferenceable(__j); + typedef typename _Cont::const_iterator iterator; + const iterator* __j = static_cast(__i); + _Cont* _Cp = static_cast<_Cont*>(__c_); + return _Cp->__dereferenceable(__j); } template -inline bool _C_node<_Cont>::__decrementable(const void* __i) const +inline bool +_C_node<_Cont>::__decrementable(const void* __i) const { - typedef typename _Cont::const_iterator iterator; - const iterator* __j = static_cast(__i); - _Cont* _Cp = static_cast<_Cont*>(__c_); - return _Cp->__decrementable(__j); + typedef typename _Cont::const_iterator iterator; + const iterator* __j = static_cast(__i); + _Cont* _Cp = static_cast<_Cont*>(__c_); + return _Cp->__decrementable(__j); } template -inline bool _C_node<_Cont>::__addable(const void* __i, ptrdiff_t __n) const +inline bool +_C_node<_Cont>::__addable(const void* __i, ptrdiff_t __n) const { - typedef typename _Cont::const_iterator iterator; - const iterator* __j = static_cast(__i); - _Cont* _Cp = static_cast<_Cont*>(__c_); - return _Cp->__addable(__j, __n); + typedef typename _Cont::const_iterator iterator; + const iterator* __j = static_cast(__i); + _Cont* _Cp = static_cast<_Cont*>(__c_); + return _Cp->__addable(__j, __n); } template -inline bool _C_node<_Cont>::__subscriptable(const void* __i, ptrdiff_t __n) const +inline bool +_C_node<_Cont>::__subscriptable(const void* __i, ptrdiff_t __n) const { - typedef typename _Cont::const_iterator iterator; - const iterator* __j = static_cast(__i); - _Cont* _Cp = static_cast<_Cont*>(__c_); - return _Cp->__subscriptable(__j, __n); + typedef typename _Cont::const_iterator iterator; + const iterator* __j = static_cast(__i); + _Cont* _Cp = static_cast<_Cont*>(__c_); + return _Cp->__subscriptable(__j, __n); } class _LIBCUDACXX_TYPE_VIS __libcpp_db { - __c_node** __cbeg_; - 
__c_node** __cend_; - size_t __csz_; - __i_node** __ibeg_; - __i_node** __iend_; - size_t __isz_; - - explicit __libcpp_db(); - + __c_node** __cbeg_; + __c_node** __cend_; + size_t __csz_; + __i_node** __ibeg_; + __i_node** __iend_; + size_t __isz_; + + explicit __libcpp_db(); public: - __libcpp_db(const __libcpp_db&) = delete; - __libcpp_db& operator=(const __libcpp_db&) = delete; + __libcpp_db(const __libcpp_db&) = delete; + __libcpp_db& operator=(const __libcpp_db&) = delete; - ~__libcpp_db(); + ~__libcpp_db(); - class __db_c_iterator; - class __db_c_const_iterator; - class __db_i_iterator; - class __db_i_const_iterator; + class __db_c_iterator; + class __db_c_const_iterator; + class __db_i_iterator; + class __db_i_const_iterator; - __db_c_const_iterator __c_end() const; - __db_i_const_iterator __i_end() const; + __db_c_const_iterator __c_end() const; + __db_i_const_iterator __i_end() const; - typedef __c_node*(_InsertConstruct) (void*, void*, __c_node*); + typedef __c_node*(_InsertConstruct)(void*, void*, __c_node*); - template - _LIBCUDACXX_INLINE_VISIBILITY static __c_node* __create_C_node(void* __mem, void* __c, __c_node* __next) - { - return ::new (__mem) _C_node<_Cont>(__c, __next); - } + template + _LIBCUDACXX_INLINE_VISIBILITY static __c_node* __create_C_node(void *__mem, void *__c, __c_node *__next) { + return ::new (__mem) _C_node<_Cont>(__c, __next); + } - template - _LIBCUDACXX_INLINE_VISIBILITY void __insert_c(_Cont* __c) - { - __insert_c(static_cast(__c), &__create_C_node<_Cont>); - } + template + _LIBCUDACXX_INLINE_VISIBILITY + void __insert_c(_Cont* __c) + { + __insert_c(static_cast(__c), &__create_C_node<_Cont>); + } - void __insert_i(void* __i); - void __insert_c(void* __c, _InsertConstruct* __fn); - void __erase_c(void* __c); + void __insert_i(void* __i); + void __insert_c(void* __c, _InsertConstruct* __fn); + void __erase_c(void* __c); - void __insert_ic(void* __i, const void* __c); - void __iterator_copy(void* __i, const void* __i0); - void __erase_i(void* __i); + void __insert_ic(void* __i, const void* __c); + void __iterator_copy(void* __i, const void* __i0); + void __erase_i(void* __i); - void* __find_c_from_i(void* __i) const; - void __invalidate_all(void* __c); - __c_node* __find_c_and_lock(void* __c) const; - __c_node* __find_c(void* __c) const; - void unlock() const; + void* __find_c_from_i(void* __i) const; + void __invalidate_all(void* __c); + __c_node* __find_c_and_lock(void* __c) const; + __c_node* __find_c(void* __c) const; + void unlock() const; - void swap(void* __c1, void* __c2); + void swap(void* __c1, void* __c2); - bool __dereferenceable(const void* __i) const; - bool __decrementable(const void* __i) const; - bool __addable(const void* __i, ptrdiff_t __n) const; - bool __subscriptable(const void* __i, ptrdiff_t __n) const; - bool __less_than_comparable(const void* __i, const void* __j) const; + bool __dereferenceable(const void* __i) const; + bool __decrementable(const void* __i) const; + bool __addable(const void* __i, ptrdiff_t __n) const; + bool __subscriptable(const void* __i, ptrdiff_t __n) const; + bool __less_than_comparable(const void* __i, const void* __j) const; private: - _LIBCUDACXX_HIDDEN __i_node* __insert_iterator(void* __i); - _LIBCUDACXX_HIDDEN __i_node* __find_iterator(const void* __i) const; + _LIBCUDACXX_HIDDEN + __i_node* __insert_iterator(void* __i); + _LIBCUDACXX_HIDDEN + __i_node* __find_iterator(const void* __i) const; - friend _LIBCUDACXX_FUNC_VIS __libcpp_db* __get_db(); + friend _LIBCUDACXX_FUNC_VIS __libcpp_db* 
__get_db(); }; _LIBCUDACXX_FUNC_VIS __libcpp_db* __get_db(); @@ -218,68 +217,58 @@ _LIBCUDACXX_END_NAMESPACE_STD _LIBCUDACXX_BEGIN_NAMESPACE_STD template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 inline void __debug_db_insert_c(_Tp* __c) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 inline void __debug_db_insert_c(_Tp* __c) { #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE - if (!__libcpp_is_constant_evaluated()) - { - __get_db()->__insert_c(__c); - } + if (!__libcpp_is_constant_evaluated()) + __get_db()->__insert_c(__c); #else - (void) (__c); + (void)(__c); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 inline void __debug_db_insert_i(_Tp* __i) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 inline void __debug_db_insert_i(_Tp* __i) { #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE - if (!__libcpp_is_constant_evaluated()) - { - __get_db()->__insert_i(__i); - } + if (!__libcpp_is_constant_evaluated()) + __get_db()->__insert_i(__i); #else - (void) (__i); + (void)(__i); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 inline void __debug_db_erase_c(_Tp* __c) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 inline void __debug_db_erase_c(_Tp* __c) { #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE - if (!__libcpp_is_constant_evaluated()) - { - __get_db()->__erase_c(__c); - } + if (!__libcpp_is_constant_evaluated()) + __get_db()->__erase_c(__c); #else - (void) (__c); + (void)(__c); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 inline void __debug_db_swap(_Tp* __lhs, _Tp* __rhs) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 inline void __debug_db_swap(_Tp* __lhs, _Tp* __rhs) { #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE - if (!__libcpp_is_constant_evaluated()) - { - __get_db()->swap(__lhs, __rhs); - } + if (!__libcpp_is_constant_evaluated()) + __get_db()->swap(__lhs, __rhs); #else - (void) (__lhs); - (void) (__rhs); + (void)(__lhs); + (void)(__rhs); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 inline void __debug_db_invalidate_all(_Tp* __c) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 inline void __debug_db_invalidate_all(_Tp* __c) { #ifdef _LIBCUDACXX_ENABLE_DEBUG_MODE - if (!__libcpp_is_constant_evaluated()) - { - __get_db()->__invalidate_all(__c); - } + if (!__libcpp_is_constant_evaluated()) + __get_db()->__invalidate_all(__c); #else - (void) (__c); + (void)(__c); #endif } diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base b/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base index ec160058958..042b7f21c0e 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__functional_base @@ -32,14 +32,15 @@ #include #include #include -#include #include #include +#include + _LIBCUDACXX_BEGIN_NAMESPACE_STD _LIBCUDACXX_END_NAMESPACE_STD #include -#endif // _LIBCUDACXX_FUNCTIONAL_BASE +#endif // _LIBCUDACXX_FUNCTIONAL_BASE diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop b/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop index 5bd85a09940..27a9a68b4e6 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_pop @@ -8,9 +8,9 @@ //===----------------------------------------------------------------------===// #if defined(_LIBCUDACXX_USE_PRAGMA_MSVC_WARNING) -# pragma warning(pop) + #pragma warning(pop) #endif #if defined(_LIBCUDACXX_POP_MACROS) 
-_LIBCUDACXX_POP_MACROS + _LIBCUDACXX_POP_MACROS #endif diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push b/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push index 1c815653a89..9b4ee961937 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__pragma_push @@ -8,18 +8,18 @@ //===----------------------------------------------------------------------===// #ifdef _LIBCUDACXX_IMPLICIT_SYSTEM_HEADER -# pragma GCC system_header + #pragma GCC system_header #endif #if defined(_LIBCUDACXX_USE_PRAGMA_MSVC_WARNING) -# pragma warning(push) -# pragma warning(disable : _LIBCUDACXX_MSVC_DISABLED_WARNINGS) + #pragma warning(push) + #pragma warning(disable : _LIBCUDACXX_MSVC_DISABLED_WARNINGS) #endif #if defined(_LIBCUDACXX_PUSH_MACROS) -_LIBCUDACXX_PUSH_MACROS + _LIBCUDACXX_PUSH_MACROS #endif #ifndef __cuda_std__ -# include <__undef_macros> +#include <__undef_macros> #endif diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support b/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support index 1360e4d769e..18bafb86ae3 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__threading_support @@ -20,52 +20,54 @@ # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include +#include + #if defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) -# ifndef __cuda_std__ -# include <__external_threading> -# else -# define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY -# endif +# ifndef __cuda_std__ +# include <__external_threading> +# else +# define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY +# endif #elif !defined(_LIBCUDACXX_HAS_NO_THREADS) -# if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) -# include -# include -# include -# if defined(__APPLE__) -# include -# endif -# if defined(__linux__) -# include -# include -# include -# endif -# endif - -# if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) -# include -# include -# endif - -# if defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) -# define _LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_FUNC_VIS -# else -# define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY -# endif - -# if defined(__FreeBSD__) && defined(_CCCL_COMPILER_CLANG) && __has_attribute(no_thread_safety_analysis) -# define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) -# else -# define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS -# endif +#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +# include +# include +# include +# if defined(__APPLE__) +# include +# endif +# if defined(__linux__) +# include +# include +# include +# endif +#endif + +#if defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +# include +# include +#endif + +#if defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || \ + defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) +#define _LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_FUNC_VIS +#else +#define _LIBCUDACXX_THREAD_ABI_VISIBILITY inline _LIBCUDACXX_INLINE_VISIBILITY +#endif + +#if defined(__FreeBSD__) && defined(_CCCL_COMPILER_CLANG) && __has_attribute(no_thread_safety_analysis) +#define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS 
__attribute__((no_thread_safety_analysis)) +#else +#define _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +#endif typedef ::timespec __libcpp_timespec_t; #endif // !defined(_LIBCUDACXX_HAS_NO_THREADS) @@ -74,20 +76,24 @@ _LIBCUDACXX_BEGIN_NAMESPACE_STD #if !defined(_LIBCUDACXX_HAS_NO_THREADS) -# define _LIBCUDACXX_POLLING_COUNT 16 +#define _LIBCUDACXX_POLLING_COUNT 16 -_LIBCUDACXX_INLINE_VISIBILITY inline void __libcpp_thread_yield_processor() +_LIBCUDACXX_INLINE_VISIBILITY +inline void __libcpp_thread_yield_processor() { -# if defined(__aarch64__) -# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile("yield" :::);) -# elif defined(__x86_64__) -# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile("pause" :::);) -# elif defined(__powerpc__) -# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile("or 27,27,27" :::);) -# else -# define __LIBCUDACXX_ASM_THREAD_YIELD (;) -# endif - NV_IF_TARGET(NV_IS_HOST, __LIBCUDACXX_ASM_THREAD_YIELD) +#if defined(__aarch64__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("yield" :::);) +#elif defined(__x86_64__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("pause" :::);) +#elif defined (__powerpc__) +# define __LIBCUDACXX_ASM_THREAD_YIELD (asm volatile ("or 27,27,27":::);) +#else +# define __LIBCUDACXX_ASM_THREAD_YIELD (;) +#endif + NV_IF_TARGET( + NV_IS_HOST, + __LIBCUDACXX_ASM_THREAD_YIELD + ) } _LIBCUDACXX_THREAD_ABI_VISIBILITY @@ -96,114 +102,116 @@ void __libcpp_thread_yield(); _LIBCUDACXX_THREAD_ABI_VISIBILITY void __libcpp_thread_sleep_for(chrono::nanoseconds __ns); -template -_LIBCUDACXX_THREAD_ABI_VISIBILITY bool -__libcpp_thread_poll_with_backoff(_Fn&& __f, chrono::nanoseconds __max = chrono::nanoseconds::zero()); +template +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_thread_poll_with_backoff(_Fn && __f, chrono::nanoseconds __max = chrono::nanoseconds::zero()); -# if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +#if defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) // Mutex typedef pthread_mutex_t __libcpp_mutex_t; -# define _LIBCUDACXX_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER +#define _LIBCUDACXX_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER typedef pthread_mutex_t __libcpp_recursive_mutex_t; // Condition Variable typedef pthread_cond_t __libcpp_condvar_t; -# define _LIBCUDACXX_CONDVAR_INITIALIZER PTHREAD_COND_INITIALIZER +#define _LIBCUDACXX_CONDVAR_INITIALIZER PTHREAD_COND_INITIALIZER // Semaphore -# if defined(__APPLE__) +#if defined(__APPLE__) typedef dispatch_semaphore_t __libcpp_semaphore_t; -# define _LIBCUDACXX_SEMAPHORE_MAX numeric_limits::max() -# else +# define _LIBCUDACXX_SEMAPHORE_MAX numeric_limits::max() +#else typedef sem_t __libcpp_semaphore_t; -# define _LIBCUDACXX_SEMAPHORE_MAX SEM_VALUE_MAX -# endif +# define _LIBCUDACXX_SEMAPHORE_MAX SEM_VALUE_MAX +#endif // Execute once typedef pthread_once_t __libcpp_exec_once_flag; -# define _LIBCUDACXX_EXEC_ONCE_INITIALIZER PTHREAD_ONCE_INIT +#define _LIBCUDACXX_EXEC_ONCE_INITIALIZER PTHREAD_ONCE_INIT // Thread id typedef pthread_t __libcpp_thread_id; // Thread -# define _LIBCUDACXX_NULL_THREAD 0U +#define _LIBCUDACXX_NULL_THREAD 0U typedef pthread_t __libcpp_thread_t; // Thread Local Storage typedef pthread_key_t __libcpp_tls_key; -# define _LIBCUDACXX_TLS_DESTRUCTOR_CC -# elif !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +#define _LIBCUDACXX_TLS_DESTRUCTOR_CC +#elif !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) // Mutex typedef void* __libcpp_mutex_t; -# define _LIBCUDACXX_MUTEX_INITIALIZER 0 +#define _LIBCUDACXX_MUTEX_INITIALIZER 0 -# if defined(_M_IX86) || defined(__i386__) || 
defined(_M_ARM) || defined(__arm__) +#if defined(_M_IX86) || defined(__i386__) || defined(_M_ARM) || defined(__arm__) typedef void* __libcpp_recursive_mutex_t[6]; -# elif defined(_M_AMD64) || defined(__x86_64__) || defined(_M_ARM64) || defined(__aarch64__) +#elif defined(_M_AMD64) || defined(__x86_64__) || defined(_M_ARM64) || defined(__aarch64__) typedef void* __libcpp_recursive_mutex_t[5]; -# else -# error Unsupported architecture -# endif +#else +# error Unsupported architecture +#endif // Condition Variable typedef void* __libcpp_condvar_t; -# define _LIBCUDACXX_CONDVAR_INITIALIZER 0 +#define _LIBCUDACXX_CONDVAR_INITIALIZER 0 // Semaphore typedef void* __libcpp_semaphore_t; // Execute Once typedef void* __libcpp_exec_once_flag; -# define _LIBCUDACXX_EXEC_ONCE_INITIALIZER 0 +#define _LIBCUDACXX_EXEC_ONCE_INITIALIZER 0 // Thread ID typedef long __libcpp_thread_id; // Thread -# define _LIBCUDACXX_NULL_THREAD 0U +#define _LIBCUDACXX_NULL_THREAD 0U typedef void* __libcpp_thread_t; // Thread Local Storage typedef long __libcpp_tls_key; -# define _LIBCUDACXX_TLS_DESTRUCTOR_CC __stdcall -# endif // !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +#define _LIBCUDACXX_TLS_DESTRUCTOR_CC __stdcall +#endif // !defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) && !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) -# if !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +#if !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) _LIBCUDACXX_THREAD_ABI_VISIBILITY __libcpp_timespec_t __libcpp_to_timespec(const chrono::nanoseconds& __ns); // Mutex _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t* __m); +int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int -__libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS bool -__libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int -__libcpp_recursive_mutex_unlock(__libcpp_recursive_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_recursive_mutex_unlock(__libcpp_recursive_mutex_t *__m); _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t* __m); +int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int __libcpp_mutex_lock(__libcpp_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_mutex_lock(__libcpp_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS bool -__libcpp_mutex_trylock(__libcpp_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int __libcpp_mutex_unlock(__libcpp_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_mutex_unlock(__libcpp_mutex_t *__m); 
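// The plain-mutex wrappers declared above are errno-style: lock and unlock
// return 0 on success, and trylock reports success as a bool. A minimal
// usage sketch, assuming a statically initialized mutex; the guard type
// __sketch_lock_guard below is purely illustrative and is not declared
// anywhere in this header.

static __libcpp_mutex_t __sketch_mtx = _LIBCUDACXX_MUTEX_INITIALIZER;

struct __sketch_lock_guard
{
  __libcpp_mutex_t* __m_;

  explicit __sketch_lock_guard(__libcpp_mutex_t* __m) : __m_(__m)
  {
    (void) __libcpp_mutex_lock(__m_); // blocks until the mutex is acquired
  }
  ~__sketch_lock_guard()
  {
    (void) __libcpp_mutex_unlock(__m_); // every successful lock is paired with an unlock
  }
};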
_LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_mutex_destroy(__libcpp_mutex_t* __m); +int __libcpp_mutex_destroy(__libcpp_mutex_t *__m); // Condition variable _LIBCUDACXX_THREAD_ABI_VISIBILITY @@ -212,11 +220,12 @@ int __libcpp_condvar_signal(__libcpp_condvar_t* __cv); _LIBCUDACXX_THREAD_ABI_VISIBILITY int __libcpp_condvar_broadcast(__libcpp_condvar_t* __cv); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int -__libcpp_condvar_wait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_condvar_wait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m); -_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS int -__libcpp_condvar_timedwait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m, __libcpp_timespec_t* __ts); +_LIBCUDACXX_THREAD_ABI_VISIBILITY _LIBCUDACXX_NO_THREAD_SAFETY_ANALYSIS +int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m, + __libcpp_timespec_t *__ts); _LIBCUDACXX_THREAD_ABI_VISIBILITY int __libcpp_condvar_destroy(__libcpp_condvar_t* __cv); @@ -239,7 +248,8 @@ bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseco // Execute once _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_execute_once(__libcpp_exec_once_flag* flag, void (*init_routine)()); +int __libcpp_execute_once(__libcpp_exec_once_flag *flag, + void (*init_routine)()); // Thread id _LIBCUDACXX_THREAD_ABI_VISIBILITY @@ -250,38 +260,40 @@ bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2); // Thread _LIBCUDACXX_THREAD_ABI_VISIBILITY -bool __libcpp_thread_isnull(const __libcpp_thread_t* __t); +bool __libcpp_thread_isnull(const __libcpp_thread_t *__t); _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_create(__libcpp_thread_t* __t, void* (*__func)(void*), void* __arg); +int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *), + void *__arg); _LIBCUDACXX_THREAD_ABI_VISIBILITY __libcpp_thread_id __libcpp_thread_get_current_id(); _LIBCUDACXX_THREAD_ABI_VISIBILITY -__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t* __t); +__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t); _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_join(__libcpp_thread_t* __t); +int __libcpp_thread_join(__libcpp_thread_t *__t); _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_detach(__libcpp_thread_t* __t); +int __libcpp_thread_detach(__libcpp_thread_t *__t); // Thread local storage _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_tls_create(__libcpp_tls_key* __key, void(_LIBCUDACXX_TLS_DESTRUCTOR_CC* __at_exit)(void*)); +int __libcpp_tls_create(__libcpp_tls_key* __key, + void(_LIBCUDACXX_TLS_DESTRUCTOR_CC* __at_exit)(void*)); _LIBCUDACXX_THREAD_ABI_VISIBILITY -void* __libcpp_tls_get(__libcpp_tls_key __key); +void *__libcpp_tls_get(__libcpp_tls_key __key); _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_tls_set(__libcpp_tls_key __key, void* __p); +int __libcpp_tls_set(__libcpp_tls_key __key, void *__p); -# endif // !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) +#endif // !defined(_LIBCUDACXX_HAS_THREAD_API_EXTERNAL) -# if !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) +#if !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) -# if defined(_LIBCUDACXX_HAS_THREAD_API_CUDA) +#if defined(_LIBCUDACXX_HAS_THREAD_API_CUDA) _LIBCUDACXX_THREAD_ABI_VISIBILITY void 
__libcpp_thread_yield() {} @@ -289,215 +301,214 @@ void __libcpp_thread_yield() {} _LIBCUDACXX_THREAD_ABI_VISIBILITY void __libcpp_thread_sleep_for(chrono::nanoseconds __ns) { - NV_IF_TARGET(NV_IS_DEVICE, - (auto const __step = __ns.count(); assert(__step < numeric_limits::max()); - asm volatile("nanosleep.u32 %0;" ::"r"((unsigned) __step) - :);)) + NV_IF_TARGET( + NV_IS_DEVICE, ( + auto const __step = __ns.count(); + assert(__step < numeric_limits::max()); + asm volatile("nanosleep.u32 %0;"::"r"((unsigned)__step):); + ) + ) } -# elif defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) +#elif defined(_LIBCUDACXX_HAS_THREAD_API_PTHREAD) _LIBCUDACXX_THREAD_ABI_VISIBILITY __libcpp_timespec_t __libcpp_to_timespec(const chrono::nanoseconds& __ns) { - using namespace chrono; - seconds __s = duration_cast(__ns); - __libcpp_timespec_t __ts; - typedef decltype(__ts.tv_sec) ts_sec; - constexpr ts_sec __ts_sec_max = numeric_limits::max(); - - if (__s.count() < __ts_sec_max) - { - __ts.tv_sec = static_cast(__s.count()); - __ts.tv_nsec = static_cast((__ns - __s).count()); - } - else - { - __ts.tv_sec = __ts_sec_max; - __ts.tv_nsec = 999999999; // (10^9 - 1) - } - return __ts; -} - -_LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t* __m) -{ - pthread_mutexattr_t attr; - int __ec = pthread_mutexattr_init(&attr); - if (__ec) - { - return __ec; - } - __ec = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - if (__ec) - { - pthread_mutexattr_destroy(&attr); - return __ec; - } - __ec = pthread_mutex_init(__m, &attr); - if (__ec) - { - pthread_mutexattr_destroy(&attr); - return __ec; - } - __ec = pthread_mutexattr_destroy(&attr); - if (__ec) - { - pthread_mutex_destroy(__m); - return __ec; - } - return 0; + using namespace chrono; + seconds __s = duration_cast(__ns); + __libcpp_timespec_t __ts; + typedef decltype(__ts.tv_sec) ts_sec; + constexpr ts_sec __ts_sec_max = numeric_limits::max(); + + if (__s.count() < __ts_sec_max) + { + __ts.tv_sec = static_cast(__s.count()); + __ts.tv_nsec = static_cast((__ns - __s).count()); + } + else + { + __ts.tv_sec = __ts_sec_max; + __ts.tv_nsec = 999999999; // (10^9 - 1) + } + return __ts; +} + +_LIBCUDACXX_THREAD_ABI_VISIBILITY +int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m) +{ + pthread_mutexattr_t attr; + int __ec = pthread_mutexattr_init(&attr); + if (__ec) + return __ec; + __ec = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + if (__ec) { + pthread_mutexattr_destroy(&attr); + return __ec; + } + __ec = pthread_mutex_init(__m, &attr); + if (__ec) { + pthread_mutexattr_destroy(&attr); + return __ec; + } + __ec = pthread_mutexattr_destroy(&attr); + if (__ec) { + pthread_mutex_destroy(__m); + return __ec; + } + return 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t* __m) +int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m) { - return pthread_mutex_lock(__m); + return pthread_mutex_lock(__m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t* __m) +bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m) { - return pthread_mutex_trylock(__m) == 0; + return pthread_mutex_trylock(__m) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_recursive_mutex_unlock(__libcpp_mutex_t* __m) +int __libcpp_recursive_mutex_unlock(__libcpp_mutex_t *__m) { - return pthread_mutex_unlock(__m); + return pthread_mutex_unlock(__m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int 
__libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t* __m) +int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m) { - return pthread_mutex_destroy(__m); + return pthread_mutex_destroy(__m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_mutex_lock(__libcpp_mutex_t* __m) +int __libcpp_mutex_lock(__libcpp_mutex_t *__m) { - return pthread_mutex_lock(__m); + return pthread_mutex_lock(__m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -bool __libcpp_mutex_trylock(__libcpp_mutex_t* __m) +bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m) { - return pthread_mutex_trylock(__m) == 0; + return pthread_mutex_trylock(__m) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_mutex_unlock(__libcpp_mutex_t* __m) +int __libcpp_mutex_unlock(__libcpp_mutex_t *__m) { - return pthread_mutex_unlock(__m); + return pthread_mutex_unlock(__m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_mutex_destroy(__libcpp_mutex_t* __m) +int __libcpp_mutex_destroy(__libcpp_mutex_t *__m) { return pthread_mutex_destroy(__m); } // Condition Variable _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_condvar_signal(__libcpp_condvar_t* __cv) +int __libcpp_condvar_signal(__libcpp_condvar_t *__cv) { - return pthread_cond_signal(__cv); + return pthread_cond_signal(__cv); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_condvar_broadcast(__libcpp_condvar_t* __cv) +int __libcpp_condvar_broadcast(__libcpp_condvar_t *__cv) { - return pthread_cond_broadcast(__cv); + return pthread_cond_broadcast(__cv); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_condvar_wait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m) +int __libcpp_condvar_wait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m) { - return pthread_cond_wait(__cv, __m); + return pthread_cond_wait(__cv, __m); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_condvar_timedwait(__libcpp_condvar_t* __cv, __libcpp_mutex_t* __m, __libcpp_timespec_t* __ts) +int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m, + __libcpp_timespec_t *__ts) { - return pthread_cond_timedwait(__cv, __m, __ts); + return pthread_cond_timedwait(__cv, __m, __ts); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_condvar_destroy(__libcpp_condvar_t* __cv) +int __libcpp_condvar_destroy(__libcpp_condvar_t *__cv) { - return pthread_cond_destroy(__cv); + return pthread_cond_destroy(__cv); } // Semaphore -# if defined(__APPLE__) +#if defined(__APPLE__) bool __libcpp_semaphore_init(__libcpp_semaphore_t* __sem, int __init) { - return (*__sem = dispatch_semaphore_create(__init)) != NULL; + return (*__sem = dispatch_semaphore_create(__init)) != NULL; } bool __libcpp_semaphore_destroy(__libcpp_semaphore_t* __sem) { - dispatch_release(*__sem); - return true; + dispatch_release(*__sem); + return true; } bool __libcpp_semaphore_post(__libcpp_semaphore_t* __sem) { - dispatch_semaphore_signal(*__sem); - return true; + dispatch_semaphore_signal(*__sem); + return true; } bool __libcpp_semaphore_wait(__libcpp_semaphore_t* __sem) { - return dispatch_semaphore_wait(*__sem, DISPATCH_TIME_FOREVER) == 0; + return dispatch_semaphore_wait(*__sem, DISPATCH_TIME_FOREVER) == 0; } bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseconds const& __ns) { - return dispatch_semaphore_wait(*__sem, dispatch_time(DISPATCH_TIME_NOW, __ns.count())) == 0; + return dispatch_semaphore_wait(*__sem, dispatch_time(DISPATCH_TIME_NOW, __ns.count())) == 0; } -# else +#else _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_semaphore_init(__libcpp_semaphore_t* __sem, int __init) { - return 
sem_init(__sem, 0, __init) == 0; + return sem_init(__sem, 0, __init) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_semaphore_destroy(__libcpp_semaphore_t* __sem) { - return sem_destroy(__sem) == 0; + return sem_destroy(__sem) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_semaphore_post(__libcpp_semaphore_t* __sem) { - return sem_post(__sem) == 0; + return sem_post(__sem) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_semaphore_wait(__libcpp_semaphore_t* __sem) { - return sem_wait(__sem) == 0; + return sem_wait(__sem) == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_semaphore_wait_timed(__libcpp_semaphore_t* __sem, chrono::nanoseconds const& __ns) { - __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns); - return sem_timedwait(__sem, &__ts) == 0; + __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns); + return sem_timedwait(__sem, &__ts) == 0; } -# endif //__APPLE__ +#endif //__APPLE__ // Execute once _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_execute_once(__libcpp_exec_once_flag* flag, void (*init_routine)()) +int __libcpp_execute_once(__libcpp_exec_once_flag *flag, void (*init_routine)()) { - return pthread_once(flag, init_routine); + return pthread_once(flag, init_routine); } // Thread id @@ -505,200 +516,183 @@ int __libcpp_execute_once(__libcpp_exec_once_flag* flag, void (*init_routine)()) _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_thread_id_equal(__libcpp_thread_id t1, __libcpp_thread_id t2) { - return pthread_equal(t1, t2) != 0; + return pthread_equal(t1, t2) != 0; } // Returns non-zero if t1 < t2, otherwise 0 _LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2) { - return t1 < t2; + return t1 < t2; } // Thread _LIBCUDACXX_THREAD_ABI_VISIBILITY -bool __libcpp_thread_isnull(const __libcpp_thread_t* __t) +bool __libcpp_thread_isnull(const __libcpp_thread_t *__t) { - return *__t == 0; + return *__t == 0; } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_create(__libcpp_thread_t* __t, void* (*__func)(void*), void* __arg) +int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *), + void *__arg) { - return pthread_create(__t, 0, __func, __arg); + return pthread_create(__t, 0, __func, __arg); } _LIBCUDACXX_THREAD_ABI_VISIBILITY __libcpp_thread_id __libcpp_thread_get_current_id() { - return pthread_self(); + return pthread_self(); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t* __t) +__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t) { - return *__t; + return *__t; } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_join(__libcpp_thread_t* __t) +int __libcpp_thread_join(__libcpp_thread_t *__t) { - return pthread_join(*__t, 0); + return pthread_join(*__t, 0); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_thread_detach(__libcpp_thread_t* __t) +int __libcpp_thread_detach(__libcpp_thread_t *__t) { - return pthread_detach(*__t); + return pthread_detach(*__t); } // Thread local storage _LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_tls_create(__libcpp_tls_key* __key, void (*__at_exit)(void*)) +int __libcpp_tls_create(__libcpp_tls_key *__key, void (*__at_exit)(void *)) { - return pthread_key_create(__key, __at_exit); + return pthread_key_create(__key, __at_exit); } _LIBCUDACXX_THREAD_ABI_VISIBILITY -void* __libcpp_tls_get(__libcpp_tls_key __key) +void *__libcpp_tls_get(__libcpp_tls_key __key) { - return pthread_getspecific(__key); + return pthread_getspecific(__key); } 
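// Taken together, the TLS wrappers in this branch map one-to-one onto the
// pthread key API: a key is created once with a destructor callback, and each
// thread then reads and writes its own slot. A minimal sketch, assuming only
// the declarations in this header; __sketch_tls_destroy and __sketch_tls_demo
// are illustrative names that do not exist elsewhere.

void _LIBCUDACXX_TLS_DESTRUCTOR_CC __sketch_tls_destroy(void* __p)
{
  delete static_cast<int*>(__p); // runs at thread exit for non-null slots
}

int __sketch_tls_demo()
{
  __libcpp_tls_key __key;
  int __ec = __libcpp_tls_create(&__key, &__sketch_tls_destroy);
  if (__ec != 0)
    return __ec;                                 // errno-style failure code
  if (__libcpp_tls_get(__key) == nullptr)        // first use on this thread
    __ec = __libcpp_tls_set(__key, new int(42)); // store a per-thread value
  return __ec;
}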
_LIBCUDACXX_THREAD_ABI_VISIBILITY -int __libcpp_tls_set(__libcpp_tls_key __key, void* __p) +int __libcpp_tls_set(__libcpp_tls_key __key, void *__p) { - return pthread_setspecific(__key, __p); + return pthread_setspecific(__key, __p); } _LIBCUDACXX_THREAD_ABI_VISIBILITY void __libcpp_thread_yield() { - sched_yield(); + sched_yield(); } _LIBCUDACXX_THREAD_ABI_VISIBILITY void __libcpp_thread_sleep_for(chrono::nanoseconds __ns) { - __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns); - while (nanosleep(&__ts, &__ts) == -1 && errno == EINTR) - ; + __libcpp_timespec_t __ts = __libcpp_to_timespec(__ns); + while (nanosleep(&__ts, &__ts) == -1 && errno == EINTR); } -# if defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT) +#if defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT) -# define _LIBCUDACXX_HAS_PLATFORM_WAIT +#define _LIBCUDACXX_HAS_PLATFORM_WAIT typedef int __libcpp_platform_wait_t; -template -struct __libcpp_platform_wait_uses_type -{ - enum - { - __value = is_same<__remove_cv_t<_Tp>, __libcpp_platform_wait_t>::value - }; +template +struct __libcpp_platform_wait_uses_type { + enum { __value = is_same<__remove_cv_t<_Tp>, __libcpp_platform_wait_t>::value }; }; template ::__value, int>::type = 1> -void __libcpp_platform_wait(_Tp const* ptr, _Tp val, void const* timeout) -{ - syscall(SYS_futex, ptr, FUTEX_WAIT_PRIVATE, val, timeout, 0, 0); +void __libcpp_platform_wait(_Tp const* ptr, _Tp val, void const* timeout) { + syscall(SYS_futex, ptr, FUTEX_WAIT_PRIVATE, val, timeout, 0, 0); } template ::__value, int>::type = 1> -void __libcpp_platform_wake(_Tp const* ptr, bool all) -{ - syscall(SYS_futex, ptr, FUTEX_WAKE_PRIVATE, all ? INT_MAX : 1, 0, 0, 0); +void __libcpp_platform_wake(_Tp const* ptr, bool all) { + syscall(SYS_futex, ptr, FUTEX_WAKE_PRIVATE, all ? 
INT_MAX : 1, 0, 0, 0); } -# endif // defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT) +#endif // defined(__linux__) && !defined(_LIBCUDACXX_HAS_NO_PLATFORM_WAIT) -# elif defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +#elif defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) void __libcpp_thread_yield() { - SwitchToThread(); + SwitchToThread(); } void __libcpp_thread_sleep_for(chrono::nanoseconds __ns) { - using namespace chrono; - // round-up to the nearest milisecond - milliseconds __ms = duration_cast(__ns + chrono::nanoseconds(999999)); - Sleep(static_cast(__ms.count())); + using namespace chrono; + // round-up to the nearest milisecond + milliseconds __ms = + duration_cast(__ns + chrono::nanoseconds(999999)); + Sleep(static_cast(__ms.count())); } -# endif // defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) +#endif // defined(_LIBCUDACXX_HAS_THREAD_API_WIN32) -# endif // !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) +#endif // !defined(_LIBCUDACXX_HAS_THREAD_LIBRARY_EXTERNAL) || defined(_LIBCUDACXX_BUILDING_THREAD_LIBRARY_EXTERNAL) -template -_LIBCUDACXX_THREAD_ABI_VISIBILITY bool __libcpp_thread_poll_with_backoff(_Fn&& __f, chrono::nanoseconds __max) +template +_LIBCUDACXX_THREAD_ABI_VISIBILITY +bool __libcpp_thread_poll_with_backoff(_Fn && __f, chrono::nanoseconds __max) { - chrono::high_resolution_clock::time_point const __start = chrono::high_resolution_clock::now(); - for (int __count = 0;;) - { - if (__f()) - { - return true; - } - if (__count < _LIBCUDACXX_POLLING_COUNT) - { - if (__count > (_LIBCUDACXX_POLLING_COUNT >> 1)) - { - __libcpp_thread_yield_processor(); + chrono::high_resolution_clock::time_point const __start = chrono::high_resolution_clock::now(); + for(int __count = 0;;) { + if(__f()) + return true; + if(__count < _LIBCUDACXX_POLLING_COUNT) { + if(__count > (_LIBCUDACXX_POLLING_COUNT >> 1)) + __libcpp_thread_yield_processor(); + __count += 1; + continue; } - __count += 1; - continue; - } - chrono::high_resolution_clock::duration const __elapsed = chrono::high_resolution_clock::now() - __start; - if (__max != chrono::nanoseconds::zero() && __max < __elapsed) - { - return false; - } - chrono::nanoseconds const __step = __elapsed / 4; - if (__step >= chrono::milliseconds(1)) - { - __libcpp_thread_sleep_for(chrono::milliseconds(1)); + chrono::high_resolution_clock::duration const __elapsed = chrono::high_resolution_clock::now() - __start; + if(__max != chrono::nanoseconds::zero() && + __max < __elapsed) + return false; + chrono::nanoseconds const __step = __elapsed / 4; + if(__step >= chrono::milliseconds(1)) + __libcpp_thread_sleep_for(chrono::milliseconds(1)); + else if(__step >= chrono::microseconds(10)) + __libcpp_thread_sleep_for(__step); + else + __libcpp_thread_yield(); } - else if (__step >= chrono::microseconds(10)) - { - __libcpp_thread_sleep_for(__step); - } - else - { - __libcpp_thread_yield(); - } - } } -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE -struct alignas(64) __libcpp_contention_t -{ -# if defined(_LIBCUDACXX_HAS_PLATFORM_WAIT) - ptrdiff_t __waiters = 0; - __libcpp_platform_wait_t __version = 0; -# else - ptrdiff_t __credit = 0; - __libcpp_mutex_t __mutex = _LIBCUDACXX_MUTEX_INITIALIZER; - __libcpp_condvar_t __condvar = _LIBCUDACXX_CONDVAR_INITIALIZER; -# endif +struct alignas(64) __libcpp_contention_t { +#if defined(_LIBCUDACXX_HAS_PLATFORM_WAIT) + ptrdiff_t __waiters = 0; + __libcpp_platform_wait_t __version = 0; +#else + ptrdiff_t 
__credit = 0; + __libcpp_mutex_t __mutex = _LIBCUDACXX_MUTEX_INITIALIZER; + __libcpp_condvar_t __condvar = _LIBCUDACXX_CONDVAR_INITIALIZER; +#endif }; _LIBCUDACXX_FUNC_VIS -__libcpp_contention_t* __libcpp_contention_state(void const volatile* p) noexcept; +__libcpp_contention_t * __libcpp_contention_state(void const volatile * p) noexcept; -# endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +#endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE -# if !defined(_LIBCUDACXX_HAS_NO_TREE_BARRIER) && !defined(_LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX) +#if !defined(_LIBCUDACXX_HAS_NO_TREE_BARRIER) && !defined(_LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX) -_LIBCUDACXX_EXPORTED_FROM_ABI extern thread_local ptrdiff_t __libcpp_thread_favorite_barrier_index; +_LIBCUDACXX_EXPORTED_FROM_ABI +extern thread_local ptrdiff_t __libcpp_thread_favorite_barrier_index; -# endif +#endif -# ifndef __cuda_std__ +#ifndef __cuda_std__ class _LIBCUDACXX_TYPE_VIS thread; class _LIBCUDACXX_TYPE_VIS __thread_id; @@ -708,96 +702,81 @@ namespace this_thread _LIBCUDACXX_INLINE_VISIBILITY __thread_id get_id() noexcept; -} // namespace this_thread +} // this_thread -template <> -struct hash<__thread_id>; +template<> struct hash<__thread_id>; class _LIBCUDACXX_TEMPLATE_VIS __thread_id { - // FIXME: pthread_t is a pointer on Darwin but a long on Linux. - // NULL is the no-thread value on Darwin. Someone needs to check - // on other platforms. We assume 0 works everywhere for now. - __libcpp_thread_id __id_; + // FIXME: pthread_t is a pointer on Darwin but a long on Linux. + // NULL is the no-thread value on Darwin. Someone needs to check + // on other platforms. We assume 0 works everywhere for now. + __libcpp_thread_id __id_; public: - _LIBCUDACXX_INLINE_VISIBILITY __thread_id() noexcept - : __id_(0) - {} - - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator==(__thread_id __x, __thread_id __y) noexcept - { // don't pass id==0 to underlying routines - if (__x.__id_ == 0) - { - return __y.__id_ == 0; - } - if (__y.__id_ == 0) - { - return false; - } - return __libcpp_thread_id_equal(__x.__id_, __y.__id_); - } - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator!=(__thread_id __x, __thread_id __y) noexcept - { - return !(__x == __y); - } - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator<(__thread_id __x, __thread_id __y) noexcept - { // id==0 is always less than any other thread_id - if (__x.__id_ == 0) - { - return __y.__id_ != 0; - } - if (__y.__id_ == 0) - { - return false; - } - return __libcpp_thread_id_less(__x.__id_, __y.__id_); - } - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator<=(__thread_id __x, __thread_id __y) noexcept - { - return !(__y < __x); - } - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator>(__thread_id __x, __thread_id __y) noexcept - { - return __y < __x; - } - friend _LIBCUDACXX_INLINE_VISIBILITY bool operator>=(__thread_id __x, __thread_id __y) noexcept - { - return !(__x < __y); - } - - _LIBCUDACXX_INLINE_VISIBILITY void __reset() - { - __id_ = 0; - } - -# ifndef __cuda_std__ - template - friend _LIBCUDACXX_INLINE_VISIBILITY basic_ostream<_CharT, _Traits>& - operator<<(basic_ostream<_CharT, _Traits>& __os, __thread_id __id); -# endif + _LIBCUDACXX_INLINE_VISIBILITY + __thread_id() noexcept : __id_(0) {} + + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(__thread_id __x, __thread_id __y) noexcept + { // don't pass id==0 to underlying routines + if (__x.__id_ == 0) return __y.__id_ == 0; + if (__y.__id_ == 0) return false; + return __libcpp_thread_id_equal(__x.__id_, 
__y.__id_); + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(__thread_id __x, __thread_id __y) noexcept + {return !(__x == __y);} + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator< (__thread_id __x, __thread_id __y) noexcept + { // id==0 is always less than any other thread_id + if (__x.__id_ == 0) return __y.__id_ != 0; + if (__y.__id_ == 0) return false; + return __libcpp_thread_id_less(__x.__id_, __y.__id_); + } + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator<=(__thread_id __x, __thread_id __y) noexcept + {return !(__y < __x);} + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator> (__thread_id __x, __thread_id __y) noexcept + {return __y < __x ;} + friend _LIBCUDACXX_INLINE_VISIBILITY + bool operator>=(__thread_id __x, __thread_id __y) noexcept + {return !(__x < __y);} + + _LIBCUDACXX_INLINE_VISIBILITY + void __reset() { __id_ = 0; } + +#ifndef __cuda_std__ + template + friend + _LIBCUDACXX_INLINE_VISIBILITY + basic_ostream<_CharT, _Traits>& + operator<<(basic_ostream<_CharT, _Traits>& __os, __thread_id __id); +#endif private: - _LIBCUDACXX_INLINE_VISIBILITY __thread_id(__libcpp_thread_id __id) - : __id_(__id) - {} + _LIBCUDACXX_INLINE_VISIBILITY + __thread_id(__libcpp_thread_id __id) : __id_(__id) {} - friend __thread_id this_thread::get_id() noexcept; - friend class _LIBCUDACXX_TYPE_VIS thread; - friend struct _LIBCUDACXX_TEMPLATE_VIS hash<__thread_id>; + friend __thread_id this_thread::get_id() noexcept; + friend class _LIBCUDACXX_TYPE_VIS thread; + friend struct _LIBCUDACXX_TEMPLATE_VIS hash<__thread_id>; }; namespace this_thread { -inline _LIBCUDACXX_INLINE_VISIBILITY __thread_id get_id() noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +__thread_id +get_id() noexcept { - return __libcpp_thread_get_current_id(); + return __libcpp_thread_get_current_id(); } -} // namespace this_thread +} // this_thread -# endif // __cuda_std__ +#endif // __cuda_std__ #endif // !_LIBCUDACXX_HAS_NO_THREADS diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__undef_macros b/libcudacxx/include/cuda/std/detail/libcxx/include/__undef_macros index 817a2b97116..37594493df2 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__undef_macros +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__undef_macros @@ -7,26 +7,27 @@ // //===----------------------------------------------------------------------===// + #ifdef min -# if !defined(_LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS) -# if defined(_LIBCUDACXX_WARNING) +#if !defined(_LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS) +#if defined(_LIBCUDACXX_WARNING) _LIBCUDACXX_WARNING("macro min is incompatible with C++. Try #define NOMINMAX " - "before any Windows header. #undefing min") -# else -# warning : macro min is incompatible with C++. #undefing min -# endif -# endif -# undef min + "before any Windows header. #undefing min") +#else +#warning: macro min is incompatible with C++. #undefing min +#endif +#endif +#undef min #endif #ifdef max -# if !defined(_LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS) -# if defined(_LIBCUDACXX_WARNING) +#if !defined(_LIBCUDACXX_DISABLE_MACRO_CONFLICT_WARNINGS) +#if defined(_LIBCUDACXX_WARNING) _LIBCUDACXX_WARNING("macro max is incompatible with C++. Try #define NOMINMAX " - "before any Windows header. #undefing max") -# else -# warning : macro max is incompatible with C++. #undefing max -# endif -# endif -# undef max + "before any Windows header. #undefing max") +#else +#warning: macro max is incompatible with C++. 
#undefing max +#endif +#endif +#undef max #endif diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/__verbose_abort b/libcudacxx/include/cuda/std/detail/libcxx/include/__verbose_abort index cfc433c561b..afb61ecc8cd 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/__verbose_abort +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/__verbose_abort @@ -29,16 +29,14 @@ // // We can't provide a great implementation because it needs to be pretty much // dependency-free (this is included everywhere else in the library). -#if defined(_LIBCUDACXX_HAS_NO_VERBOSE_ABORT_IN_LIBRARY) \ - && !defined(_LIBCUDACXX_AVAILABILITY_CUSTOM_VERBOSE_ABORT_PROVIDED) +#if defined(_LIBCUDACXX_HAS_NO_VERBOSE_ABORT_IN_LIBRARY) && !defined(_LIBCUDACXX_AVAILABILITY_CUSTOM_VERBOSE_ABORT_PROVIDED) extern "C" void abort(); _LIBCUDACXX_BEGIN_NAMESPACE_STD -_CCCL_NORETURN _LIBCUDACXX_ATTRIBUTE_FORMAT(__printf__, 1, 2) - _LIBCUDACXX_HIDE_FROM_ABI inline void __libcpp_verbose_abort(const char*, ...) -{ +_CCCL_NORETURN _LIBCUDACXX_ATTRIBUTE_FORMAT(__printf__, 1, 2) _LIBCUDACXX_HIDE_FROM_ABI inline +void __libcpp_verbose_abort(const char *, ...) { ::abort(); __builtin_unreachable(); // never reached, but needed to tell the compiler that the function never returns } @@ -49,8 +47,8 @@ _LIBCUDACXX_END_NAMESPACE_STD _LIBCUDACXX_BEGIN_NAMESPACE_STD -_CCCL_NORETURN _LIBCUDACXX_OVERRIDABLE_FUNC_VIS - _LIBCUDACXX_ATTRIBUTE_FORMAT(__printf__, 1, 2) void __libcpp_verbose_abort(const char* __format, ...); +_CCCL_NORETURN _LIBCUDACXX_OVERRIDABLE_FUNC_VIS _LIBCUDACXX_ATTRIBUTE_FORMAT(__printf__, 1, 2) +void __libcpp_verbose_abort(const char *__format, ...); _LIBCUDACXX_END_NAMESPACE_STD diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/algorithm b/libcudacxx/include/cuda/std/detail/libcxx/include/algorithm index 5162784e5e0..78c168c8bfd 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/algorithm +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/algorithm @@ -645,81 +645,83 @@ template # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler +#include #include #include #include #include #include -#include #include -#include +#include #include #include #include -#include +#include #include -#include +#include #include -#include +#include #include -#include +#include #include #include -#include #include -#include +#include +#include #include -#include +#include #include +#include #include #include -#include #include +#include #include #include -#include #include +#include #include #include #include #include #include #include -#include #include +#include #include -#include #include -#include +#include #include +#include #include -#include #include +#include #include #include -#include #include -#include +#include #include #include +#include #include #include #include #include -#include -#include #include +#include #include -#include -#include +#include #include +#include #include -#include +#include #include -#include +#include #include -#include +#include #include +#include #include #include #include @@ -730,8 +732,8 @@ template #include #include #include -#include #include +#include #include #include #include @@ -749,17 +751,16 @@ template #include #include #include +#include #include #include -#include // all public C++ headers provide the assertion handler -#include -#include -#include #include #include #include #include +#include + _LIBCUDACXX_BEGIN_NAMESPACE_STD #ifndef __cuda_std__ @@ -768,26 +769,20 @@ template class 
__invert // invert the sense of a comparison { private: - _Predicate __p_; - + _Predicate __p_; public: - _LIBCUDACXX_INLINE_VISIBILITY __invert() {} + _LIBCUDACXX_INLINE_VISIBILITY __invert() {} - _LIBCUDACXX_INLINE_VISIBILITY explicit __invert(_Predicate __p) - : __p_(__p) - {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit __invert(_Predicate __p) : __p_(__p) {} - template - _LIBCUDACXX_INLINE_VISIBILITY bool operator()(const _T1& __x) - { - return !__p_(__x); - } + template + _LIBCUDACXX_INLINE_VISIBILITY + bool operator()(const _T1& __x) {return !__p_(__x);} - template - _LIBCUDACXX_INLINE_VISIBILITY bool operator()(const _T1& __x, const _T2& __y) - { - return __p_(__y, __x); - } + template + _LIBCUDACXX_INLINE_VISIBILITY + bool operator()(const _T1& __x, const _T2& __y) {return __p_(__y, __x);} }; // random_shuffle @@ -797,335 +792,275 @@ public: template struct __log2_imp { - static const size_t value = _Xp & ((unsigned long long) (1) << _Rp) ? _Rp : __log2_imp<_Xp, _Rp - 1>::value; + static const size_t value = _Xp & ((unsigned long long)(1) << _Rp) ? _Rp + : __log2_imp<_Xp, _Rp - 1>::value; }; template struct __log2_imp<_Xp, 0> { - static const size_t value = 0; + static const size_t value = 0; }; template struct __log2_imp<0, _Rp> { - static const size_t value = _Rp + 1; + static const size_t value = _Rp + 1; }; template struct __log2 { - static const size_t value = __log2_imp<_Xp, sizeof(_UIntType) * __CHAR_BIT__ - 1>::value; + static const size_t value = __log2_imp<_Xp, + sizeof(_UIntType) * __CHAR_BIT__ - 1>::value; }; -template +template class __independent_bits_engine { public: - // types - typedef _UIntType result_type; + // types + typedef _UIntType result_type; private: - typedef typename _Engine::result_type _Engine_result_type; - typedef __conditional_t - _Working_result_type; - - _Engine& __e_; - size_t __w_; - size_t __w0_; - size_t __n_; - size_t __n0_; - _Working_result_type __y0_; - _Working_result_type __y1_; - _Engine_result_type __mask0_; - _Engine_result_type __mask1_; - - static constexpr _Working_result_type _Rp = _Engine::max() - _Engine::min() + _Working_result_type(1); - static constexpr size_t __m = __log2<_Working_result_type, _Rp>::value; - static constexpr size_t _WDt = numeric_limits<_Working_result_type>::digits; - static constexpr size_t _EDt = numeric_limits<_Engine_result_type>::digits; + typedef typename _Engine::result_type _Engine_result_type; + typedef __conditional_t + < + sizeof(_Engine_result_type) <= sizeof(result_type), + result_type, + _Engine_result_type + > _Working_result_type; + + _Engine& __e_; + size_t __w_; + size_t __w0_; + size_t __n_; + size_t __n0_; + _Working_result_type __y0_; + _Working_result_type __y1_; + _Engine_result_type __mask0_; + _Engine_result_type __mask1_; + + static constexpr _Working_result_type _Rp = + _Engine::max() - _Engine::min() + _Working_result_type(1); + static constexpr size_t __m = __log2<_Working_result_type, _Rp>::value; + static constexpr size_t _WDt = numeric_limits<_Working_result_type>::digits; + static constexpr size_t _EDt = numeric_limits<_Engine_result_type>::digits; public: - // constructors and seeding functions - __independent_bits_engine(_Engine& __e, size_t __w); + // constructors and seeding functions + __independent_bits_engine(_Engine& __e, size_t __w); - // generating functions - result_type operator()() - { - return __eval(integral_constant()); - } + // generating functions + result_type operator()() {return __eval(integral_constant());} private: - result_type __eval(false_type); - 
result_type __eval(true_type); + result_type __eval(false_type); + result_type __eval(true_type); }; -template -__independent_bits_engine<_Engine, _UIntType>::__independent_bits_engine(_Engine& __e, size_t __w) - : __e_(__e) - , __w_(__w) +template +__independent_bits_engine<_Engine, _UIntType> + ::__independent_bits_engine(_Engine& __e, size_t __w) + : __e_(__e), + __w_(__w) { - __n_ = __w_ / __m + (__w_ % __m != 0); - __w0_ = __w_ / __n_; - if (_Rp == 0) - { - __y0_ = _Rp; - } - else if (__w0_ < _WDt) - { - __y0_ = (_Rp >> __w0_) << __w0_; - } - else - { - __y0_ = 0; - } - if (_Rp - __y0_ > __y0_ / __n_) - { - ++__n_; + __n_ = __w_ / __m + (__w_ % __m != 0); __w0_ = __w_ / __n_; - if (__w0_ < _WDt) - { - __y0_ = (_Rp >> __w0_) << __w0_; - } + if (_Rp == 0) + __y0_ = _Rp; + else if (__w0_ < _WDt) + __y0_ = (_Rp >> __w0_) << __w0_; else - { - __y0_ = 0; - } - } - __n0_ = __n_ - __w_ % __n_; - if (__w0_ < _WDt - 1) - { - __y1_ = (_Rp >> (__w0_ + 1)) << (__w0_ + 1); - } - else - { - __y1_ = 0; - } - __mask0_ = __w0_ > 0 ? _Engine_result_type(~0) >> (_EDt - __w0_) : _Engine_result_type(0); - __mask1_ = __w0_ < _EDt - 1 ? _Engine_result_type(~0) >> (_EDt - (__w0_ + 1)) : _Engine_result_type(~0); + __y0_ = 0; + if (_Rp - __y0_ > __y0_ / __n_) + { + ++__n_; + __w0_ = __w_ / __n_; + if (__w0_ < _WDt) + __y0_ = (_Rp >> __w0_) << __w0_; + else + __y0_ = 0; + } + __n0_ = __n_ - __w_ % __n_; + if (__w0_ < _WDt - 1) + __y1_ = (_Rp >> (__w0_ + 1)) << (__w0_ + 1); + else + __y1_ = 0; + __mask0_ = __w0_ > 0 ? _Engine_result_type(~0) >> (_EDt - __w0_) : + _Engine_result_type(0); + __mask1_ = __w0_ < _EDt - 1 ? + _Engine_result_type(~0) >> (_EDt - (__w0_ + 1)) : + _Engine_result_type(~0); } -template -inline _UIntType __independent_bits_engine<_Engine, _UIntType>::__eval(false_type) +template +inline +_UIntType +__independent_bits_engine<_Engine, _UIntType>::__eval(false_type) { - return static_cast(__e_() & __mask0_); + return static_cast(__e_() & __mask0_); } -template -_UIntType __independent_bits_engine<_Engine, _UIntType>::__eval(true_type) +template +_UIntType +__independent_bits_engine<_Engine, _UIntType>::__eval(true_type) { - const size_t _WRt = numeric_limits::digits; - result_type _Sp = 0; - for (size_t __k = 0; __k < __n0_; ++__k) - { - _Engine_result_type __u; - do + const size_t _WRt = numeric_limits::digits; + result_type _Sp = 0; + for (size_t __k = 0; __k < __n0_; ++__k) { - __u = __e_() - _Engine::min(); - } while (__u >= __y0_); - if (__w0_ < _WRt) - { - _Sp <<= __w0_; - } - else - { - _Sp = 0; - } - _Sp += __u & __mask0_; - } - for (size_t __k = __n0_; __k < __n_; ++__k) - { - _Engine_result_type __u; - do - { - __u = __e_() - _Engine::min(); - } while (__u >= __y1_); - if (__w0_ < _WRt - 1) - { - _Sp <<= __w0_ + 1; - } - else - { - _Sp = 0; - } - _Sp += __u & __mask1_; - } - return _Sp; + _Engine_result_type __u; + do + { + __u = __e_() - _Engine::min(); + } while (__u >= __y0_); + if (__w0_ < _WRt) + _Sp <<= __w0_; + else + _Sp = 0; + _Sp += __u & __mask0_; + } + for (size_t __k = __n0_; __k < __n_; ++__k) + { + _Engine_result_type __u; + do + { + __u = __e_() - _Engine::min(); + } while (__u >= __y1_); + if (__w0_ < _WRt - 1) + _Sp <<= __w0_ + 1; + else + _Sp = 0; + _Sp += __u & __mask1_; + } + return _Sp; } // uniform_int_distribution -template +template class uniform_int_distribution { public: - // types - typedef _IntType result_type; - - class param_type - { - result_type __a_; - result_type __b_; + // types + typedef _IntType result_type; - public: - typedef 
uniform_int_distribution distribution_type; + class param_type + { + result_type __a_; + result_type __b_; + public: + typedef uniform_int_distribution distribution_type; - explicit param_type(result_type __a = 0, result_type __b = numeric_limits::max()) - : __a_(__a) - , __b_(__b) - {} + explicit param_type(result_type __a = 0, + result_type __b = numeric_limits::max()) + : __a_(__a), __b_(__b) {} - result_type a() const - { - return __a_; - } - result_type b() const - { - return __b_; - } + result_type a() const {return __a_;} + result_type b() const {return __b_;} - friend bool operator==(const param_type& __x, const param_type& __y) - { - return __x.__a_ == __y.__a_ && __x.__b_ == __y.__b_; - } - friend bool operator!=(const param_type& __x, const param_type& __y) - { - return !(__x == __y); - } - }; + friend bool operator==(const param_type& __x, const param_type& __y) + {return __x.__a_ == __y.__a_ && __x.__b_ == __y.__b_;} + friend bool operator!=(const param_type& __x, const param_type& __y) + {return !(__x == __y);} + }; private: - param_type __p_; + param_type __p_; public: - // constructors and reset functions - explicit uniform_int_distribution(result_type __a = 0, result_type __b = numeric_limits::max()) - : __p_(param_type(__a, __b)) - {} - explicit uniform_int_distribution(const param_type& __p) - : __p_(__p) - {} - void reset() {} - - // generating functions - template - result_type operator()(_URNG& __g) - { - return (*this)(__g, __p_); - } - template - result_type operator()(_URNG& __g, const param_type& __p); - - // property functions - result_type a() const - { - return __p_.a(); - } - result_type b() const - { - return __p_.b(); - } - - param_type param() const - { - return __p_; - } - void param(const param_type& __p) - { - __p_ = __p; - } - - result_type min() const - { - return a(); - } - result_type max() const - { - return b(); - } - - friend bool operator==(const uniform_int_distribution& __x, const uniform_int_distribution& __y) - { - return __x.__p_ == __y.__p_; - } - friend bool operator!=(const uniform_int_distribution& __x, const uniform_int_distribution& __y) - { - return !(__x == __y); - } + // constructors and reset functions + explicit uniform_int_distribution(result_type __a = 0, + result_type __b = numeric_limits::max()) + : __p_(param_type(__a, __b)) {} + explicit uniform_int_distribution(const param_type& __p) : __p_(__p) {} + void reset() {} + + // generating functions + template result_type operator()(_URNG& __g) + {return (*this)(__g, __p_);} + template result_type operator()(_URNG& __g, const param_type& __p); + + // property functions + result_type a() const {return __p_.a();} + result_type b() const {return __p_.b();} + + param_type param() const {return __p_;} + void param(const param_type& __p) {__p_ = __p;} + + result_type min() const {return a();} + result_type max() const {return b();} + + friend bool operator==(const uniform_int_distribution& __x, + const uniform_int_distribution& __y) + {return __x.__p_ == __y.__p_;} + friend bool operator!=(const uniform_int_distribution& __x, + const uniform_int_distribution& __y) + {return !(__x == __y);} }; -template -template -typename uniform_int_distribution<_IntType>::result_type uniform_int_distribution<_IntType>::operator()( - _URNG& __g, const param_type& __p) _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +template +template +typename uniform_int_distribution<_IntType>::result_type +uniform_int_distribution<_IntType>::operator()(_URNG& __g, const param_type& __p) 
+_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK { - typedef __conditional_t _UIntType; - const _UIntType _Rp = _UIntType(__p.b()) - _UIntType(__p.a()) + _UIntType(1); - if (_Rp == 1) - { - return __p.a(); - } - const size_t _Dt = numeric_limits<_UIntType>::digits; - typedef __independent_bits_engine<_URNG, _UIntType> _Eng; - if (_Rp == 0) - { - return static_cast(_Eng(__g, _Dt)()); - } - size_t __w = _Dt - __libcpp_clz(_Rp) - 1; - if ((_Rp & (std::numeric_limits<_UIntType>::max() >> (_Dt - __w))) != 0) - { - ++__w; - } - _Eng __e(__g, __w); - _UIntType __u; - do - { - __u = __e(); - } while (__u >= _Rp); - return static_cast(__u + __p.a()); + typedef __conditional_t _UIntType; + const _UIntType _Rp = _UIntType(__p.b()) - _UIntType(__p.a()) + _UIntType(1); + if (_Rp == 1) + return __p.a(); + const size_t _Dt = numeric_limits<_UIntType>::digits; + typedef __independent_bits_engine<_URNG, _UIntType> _Eng; + if (_Rp == 0) + return static_cast(_Eng(__g, _Dt)()); + size_t __w = _Dt - __libcpp_clz(_Rp) - 1; + if ((_Rp & (std::numeric_limits<_UIntType>::max() >> (_Dt - __w))) != 0) + ++__w; + _Eng __e(__g, __w); + _UIntType __u; + do + { + __u = __e(); + } while (__u >= _Rp); + return static_cast(__u + __p.a()); } -template -_LIBCUDACXX_INLINE_VISIBILITY _SampleIterator __sample( - _PopulationIterator __first, - _PopulationIterator __last, - _SampleIterator __output_iter, - _Distance __n, - _UniformRandomNumberGenerator& __g, - input_iterator_tag) -{ +template +_LIBCUDACXX_INLINE_VISIBILITY +_SampleIterator __sample(_PopulationIterator __first, + _PopulationIterator __last, _SampleIterator __output_iter, + _Distance __n, + _UniformRandomNumberGenerator & __g, + input_iterator_tag) { + _Distance __k = 0; for (; __first != __last && __k < __n; ++__first, (void) ++__k) - { __output_iter[__k] = *__first; - } _Distance __sz = __k; - for (; __first != __last; ++__first, (void) ++__k) - { + for (; __first != __last; ++__first, (void) ++__k) { _Distance __r = _CUDA_VSTD::uniform_int_distribution<_Distance>(0, __k)(__g); if (__r < __sz) - { __output_iter[__r] = *__first; - } } return __output_iter + _CUDA_VSTD::min(__n, __k); } -template -_LIBCUDACXX_INLINE_VISIBILITY _SampleIterator __sample( - _PopulationIterator __first, - _PopulationIterator __last, - _SampleIterator __output_iter, - _Distance __n, - _UniformRandomNumberGenerator& __g, - forward_iterator_tag) -{ +template +_LIBCUDACXX_INLINE_VISIBILITY +_SampleIterator __sample(_PopulationIterator __first, + _PopulationIterator __last, _SampleIterator __output_iter, + _Distance __n, + _UniformRandomNumberGenerator& __g, + forward_iterator_tag) { _Distance __unsampled_sz = _CUDA_VSTD::distance(__first, __last); - for (__n = _CUDA_VSTD::min(__n, __unsampled_sz); __n != 0; ++__first) - { - _Distance __r = _CUDA_VSTD::uniform_int_distribution<_Distance>(0, --__unsampled_sz)(__g); - if (__r < __n) - { + for (__n = _CUDA_VSTD::min(__n, __unsampled_sz); __n != 0; ++__first) { + _Distance __r = + _CUDA_VSTD::uniform_int_distribution<_Distance>(0, --__unsampled_sz)(__g); + if (__r < __n) { *__output_iter++ = *__first; --__n; } @@ -1133,360 +1068,336 @@ _LIBCUDACXX_INLINE_VISIBILITY _SampleIterator __sample( return __output_iter; } -template -_LIBCUDACXX_INLINE_VISIBILITY _SampleIterator __sample( - _PopulationIterator __first, - _PopulationIterator __last, - _SampleIterator __output_iter, - _Distance __n, - _UniformRandomNumberGenerator& __g) -{ - typedef typename iterator_traits<_PopulationIterator>::iterator_category _PopCategory; - typedef typename 
iterator_traits<_PopulationIterator>::difference_type _Difference; - static_assert(__is_cpp17_forward_iterator<_PopulationIterator>::value - || __is_cpp17_random_access_iterator<_SampleIterator>::value, +template +_LIBCUDACXX_INLINE_VISIBILITY +_SampleIterator __sample(_PopulationIterator __first, + _PopulationIterator __last, _SampleIterator __output_iter, + _Distance __n, _UniformRandomNumberGenerator& __g) { + typedef typename iterator_traits<_PopulationIterator>::iterator_category + _PopCategory; + typedef typename iterator_traits<_PopulationIterator>::difference_type + _Difference; + static_assert(__is_cpp17_forward_iterator<_PopulationIterator>::value || + __is_cpp17_random_access_iterator<_SampleIterator>::value, "SampleIterator must meet the requirements of RandomAccessIterator"); typedef typename common_type<_Distance, _Difference>::type _CommonType; _LIBCUDACXX_ASSERT(__n >= 0, "N must be a positive number."); - return _CUDA_VSTD::__sample(__first, __last, __output_iter, _CommonType(__n), __g, _PopCategory()); + return _CUDA_VSTD::__sample( + __first, __last, __output_iter, _CommonType(__n), + __g, _PopCategory()); } -# if _CCCL_STD_VER > 2014 -template -inline _LIBCUDACXX_INLINE_VISIBILITY _SampleIterator sample( - _PopulationIterator __first, - _PopulationIterator __last, - _SampleIterator __output_iter, - _Distance __n, - _UniformRandomNumberGenerator&& __g) -{ - return _CUDA_VSTD::__sample(__first, __last, __output_iter, __n, __g); +#if _CCCL_STD_VER > 2014 +template +inline _LIBCUDACXX_INLINE_VISIBILITY +_SampleIterator sample(_PopulationIterator __first, + _PopulationIterator __last, _SampleIterator __output_iter, + _Distance __n, _UniformRandomNumberGenerator&& __g) { + return _CUDA_VSTD::__sample(__first, __last, __output_iter, __n, __g); } -# endif // _CCCL_STD_VER > 2014 +#endif // _CCCL_STD_VER > 2014 -template -_LIBCUDACXX_INLINE_VISIBILITY void -shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last, _UniformRandomNumberGenerator&& __g) +template +_LIBCUDACXX_INLINE_VISIBILITY + void shuffle(_RandomAccessIterator __first, _RandomAccessIterator __last, + _UniformRandomNumberGenerator&& __g) { - typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; - typedef uniform_int_distribution _Dp; - typedef typename _Dp::param_type _Pp; - difference_type __d = __last - __first; - if (__d > 1) - { - _Dp __uid; - for (--__last, (void) --__d; __first < __last; ++__first, (void) --__d) - { - difference_type __i = __uid(__g, _Pp(0, __d)); - if (__i != difference_type(0)) - { - swap(*__first, *(__first + __i)); - } + typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; + typedef uniform_int_distribution _Dp; + typedef typename _Dp::param_type _Pp; + difference_type __d = __last - __first; + if (__d > 1) + { + _Dp __uid; + for (--__last, (void) --__d; __first < __last; ++__first, (void) --__d) + { + difference_type __i = __uid(__g, _Pp(0, __d)); + if (__i != difference_type(0)) + swap(*__first, *(__first + __i)); + } } - } } // stable_partition template -_CCCL_HOST_DEVICE _ForwardIterator __stable_partition( - _ForwardIterator __first, - _ForwardIterator __last, - _Predicate __pred, - _Distance __len, - _Pair __p, - forward_iterator_tag __fit) +_CCCL_HOST_DEVICE +_ForwardIterator +__stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred, + _Distance __len, _Pair __p, forward_iterator_tag __fit) { - // *__first is known to be false - // __len >= 1 - if (__len == 1) - { - 
return __first; - } - if (__len == 2) - { - _ForwardIterator __m = __first; - if (__pred(*++__m)) - { - swap(*__first, *__m); - return __m; - } - return __first; - } - if (__len <= __p.second) - { // The buffer is big enough to use - typedef typename iterator_traits<_ForwardIterator>::value_type value_type; - __destruct_n __d(0); - unique_ptr __h(__p.first, __d); - // Move the falses into the temporary buffer, and the trues to the front of the line - // Update __first to always point to the end of the trues - value_type* __t = __p.first; - ::new (__t) value_type(_CUDA_VSTD::move(*__first)); - __d.__incr((value_type*) 0); - ++__t; - _ForwardIterator __i = __first; - while (++__i != __last) - { - if (__pred(*__i)) - { - *__first = _CUDA_VSTD::move(*__i); - ++__first; - } - else - { - ::new (__t) value_type(_CUDA_VSTD::move(*__i)); - __d.__incr((value_type*) 0); + // *__first is known to be false + // __len >= 1 + if (__len == 1) + return __first; + if (__len == 2) + { + _ForwardIterator __m = __first; + if (__pred(*++__m)) + { + swap(*__first, *__m); + return __m; + } + return __first; + } + if (__len <= __p.second) + { // The buffer is big enough to use + typedef typename iterator_traits<_ForwardIterator>::value_type value_type; + __destruct_n __d(0); + unique_ptr __h(__p.first, __d); + // Move the falses into the temporary buffer, and the trues to the front of the line + // Update __first to always point to the end of the trues + value_type* __t = __p.first; + ::new(__t) value_type(_CUDA_VSTD::move(*__first)); + __d.__incr((value_type*)0); ++__t; - } - } - // All trues now at start of range, all falses in buffer - // Move falses back into range, but don't mess up __first which points to first false - __i = __first; - for (value_type* __t2 = __p.first; __t2 < __t; ++__t2, (void) ++__i) - { - *__i = _CUDA_VSTD::move(*__t2); - } - // __h destructs moved-from values out of the temp buffer, but doesn't deallocate buffer - return __first; - } - // Else not enough buffer, do in place - // __len >= 3 - _ForwardIterator __m = __first; - _Distance __len2 = __len / 2; // __len2 >= 2 - _CUDA_VSTD::advance(__m, __len2); - // recurse on [__first, __m), *__first know to be false - // F????????????????? - // f m l - typedef __add_lvalue_reference_t<_Predicate> _PredRef; - _ForwardIterator __first_false = __stable_partition<_PredRef>(__first, __m, __pred, __len2, __p, __fit); - // TTTFFFFF?????????? - // f ff m l - // recurse on [__m, __last], except increase __m until *(__m) is false, *__last know to be true - _ForwardIterator __m1 = __m; - _ForwardIterator __second_false = __last; - _Distance __len_half = __len - __len2; - while (__pred(*__m1)) - { - if (++__m1 == __last) - { - goto __second_half_done; - } - --__len_half; - } - // TTTFFFFFTTTF?????? 
- // f ff m m1 l - __second_false = __stable_partition<_PredRef>(__m1, __last, __pred, __len_half, __p, __fit); + _ForwardIterator __i = __first; + while (++__i != __last) + { + if (__pred(*__i)) + { + *__first = _CUDA_VSTD::move(*__i); + ++__first; + } + else + { + ::new(__t) value_type(_CUDA_VSTD::move(*__i)); + __d.__incr((value_type*)0); + ++__t; + } + } + // All trues now at start of range, all falses in buffer + // Move falses back into range, but don't mess up __first which points to first false + __i = __first; + for (value_type* __t2 = __p.first; __t2 < __t; ++__t2, (void) ++__i) + *__i = _CUDA_VSTD::move(*__t2); + // __h destructs moved-from values out of the temp buffer, but doesn't deallocate buffer + return __first; + } + // Else not enough buffer, do in place + // __len >= 3 + _ForwardIterator __m = __first; + _Distance __len2 = __len / 2; // __len2 >= 2 + _CUDA_VSTD::advance(__m, __len2); + // recurse on [__first, __m), *__first know to be false + // F????????????????? + // f m l + typedef __add_lvalue_reference_t<_Predicate> _PredRef; + _ForwardIterator __first_false = __stable_partition<_PredRef>(__first, __m, __pred, __len2, __p, __fit); + // TTTFFFFF?????????? + // f ff m l + // recurse on [__m, __last], except increase __m until *(__m) is false, *__last know to be true + _ForwardIterator __m1 = __m; + _ForwardIterator __second_false = __last; + _Distance __len_half = __len - __len2; + while (__pred(*__m1)) + { + if (++__m1 == __last) + goto __second_half_done; + --__len_half; + } + // TTTFFFFFTTTF?????? + // f ff m m1 l + __second_false = __stable_partition<_PredRef>(__m1, __last, __pred, __len_half, __p, __fit); __second_half_done: - // TTTFFFFFTTTTTFFFFF - // f ff m sf l - return _CUDA_VSTD::rotate(__first_false, __m, __second_false); - // TTTTTTTTFFFFFFFFFF - // | + // TTTFFFFFTTTTTFFFFF + // f ff m sf l + return _CUDA_VSTD::rotate(__first_false, __m, __second_false); + // TTTTTTTTFFFFFFFFFF + // | } template -_CCCL_HOST_DEVICE _ForwardIterator -__stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred, forward_iterator_tag) +_CCCL_HOST_DEVICE +_ForwardIterator +__stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred, + forward_iterator_tag) { - const unsigned __alloc_limit = 3; // might want to make this a function of trivial assignment - // Either prove all true and return __first or point to first false - while (true) - { - if (__first == __last) - { - return __first; + const unsigned __alloc_limit = 3; // might want to make this a function of trivial assignment + // Either prove all true and return __first or point to first false + while (true) + { + if (__first == __last) + return __first; + if (!__pred(*__first)) + break; + ++__first; } - if (!__pred(*__first)) + // We now have a reduced range [__first, __last) + // *__first is known to be false + typedef typename iterator_traits<_ForwardIterator>::difference_type difference_type; + typedef typename iterator_traits<_ForwardIterator>::value_type value_type; + difference_type __len = _CUDA_VSTD::distance(__first, __last); + pair __p(0, 0); + unique_ptr __h; + if (__len >= __alloc_limit) { - break; + __p = _CUDA_VSTD::get_temporary_buffer(__len); + __h.reset(__p.first); } - ++__first; - } - // We now have a reduced range [__first, __last) - // *__first is known to be false - typedef typename iterator_traits<_ForwardIterator>::difference_type difference_type; - typedef typename iterator_traits<_ForwardIterator>::value_type value_type; - difference_type 
__len = _CUDA_VSTD::distance(__first, __last); - pair __p(0, 0); - unique_ptr __h; - if (__len >= __alloc_limit) - { - __p = _CUDA_VSTD::get_temporary_buffer(__len); - __h.reset(__p.first); - } - return __stable_partition<__add_lvalue_reference_t<_Predicate>>( - __first, __last, __pred, __len, __p, forward_iterator_tag()); + return __stable_partition<__add_lvalue_reference_t<_Predicate>> + (__first, __last, __pred, __len, __p, forward_iterator_tag()); } template -_CCCL_HOST_DEVICE _BidirectionalIterator __stable_partition( - _BidirectionalIterator __first, - _BidirectionalIterator __last, - _Predicate __pred, - _Distance __len, - _Pair __p, - bidirectional_iterator_tag __bit) +_CCCL_HOST_DEVICE +_BidirectionalIterator +__stable_partition(_BidirectionalIterator __first, _BidirectionalIterator __last, _Predicate __pred, + _Distance __len, _Pair __p, bidirectional_iterator_tag __bit) { - // *__first is known to be false - // *__last is known to be true - // __len >= 2 - if (__len == 2) - { - swap(*__first, *__last); - return __last; - } - if (__len == 3) - { - _BidirectionalIterator __m = __first; - if (__pred(*++__m)) + // *__first is known to be false + // *__last is known to be true + // __len >= 2 + if (__len == 2) { - swap(*__first, *__m); - swap(*__m, *__last); - return __last; + swap(*__first, *__last); + return __last; } - swap(*__m, *__last); - swap(*__first, *__m); - return __m; - } - if (__len <= __p.second) - { // The buffer is big enough to use - typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; - __destruct_n __d(0); - unique_ptr __h(__p.first, __d); - // Move the falses into the temporary buffer, and the trues to the front of the line - // Update __first to always point to the end of the trues - value_type* __t = __p.first; - ::new (__t) value_type(_CUDA_VSTD::move(*__first)); - __d.__incr((value_type*) 0); - ++__t; - _BidirectionalIterator __i = __first; - while (++__i != __last) + if (__len == 3) { - if (__pred(*__i)) - { - *__first = _CUDA_VSTD::move(*__i); - ++__first; - } - else - { - ::new (__t) value_type(_CUDA_VSTD::move(*__i)); - __d.__incr((value_type*) 0); + _BidirectionalIterator __m = __first; + if (__pred(*++__m)) + { + swap(*__first, *__m); + swap(*__m, *__last); + return __last; + } + swap(*__m, *__last); + swap(*__first, *__m); + return __m; + } + if (__len <= __p.second) + { // The buffer is big enough to use + typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; + __destruct_n __d(0); + unique_ptr __h(__p.first, __d); + // Move the falses into the temporary buffer, and the trues to the front of the line + // Update __first to always point to the end of the trues + value_type* __t = __p.first; + ::new(__t) value_type(_CUDA_VSTD::move(*__first)); + __d.__incr((value_type*)0); ++__t; - } - } - // move *__last, known to be true - *__first = _CUDA_VSTD::move(*__i); - __i = ++__first; - // All trues now at start of range, all falses in buffer - // Move falses back into range, but don't mess up __first which points to first false - for (value_type* __t2 = __p.first; __t2 < __t; ++__t2, (void) ++__i) - { - *__i = _CUDA_VSTD::move(*__t2); - } - // __h destructs moved-from values out of the temp buffer, but doesn't deallocate buffer - return __first; - } - // Else not enough buffer, do in place - // __len >= 4 - _BidirectionalIterator __m = __first; - _Distance __len2 = __len / 2; // __len2 >= 2 - _CUDA_VSTD::advance(__m, __len2); - // recurse on [__first, __m-1], except reduce __m-1 until *(__m-1) is true, 
*__first know to be false - // F????????????????T - // f m l - _BidirectionalIterator __m1 = __m; - _BidirectionalIterator __first_false = __first; - _Distance __len_half = __len2; - while (!__pred(*--__m1)) - { - if (__m1 == __first) - { - goto __first_half_done; - } - --__len_half; - } - // F???TFFF?????????T - // f m1 m l - typedef __add_lvalue_reference_t<_Predicate> _PredRef; - __first_false = __stable_partition<_PredRef>(__first, __m1, __pred, __len_half, __p, __bit); + _BidirectionalIterator __i = __first; + while (++__i != __last) + { + if (__pred(*__i)) + { + *__first = _CUDA_VSTD::move(*__i); + ++__first; + } + else + { + ::new(__t) value_type(_CUDA_VSTD::move(*__i)); + __d.__incr((value_type*)0); + ++__t; + } + } + // move *__last, known to be true + *__first = _CUDA_VSTD::move(*__i); + __i = ++__first; + // All trues now at start of range, all falses in buffer + // Move falses back into range, but don't mess up __first which points to first false + for (value_type* __t2 = __p.first; __t2 < __t; ++__t2, (void) ++__i) + *__i = _CUDA_VSTD::move(*__t2); + // __h destructs moved-from values out of the temp buffer, but doesn't deallocate buffer + return __first; + } + // Else not enough buffer, do in place + // __len >= 4 + _BidirectionalIterator __m = __first; + _Distance __len2 = __len / 2; // __len2 >= 2 + _CUDA_VSTD::advance(__m, __len2); + // recurse on [__first, __m-1], except reduce __m-1 until *(__m-1) is true, *__first know to be false + // F????????????????T + // f m l + _BidirectionalIterator __m1 = __m; + _BidirectionalIterator __first_false = __first; + _Distance __len_half = __len2; + while (!__pred(*--__m1)) + { + if (__m1 == __first) + goto __first_half_done; + --__len_half; + } + // F???TFFF?????????T + // f m1 m l + typedef __add_lvalue_reference_t<_Predicate> _PredRef; + __first_false = __stable_partition<_PredRef>(__first, __m1, __pred, __len_half, __p, __bit); __first_half_done: - // TTTFFFFF?????????T - // f ff m l - // recurse on [__m, __last], except increase __m until *(__m) is false, *__last know to be true - __m1 = __m; - _BidirectionalIterator __second_false = __last; - ++__second_false; - __len_half = __len - __len2; - while (__pred(*__m1)) - { - if (++__m1 == __last) - { - goto __second_half_done; - } - --__len_half; - } - // TTTFFFFFTTTF?????T - // f ff m m1 l - __second_false = __stable_partition<_PredRef>(__m1, __last, __pred, __len_half, __p, __bit); + // TTTFFFFF?????????T + // f ff m l + // recurse on [__m, __last], except increase __m until *(__m) is false, *__last know to be true + __m1 = __m; + _BidirectionalIterator __second_false = __last; + ++__second_false; + __len_half = __len - __len2; + while (__pred(*__m1)) + { + if (++__m1 == __last) + goto __second_half_done; + --__len_half; + } + // TTTFFFFFTTTF?????T + // f ff m m1 l + __second_false = __stable_partition<_PredRef>(__m1, __last, __pred, __len_half, __p, __bit); __second_half_done: - // TTTFFFFFTTTTTFFFFF - // f ff m sf l - return _CUDA_VSTD::rotate(__first_false, __m, __second_false); - // TTTTTTTTFFFFFFFFFF - // | + // TTTFFFFFTTTTTFFFFF + // f ff m sf l + return _CUDA_VSTD::rotate(__first_false, __m, __second_false); + // TTTTTTTTFFFFFFFFFF + // | } template -_CCCL_HOST_DEVICE _BidirectionalIterator __stable_partition( - _BidirectionalIterator __first, _BidirectionalIterator __last, _Predicate __pred, bidirectional_iterator_tag) +_CCCL_HOST_DEVICE +_BidirectionalIterator +__stable_partition(_BidirectionalIterator __first, _BidirectionalIterator __last, _Predicate __pred, + 
bidirectional_iterator_tag) { - typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; - typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; - const difference_type __alloc_limit = 4; // might want to make this a function of trivial assignment - // Either prove all true and return __first or point to first false - while (true) - { - if (__first == __last) - { - return __first; - } - if (!__pred(*__first)) - { - break; + typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; + typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; + const difference_type __alloc_limit = 4; // might want to make this a function of trivial assignment + // Either prove all true and return __first or point to first false + while (true) + { + if (__first == __last) + return __first; + if (!__pred(*__first)) + break; + ++__first; } - ++__first; - } - // __first points to first false, everything prior to __first is already set. - // Either prove [__first, __last) is all false and return __first, or point __last to last true - do - { - if (__first == --__last) + // __first points to first false, everything prior to __first is already set. + // Either prove [__first, __last) is all false and return __first, or point __last to last true + do { - return __first; - } - } while (!__pred(*__last)); - // We now have a reduced range [__first, __last] - // *__first is known to be false - // *__last is known to be true - // __len >= 2 - difference_type __len = _CUDA_VSTD::distance(__first, __last) + 1; - pair __p(0, 0); - unique_ptr __h; - if (__len >= __alloc_limit) - { - __p = _CUDA_VSTD::get_temporary_buffer(__len); - __h.reset(__p.first); - } - return __stable_partition<__add_lvalue_reference_t<_Predicate>>( - __first, __last, __pred, __len, __p, bidirectional_iterator_tag()); + if (__first == --__last) + return __first; + } while (!__pred(*__last)); + // We now have a reduced range [__first, __last] + // *__first is known to be false + // *__last is known to be true + // __len >= 2 + difference_type __len = _CUDA_VSTD::distance(__first, __last) + 1; + pair __p(0, 0); + unique_ptr __h; + if (__len >= __alloc_limit) + { + __p = _CUDA_VSTD::get_temporary_buffer(__len); + __h.reset(__p.first); + } + return __stable_partition<__add_lvalue_reference_t<_Predicate>> + (__first, __last, __pred, __len, __p, bidirectional_iterator_tag()); } template -inline _LIBCUDACXX_INLINE_VISIBILITY _ForwardIterator +inline _LIBCUDACXX_INLINE_VISIBILITY +_ForwardIterator stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - return __stable_partition<__add_lvalue_reference_t<_Predicate>>( - __first, __last, __pred, typename iterator_traits<_ForwardIterator>::iterator_category()); + return __stable_partition<__add_lvalue_reference_t<_Predicate>> + (__first, __last, __pred, typename iterator_traits<_ForwardIterator>::iterator_category()); } // sort @@ -1494,1197 +1405,1120 @@ stable_partition(_ForwardIterator __first, _ForwardIterator __last, _Predicate _ // stable, 2-3 compares, 0-2 swaps template -_CCCL_HOST_DEVICE unsigned __sort3(_ForwardIterator __x, _ForwardIterator __y, _ForwardIterator __z, _Compare __c) +_CCCL_HOST_DEVICE +unsigned +__sort3(_ForwardIterator __x, _ForwardIterator __y, _ForwardIterator __z, _Compare __c) { - unsigned __r = 0; - if (!__c(*__y, *__x)) // if x <= y - { - if (!__c(*__z, *__y)) // if y <= z + unsigned __r = 0; + if (!__c(*__y, *__x)) // if x <= y + { 
+ if (!__c(*__z, *__y)) // if y <= z + return __r; // x <= y && y <= z + // x <= y && y > z + swap(*__y, *__z); // x <= z && y < z + __r = 1; + if (__c(*__y, *__x)) // if x > y + { + swap(*__x, *__y); // x < y && y <= z + __r = 2; + } + return __r; // x <= y && y < z + } + if (__c(*__z, *__y)) // x > y, if y > z { - return __r; // x <= y && y <= z - // x <= y && y > z + swap(*__x, *__z); // x < y && y < z + __r = 1; + return __r; } - swap(*__y, *__z); // x <= z && y < z - __r = 1; - if (__c(*__y, *__x)) // if x > y + swap(*__x, *__y); // x > y && y <= z + __r = 1; // x < y && x <= z + if (__c(*__z, *__y)) // if y > z { - swap(*__x, *__y); // x < y && y <= z - __r = 2; + swap(*__y, *__z); // x <= y && y < z + __r = 2; } - return __r; // x <= y && y < z - } - if (__c(*__z, *__y)) // x > y, if y > z - { - swap(*__x, *__z); // x < y && y < z - __r = 1; return __r; - } - swap(*__x, *__y); // x > y && y <= z - __r = 1; // x < y && x <= z - if (__c(*__z, *__y)) // if y > z - { - swap(*__y, *__z); // x <= y && y < z - __r = 2; - } - return __r; -} // x <= y && y <= z +} // x <= y && y <= z // stable, 3-6 compares, 0-5 swaps template -_CCCL_HOST_DEVICE unsigned -__sort4(_ForwardIterator __x1, _ForwardIterator __x2, _ForwardIterator __x3, _ForwardIterator __x4, _Compare __c) +_CCCL_HOST_DEVICE +unsigned +__sort4(_ForwardIterator __x1, _ForwardIterator __x2, _ForwardIterator __x3, + _ForwardIterator __x4, _Compare __c) { - unsigned __r = __sort3<_Compare>(__x1, __x2, __x3, __c); - if (__c(*__x4, *__x3)) - { - swap(*__x3, *__x4); - ++__r; - if (__c(*__x3, *__x2)) + unsigned __r = __sort3<_Compare>(__x1, __x2, __x3, __c); + if (__c(*__x4, *__x3)) { - swap(*__x2, *__x3); - ++__r; - if (__c(*__x2, *__x1)) - { - swap(*__x1, *__x2); + swap(*__x3, *__x4); ++__r; - } + if (__c(*__x3, *__x2)) + { + swap(*__x2, *__x3); + ++__r; + if (__c(*__x2, *__x1)) + { + swap(*__x1, *__x2); + ++__r; + } + } } - } - return __r; + return __r; } // stable, 4-10 compares, 0-9 swaps template -_LIBCUDACXX_HIDDEN _CCCL_HOST_DEVICE unsigned __sort5( - _ForwardIterator __x1, - _ForwardIterator __x2, - _ForwardIterator __x3, - _ForwardIterator __x4, - _ForwardIterator __x5, - _Compare __c) +_LIBCUDACXX_HIDDEN +_CCCL_HOST_DEVICE +unsigned +__sort5(_ForwardIterator __x1, _ForwardIterator __x2, _ForwardIterator __x3, + _ForwardIterator __x4, _ForwardIterator __x5, _Compare __c) { - unsigned __r = __sort4<_Compare>(__x1, __x2, __x3, __x4, __c); - if (__c(*__x5, *__x4)) - { - swap(*__x4, *__x5); - ++__r; - if (__c(*__x4, *__x3)) + unsigned __r = __sort4<_Compare>(__x1, __x2, __x3, __x4, __c); + if (__c(*__x5, *__x4)) { - swap(*__x3, *__x4); - ++__r; - if (__c(*__x3, *__x2)) - { - swap(*__x2, *__x3); + swap(*__x4, *__x5); ++__r; - if (__c(*__x2, *__x1)) + if (__c(*__x4, *__x3)) { - swap(*__x1, *__x2); - ++__r; + swap(*__x3, *__x4); + ++__r; + if (__c(*__x3, *__x2)) + { + swap(*__x2, *__x3); + ++__r; + if (__c(*__x2, *__x1)) + { + swap(*__x1, *__x2); + ++__r; + } + } } - } } - } - return __r; + return __r; } // Assumes size > 0 template -_CCCL_HOST_DEVICE void __selection_sort(_BirdirectionalIterator __first, _BirdirectionalIterator __last, _Compare __comp) +_CCCL_HOST_DEVICE +void +__selection_sort(_BirdirectionalIterator __first, _BirdirectionalIterator __last, _Compare __comp) { - _BirdirectionalIterator __lm1 = __last; - for (--__lm1; __first != __lm1; ++__first) - { - _BirdirectionalIterator __i = - _CUDA_VSTD::min_element<_BirdirectionalIterator, __add_lvalue_reference_t<_Compare>>(__first, __last, __comp); - if (__i != __first) + 
_BirdirectionalIterator __lm1 = __last; + for (--__lm1; __first != __lm1; ++__first) { - swap(*__first, *__i); + _BirdirectionalIterator __i = _CUDA_VSTD::min_element<_BirdirectionalIterator, + __add_lvalue_reference_t<_Compare>> + (__first, __last, __comp); + if (__i != __first) + swap(*__first, *__i); } - } } template -_CCCL_HOST_DEVICE void __insertion_sort(_BirdirectionalIterator __first, _BirdirectionalIterator __last, _Compare __comp) +_CCCL_HOST_DEVICE +void +__insertion_sort(_BirdirectionalIterator __first, _BirdirectionalIterator __last, _Compare __comp) { - typedef typename iterator_traits<_BirdirectionalIterator>::value_type value_type; - if (__first != __last) - { - _BirdirectionalIterator __i = __first; - for (++__i; __i != __last; ++__i) + typedef typename iterator_traits<_BirdirectionalIterator>::value_type value_type; + if (__first != __last) { - _BirdirectionalIterator __j = __i; - value_type __t(_CUDA_VSTD::move(*__j)); - for (_BirdirectionalIterator __k = __i; __k != __first && __comp(__t, *--__k); --__j) - { - *__j = _CUDA_VSTD::move(*__k); - } - *__j = _CUDA_VSTD::move(__t); + _BirdirectionalIterator __i = __first; + for (++__i; __i != __last; ++__i) + { + _BirdirectionalIterator __j = __i; + value_type __t(_CUDA_VSTD::move(*__j)); + for (_BirdirectionalIterator __k = __i; __k != __first && __comp(__t, *--__k); --__j) + *__j = _CUDA_VSTD::move(*__k); + *__j = _CUDA_VSTD::move(__t); + } } - } } template -_CCCL_HOST_DEVICE void __insertion_sort_3(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) +_CCCL_HOST_DEVICE +void +__insertion_sort_3(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - _RandomAccessIterator __j = __first + 2; - __sort3<_Compare>(__first, __first + 1, __j, __comp); - for (_RandomAccessIterator __i = __j + 1; __i != __last; ++__i) - { - if (__comp(*__i, *__j)) + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + _RandomAccessIterator __j = __first+2; + __sort3<_Compare>(__first, __first+1, __j, __comp); + for (_RandomAccessIterator __i = __j+1; __i != __last; ++__i) { - value_type __t(_CUDA_VSTD::move(*__i)); - _RandomAccessIterator __k = __j; - __j = __i; - do - { - *__j = _CUDA_VSTD::move(*__k); - __j = __k; - } while (__j != __first && __comp(__t, *--__k)); - *__j = _CUDA_VSTD::move(__t); + if (__comp(*__i, *__j)) + { + value_type __t(_CUDA_VSTD::move(*__i)); + _RandomAccessIterator __k = __j; + __j = __i; + do + { + *__j = _CUDA_VSTD::move(*__k); + __j = __k; + } while (__j != __first && __comp(__t, *--__k)); + *__j = _CUDA_VSTD::move(__t); + } + __j = __i; } - __j = __i; - } } template -_CCCL_HOST_DEVICE bool +_CCCL_HOST_DEVICE +bool __insertion_sort_incomplete(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { - switch (__last - __first) - { + switch (__last - __first) + { case 0: case 1: - return true; + return true; case 2: - if (__comp(*--__last, *__first)) - { - swap(*__first, *__last); - } - return true; + if (__comp(*--__last, *__first)) + swap(*__first, *__last); + return true; case 3: - _CUDA_VSTD::__sort3<_Compare>(__first, __first + 1, --__last, __comp); - return true; + _CUDA_VSTD::__sort3<_Compare>(__first, __first+1, --__last, __comp); + return true; case 4: - _CUDA_VSTD::__sort4<_Compare>(__first, __first + 1, __first + 2, --__last, __comp); - return true; + _CUDA_VSTD::__sort4<_Compare>(__first, __first+1, __first+2, --__last, 
__comp); + return true; case 5: - _CUDA_VSTD::__sort5<_Compare>(__first, __first + 1, __first + 2, __first + 3, --__last, __comp); - return true; - } - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - _RandomAccessIterator __j = __first + 2; - __sort3<_Compare>(__first, __first + 1, __j, __comp); - const unsigned __limit = 8; - unsigned __count = 0; - for (_RandomAccessIterator __i = __j + 1; __i != __last; ++__i) - { - if (__comp(*__i, *__j)) + _CUDA_VSTD::__sort5<_Compare>(__first, __first+1, __first+2, __first+3, --__last, __comp); + return true; + } + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + _RandomAccessIterator __j = __first+2; + __sort3<_Compare>(__first, __first+1, __j, __comp); + const unsigned __limit = 8; + unsigned __count = 0; + for (_RandomAccessIterator __i = __j+1; __i != __last; ++__i) { - value_type __t(_CUDA_VSTD::move(*__i)); - _RandomAccessIterator __k = __j; - __j = __i; - do - { - *__j = _CUDA_VSTD::move(*__k); - __j = __k; - } while (__j != __first && __comp(__t, *--__k)); - *__j = _CUDA_VSTD::move(__t); - if (++__count == __limit) - { - return ++__i == __last; - } + if (__comp(*__i, *__j)) + { + value_type __t(_CUDA_VSTD::move(*__i)); + _RandomAccessIterator __k = __j; + __j = __i; + do + { + *__j = _CUDA_VSTD::move(*__k); + __j = __k; + } while (__j != __first && __comp(__t, *--__k)); + *__j = _CUDA_VSTD::move(__t); + if (++__count == __limit) + return ++__i == __last; + } + __j = __i; } - __j = __i; - } - return true; + return true; } template -_CCCL_HOST_DEVICE void __insertion_sort_move( - _BirdirectionalIterator __first1, - _BirdirectionalIterator __last1, - typename iterator_traits<_BirdirectionalIterator>::value_type* __first2, - _Compare __comp) +_CCCL_HOST_DEVICE +void +__insertion_sort_move(_BirdirectionalIterator __first1, _BirdirectionalIterator __last1, + typename iterator_traits<_BirdirectionalIterator>::value_type* __first2, _Compare __comp) { - typedef typename iterator_traits<_BirdirectionalIterator>::value_type value_type; - if (__first1 != __last1) - { - __destruct_n __d(0); - unique_ptr __h(__first2, __d); - value_type* __last2 = __first2; - ::new (__last2) value_type(_CUDA_VSTD::move(*__first1)); - __d.__incr((value_type*) 0); - for (++__last2; ++__first1 != __last1; ++__last2) - { - value_type* __j2 = __last2; - value_type* __i2 = __j2; - if (__comp(*__first1, *--__i2)) - { - ::new (__j2) value_type(_CUDA_VSTD::move(*__i2)); - __d.__incr((value_type*) 0); - for (--__j2; __i2 != __first2 && __comp(*__first1, *--__i2); --__j2) + typedef typename iterator_traits<_BirdirectionalIterator>::value_type value_type; + if (__first1 != __last1) + { + __destruct_n __d(0); + unique_ptr __h(__first2, __d); + value_type* __last2 = __first2; + ::new(__last2) value_type(_CUDA_VSTD::move(*__first1)); + __d.__incr((value_type*)0); + for (++__last2; ++__first1 != __last1; ++__last2) { - *__j2 = _CUDA_VSTD::move(*__i2); + value_type* __j2 = __last2; + value_type* __i2 = __j2; + if (__comp(*__first1, *--__i2)) + { + ::new(__j2) value_type(_CUDA_VSTD::move(*__i2)); + __d.__incr((value_type*)0); + for (--__j2; __i2 != __first2 && __comp(*__first1, *--__i2); --__j2) + *__j2 = _CUDA_VSTD::move(*__i2); + *__j2 = _CUDA_VSTD::move(*__first1); + } + else + { + ::new(__j2) value_type(_CUDA_VSTD::move(*__first1)); + __d.__incr((value_type*)0); + } } - *__j2 = _CUDA_VSTD::move(*__first1); - } - else - { - ::new (__j2) value_type(_CUDA_VSTD::move(*__first1)); - __d.__incr((value_type*) 0); - } + 
__h.release(); } - __h.release(); - } } template -_CCCL_HOST_DEVICE void __sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) +_CCCL_HOST_DEVICE +void +__sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { - // _Compare is known to be a reference type - typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - const difference_type __limit = - is_trivially_copy_constructible::value && is_trivially_copy_assignable::value ? 30 : 6; - while (true) - { - __restart: - difference_type __len = __last - __first; - switch (__len) - { - case 0: - case 1: - return; - case 2: - if (__comp(*--__last, *__first)) + // _Compare is known to be a reference type + typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + const difference_type __limit = is_trivially_copy_constructible::value && + is_trivially_copy_assignable::value ? 30 : 6; + while (true) + { + __restart: + difference_type __len = __last - __first; + switch (__len) { - swap(*__first, *__last); + case 0: + case 1: + return; + case 2: + if (__comp(*--__last, *__first)) + swap(*__first, *__last); + return; + case 3: + _CUDA_VSTD::__sort3<_Compare>(__first, __first+1, --__last, __comp); + return; + case 4: + _CUDA_VSTD::__sort4<_Compare>(__first, __first+1, __first+2, --__last, __comp); + return; + case 5: + _CUDA_VSTD::__sort5<_Compare>(__first, __first+1, __first+2, __first+3, --__last, __comp); + return; } - return; - case 3: - _CUDA_VSTD::__sort3<_Compare>(__first, __first + 1, --__last, __comp); - return; - case 4: - _CUDA_VSTD::__sort4<_Compare>(__first, __first + 1, __first + 2, --__last, __comp); - return; - case 5: - _CUDA_VSTD::__sort5<_Compare>(__first, __first + 1, __first + 2, __first + 3, --__last, __comp); - return; - } - if (__len <= __limit) - { - _CUDA_VSTD::__insertion_sort_3<_Compare>(__first, __last, __comp); - return; - } - // __len > 5 - _RandomAccessIterator __m = __first; - _RandomAccessIterator __lm1 = __last; - --__lm1; - unsigned __n_swaps; - { - difference_type __delta; - if (__len >= 1000) - { - __delta = __len / 2; - __m += __delta; - __delta /= 2; - __n_swaps = _CUDA_VSTD::__sort5<_Compare>(__first, __first + __delta, __m, __m + __delta, __lm1, __comp); - } - else - { - __delta = __len / 2; - __m += __delta; - __n_swaps = _CUDA_VSTD::__sort3<_Compare>(__first, __m, __lm1, __comp); - } - } - // *__m is median - // partition [__first, __m) < *__m and *__m <= [__m, __last) - // (this inhibits tossing elements equivalent to __m around unnecessarily) - _RandomAccessIterator __i = __first; - _RandomAccessIterator __j = __lm1; - // j points beyond range to be tested, *__m is known to be <= *__lm1 - // The search going up is known to be guarded but the search coming down isn't. - // Prime the downward search with a guard. 
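// Why the asymmetry: the upward scan (while __comp(*__i, *__m)) is always stopped by
// the pivot element at __m itself, so it needs no index check. The downward scan stops
// only at an element that compares less than *__m, so it can run unchecked only if such
// an element is known to sit below __j; when *__first < *__m, *__first is that guard.
// The branch below covers the remaining case: it either finds a smaller element and
// swaps it to the front so it can serve as the guard, or concludes that *__m is a
// minimum of the range and partitions by equality with *__first instead.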
- if (!__comp(*__i, *__m)) // if *__first == *__m - { - // *__first == *__m, *__first doesn't go in first part - // manually guard downward moving __j against __i - while (true) - { - if (__i == --__j) + if (__len <= __limit) + { + _CUDA_VSTD::__insertion_sort_3<_Compare>(__first, __last, __comp); + return; + } + // __len > 5 + _RandomAccessIterator __m = __first; + _RandomAccessIterator __lm1 = __last; + --__lm1; + unsigned __n_swaps; { - // *__first == *__m, *__m <= all other elements - // Parition instead into [__first, __i) == *__first and *__first < [__i, __last) - ++__i; // __first + 1 - __j = __last; - if (!__comp(*__first, *--__j)) // we need a guard if *__first == *(__last-1) - { + difference_type __delta; + if (__len >= 1000) + { + __delta = __len/2; + __m += __delta; + __delta /= 2; + __n_swaps = _CUDA_VSTD::__sort5<_Compare>(__first, __first + __delta, __m, __m+__delta, __lm1, __comp); + } + else + { + __delta = __len/2; + __m += __delta; + __n_swaps = _CUDA_VSTD::__sort3<_Compare>(__first, __m, __lm1, __comp); + } + } + // *__m is median + // partition [__first, __m) < *__m and *__m <= [__m, __last) + // (this inhibits tossing elements equivalent to __m around unnecessarily) + _RandomAccessIterator __i = __first; + _RandomAccessIterator __j = __lm1; + // j points beyond range to be tested, *__m is known to be <= *__lm1 + // The search going up is known to be guarded but the search coming down isn't. + // Prime the downward search with a guard. + if (!__comp(*__i, *__m)) // if *__first == *__m + { + // *__first == *__m, *__first doesn't go in first part + // manually guard downward moving __j against __i while (true) { - if (__i == __j) - { - return; // [__first, __last) all equivalent elements - } - if (__comp(*__first, *__i)) - { + if (__i == --__j) + { + // *__first == *__m, *__m <= all other elements + // Parition instead into [__first, __i) == *__first and *__first < [__i, __last) + ++__i; // __first + 1 + __j = __last; + if (!__comp(*__first, *--__j)) // we need a guard if *__first == *(__last-1) + { + while (true) + { + if (__i == __j) + return; // [__first, __last) all equivalent elements + if (__comp(*__first, *__i)) + { + swap(*__i, *__j); + ++__n_swaps; + ++__i; + break; + } + ++__i; + } + } + // [__first, __i) == *__first and *__first < [__j, __last) and __j == __last - 1 + if (__i == __j) + return; + while (true) + { + while (!__comp(*__first, *__i)) + ++__i; + while (__comp(*__first, *--__j)) + ; + if (__i >= __j) + break; + swap(*__i, *__j); + ++__n_swaps; + ++__i; + } + // [__first, __i) == *__first and *__first < [__i, __last) + // The first part is sorted, sort the secod part + // _CUDA_VSTD::__sort<_Compare>(__i, __last, __comp); + __first = __i; + goto __restart; + } + if (__comp(*__j, *__m)) + { + swap(*__i, *__j); + ++__n_swaps; + break; // found guard for downward moving __j, now use unguarded partition + } + } + } + // It is known that *__i < *__m + ++__i; + // j points beyond range to be tested, *__m is known to be <= *__lm1 + // if not yet partitioned... 
+ if (__i < __j) + { + // known that *(__i - 1) < *__m + // known that __i <= __m + while (true) + { + // __m still guards upward moving __i + while (__comp(*__i, *__m)) + ++__i; + // It is now known that a guard exists for downward moving __j + while (!__comp(*--__j, *__m)) + ; + if (__i > __j) + break; swap(*__i, *__j); ++__n_swaps; + // It is known that __m != __j + // If __m just moved, follow it + if (__m == __i) + __m = __j; ++__i; - break; - } - ++__i; } - } - // [__first, __i) == *__first and *__first < [__j, __last) and __j == __last - 1 - if (__i == __j) - { - return; - } - while (true) - { - while (!__comp(*__first, *__i)) - { - ++__i; - } - while (__comp(*__first, *--__j)) - ; - if (__i >= __j) - { - break; - } - swap(*__i, *__j); - ++__n_swaps; - ++__i; - } - // [__first, __i) == *__first and *__first < [__i, __last) - // The first part is sorted, sort the secod part - // _CUDA_VSTD::__sort<_Compare>(__i, __last, __comp); - __first = __i; - goto __restart; - } - if (__comp(*__j, *__m)) - { - swap(*__i, *__j); - ++__n_swaps; - break; // found guard for downward moving __j, now use unguarded partition - } - } - } - // It is known that *__i < *__m - ++__i; - // j points beyond range to be tested, *__m is known to be <= *__lm1 - // if not yet partitioned... - if (__i < __j) - { - // known that *(__i - 1) < *__m - // known that __i <= __m - while (true) - { - // __m still guards upward moving __i - while (__comp(*__i, *__m)) - { - ++__i; } - // It is now known that a guard exists for downward moving __j - while (!__comp(*--__j, *__m)) - ; - if (__i > __j) + // [__first, __i) < *__m and *__m <= [__i, __last) + if (__i != __m && __comp(*__m, *__i)) { - break; + swap(*__i, *__m); + ++__n_swaps; } - swap(*__i, *__j); - ++__n_swaps; - // It is known that __m != __j - // If __m just moved, follow it - if (__m == __i) + // [__first, __i) < *__i and *__i <= [__i+1, __last) + // If we were given a perfect partition, see if insertion sort is quick... + if (__n_swaps == 0) { - __m = __j; + bool __fs = _CUDA_VSTD::__insertion_sort_incomplete<_Compare>(__first, __i, __comp); + if (_CUDA_VSTD::__insertion_sort_incomplete<_Compare>(__i+1, __last, __comp)) + { + if (__fs) + return; + __last = __i; + continue; + } + else + { + if (__fs) + { + __first = ++__i; + continue; + } + } } - ++__i; - } - } - // [__first, __i) < *__m and *__m <= [__i, __last) - if (__i != __m && __comp(*__m, *__i)) - { - swap(*__i, *__m); - ++__n_swaps; - } - // [__first, __i) < *__i and *__i <= [__i+1, __last) - // If we were given a perfect partition, see if insertion sort is quick... 
- if (__n_swaps == 0) - { - bool __fs = _CUDA_VSTD::__insertion_sort_incomplete<_Compare>(__first, __i, __comp); - if (_CUDA_VSTD::__insertion_sort_incomplete<_Compare>(__i + 1, __last, __comp)) - { - if (__fs) + // sort smaller range with recursive call and larger with tail recursion elimination + if (__i - __first < __last - __i) { - return; + _CUDA_VSTD::__sort<_Compare>(__first, __i, __comp); + // _CUDA_VSTD::__sort<_Compare>(__i+1, __last, __comp); + __first = ++__i; } - __last = __i; - continue; - } - else - { - if (__fs) + else { - __first = ++__i; - continue; + _CUDA_VSTD::__sort<_Compare>(__i+1, __last, __comp); + // _CUDA_VSTD::__sort<_Compare>(__first, __i, __comp); + __last = __i; } - } - } - // sort smaller range with recursive call and larger with tail recursion elimination - if (__i - __first < __last - __i) - { - _CUDA_VSTD::__sort<_Compare>(__first, __i, __comp); - // _CUDA_VSTD::__sort<_Compare>(__i+1, __last, __comp); - __first = ++__i; - } - else - { - _CUDA_VSTD::__sort<_Compare>(__i + 1, __last, __comp); - // _CUDA_VSTD::__sort<_Compare>(__first, __i, __comp); - __last = __i; } - } } // This forwarder keeps the top call and the recursive calls using the same instantiation, forcing a reference _Compare template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { - using _Comp_ref = __comp_ref_type<_Compare>; - _CUDA_VSTD::__sort<_Comp_ref>(__first, __last, _Comp_ref(__comp)); + using _Comp_ref = __comp_ref_type<_Compare>; + _CUDA_VSTD::__sort<_Comp_ref>(__first, __last, _Comp_ref(__comp)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void sort(_RandomAccessIterator __first, _RandomAccessIterator __last) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +sort(_RandomAccessIterator __first, _RandomAccessIterator __last) { - _CUDA_VSTD::sort(__first, __last, __less{}); + _CUDA_VSTD::sort(__first, __last, __less{}); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void sort(_Tp** __first, _Tp** __last) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +sort(_Tp** __first, _Tp** __last) { - _CUDA_VSTD::sort((size_t*) __first, (size_t*) __last, __less{}); + _CUDA_VSTD::sort((size_t*)__first, (size_t*)__last, __less{}); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void sort(__wrap_iter<_Tp*> __first, __wrap_iter<_Tp*> __last) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +sort(__wrap_iter<_Tp*> __first, __wrap_iter<_Tp*> __last) { - _CUDA_VSTD::sort(__first.base(), __last.base()); + _CUDA_VSTD::sort(__first.base(), __last.base()); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void sort(__wrap_iter<_Tp*> __first, __wrap_iter<_Tp*> __last, _Compare __comp) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +sort(__wrap_iter<_Tp*> __first, __wrap_iter<_Tp*> __last, _Compare __comp) { - typedef __add_lvalue_reference_t<_Compare> _Comp_ref; - _CUDA_VSTD::sort<_Tp*, _Comp_ref>(__first.base(), __last.base(), __comp); + typedef __add_lvalue_reference_t<_Compare> _Comp_ref; + _CUDA_VSTD::sort<_Tp*, _Comp_ref>(__first.base(), __last.base(), __comp); } _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, char*>(char*, char*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, wchar_t*>(wchar_t*, wchar_t*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, signed*>(signed*, signed*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned char*>(unsigned char*, unsigned 
char*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned char*>(unsigned char*, unsigned char*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, short*>(short*, short*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned short*>(unsigned short*, unsigned short*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned short*>(unsigned short*, unsigned short*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, int*>(int*, int*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned*>(unsigned*, unsigned*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, long*>(long*, long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned long*>(unsigned long*, unsigned long*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned long*>(unsigned long*, unsigned long*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, long long*>(long long*, long long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned long long*>(unsigned long long*, unsigned long long*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, unsigned long long*>(unsigned long long*, unsigned long long*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, float*>(float*, float*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, double*>(double*, double*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS void __sort<__less&, long double*>(long double*, long double*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, char*>(char*, char*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, wchar_t*>(wchar_t*, wchar_t*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, signed char*>(signed char*, signed char*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned char*>( - unsigned char*, unsigned char*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, short*>(short*, short*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned short*>( - unsigned short*, unsigned short*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, wchar_t*>(wchar_t*, wchar_t*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, signed char*>(signed char*, signed char*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned char*>(unsigned char*, unsigned char*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, short*>(short*, short*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned short*>(unsigned short*, unsigned short*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, int*>(int*, int*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool 
__insertion_sort_incomplete<__less&, unsigned*>(unsigned*, unsigned*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned*>(unsigned*, unsigned*, __less&)) _LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, long*>(long*, long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned long*>( - unsigned long*, unsigned long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, long long*>(long long*, long long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned long long*>( - unsigned long long*, unsigned long long*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, float*>(float*, float*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, double*>(double*, double*, __less&)) -_LIBCUDACXX_EXTERN_TEMPLATE( - _LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, long double*>(long double*, long double*, __less&)) - -_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS unsigned __sort5<__less&, long double*>( - long double*, long double*, long double*, long double*, long double*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned long*>(unsigned long*, unsigned long*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, long long*>(long long*, long long*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, unsigned long long*>(unsigned long long*, unsigned long long*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, float*>(float*, float*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, double*>(double*, double*, __less&)) +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS bool __insertion_sort_incomplete<__less&, long double*>(long double*, long double*, __less&)) + +_LIBCUDACXX_EXTERN_TEMPLATE(_LIBCUDACXX_FUNC_VIS unsigned __sort5<__less&, long double*>(long double*, long double*, long double*, long double*, long double*, __less&)) // inplace_merge -template -_CCCL_HOST_DEVICE void __half_inplace_merge( - _InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _OutputIterator __result, - _Compare __comp) +template +_CCCL_HOST_DEVICE +void __half_inplace_merge(_InputIterator1 __first1, _InputIterator1 __last1, + _InputIterator2 __first2, _InputIterator2 __last2, + _OutputIterator __result, _Compare __comp) { - for (; __first1 != __last1; ++__result) - { - if (__first2 == __last2) + for (; __first1 != __last1; ++__result) { - _CUDA_VSTD::move(__first1, __last1, __result); - return; - } + if (__first2 == __last2) + { + _CUDA_VSTD::move(__first1, __last1, __result); + return; + } - if (__comp(*__first2, *__first1)) - { - *__result = _CUDA_VSTD::move(*__first2); - ++__first2; - } - else - { - *__result = _CUDA_VSTD::move(*__first1); - ++__first1; + if (__comp(*__first2, *__first1)) + { + *__result = _CUDA_VSTD::move(*__first2); + ++__first2; + } + else + { + *__result = _CUDA_VSTD::move(*__first1); + ++__first1; + } } - } - // __first2 through __last2 are already in the right spot. 
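// That property is what lets __buffered_inplace_merge get away with buffering only the
// shorter run: the buffered copy is merged against the run that stayed in place, and
// writing starts at the front of the region the buffered run vacated, so the output can
// never overtake the unread part of the in-place run. A minimal stand-alone sketch of
// the forward case (hypothetical helper, plain operator< instead of the comparator):
//
//   #include <utility>
//   #include <vector>
//
//   // buf holds a copy of the left run; [mid, last) is the right run, still in place;
//   // out is the start of the region the left run occupied (out + buf.size() == mid).
//   template <class T>
//   void merge_back_with_half_buffer(std::vector<T>& buf, T* mid, T* last, T* out)
//   {
//       auto b = buf.begin();
//       while (b != buf.end())
//       {
//           if (mid == last || !(*mid < *b))   // prefer the left run on ties (stable)
//               *out++ = std::move(*b++);
//           else
//               *out++ = std::move(*mid++);
//       }
//       // whatever remains of [mid, last) is already in its final position
//   }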
+ // __first2 through __last2 are already in the right spot. } template -_CCCL_HOST_DEVICE void __buffered_inplace_merge( - _BidirectionalIterator __first, - _BidirectionalIterator __middle, - _BidirectionalIterator __last, - _Compare __comp, - typename iterator_traits<_BidirectionalIterator>::difference_type __len1, - typename iterator_traits<_BidirectionalIterator>::difference_type __len2, - typename iterator_traits<_BidirectionalIterator>::value_type* __buff) +_CCCL_HOST_DEVICE +void +__buffered_inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, + _Compare __comp, typename iterator_traits<_BidirectionalIterator>::difference_type __len1, + typename iterator_traits<_BidirectionalIterator>::difference_type __len2, + typename iterator_traits<_BidirectionalIterator>::value_type* __buff) { - typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; - __destruct_n __d(0); - unique_ptr __h2(__buff, __d); - if (__len1 <= __len2) - { - value_type* __p = __buff; - for (_BidirectionalIterator __i = __first; __i != __middle; __d.__incr((value_type*) 0), (void) ++__i, (void) ++__p) + typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; + __destruct_n __d(0); + unique_ptr __h2(__buff, __d); + if (__len1 <= __len2) { - ::new (__p) value_type(_CUDA_VSTD::move(*__i)); + value_type* __p = __buff; + for (_BidirectionalIterator __i = __first; __i != __middle; __d.__incr((value_type*)0), (void) ++__i, (void) ++__p) + ::new(__p) value_type(_CUDA_VSTD::move(*__i)); + __half_inplace_merge(__buff, __p, __middle, __last, __first, __comp); } - __half_inplace_merge(__buff, __p, __middle, __last, __first, __comp); - } - else - { - value_type* __p = __buff; - for (_BidirectionalIterator __i = __middle; __i != __last; __d.__incr((value_type*) 0), (void) ++__i, (void) ++__p) + else { - ::new (__p) value_type(_CUDA_VSTD::move(*__i)); + value_type* __p = __buff; + for (_BidirectionalIterator __i = __middle; __i != __last; __d.__incr((value_type*)0), (void) ++__i, (void) ++__p) + ::new(__p) value_type(_CUDA_VSTD::move(*__i)); + typedef reverse_iterator<_BidirectionalIterator> _RBi; + typedef reverse_iterator _Rv; + __half_inplace_merge(_Rv(__p), _Rv(__buff), + _RBi(__middle), _RBi(__first), + _RBi(__last), __invert<_Compare>(__comp)); } - typedef reverse_iterator<_BidirectionalIterator> _RBi; - typedef reverse_iterator _Rv; - __half_inplace_merge(_Rv(__p), _Rv(__buff), _RBi(__middle), _RBi(__first), _RBi(__last), __invert<_Compare>(__comp)); - } } template -_CCCL_HOST_DEVICE void __inplace_merge( - _BidirectionalIterator __first, - _BidirectionalIterator __middle, - _BidirectionalIterator __last, - _Compare __comp, - typename iterator_traits<_BidirectionalIterator>::difference_type __len1, - typename iterator_traits<_BidirectionalIterator>::difference_type __len2, - typename iterator_traits<_BidirectionalIterator>::value_type* __buff, - ptrdiff_t __buff_size) +_CCCL_HOST_DEVICE +void +__inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, + _Compare __comp, typename iterator_traits<_BidirectionalIterator>::difference_type __len1, + typename iterator_traits<_BidirectionalIterator>::difference_type __len2, + typename iterator_traits<_BidirectionalIterator>::value_type* __buff, ptrdiff_t __buff_size) { - typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; - while (true) - { - // if __middle == __last, we're done - if 
(__len2 == 0) + typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; + while (true) { - return; - } - if (__len1 <= __buff_size || __len2 <= __buff_size) - { - return __buffered_inplace_merge<_Compare>(__first, __middle, __last, __comp, __len1, __len2, __buff); - } - // shrink [__first, __middle) as much as possible (with no moves), returning if it shrinks to 0 - for (; true; ++__first, (void) --__len1) - { - if (__len1 == 0) - { - return; - } - if (__comp(*__middle, *__first)) - { - break; - } - } - // __first < __middle < __last - // *__first > *__middle - // partition [__first, __m1) [__m1, __middle) [__middle, __m2) [__m2, __last) such that - // all elements in: - // [__first, __m1) <= [__middle, __m2) - // [__middle, __m2) < [__m1, __middle) - // [__m1, __middle) <= [__m2, __last) - // and __m1 or __m2 is in the middle of its range - _BidirectionalIterator __m1; // "median" of [__first, __middle) - _BidirectionalIterator __m2; // "median" of [__middle, __last) - difference_type __len11; // distance(__first, __m1) - difference_type __len21; // distance(__middle, __m2) - // binary search smaller range - if (__len1 < __len2) - { // __len >= 1, __len2 >= 2 - __len21 = __len2 / 2; - __m2 = __middle; - _CUDA_VSTD::advance(__m2, __len21); - __m1 = __upper_bound<_Compare>(__first, __middle, *__m2, __comp); - __len11 = _CUDA_VSTD::distance(__first, __m1); - } - else - { - if (__len1 == 1) - { // __len1 >= __len2 && __len2 > 0, therefore __len2 == 1 - // It is known *__first > *__middle - swap(*__first, *__middle); - return; - } - // __len1 >= 2, __len2 >= 1 - __len11 = __len1 / 2; - __m1 = __first; - _CUDA_VSTD::advance(__m1, __len11); - __m2 = __lower_bound<_Compare>(__middle, __last, *__m1, __comp); - __len21 = _CUDA_VSTD::distance(__middle, __m2); - } - difference_type __len12 = __len1 - __len11; // distance(__m1, __middle) - difference_type __len22 = __len2 - __len21; // distance(__m2, __last) - // [__first, __m1) [__m1, __middle) [__middle, __m2) [__m2, __last) - // swap middle two partitions - __middle = _CUDA_VSTD::rotate(__m1, __middle, __m2); - // __len12 and __len21 now have swapped meanings - // merge smaller range with recurisve call and larger with tail recursion elimination - if (__len11 + __len21 < __len12 + __len22) - { - __inplace_merge<_Compare>(__first, __m1, __middle, __comp, __len11, __len21, __buff, __buff_size); - // __inplace_merge<_Compare>(__middle, __m2, __last, __comp, __len12, __len22, __buff, __buff_size); - __first = __middle; - __middle = __m2; - __len1 = __len12; - __len2 = __len22; - } - else - { - __inplace_merge<_Compare>(__middle, __m2, __last, __comp, __len12, __len22, __buff, __buff_size); - // __inplace_merge<_Compare>(__first, __m1, __middle, __comp, __len11, __len21, __buff, __buff_size); - __last = __middle; - __middle = __m1; - __len1 = __len11; - __len2 = __len21; + // if __middle == __last, we're done + if (__len2 == 0) + return; + if (__len1 <= __buff_size || __len2 <= __buff_size) + return __buffered_inplace_merge<_Compare> + (__first, __middle, __last, __comp, __len1, __len2, __buff); + // shrink [__first, __middle) as much as possible (with no moves), returning if it shrinks to 0 + for (; true; ++__first, (void) --__len1) + { + if (__len1 == 0) + return; + if (__comp(*__middle, *__first)) + break; + } + // __first < __middle < __last + // *__first > *__middle + // partition [__first, __m1) [__m1, __middle) [__middle, __m2) [__m2, __last) such that + // all elements in: + // [__first, __m1) <= [__middle, 
__m2) + // [__middle, __m2) < [__m1, __middle) + // [__m1, __middle) <= [__m2, __last) + // and __m1 or __m2 is in the middle of its range + _BidirectionalIterator __m1; // "median" of [__first, __middle) + _BidirectionalIterator __m2; // "median" of [__middle, __last) + difference_type __len11; // distance(__first, __m1) + difference_type __len21; // distance(__middle, __m2) + // binary search smaller range + if (__len1 < __len2) + { // __len >= 1, __len2 >= 2 + __len21 = __len2 / 2; + __m2 = __middle; + _CUDA_VSTD::advance(__m2, __len21); + __m1 = __upper_bound<_Compare>(__first, __middle, *__m2, __comp); + __len11 = _CUDA_VSTD::distance(__first, __m1); + } + else + { + if (__len1 == 1) + { // __len1 >= __len2 && __len2 > 0, therefore __len2 == 1 + // It is known *__first > *__middle + swap(*__first, *__middle); + return; + } + // __len1 >= 2, __len2 >= 1 + __len11 = __len1 / 2; + __m1 = __first; + _CUDA_VSTD::advance(__m1, __len11); + __m2 = __lower_bound<_Compare>(__middle, __last, *__m1, __comp); + __len21 = _CUDA_VSTD::distance(__middle, __m2); + } + difference_type __len12 = __len1 - __len11; // distance(__m1, __middle) + difference_type __len22 = __len2 - __len21; // distance(__m2, __last) + // [__first, __m1) [__m1, __middle) [__middle, __m2) [__m2, __last) + // swap middle two partitions + __middle = _CUDA_VSTD::rotate(__m1, __middle, __m2); + // __len12 and __len21 now have swapped meanings + // merge smaller range with recurisve call and larger with tail recursion elimination + if (__len11 + __len21 < __len12 + __len22) + { + __inplace_merge<_Compare>(__first, __m1, __middle, __comp, __len11, __len21, __buff, __buff_size); +// __inplace_merge<_Compare>(__middle, __m2, __last, __comp, __len12, __len22, __buff, __buff_size); + __first = __middle; + __middle = __m2; + __len1 = __len12; + __len2 = __len22; + } + else + { + __inplace_merge<_Compare>(__middle, __m2, __last, __comp, __len12, __len22, __buff, __buff_size); +// __inplace_merge<_Compare>(__first, __m1, __middle, __comp, __len11, __len21, __buff, __buff_size); + __last = __middle; + __middle = __m1; + __len1 = __len11; + __len2 = __len21; + } } - } } template -inline _LIBCUDACXX_INLINE_VISIBILITY void inplace_merge( - _BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, _Compare __comp) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last, + _Compare __comp) { - typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; - typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; - difference_type __len1 = _CUDA_VSTD::distance(__first, __middle); - difference_type __len2 = _CUDA_VSTD::distance(__middle, __last); - difference_type __buf_size = _CUDA_VSTD::min(__len1, __len2); - pair __buf = _CUDA_VSTD::get_temporary_buffer(__buf_size); - unique_ptr __h(__buf.first); - using _Comp_ref = __comp_ref_type<_Compare>; - return _CUDA_VSTD::__inplace_merge<_Comp_ref>( - __first, __middle, __last, __comp, __len1, __len2, __buf.first, __buf.second); + typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type; + typedef typename iterator_traits<_BidirectionalIterator>::difference_type difference_type; + difference_type __len1 = _CUDA_VSTD::distance(__first, __middle); + difference_type __len2 = _CUDA_VSTD::distance(__middle, __last); + difference_type __buf_size = _CUDA_VSTD::min(__len1, __len2); + pair __buf = 
_CUDA_VSTD::get_temporary_buffer(__buf_size); + unique_ptr __h(__buf.first); + using _Comp_ref = __comp_ref_type<_Compare>; + return _CUDA_VSTD::__inplace_merge<_Comp_ref>(__first, __middle, __last, __comp, __len1, __len2, + __buf.first, __buf.second); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator __middle, _BidirectionalIterator __last) { - _CUDA_VSTD::inplace_merge(__first, __middle, __last, __less{}); + _CUDA_VSTD::inplace_merge(__first, __middle, __last, + __less{}); } // stable_sort template -_CCCL_HOST_DEVICE void __merge_move_construct( - _InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - typename iterator_traits<_InputIterator1>::value_type* __result, - _Compare __comp) +_CCCL_HOST_DEVICE +void +__merge_move_construct(_InputIterator1 __first1, _InputIterator1 __last1, + _InputIterator2 __first2, _InputIterator2 __last2, + typename iterator_traits<_InputIterator1>::value_type* __result, _Compare __comp) { - typedef typename iterator_traits<_InputIterator1>::value_type value_type; - __destruct_n __d(0); - unique_ptr __h(__result, __d); - for (; true; ++__result) - { - if (__first1 == __last1) - { - for (; __first2 != __last2; ++__first2, ++__result, (void) __d.__incr((value_type*) 0)) - { - ::new (__result) value_type(_CUDA_VSTD::move(*__first2)); - } - __h.release(); - return; - } - if (__first2 == __last2) - { - for (; __first1 != __last1; ++__first1, ++__result, (void) __d.__incr((value_type*) 0)) - { - ::new (__result) value_type(_CUDA_VSTD::move(*__first1)); - } - __h.release(); - return; - } - if (__comp(*__first2, *__first1)) - { - ::new (__result) value_type(_CUDA_VSTD::move(*__first2)); - __d.__incr((value_type*) 0); - ++__first2; - } - else + typedef typename iterator_traits<_InputIterator1>::value_type value_type; + __destruct_n __d(0); + unique_ptr __h(__result, __d); + for (; true; ++__result) { - ::new (__result) value_type(_CUDA_VSTD::move(*__first1)); - __d.__incr((value_type*) 0); - ++__first1; + if (__first1 == __last1) + { + for (; __first2 != __last2; ++__first2, ++__result, (void) __d.__incr((value_type*)0)) + ::new (__result) value_type(_CUDA_VSTD::move(*__first2)); + __h.release(); + return; + } + if (__first2 == __last2) + { + for (; __first1 != __last1; ++__first1, ++__result, (void) __d.__incr((value_type*)0)) + ::new (__result) value_type(_CUDA_VSTD::move(*__first1)); + __h.release(); + return; + } + if (__comp(*__first2, *__first1)) + { + ::new (__result) value_type(_CUDA_VSTD::move(*__first2)); + __d.__incr((value_type*)0); + ++__first2; + } + else + { + ::new (__result) value_type(_CUDA_VSTD::move(*__first1)); + __d.__incr((value_type*)0); + ++__first1; + } } - } } template -_CCCL_HOST_DEVICE void __merge_move_assign( - _InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _OutputIterator __result, - _Compare __comp) +_CCCL_HOST_DEVICE +void +__merge_move_assign(_InputIterator1 __first1, _InputIterator1 __last1, + _InputIterator2 __first2, _InputIterator2 __last2, + _OutputIterator __result, _Compare __comp) { - for (; __first1 != __last1; ++__result) - { - if (__first2 == __last2) - { - for (; __first1 != __last1; ++__first1, (void) ++__result) - { - *__result = _CUDA_VSTD::move(*__first1); - } - return; - } - if (__comp(*__first2, *__first1)) + for (; __first1 != __last1; ++__result) { - *__result = 
_CUDA_VSTD::move(*__first2); - ++__first2; - } - else - { - *__result = _CUDA_VSTD::move(*__first1); - ++__first1; + if (__first2 == __last2) + { + for (; __first1 != __last1; ++__first1, (void) ++__result) + *__result = _CUDA_VSTD::move(*__first1); + return; + } + if (__comp(*__first2, *__first1)) + { + *__result = _CUDA_VSTD::move(*__first2); + ++__first2; + } + else + { + *__result = _CUDA_VSTD::move(*__first1); + ++__first1; + } } - } - for (; __first2 != __last2; ++__first2, (void) ++__result) - { - *__result = _CUDA_VSTD::move(*__first2); - } + for (; __first2 != __last2; ++__first2, (void) ++__result) + *__result = _CUDA_VSTD::move(*__first2); } template -_CCCL_HOST_DEVICE void __stable_sort( - _RandomAccessIterator __first, - _RandomAccessIterator __last, - _Compare __comp, - typename iterator_traits<_RandomAccessIterator>::difference_type __len, - typename iterator_traits<_RandomAccessIterator>::value_type* __buff, - ptrdiff_t __buff_size); +_CCCL_HOST_DEVICE +void +__stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, + typename iterator_traits<_RandomAccessIterator>::difference_type __len, + typename iterator_traits<_RandomAccessIterator>::value_type* __buff, ptrdiff_t __buff_size); template -_CCCL_HOST_DEVICE void __stable_sort_move( - _RandomAccessIterator __first1, - _RandomAccessIterator __last1, - _Compare __comp, - typename iterator_traits<_RandomAccessIterator>::difference_type __len, - typename iterator_traits<_RandomAccessIterator>::value_type* __first2) +_CCCL_HOST_DEVICE +void +__stable_sort_move(_RandomAccessIterator __first1, _RandomAccessIterator __last1, _Compare __comp, + typename iterator_traits<_RandomAccessIterator>::difference_type __len, + typename iterator_traits<_RandomAccessIterator>::value_type* __first2) { - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - switch (__len) - { + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + switch (__len) + { case 0: - return; + return; case 1: - ::new (__first2) value_type(_CUDA_VSTD::move(*__first1)); - return; + ::new(__first2) value_type(_CUDA_VSTD::move(*__first1)); + return; case 2: - __destruct_n __d(0); - unique_ptr __h2(__first2, __d); - if (__comp(*--__last1, *__first1)) - { - ::new (__first2) value_type(_CUDA_VSTD::move(*__last1)); - __d.__incr((value_type*) 0); - ++__first2; - ::new (__first2) value_type(_CUDA_VSTD::move(*__first1)); - } - else - { - ::new (__first2) value_type(_CUDA_VSTD::move(*__first1)); - __d.__incr((value_type*) 0); - ++__first2; - ::new (__first2) value_type(_CUDA_VSTD::move(*__last1)); - } - __h2.release(); - return; - } - if (__len <= 8) - { - __insertion_sort_move<_Compare>(__first1, __last1, __first2, __comp); - return; - } - typename iterator_traits<_RandomAccessIterator>::difference_type __l2 = __len / 2; - _RandomAccessIterator __m = __first1 + __l2; - __stable_sort<_Compare>(__first1, __m, __comp, __l2, __first2, __l2); - __stable_sort<_Compare>(__m, __last1, __comp, __len - __l2, __first2 + __l2, __len - __l2); - __merge_move_construct<_Compare>(__first1, __m, __m, __last1, __first2, __comp); + __destruct_n __d(0); + unique_ptr __h2(__first2, __d); + if (__comp(*--__last1, *__first1)) + { + ::new(__first2) value_type(_CUDA_VSTD::move(*__last1)); + __d.__incr((value_type*)0); + ++__first2; + ::new(__first2) value_type(_CUDA_VSTD::move(*__first1)); + } + else + { + ::new(__first2) value_type(_CUDA_VSTD::move(*__first1)); + __d.__incr((value_type*)0); + ++__first2; + 
::new(__first2) value_type(_CUDA_VSTD::move(*__last1)); + } + __h2.release(); + return; + } + if (__len <= 8) + { + __insertion_sort_move<_Compare>(__first1, __last1, __first2, __comp); + return; + } + typename iterator_traits<_RandomAccessIterator>::difference_type __l2 = __len / 2; + _RandomAccessIterator __m = __first1 + __l2; + __stable_sort<_Compare>(__first1, __m, __comp, __l2, __first2, __l2); + __stable_sort<_Compare>(__m, __last1, __comp, __len - __l2, __first2 + __l2, __len - __l2); + __merge_move_construct<_Compare>(__first1, __m, __m, __last1, __first2, __comp); } template struct __stable_sort_switch { - static const unsigned value = 128 * is_trivially_copy_assignable<_Tp>::value; + static const unsigned value = 128*is_trivially_copy_assignable<_Tp>::value; }; template -_CCCL_HOST_DEVICE void __stable_sort( - _RandomAccessIterator __first, - _RandomAccessIterator __last, - _Compare __comp, - typename iterator_traits<_RandomAccessIterator>::difference_type __len, - typename iterator_traits<_RandomAccessIterator>::value_type* __buff, - ptrdiff_t __buff_size) +_CCCL_HOST_DEVICE +void +__stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, + typename iterator_traits<_RandomAccessIterator>::difference_type __len, + typename iterator_traits<_RandomAccessIterator>::value_type* __buff, ptrdiff_t __buff_size) { - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; - switch (__len) - { + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; + switch (__len) + { case 0: case 1: - return; + return; case 2: - if (__comp(*--__last, *__first)) - { - swap(*__first, *__last); - } - return; - } - if (__len <= static_cast(__stable_sort_switch::value)) - { - __insertion_sort<_Compare>(__first, __last, __comp); - return; - } - typename iterator_traits<_RandomAccessIterator>::difference_type __l2 = __len / 2; - _RandomAccessIterator __m = __first + __l2; - if (__len <= __buff_size) - { - __destruct_n __d(0); - unique_ptr __h2(__buff, __d); - __stable_sort_move<_Compare>(__first, __m, __comp, __l2, __buff); - __d.__set(__l2, (value_type*) 0); - __stable_sort_move<_Compare>(__m, __last, __comp, __len - __l2, __buff + __l2); - __d.__set(__len, (value_type*) 0); - __merge_move_assign<_Compare>(__buff, __buff + __l2, __buff + __l2, __buff + __len, __first, __comp); - // __merge<_Compare>(move_iterator(__buff), - // move_iterator(__buff + __l2), - // move_iterator<_RandomAccessIterator>(__buff + __l2), - // move_iterator<_RandomAccessIterator>(__buff + __len), - // __first, __comp); - return; - } - __stable_sort<_Compare>(__first, __m, __comp, __l2, __buff, __buff_size); - __stable_sort<_Compare>(__m, __last, __comp, __len - __l2, __buff, __buff_size); - __inplace_merge<_Compare>(__first, __m, __last, __comp, __l2, __len - __l2, __buff, __buff_size); + if (__comp(*--__last, *__first)) + swap(*__first, *__last); + return; + } + if (__len <= static_cast(__stable_sort_switch::value)) + { + __insertion_sort<_Compare>(__first, __last, __comp); + return; + } + typename iterator_traits<_RandomAccessIterator>::difference_type __l2 = __len / 2; + _RandomAccessIterator __m = __first + __l2; + if (__len <= __buff_size) + { + __destruct_n __d(0); + unique_ptr __h2(__buff, __d); + __stable_sort_move<_Compare>(__first, __m, __comp, __l2, __buff); + 
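When the scratch buffer covers the range, __stable_sort above sorts each half through that buffer and merges back in place. A simplified host-only sketch of the buffered strategy, with illustrative names and std::merge standing in for the library's move/merge helpers (not its exact sequence):

    #include <algorithm>
    #include <cassert>
    #include <functional>
    #include <iterator>
    #include <vector>

    // Sort [first, last) stably, using buff (at least last - first elements) as scratch.
    template <class It, class Cmp>
    void buffered_stable_sort(It first, It last, Cmp comp,
                              typename std::iterator_traits<It>::value_type* buff)
    {
      auto len = last - first;
      if (len <= 1)
        return;
      It mid = first + len / 2;
      buffered_stable_sort(first, mid, comp, buff);
      buffered_stable_sort(mid, last, comp, buff);
      // Merge the two sorted halves into the scratch buffer (std::merge is stable)...
      std::merge(first, mid, mid, last, buff, comp);
      // ...then move the merged result back into place.
      std::move(buff, buff + len, first);
    }

    int main()
    {
      std::vector<int> v{5, 3, 3, 8, 1, 9, 2};
      std::vector<int> scratch(v.size());
      buffered_stable_sort(v.begin(), v.end(), std::less<int>{}, scratch.data());
      assert(std::is_sorted(v.begin(), v.end()));
    }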
__d.__set(__l2, (value_type*)0); + __stable_sort_move<_Compare>(__m, __last, __comp, __len - __l2, __buff + __l2); + __d.__set(__len, (value_type*)0); + __merge_move_assign<_Compare>(__buff, __buff + __l2, __buff + __l2, __buff + __len, __first, __comp); +// __merge<_Compare>(move_iterator(__buff), +// move_iterator(__buff + __l2), +// move_iterator<_RandomAccessIterator>(__buff + __l2), +// move_iterator<_RandomAccessIterator>(__buff + __len), +// __first, __comp); + return; + } + __stable_sort<_Compare>(__first, __m, __comp, __l2, __buff, __buff_size); + __stable_sort<_Compare>(__m, __last, __comp, __len - __l2, __buff, __buff_size); + __inplace_merge<_Compare>(__first, __m, __last, __comp, __l2, __len - __l2, __buff, __buff_size); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { - typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; - typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; - difference_type __len = __last - __first; - pair __buf(0, 0); - unique_ptr __h; - if (__len > static_cast(__stable_sort_switch::value)) - { - __buf = _CUDA_VSTD::get_temporary_buffer(__len); - __h.reset(__buf.first); - } - using _Comp_ref = __comp_ref_type<_Compare>; - __stable_sort<_Comp_ref>(__first, __last, __comp, __len, __buf.first, __buf.second); + typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type; + typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; + difference_type __len = __last - __first; + pair __buf(0, 0); + unique_ptr __h; + if (__len > static_cast(__stable_sort_switch::value)) + { + __buf = _CUDA_VSTD::get_temporary_buffer(__len); + __h.reset(__buf.first); + } + using _Comp_ref = __comp_ref_type<_Compare>; + __stable_sort<_Comp_ref>(__first, __last, __comp, __len, __buf.first, __buf.second); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +stable_sort(_RandomAccessIterator __first, _RandomAccessIterator __last) { - _CUDA_VSTD::stable_sort(__first, __last, __less{}); + _CUDA_VSTD::stable_sort(__first, __last, __less{}); } // nth_element template -_CCCL_HOST_DEVICE void +_CCCL_HOST_DEVICE +void __nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth, _RandomAccessIterator __last, _Compare __comp) { - // _Compare is known to be a reference type - typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; - const difference_type __limit = 7; - while (true) - { - __restart: - if (__nth == __last) + // _Compare is known to be a reference type + typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; + const difference_type __limit = 7; + while (true) { - return; - } - difference_type __len = __last - __first; - switch (__len) - { - case 0: - case 1: - return; - case 2: - if (__comp(*--__last, *__first)) + __restart: + if (__nth == __last) + return; + difference_type __len = __last - __first; + switch (__len) { - swap(*__first, *__last); + case 0: + case 1: + return; + case 2: + if (__comp(*--__last, *__first)) + swap(*__first, *__last); + return; + case 3: + { + _RandomAccessIterator __m = __first; + _CUDA_VSTD::__sort3<_Compare>(__first, ++__m, --__last, __comp); + return; + } } - return; - case 3: { - _RandomAccessIterator 
__m = __first; - _CUDA_VSTD::__sort3<_Compare>(__first, ++__m, --__last, __comp); - return; - } - } - if (__len <= __limit) - { - __selection_sort<_Compare>(__first, __last, __comp); - return; - } - // __len > __limit >= 3 - _RandomAccessIterator __m = __first + __len / 2; - _RandomAccessIterator __lm1 = __last; - unsigned __n_swaps = _CUDA_VSTD::__sort3<_Compare>(__first, __m, --__lm1, __comp); - // *__m is median - // partition [__first, __m) < *__m and *__m <= [__m, __last) - // (this inhibits tossing elements equivalent to __m around unnecessarily) - _RandomAccessIterator __i = __first; - _RandomAccessIterator __j = __lm1; - // j points beyond range to be tested, *__lm1 is known to be <= *__m - // The search going up is known to be guarded but the search coming down isn't. - // Prime the downward search with a guard. - if (!__comp(*__i, *__m)) // if *__first == *__m - { - // *__first == *__m, *__first doesn't go in first part - // manually guard downward moving __j against __i - while (true) - { - if (__i == --__j) + if (__len <= __limit) + { + __selection_sort<_Compare>(__first, __last, __comp); + return; + } + // __len > __limit >= 3 + _RandomAccessIterator __m = __first + __len/2; + _RandomAccessIterator __lm1 = __last; + unsigned __n_swaps = _CUDA_VSTD::__sort3<_Compare>(__first, __m, --__lm1, __comp); + // *__m is median + // partition [__first, __m) < *__m and *__m <= [__m, __last) + // (this inhibits tossing elements equivalent to __m around unnecessarily) + _RandomAccessIterator __i = __first; + _RandomAccessIterator __j = __lm1; + // j points beyond range to be tested, *__lm1 is known to be <= *__m + // The search going up is known to be guarded but the search coming down isn't. + // Prime the downward search with a guard. + if (!__comp(*__i, *__m)) // if *__first == *__m { - // *__first == *__m, *__m <= all other elements - // Parition instead into [__first, __i) == *__first and *__first < [__i, __last) - ++__i; // __first + 1 - __j = __last; - if (!__comp(*__first, *--__j)) // we need a guard if *__first == *(__last-1) - { + // *__first == *__m, *__first doesn't go in first part + // manually guard downward moving __j against __i while (true) { - if (__i == __j) - { - return; // [__first, __last) all equivalent elements - } - if (__comp(*__first, *__i)) - { + if (__i == --__j) + { + // *__first == *__m, *__m <= all other elements + // Parition instead into [__first, __i) == *__first and *__first < [__i, __last) + ++__i; // __first + 1 + __j = __last; + if (!__comp(*__first, *--__j)) // we need a guard if *__first == *(__last-1) + { + while (true) + { + if (__i == __j) + return; // [__first, __last) all equivalent elements + if (__comp(*__first, *__i)) + { + swap(*__i, *__j); + ++__n_swaps; + ++__i; + break; + } + ++__i; + } + } + // [__first, __i) == *__first and *__first < [__j, __last) and __j == __last - 1 + if (__i == __j) + return; + while (true) + { + while (!__comp(*__first, *__i)) + ++__i; + while (__comp(*__first, *--__j)) + ; + if (__i >= __j) + break; + swap(*__i, *__j); + ++__n_swaps; + ++__i; + } + // [__first, __i) == *__first and *__first < [__i, __last) + // The first part is sorted, + if (__nth < __i) + return; + // __nth_element the secod part + // __nth_element<_Compare>(__i, __nth, __last, __comp); + __first = __i; + goto __restart; + } + if (__comp(*__j, *__m)) + { + swap(*__i, *__j); + ++__n_swaps; + break; // found guard for downward moving __j, now use unguarded partition + } + } + } + ++__i; + // j points beyond range to be tested, *__lm1 is known to 
be <= *__m + // if not yet partitioned... + if (__i < __j) + { + // known that *(__i - 1) < *__m + while (true) + { + // __m still guards upward moving __i + while (__comp(*__i, *__m)) + ++__i; + // It is now known that a guard exists for downward moving __j + while (!__comp(*--__j, *__m)) + ; + if (__i >= __j) + break; swap(*__i, *__j); ++__n_swaps; + // It is known that __m != __j + // If __m just moved, follow it + if (__m == __i) + __m = __j; ++__i; - break; - } - ++__i; } - } - // [__first, __i) == *__first and *__first < [__j, __last) and __j == __last - 1 - if (__i == __j) - { + } + // [__first, __i) < *__m and *__m <= [__i, __last) + if (__i != __m && __comp(*__m, *__i)) + { + swap(*__i, *__m); + ++__n_swaps; + } + // [__first, __i) < *__i and *__i <= [__i+1, __last) + if (__nth == __i) return; - } - while (true) - { - while (!__comp(*__first, *__i)) + if (__n_swaps == 0) + { + // We were given a perfectly partitioned sequence. Coincidence? + if (__nth < __i) { - ++__i; + // Check for [__first, __i) already sorted + __j = __m = __first; + while (++__j != __i) + { + if (__comp(*__j, *__m)) + // not yet sorted, so sort + goto not_sorted; + __m = __j; + } + // [__first, __i) sorted + return; } - while (__comp(*__first, *--__j)) - ; - if (__i >= __j) + else { - break; + // Check for [__i, __last) already sorted + __j = __m = __i; + while (++__j != __last) + { + if (__comp(*__j, *__m)) + // not yet sorted, so sort + goto not_sorted; + __m = __j; + } + // [__i, __last) sorted + return; } - swap(*__i, *__j); - ++__n_swaps; - ++__i; - } - // [__first, __i) == *__first and *__first < [__i, __last) - // The first part is sorted, - if (__nth < __i) - { - return; - } - // __nth_element the secod part - // __nth_element<_Compare>(__i, __nth, __last, __comp); - __first = __i; - goto __restart; - } - if (__comp(*__j, *__m)) - { - swap(*__i, *__j); - ++__n_swaps; - break; // found guard for downward moving __j, now use unguarded partition - } - } - } - ++__i; - // j points beyond range to be tested, *__lm1 is known to be <= *__m - // if not yet partitioned... - if (__i < __j) - { - // known that *(__i - 1) < *__m - while (true) - { - // __m still guards upward moving __i - while (__comp(*__i, *__m)) - { - ++__i; - } - // It is now known that a guard exists for downward moving __j - while (!__comp(*--__j, *__m)) - ; - if (__i >= __j) - { - break; - } - swap(*__i, *__j); - ++__n_swaps; - // It is known that __m != __j - // If __m just moved, follow it - if (__m == __i) - { - __m = __j; } - ++__i; - } - } - // [__first, __i) < *__m and *__m <= [__i, __last) - if (__i != __m && __comp(*__m, *__i)) - { - swap(*__i, *__m); - ++__n_swaps; - } - // [__first, __i) < *__i and *__i <= [__i+1, __last) - if (__nth == __i) - { - return; - } - if (__n_swaps == 0) - { - // We were given a perfectly partitioned sequence. Coincidence? 
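The partition loop above preserves the standard nth_element postcondition: the element at the nth position is the one a full sort would place there, and everything before it compares no greater. A small host-side check of that postcondition using plain std::nth_element (not the cuda::std:: internals being reformatted here):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main()
    {
      std::vector<int> v{9, 1, 8, 2, 7, 3, 6, 4, 5};
      auto nth = v.begin() + 4; // ask for the median position
      std::nth_element(v.begin(), nth, v.end());
      assert(*nth == 5); // the value a full sort would put at index 4
      for (auto it = v.begin(); it != nth; ++it)
        assert(*it <= *nth); // left part never exceeds the nth element
      for (auto it = nth + 1; it != v.end(); ++it)
        assert(*nth <= *it); // right part is never smaller
    }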
- if (__nth < __i) - { - // Check for [__first, __i) already sorted - __j = __m = __first; - while (++__j != __i) +not_sorted: + // __nth_element on range containing __nth + if (__nth < __i) { - if (__comp(*__j, *__m)) - { - // not yet sorted, so sort - goto not_sorted; - } - __m = __j; + // __nth_element<_Compare>(__first, __nth, __i, __comp); + __last = __i; } - // [__first, __i) sorted - return; - } - else - { - // Check for [__i, __last) already sorted - __j = __m = __i; - while (++__j != __last) + else { - if (__comp(*__j, *__m)) - { - // not yet sorted, so sort - goto not_sorted; - } - __m = __j; + // __nth_element<_Compare>(__i+1, __nth, __last, __comp); + __first = ++__i; } - // [__i, __last) sorted - return; - } - } - not_sorted: - // __nth_element on range containing __nth - if (__nth < __i) - { - // __nth_element<_Compare>(__first, __nth, __i, __comp); - __last = __i; } - else - { - // __nth_element<_Compare>(__i+1, __nth, __last, __comp); - __first = ++__i; - } - } } template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth, _RandomAccessIterator __last, _Compare __comp) { - using _Comp_ref = __comp_ref_type<_Compare>; - __nth_element<_Comp_ref>(__first, __nth, __last, __comp); + using _Comp_ref = __comp_ref_type<_Compare>; + __nth_element<_Comp_ref>(__first, __nth, __last, __comp); } template -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void nth_element(_RandomAccessIterator __first, _RandomAccessIterator __nth, _RandomAccessIterator __last) { - _CUDA_VSTD::nth_element(__first, __nth, __last, __less{}); + _CUDA_VSTD::nth_element(__first, __nth, __last, __less{}); } #endif _LIBCUDACXX_END_NAMESPACE_STD #if defined(_LIBCUDACXX_HAS_PARALLEL_ALGORITHMS) && _CCCL_STD_VER >= 2017 -# include <__pstl_algorithm> +# include <__pstl_algorithm> #endif #include -#endif // _LIBCUDACXX_ALGORITHM +#endif // _LIBCUDACXX_ALGORITHM diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/array b/libcudacxx/include/cuda/std/detail/libcxx/include/array index 82223dd3abd..5e1a0429d42 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/array +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/array @@ -123,6 +123,7 @@ template const T&& get(const array&&) noexce #include #include #include +#include // all public C++ headers provide the assertion handler #include #include #include @@ -138,10 +139,10 @@ template const T&& get(const array&&) noexce #include #include #include -#include -#include // all public C++ headers provide the assertion handler #include #include + +#include #include // standard-mandated includes @@ -191,7 +192,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array _CUDA_VSTD::fill_n(__elems_, _Size, __u); } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 void swap(array& __a) noexcept(__is_nothrow_swappable<_Tp>::value) + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 void + swap(array& __a) noexcept(__is_nothrow_swappable<_Tp>::value) { _CUDA_VSTD::swap_ranges(__elems_, __elems_ + _Size, __a.data()); } @@ -201,7 +203,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array { return iterator(__elems_); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator begin() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + begin() const noexcept { return const_iterator(__elems_); } @@ -209,12 +212,14 @@ struct _LIBCUDACXX_TEMPLATE_VIS 
array { return iterator(__elems_ + _Size); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator end() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + end() const noexcept { return const_iterator(__elems_ + _Size); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator rbegin() noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator + rbegin() noexcept { return reverse_iterator(end()); } @@ -223,7 +228,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array { return const_reverse_iterator(end()); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator rend() noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator + rend() noexcept { return reverse_iterator(begin()); } @@ -233,11 +239,13 @@ struct _LIBCUDACXX_TEMPLATE_VIS array return const_reverse_iterator(begin()); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator cbegin() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + cbegin() const noexcept { return begin(); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator cend() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + cend() const noexcept { return end(); } @@ -289,7 +297,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array return __elems_[__n]; } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference at(size_type __n) const + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + at(size_type __n) const { if (__n >= _Size) { @@ -302,7 +311,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array { return (*this)[0]; } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference front() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + front() const noexcept { return (*this)[0]; } @@ -310,7 +320,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array { return (*this)[_Size - 1]; } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference back() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + back() const noexcept { return (*this)[_Size - 1]; } @@ -319,7 +330,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array { return __elems_; } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const value_type* data() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const value_type* + data() const noexcept { return __elems_; } @@ -354,7 +366,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> { return nullptr; } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const value_type* data() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const value_type* + data() const noexcept { return nullptr; } @@ -375,7 +388,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> { return iterator(nullptr); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator begin() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI 
_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + begin() const noexcept { return const_iterator(nullptr); } @@ -383,12 +397,14 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> { return iterator(nullptr); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator end() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + end() const noexcept { return const_iterator(nullptr); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator rbegin() noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator + rbegin() noexcept { return reverse_iterator(end()); } @@ -397,7 +413,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> { return const_reverse_iterator(end()); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator rend() noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 reverse_iterator + rend() noexcept { return reverse_iterator(begin()); } @@ -407,11 +424,13 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> return const_reverse_iterator(begin()); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator cbegin() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + cbegin() const noexcept { return begin(); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator cend() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_iterator + cend() const noexcept { return end(); } @@ -440,10 +459,11 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> return true; } - _CCCL_DIAG_PUSH - _CCCL_DIAG_SUPPRESS_MSVC(4702) // Unreachable code +_CCCL_DIAG_PUSH +_CCCL_DIAG_SUPPRESS_MSVC(4702) // Unreachable code // element access: - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 reference operator[](size_type) noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 reference + operator[](size_type) noexcept { _LIBCUDACXX_ASSERT(false, "cannot call array::operator[] on a zero-sized array"); _LIBCUDACXX_UNREACHABLE(); @@ -464,7 +484,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> _LIBCUDACXX_UNREACHABLE(); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference at(size_type) const + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + at(size_type) const { __throw_out_of_range("array::at"); _LIBCUDACXX_UNREACHABLE(); @@ -477,7 +498,8 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> return *data(); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference front() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + front() const noexcept { _LIBCUDACXX_ASSERT(false, "cannot call array::front() on a zero-sized array"); _LIBCUDACXX_UNREACHABLE(); @@ -491,17 +513,18 @@ struct _LIBCUDACXX_TEMPLATE_VIS array<_Tp, 0> return *data(); } - _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference back() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const_reference + back() const noexcept { _LIBCUDACXX_ASSERT(false, "cannot call 
array::back() on a zero-sized array"); _LIBCUDACXX_UNREACHABLE(); return *data(); } - _CCCL_DIAG_POP +_CCCL_DIAG_POP }; #if _CCCL_STD_VER >= 2017 -template && ...)>> +template && ...)> > _CCCL_HOST_DEVICE array(_Tp, _Args...) -> array<_Tp, 1 + sizeof...(_Args)>; #endif // _CCCL_STD_VER >= 2017 @@ -548,18 +571,19 @@ operator>=(const array<_Tp, _Size>& __x, const array<_Tp, _Size>& __y) } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __enable_if_t<_Size == 0 || __is_swappable<_Tp>::value, void> -swap(array<_Tp, _Size>& __x, array<_Tp, _Size>& __y) noexcept(noexcept(__x.swap(__y))) +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + __enable_if_t<_Size == 0 || __is_swappable<_Tp>::value, void> + swap(array<_Tp, _Size>& __x, array<_Tp, _Size>& __y) noexcept(noexcept(__x.swap(__y))) { __x.swap(__y); } template -struct _LIBCUDACXX_TEMPLATE_VIS tuple_size> : public integral_constant +struct _LIBCUDACXX_TEMPLATE_VIS tuple_size > : public integral_constant {}; template -struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, array<_Tp, _Size>> +struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, array<_Tp, _Size> > { static_assert(_Ip < _Size, "Index out of bounds in std::tuple_element<> (std::array)"); typedef _Tp type; @@ -573,7 +597,8 @@ inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Tp& get(array<_Tp, _ } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Tp& get(const array<_Tp, _Size>& __a) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Tp& +get(const array<_Tp, _Size>& __a) noexcept { static_assert(_Ip < _Size, "Index out of bounds in std::get<> (const std::array)"); return __a.__elems_[_Ip]; @@ -587,7 +612,8 @@ inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Tp&& get(array<_Tp, } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Tp&& get(const array<_Tp, _Size>&& __a) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Tp&& +get(const array<_Tp, _Size>&& __a) noexcept { static_assert(_Ip < _Size, "Index out of bounds in std::get<> (const std::array &&)"); return _CUDA_VSTD::move(__a.__elems_[_Ip]); diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/atomic b/libcudacxx/include/cuda/std/detail/libcxx/include/atomic index d3cda3ed73e..298b69726f9 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/atomic +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/atomic @@ -556,6 +556,9 @@ void atomic_signal_fence(memory_order m) noexcept; # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler +#include +#include #include #include #include @@ -565,41 +568,42 @@ void atomic_signal_fence(memory_order m) noexcept; #include #include #include +#include #include #include -#include // all public C++ headers provide the assertion handler -#include -#include -#include -#include #include #include +#include + #ifdef _LIBCUDACXX_HAS_NO_THREADS -# error is not supported on this single threaded system +# error is not supported on this single threaded system #endif #ifdef _LIBCUDACXX_HAS_NO_ATOMIC_HEADER -# error is not implemented +# error is not implemented #endif #ifdef _LIBCUDACXX_UNSUPPORTED_THREAD_API -# error " is not supported on this system" +# error " is not supported on this system" #endif #ifdef kill_dependency -# error C++ standard library is incompatible with +# error C++ standard library is incompatible with #endif -#define 
_LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) \ - _LIBCUDACXX_DIAGNOSE_WARNING( \ - __m == memory_order_consume || __m == memory_order_acquire || __m == memory_order_acq_rel, \ - "memory order argument to atomic operation is invalid") +#define _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) \ + _LIBCUDACXX_DIAGNOSE_WARNING(__m == memory_order_consume || \ + __m == memory_order_acquire || \ + __m == memory_order_acq_rel, \ + "memory order argument to atomic operation is invalid") -#define _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) \ - _LIBCUDACXX_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \ - "memory order argument to atomic operation is invalid") +#define _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) \ + _LIBCUDACXX_DIAGNOSE_WARNING(__m == memory_order_release || \ + __m == memory_order_acq_rel, \ + "memory order argument to atomic operation is invalid") -#define _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__m, __f) \ - _LIBCUDACXX_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \ - "memory order argument to atomic operation is invalid") +#define _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__m, __f) \ + _LIBCUDACXX_DIAGNOSE_WARNING(__f == memory_order_release || \ + __f == memory_order_acq_rel, \ + "memory order argument to atomic operation is invalid") #if defined(_LIBCUDACXX_HAS_MSVC_ATOMIC_IMPL) # include @@ -610,25 +614,25 @@ void atomic_signal_fence(memory_order m) noexcept; #endif #if !defined(__CLANG_ATOMIC_BOOL_LOCK_FREE) && !defined(__GCC_ATOMIC_BOOL_LOCK_FREE) -# define ATOMIC_BOOL_LOCK_FREE 2 -# define ATOMIC_CHAR_LOCK_FREE 2 -# define ATOMIC_CHAR16_T_LOCK_FREE 2 -# define ATOMIC_CHAR32_T_LOCK_FREE 2 -# define ATOMIC_WCHAR_T_LOCK_FREE 2 -# define ATOMIC_SHORT_LOCK_FREE 2 -# define ATOMIC_INT_LOCK_FREE 2 -# define ATOMIC_LONG_LOCK_FREE 2 -# define ATOMIC_LLONG_LOCK_FREE 2 -# define ATOMIC_POINTER_LOCK_FREE 2 -#endif //! defined(__CLANG_ATOMIC_BOOL_LOCK_FREE) && !defined(__GCC_ATOMIC_BOOL_LOCK_FREE) +#define ATOMIC_BOOL_LOCK_FREE 2 +#define ATOMIC_CHAR_LOCK_FREE 2 +#define ATOMIC_CHAR16_T_LOCK_FREE 2 +#define ATOMIC_CHAR32_T_LOCK_FREE 2 +#define ATOMIC_WCHAR_T_LOCK_FREE 2 +#define ATOMIC_SHORT_LOCK_FREE 2 +#define ATOMIC_INT_LOCK_FREE 2 +#define ATOMIC_LONG_LOCK_FREE 2 +#define ATOMIC_LLONG_LOCK_FREE 2 +#define ATOMIC_POINTER_LOCK_FREE 2 +#endif //!defined(__CLANG_ATOMIC_BOOL_LOCK_FREE) && !defined(__GCC_ATOMIC_BOOL_LOCK_FREE) #ifndef __ATOMIC_RELAXED -# define __ATOMIC_RELAXED 0 -# define __ATOMIC_CONSUME 1 -# define __ATOMIC_ACQUIRE 2 -# define __ATOMIC_RELEASE 3 -# define __ATOMIC_ACQ_REL 4 -# define __ATOMIC_SEQ_CST 5 +#define __ATOMIC_RELAXED 0 +#define __ATOMIC_CONSUME 1 +#define __ATOMIC_ACQUIRE 2 +#define __ATOMIC_RELEASE 3 +#define __ATOMIC_ACQ_REL 4 +#define __ATOMIC_SEQ_CST 5 #endif //__ATOMIC_RELAXED _LIBCUDACXX_BEGIN_NAMESPACE_STD @@ -636,22 +640,20 @@ _LIBCUDACXX_BEGIN_NAMESPACE_STD // Figure out what the underlying type for `memory_order` would be if it were // declared as an unscoped enum (accounting for -fshort-enums). Use this result // to pin the underlying type in C++20. 
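A compile-time sketch of the underlying-type pinning described in the comment above: declare an unscoped enum with the same enumerators, let the compiler pick its underlying type (which honors options such as -fshort-enums), and fix the scoped enum to that type. Names here (legacy_memory_order, my_memory_order) are illustrative, not the library's:

    #include <type_traits>

    enum legacy_memory_order { relaxed_, consume_, acquire_, release_, acq_rel_, seq_cst_ };
    using memory_order_underlying_t = std::underlying_type<legacy_memory_order>::type;

    // Pin the scoped enum to whatever underlying type the unscoped enum got.
    enum class my_memory_order : memory_order_underlying_t
    {
      relaxed = relaxed_,
      consume = consume_,
      acquire = acquire_,
      release = release_,
      acq_rel = acq_rel_,
      seq_cst = seq_cst_
    };

    static_assert(std::is_same<std::underlying_type<my_memory_order>::type,
                               memory_order_underlying_t>::value,
                  "scoped enum is pinned to the legacy underlying type");

    int main() {}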
-enum __legacy_memory_order -{ - __mo_relaxed, - __mo_consume, - __mo_acquire, - __mo_release, - __mo_acq_rel, - __mo_seq_cst +enum __legacy_memory_order { + __mo_relaxed, + __mo_consume, + __mo_acquire, + __mo_release, + __mo_acq_rel, + __mo_seq_cst }; typedef underlying_type<__legacy_memory_order>::type __memory_order_underlying_t; #if _CCCL_STD_VER > 2017 -enum class memory_order : __memory_order_underlying_t -{ +enum class memory_order : __memory_order_underlying_t { relaxed = __mo_relaxed, consume = __mo_consume, acquire = __mo_acquire, @@ -669,8 +671,7 @@ inline constexpr auto memory_order_seq_cst = memory_order::seq_cst; #else -typedef enum memory_order -{ +typedef enum memory_order { memory_order_relaxed = __mo_relaxed, memory_order_consume = __mo_consume, memory_order_acquire = __mo_acquire, @@ -681,48 +682,43 @@ typedef enum memory_order #endif // _CCCL_STD_VER > 2017 -template -_LIBCUDACXX_INLINE_VISIBILITY bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp const& __rhs) -{ +template _LIBCUDACXX_INLINE_VISIBILITY +bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp const& __rhs) { #if defined(_CCCL_CUDA_COMPILER) - return __lhs == __rhs; + return __lhs == __rhs; #else - return memcmp(&__lhs, &__rhs, sizeof(_Tp)) == 0; + return memcmp(&__lhs, &__rhs, sizeof(_Tp)) == 0; #endif } static_assert((is_same::type, __memory_order_underlying_t>::value), - "unexpected underlying type for std::memory_order"); + "unexpected underlying type for std::memory_order"); -#if defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP) || defined(_LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS) +#if defined(_LIBCUDACXX_HAS_GCC_ATOMIC_IMP) || \ + defined(_LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS) // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because // the default operator= in an object is not volatile, a byte-by-byte copy // is required. -template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value> -__cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) -{ +template _LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t::value> +__cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) { __a_value = __val; } -template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value> -__cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) -{ - volatile char* __to = reinterpret_cast(&__a_value); - volatile char* __end = __to + sizeof(_Tp); +template _LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t::value> +__cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) { + volatile char* __to = reinterpret_cast(&__a_value); + volatile char* __end = __to + sizeof(_Tp); volatile const char* __from = reinterpret_cast(&__val); while (__to != __end) - { *__to++ = *__from++; - } } #endif // Headers are wrapped like so: (cuda::std::|std::)detail -namespace __detail -{ +namespace __detail { #if defined(_LIBCUDACXX_HAS_CUDA_ATOMIC_EXT) # include #endif @@ -737,98 +733,91 @@ namespace __detail // TODO: Maybe support C11 atomics? 
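A tiny sketch of the backend-selection pattern used around here: the chosen low-level implementation lives inside a detail namespace and the surrounding code re-exports only the names it needs. The namespace, function, and fallback below are placeholders; the only real API assumed is the GCC/Clang __atomic_fetch_add builtin:

    #include <cstdio>

    namespace mylib {
    namespace detail {
    #if defined(__GNUC__) || defined(__clang__)
    // Builtin-backed path.
    inline int cxx_atomic_fetch_add(int* p, int d)
    {
      return __atomic_fetch_add(p, d, __ATOMIC_SEQ_CST);
    }
    #else
    // Fallback for the sketch only: NOT atomic, just keeps the example portable.
    inline int cxx_atomic_fetch_add(int* p, int d)
    {
      int old = *p;
      *p += d;
      return old;
    }
    #endif
    } // namespace detail

    // Re-export just the operation the rest of the code needs.
    using detail::cxx_atomic_fetch_add;
    } // namespace mylib

    int main()
    {
      int x = 40;
      int old = mylib::cxx_atomic_fetch_add(&x, 2);
      std::printf("old=%d new=%d\n", old, x); // old=40 new=42
    }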
// #include #endif // _LIBCUDACXX_HAS_GCC_ATOMIC_IMP, _LIBCUDACXX_HAS_C_ATOMIC_IMP -} // namespace __detail +} using __detail::__cxx_atomic_base_impl; -using __detail::__cxx_atomic_compare_exchange_strong; -using __detail::__cxx_atomic_compare_exchange_weak; +using __detail::__cxx_atomic_ref_base_impl; +using __detail::__cxx_atomic_thread_fence; +using __detail::__cxx_atomic_signal_fence; +using __detail::__cxx_atomic_load; +using __detail::__cxx_atomic_store; using __detail::__cxx_atomic_exchange; +using __detail::__cxx_atomic_compare_exchange_weak; +using __detail::__cxx_atomic_compare_exchange_strong; using __detail::__cxx_atomic_fetch_add; -using __detail::__cxx_atomic_fetch_and; -using __detail::__cxx_atomic_fetch_or; using __detail::__cxx_atomic_fetch_sub; +using __detail::__cxx_atomic_fetch_or; +using __detail::__cxx_atomic_fetch_and; using __detail::__cxx_atomic_fetch_xor; -using __detail::__cxx_atomic_load; -using __detail::__cxx_atomic_ref_base_impl; -using __detail::__cxx_atomic_signal_fence; -using __detail::__cxx_atomic_store; -using __detail::__cxx_atomic_thread_fence; template -_LIBCUDACXX_INLINE_VISIBILITY _Tp kill_dependency(_Tp __y) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp kill_dependency(_Tp __y) noexcept { - return __y; + return __y; } #if defined(__CLANG_ATOMIC_BOOL_LOCK_FREE) -# define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE -# define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE -# define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE -# define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE -# define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE -# define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE -# define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE -# define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE -# define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE -# define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE +# define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE +# define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE +# define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE +# define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE +# define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE +# define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE +# define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE +# define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE +# define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE +# define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE #elif defined(__GCC_ATOMIC_BOOL_LOCK_FREE) -# define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE -# define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE -# define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE -# define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE -# define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE -# define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE -# define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE -# define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE -# define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE -# define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE +# define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE +# define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE +# define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE +# define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE +# 
define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE +# define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE +# define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE +# define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE +# define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE +# define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE #endif #ifdef _LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS -template -struct __cxx_atomic_lock_impl -{ - _LIBCUDACXX_INLINE_VISIBILITY __cxx_atomic_lock_impl() noexcept - : __a_value() - , __a_lock(0) - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit __cxx_atomic_lock_impl(_Tp value) noexcept - : __a_value(value) - , __a_lock(0) - {} +template +struct __cxx_atomic_lock_impl { + + _LIBCUDACXX_INLINE_VISIBILITY + __cxx_atomic_lock_impl() noexcept + : __a_value(), __a_lock(0) {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit + __cxx_atomic_lock_impl(_Tp value) noexcept + : __a_value(value), __a_lock(0) {} _Tp __a_value; mutable __cxx_atomic_base_impl<_LIBCUDACXX_ATOMIC_FLAG_TYPE, _Sco> __a_lock; - _LIBCUDACXX_INLINE_VISIBILITY void __lock() const volatile - { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; + _LIBCUDACXX_INLINE_VISIBILITY void __lock() const volatile { + while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) + /*spin*/; } - _LIBCUDACXX_INLINE_VISIBILITY void __lock() const - { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; + _LIBCUDACXX_INLINE_VISIBILITY void __lock() const { + while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) + /*spin*/; } - _LIBCUDACXX_INLINE_VISIBILITY void __unlock() const volatile - { + _LIBCUDACXX_INLINE_VISIBILITY void __unlock() const volatile { __cxx_atomic_store(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), memory_order_release); } - _LIBCUDACXX_INLINE_VISIBILITY void __unlock() const - { + _LIBCUDACXX_INLINE_VISIBILITY void __unlock() const { __cxx_atomic_store(&__a_lock, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), memory_order_release); } - _LIBCUDACXX_INLINE_VISIBILITY _Tp __read() const volatile - { + _LIBCUDACXX_INLINE_VISIBILITY _Tp __read() const volatile { __lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a_value); __unlock(); return __old; } - _LIBCUDACXX_INLINE_VISIBILITY _Tp __read() const - { + _LIBCUDACXX_INLINE_VISIBILITY _Tp __read() const { __lock(); _Tp __old = __a_value; __unlock(); @@ -837,47 +826,45 @@ struct __cxx_atomic_lock_impl }; template -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val) -{ +_LIBCUDACXX_INLINE_VISIBILITY +void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val) { __cxx_atomic_assign_volatile(__a->__a_value, __val); } template -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val) -{ +_LIBCUDACXX_INLINE_VISIBILITY +void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val) { __a->__a_value = __val; } template -_LIBCUDACXX_INLINE_VISIBILITY void -__cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val, memory_order) { __a->__lock(); __cxx_atomic_assign_volatile(__a->__a_value, __val); 
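The lock-based __cxx_atomic_lock_impl operations shown here all follow the same shape: take a tiny spin lock, touch the plain value, release. A host-only stand-in using std::atomic<bool> as the flag (illustrative only, not the library's class):

    #include <atomic>
    #include <cassert>

    template <class T>
    struct locked_atomic
    {
      T value{};
      mutable std::atomic<bool> locked{false};

      void acquire() const
      {
        // Same idea as __lock(): spin until the exchange observes "was unlocked".
        while (locked.exchange(true, std::memory_order_acquire))
          /* spin */;
      }
      void release() const
      {
        locked.store(false, std::memory_order_release);
      }

      void store(T v)
      {
        acquire();
        value = v;
        release();
      }
      T load() const
      {
        acquire();
        T old = value;
        release();
        return old;
      }
      T exchange(T v)
      {
        acquire();
        T old = value;
        value = v;
        release();
        return old;
      }
    };

    int main()
    {
      locked_atomic<int> a;
      a.store(7);
      assert(a.exchange(9) == 7);
      assert(a.load() == 9);
    }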
__a->__unlock(); } template -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val, memory_order) { __a->__lock(); __a->__a_value = __val; __a->__unlock(); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, memory_order) { return __a->__read(); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp, _Sco>* __a, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp, _Sco>* __a, memory_order) { return __a->__read(); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __value, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __value, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -886,94 +873,77 @@ __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __value, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __value, memory_order) { __a->__lock(); - _Tp __old = __a->__a_value; + _Tp __old = __a->__a_value; __a->__a_value = __value; __a->__unlock(); return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY bool __cxx_atomic_compare_exchange_strong( - volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { __a->__lock(); _Tp __temp; __cxx_atomic_assign_volatile(__temp, __a->__a_value); bool __ret = __temp == *__expected; - if (__ret) - { + if(__ret) __cxx_atomic_assign_volatile(__a->__a_value, __value); - } else - { __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - } __a->__unlock(); return __ret; } template -_LIBCUDACXX_INLINE_VISIBILITY bool __cxx_atomic_compare_exchange_strong( - __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { __a->__lock(); bool __ret = __a->__a_value == *__expected; - if (__ret) - { + if(__ret) __a->__a_value = __value; - } else - { *__expected = __a->__a_value; - } __a->__unlock(); return __ret; } template -_LIBCUDACXX_INLINE_VISIBILITY bool __cxx_atomic_compare_exchange_weak( - volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { __a->__lock(); _Tp __temp; __cxx_atomic_assign_volatile(__temp, __a->__a_value); bool __ret = __temp == *__expected; - if (__ret) - { + if(__ret) 
__cxx_atomic_assign_volatile(__a->__a_value, __value); - } else - { __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - } __a->__unlock(); return __ret; } template -_LIBCUDACXX_INLINE_VISIBILITY bool __cxx_atomic_compare_exchange_weak( - __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { __a->__lock(); bool __ret = __a->__a_value == *__expected; - if (__ret) - { + if(__ret) __a->__a_value = __value; - } else - { *__expected = __a->__a_value; - } __a->__unlock(); return __ret; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Td __delta, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -982,9 +952,9 @@ __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __de return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Td __delta, memory_order) { __a->__lock(); _Tp __old = __a->__a_value; __a->__a_value += __delta; @@ -993,9 +963,9 @@ __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memo } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* -__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*, _Sco>* __a, ptrdiff_t __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*, _Sco>* __a, + ptrdiff_t __delta, memory_order) { __a->__lock(); _Tp* __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -1004,9 +974,9 @@ __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*, _Sco>* __a, ptrdiff return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* -__cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*, _Sco>* __a, ptrdiff_t __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*, _Sco>* __a, + ptrdiff_t __delta, memory_order) { __a->__lock(); _Tp* __old = __a->__a_value; __a->__a_value += __delta; @@ -1015,9 +985,9 @@ __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*, _Sco>* __a, ptrdiff_t __delt } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Td __delta, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -1026,9 +996,9 @@ __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __de return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Td __delta, memory_order) { __a->__lock(); _Tp __old = __a->__a_value; __a->__a_value -= __delta; @@ -1037,9 +1007,9 @@ __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Td __delta, memo } template 
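// ---------------------------------------------------------------------------
// [Editorial note] The hunks around this point are the
// _LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS fallback: every operation on a type
// that is not lock-free takes a per-object spinlock, works on a plain value,
// and releases the lock; the requested memory_order parameter is ignored
// because the lock's acquire/release already orders the access. A minimal
// standalone sketch of that pattern follows. The names (locked_atomic,
// acquire, release_lock) are illustrative only and are not library symbols.
#include <atomic>

template <class T>
struct locked_atomic  // rough analogue of __cxx_atomic_lock_impl
{
  T value{};
  mutable std::atomic<bool> lock{false};

  void acquire() const
  {
    // acquire pairs with the release below; spin until the flag is free
    while (lock.exchange(true, std::memory_order_acquire))
      /* spin */;
  }
  void release_lock() const
  {
    lock.store(false, std::memory_order_release);
  }

  T load() const
  {
    acquire();
    T old = value;
    release_lock();
    return old;
  }
  T exchange(T desired)
  {
    acquire();
    T old = value;
    value = desired;
    release_lock();
    return old;
  }
  bool compare_exchange_strong(T& expected, T desired)
  {
    acquire();
    bool ok = (value == expected);
    if (ok)
      value = desired;  // success: install the new value
    else
      expected = value; // failure: report back the value actually observed
    release_lock();
    return ok;
  }
};
// End of editorial sketch; the patch continues below.
// ---------------------------------------------------------------------------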
-_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -1048,9 +1018,9 @@ __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pa return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old = __a->__a_value; __a->__a_value &= __pattern; @@ -1059,9 +1029,9 @@ __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, me } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -1070,9 +1040,9 @@ __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pat return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old = __a->__a_value; __a->__a_value |= __pattern; @@ -1081,9 +1051,9 @@ __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, mem } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old; __cxx_atomic_assign_volatile(__old, __a->__a_value); @@ -1092,9 +1062,9 @@ __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pa return __old; } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp -__cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, memory_order) -{ +_LIBCUDACXX_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, + _Tp __pattern, memory_order) { __a->__lock(); _Tp __old = __a->__a_value; __a->__a_value ^= __pattern; @@ -1102,56 +1072,44 @@ __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __pattern, me return __old; } -# if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) +#if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) -template -struct __cxx_is_always_lock_free -{ - enum - { - __value = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0) - }; -}; +template struct __cxx_is_always_lock_free { + enum { __value = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0) }; }; -# else +#else -template -struct __cxx_is_always_lock_free -{ - enum - { - __value = sizeof(_Tp) <= 8 - }; -}; +template struct __cxx_is_always_lock_free { + enum { __value = sizeof(_Tp) <= 8 }; }; -# endif // defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) +#endif // defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) template -struct __cxx_atomic_impl_conditional -{ - using type = 
__conditional_t<__cxx_is_always_lock_free<_Tp>::__value, - __cxx_atomic_base_impl<_Tp, _Sco>, - __cxx_atomic_lock_impl<_Tp, _Sco>>; +struct __cxx_atomic_impl_conditional { + using type = __conditional_t<__cxx_is_always_lock_free<_Tp>::__value, + __cxx_atomic_base_impl<_Tp, _Sco>, + __cxx_atomic_lock_impl<_Tp, _Sco> >; }; -template ::type> +template ::type > #else -template > +template > #endif //_LIBCUDACXX_ATOMIC_ONLY_USE_BUILTINS -struct __cxx_atomic_impl : public _Base -{ +struct __cxx_atomic_impl : public _Base { __cxx_atomic_impl() noexcept = default; _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit __cxx_atomic_impl(_Tp value) noexcept - : _Base(value) - {} + : _Base(value) {} }; -template -_LIBCUDACXX_INLINE_VISIBILITY __cxx_atomic_impl<_Tp, _Sco>* __cxx_atomic_rebind(_Tp* __inst) -{ - static_assert(sizeof(__cxx_atomic_impl<_Tp, _Sco>) == sizeof(_Tp), ""); - static_assert(alignof(__cxx_atomic_impl<_Tp, _Sco>) == alignof(_Tp), ""); - return (__cxx_atomic_impl<_Tp, _Sco>*) __inst; + +template +_LIBCUDACXX_INLINE_VISIBILITY +__cxx_atomic_impl<_Tp, _Sco>* __cxx_atomic_rebind(_Tp* __inst) { + static_assert(sizeof(__cxx_atomic_impl<_Tp, _Sco>) == sizeof(_Tp),""); + static_assert(alignof(__cxx_atomic_impl<_Tp, _Sco>) == alignof(_Tp),""); + return (__cxx_atomic_impl<_Tp, _Sco>*)__inst; } template @@ -1160,29 +1118,25 @@ using __cxx_atomic_ref_impl = __cxx_atomic_ref_base_impl<_Tp, _Sco>; #ifdef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE template , int _Sco = _Ty::__sco> -struct __cxx_atomic_poll_tester -{ - _Ty const volatile* __a; - _Tp __val; - memory_order __order; +struct __cxx_atomic_poll_tester { + _Ty const volatile* __a; + _Tp __val; + memory_order __order; - _LIBCUDACXX_INLINE_VISIBILITY __cxx_atomic_poll_tester(_Ty const volatile* __a_, _Tp __val_, memory_order __order_) + _LIBCUDACXX_INLINE_VISIBILITY __cxx_atomic_poll_tester(_Ty const volatile* __a_, _Tp __val_, memory_order __order_) : __a(__a_) , __val(__val_) , __order(__order_) - {} + {} - _LIBCUDACXX_INLINE_VISIBILITY bool operator()() const - { - return !(__cxx_atomic_load(__a, __order) == __val); - } + _LIBCUDACXX_INLINE_VISIBILITY bool operator()() const { + return !(__cxx_atomic_load(__a, __order) == __val); + } }; template , int _Sco = _Ty::__sco> -_LIBCUDACXX_INLINE_VISIBILITY void -__cxx_atomic_try_wait_slow_fallback(_Ty const volatile* __a, _Tp __val, memory_order __order) -{ - __libcpp_thread_poll_with_backoff(__cxx_atomic_poll_tester<_Ty>(__a, __val, __order)); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow_fallback(_Ty const volatile* __a, _Tp __val, memory_order __order) { + __libcpp_thread_poll_with_backoff(__cxx_atomic_poll_tester<_Ty>(__a, __val, __order)); } #endif @@ -1190,888 +1144,632 @@ __cxx_atomic_try_wait_slow_fallback(_Ty const volatile* __a, _Tp __val, memory_o #ifdef _LIBCUDACXX_HAS_PLATFORM_WAIT template ::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - auto* const __c = __libcpp_contention_state(__a); - __cxx_atomic_fetch_add(__cxx_atomic_rebind<_Sco>(&__c->__version), (__libcpp_platform_wait_t) 1, memory_order_relaxed); - __cxx_atomic_thread_fence(memory_order_seq_cst); - if (0 != __cxx_atomic_exchange(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t) 0, memory_order_relaxed)) - { - __libcpp_platform_wake(&__c->__version, true); - } -# endif +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const 
volatile* __a) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_fetch_add(__cxx_atomic_rebind<_Sco>(&__c->__version), (__libcpp_platform_wait_t)1, memory_order_relaxed); + __cxx_atomic_thread_fence(memory_order_seq_cst); + if (0 != __cxx_atomic_exchange(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t)0, memory_order_relaxed)) + __libcpp_platform_wake(&__c->__version, true); +#endif } template ::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ - __cxx_atomic_notify_all(__a); -} -template , - int _Sco = _Ty::__sco, - __enable_if_t::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void -__cxx_atomic_try_wait_slow(_Ty const volatile* __a, _Tp const __val, memory_order __order) -{ -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - auto* const __c = __libcpp_contention_state(__a); - __cxx_atomic_store(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t) 1, memory_order_relaxed); - __cxx_atomic_thread_fence(memory_order_seq_cst); - auto const __version = __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__version), memory_order_relaxed); - if (!__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) - { - return; - } - if (sizeof(__libcpp_platform_wait_t) < 8) - { - constexpr timespec __timeout = {2, 0}; // Hedge on rare 'int version' aliasing. - __libcpp_platform_wait(&__c->__version, __version, &__timeout); - } - else - { - __libcpp_platform_wait(&__c->__version, __version, nullptr); - } -# else - __cxx_atomic_try_wait_slow_fallback(__a, __val, __order); -# endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) { + __cxx_atomic_notify_all(__a); +} +template , int _Sco = _Ty::__sco, __enable_if_t::__value, int> = 1> +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow(_Ty const volatile* __a, _Tp const __val, memory_order __order) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_store(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t)1, memory_order_relaxed); + __cxx_atomic_thread_fence(memory_order_seq_cst); + auto const __version = __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__version), memory_order_relaxed); + if (!__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) + return; + if(sizeof(__libcpp_platform_wait_t) < 8) { + constexpr timespec __timeout = { 2, 0 }; // Hedge on rare 'int version' aliasing. 
+ __libcpp_platform_wait(&__c->__version, __version, &__timeout); + } + else + __libcpp_platform_wait(&__c->__version, __version, nullptr); +#else + __cxx_atomic_try_wait_slow_fallback(__a, __val, __order); +#endif // _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE } template ::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void -__cxx_atomic_try_wait_slow(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a, _Tp __val, memory_order) -{ -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - auto* const __c = __libcpp_contention_state(__a); - __cxx_atomic_fetch_add(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t) 1, memory_order_relaxed); - __cxx_atomic_thread_fence(memory_order_seq_cst); -# endif - __libcpp_platform_wait((_Tp*) __a, __val, nullptr); -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - __cxx_atomic_fetch_sub(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t) 1, memory_order_relaxed); -# endif +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a, _Tp __val, memory_order) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_fetch_add(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t)1, memory_order_relaxed); + __cxx_atomic_thread_fence(memory_order_seq_cst); +#endif + __libcpp_platform_wait((_Tp*)__a, __val, nullptr); +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + __cxx_atomic_fetch_sub(__cxx_atomic_rebind<_Sco>(&__c->__waiters), (ptrdiff_t)1, memory_order_relaxed); +#endif } template ::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - auto* const __c = __libcpp_contention_state(__a); - __cxx_atomic_thread_fence(memory_order_seq_cst); - if (0 != __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__waiters), memory_order_relaxed)) -# endif - __libcpp_platform_wake((_Tp*) __a, true); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_thread_fence(memory_order_seq_cst); + if (0 != __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__waiters), memory_order_relaxed)) +#endif + __libcpp_platform_wake((_Tp*)__a, true); } template ::__value, int> = 1> -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ -# ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE - auto* const __c = __libcpp_contention_state(__a); - __cxx_atomic_thread_fence(memory_order_seq_cst); - if (0 != __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__waiters), memory_order_relaxed)) -# endif - __libcpp_platform_wake((_Tp*) __a, false); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_thread_fence(memory_order_seq_cst); + if (0 != __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__waiters), memory_order_relaxed)) +#endif + __libcpp_platform_wake((_Tp*)__a, false); } #elif !defined(_LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE) template -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ - auto* const __c = __libcpp_contention_state(__a); - 
__cxx_atomic_thread_fence(memory_order_seq_cst); - if (0 == __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__credit), memory_order_relaxed)) - { - return; - } - if (0 != __cxx_atomic_exchange(__cxx_atomic_rebind<_Sco>(&__c->__credit), (ptrdiff_t) 0, memory_order_relaxed)) - { - __libcpp_mutex_lock(&__c->__mutex); - __libcpp_mutex_unlock(&__c->__mutex); - __libcpp_condvar_broadcast(&__c->__condvar); - } +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) { + auto * const __c = __libcpp_contention_state(__a); + __cxx_atomic_thread_fence(memory_order_seq_cst); + if(0 == __cxx_atomic_load(__cxx_atomic_rebind<_Sco>(&__c->__credit), memory_order_relaxed)) + return; + if(0 != __cxx_atomic_exchange(__cxx_atomic_rebind<_Sco>(&__c->__credit), (ptrdiff_t)0, memory_order_relaxed)) { + __libcpp_mutex_lock(&__c->__mutex); + __libcpp_mutex_unlock(&__c->__mutex); + __libcpp_condvar_broadcast(&__c->__condvar); + } } template -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) -{ - __cxx_atomic_notify_all(__a); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a) { + __cxx_atomic_notify_all(__a); } template -_LIBCUDACXX_INLINE_VISIBILITY void -__cxx_atomic_try_wait_slow(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a, _Tp const __val, memory_order __order) -{ - auto* const __c = __libcpp_contention_state(__a); - __libcpp_mutex_lock(&__c->__mutex); - __cxx_atomic_store(__cxx_atomic_rebind<_Sco>(&__c->__credit), (ptrdiff_t) 1, memory_order_relaxed); - __cxx_atomic_thread_fence(memory_order_seq_cst); - if (__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) - { - __libcpp_condvar_wait(&__c->__condvar, &__c->__mutex); - } - __libcpp_mutex_unlock(&__c->__mutex); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow(__cxx_atomic_impl<_Tp, _Sco> const volatile* __a, _Tp const __val, memory_order __order) { + auto * const __c = __libcpp_contention_state(__a); + __libcpp_mutex_lock(&__c->__mutex); + __cxx_atomic_store(__cxx_atomic_rebind<_Sco>(&__c->__credit), (ptrdiff_t)1, memory_order_relaxed); + __cxx_atomic_thread_fence(memory_order_seq_cst); + if (__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) + __libcpp_condvar_wait(&__c->__condvar, &__c->__mutex); + __libcpp_mutex_unlock(&__c->__mutex); } #else -template +template struct __atomic_wait_and_notify_supported -# if defined(__CUDA_MINIMUM_ARCH__) && __CUDA_MINIMUM_ARCH__ < 700 +#if defined(__CUDA_MINIMUM_ARCH__) && __CUDA_MINIMUM_ARCH__ < 700 : false_type -# else +#else : true_type -# endif +#endif {}; template > -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow(_Ty const volatile* __a, _Tp __val, memory_order __order) -{ - static_assert(__atomic_wait_and_notify_supported<_Tp>::value, "atomic wait operations are unsupported on Pascal"); - __cxx_atomic_try_wait_slow_fallback(__a, __val, __order); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_try_wait_slow(_Ty const volatile* __a, _Tp __val, memory_order __order) { + static_assert(__atomic_wait_and_notify_supported<_Tp>::value, "atomic wait operations are unsupported on Pascal"); + __cxx_atomic_try_wait_slow_fallback(__a, __val, __order); } template > -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(_Ty const volatile*) -{ - static_assert(__atomic_wait_and_notify_supported<_Tp>::value, - "atomic notify-one operations are unsupported on Pascal"); 
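// ---------------------------------------------------------------------------
// [Editorial note] The branch above handles the case where no platform wait
// primitive (futex) exists but a host contention table does: waiters publish
// a relaxed "credit" flag, re-check the value under a mutex, and sleep on a
// condition variable, while notifiers only touch the mutex/condvar when the
// credit flag was set. A rough standalone sketch of that protocol follows;
// contention_record, wait_if_still_equal and notify_waiters are illustrative
// names, not library symbols, and the real code keys the record off the
// atomic's address and loops around the wait to absorb spurious wake-ups.
#include <atomic>
#include <condition_variable>
#include <mutex>

struct contention_record  // hypothetical stand-in for the per-address state
{
  std::mutex mtx;
  std::condition_variable cv;
  std::atomic<long> credit{0};
};

template <class T>
void wait_if_still_equal(const std::atomic<T>& a, T old, contention_record& c)
{
  std::unique_lock<std::mutex> guard(c.mtx);
  c.credit.store(1, std::memory_order_relaxed); // announce a sleeper
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (a.load(std::memory_order_relaxed) == old) // value unchanged: sleep
    c.cv.wait(guard);
}

template <class T>
void notify_waiters(const std::atomic<T>&, contention_record& c)
{
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (c.credit.exchange(0, std::memory_order_relaxed) != 0)
  {
    // Taking and dropping the mutex pairs with the waiter's check-then-wait
    // under the same mutex, so the broadcast cannot be missed in between.
    { std::lock_guard<std::mutex> guard(c.mtx); }
    c.cv.notify_all();
  }
}
// End of editorial sketch; the patch continues below.
// ---------------------------------------------------------------------------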
+_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_one(_Ty const volatile*) { + static_assert(__atomic_wait_and_notify_supported<_Tp>::value, "atomic notify-one operations are unsupported on Pascal"); } template > -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(_Ty const volatile*) -{ - static_assert(__atomic_wait_and_notify_supported<_Tp>::value, - "atomic notify-all operations are unsupported on Pascal"); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_notify_all(_Ty const volatile*) { + static_assert(__atomic_wait_and_notify_supported<_Tp>::value, "atomic notify-all operations are unsupported on Pascal"); } #endif // _LIBCUDACXX_HAS_PLATFORM_WAIT || !defined(_LIBCUDACXX_HAS_NO_THREAD_CONTENTION_TABLE) template > -_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_wait(_Ty const volatile* __a, _Tp const __val, memory_order __order) -{ - for (int __i = 0; __i < _LIBCUDACXX_POLLING_COUNT; ++__i) - { - if (!__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) - { - return; - } - if (__i < 12) - { - __libcpp_thread_yield_processor(); - } - else - { - __libcpp_thread_yield(); +_LIBCUDACXX_INLINE_VISIBILITY void __cxx_atomic_wait(_Ty const volatile* __a, _Tp const __val, memory_order __order) { + for(int __i = 0; __i < _LIBCUDACXX_POLLING_COUNT; ++__i) { + if(!__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) + return; + if(__i < 12) + __libcpp_thread_yield_processor(); + else + __libcpp_thread_yield(); } - } - while (__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) - { - __cxx_atomic_try_wait_slow(__a, __val, __order); - } + while(__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val)) + __cxx_atomic_try_wait_slow(__a, __val, __order); } template -struct __atomic_base_storage -{ - mutable _Storage __a_; +struct __atomic_base_storage { + mutable _Storage __a_; - __atomic_base_storage() = default; - __atomic_base_storage(const __atomic_base_storage&) = default; - __atomic_base_storage(__atomic_base_storage&&) = default; + __atomic_base_storage() = default; + __atomic_base_storage(const __atomic_base_storage&) = default; + __atomic_base_storage(__atomic_base_storage&&) = default; - __atomic_base_storage& operator=(const __atomic_base_storage&) = default; - __atomic_base_storage& operator=(__atomic_base_storage&&) = default; + __atomic_base_storage& operator=(const __atomic_base_storage&) = default; + __atomic_base_storage& operator=(__atomic_base_storage&&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_storage(_Storage&& __a) noexcept - : __a_(_CUDA_VSTD::forward<_Storage>(__a)) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_storage(_Storage&& __a) noexcept : __a_(_CUDA_VSTD::forward<_Storage>(__a)) {} }; template -struct __atomic_base_core : public __atomic_base_storage<_Tp, _Storage> -{ - __atomic_base_core() = default; - __atomic_base_core(const __atomic_base_core&) = delete; - __atomic_base_core(__atomic_base_core&&) = delete; +struct __atomic_base_core : public __atomic_base_storage<_Tp, _Storage>{ + __atomic_base_core() = default; + __atomic_base_core(const __atomic_base_core&) = delete; + __atomic_base_core(__atomic_base_core&&) = delete; - __atomic_base_core& operator=(const __atomic_base_core&) = delete; - __atomic_base_core& operator=(__atomic_base_core&&) = delete; + __atomic_base_core& operator=(const __atomic_base_core&) = delete; + __atomic_base_core& operator=(__atomic_base_core&&) = delete; - _LIBCUDACXX_INLINE_VISIBILITY constexpr 
__atomic_base_core(_Storage&& __a) noexcept - : __atomic_base_storage<_Tp, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_core(_Storage&& __a) noexcept : __atomic_base_storage<_Tp, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} #if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) - static constexpr bool is_always_lock_free = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0); + static constexpr bool is_always_lock_free = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0); #endif // defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) - _LIBCUDACXX_INLINE_VISIBILITY bool is_lock_free() const volatile noexcept - { - return _LIBCUDACXX_ATOMIC_IS_LOCK_FREE(sizeof(_Tp)); - } - _LIBCUDACXX_INLINE_VISIBILITY bool is_lock_free() const noexcept - { - return static_cast<__atomic_base_core const volatile*>(this)->is_lock_free(); - } - _LIBCUDACXX_INLINE_VISIBILITY - - void - store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) - { - __cxx_atomic_store(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void store(_Tp __d, memory_order __m = memory_order_seq_cst) noexcept - _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) - { - __cxx_atomic_store(&this->__a_, __d, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp load(memory_order __m = memory_order_seq_cst) const volatile noexcept - _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) - { - return __cxx_atomic_load(&this->__a_, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp load(memory_order __m = memory_order_seq_cst) const noexcept - _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) - { - return __cxx_atomic_load(&this->__a_, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY operator _Tp() const volatile noexcept - { - return load(); - } - _LIBCUDACXX_INLINE_VISIBILITY operator _Tp() const noexcept - { - return load(); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_exchange(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_exchange(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) volatile noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, 
__d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); - } - else - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); - } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); - } - else - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); - } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + _LIBCUDACXX_INLINE_VISIBILITY + bool is_lock_free() const volatile noexcept + {return _LIBCUDACXX_ATOMIC_IS_LOCK_FREE(sizeof(_Tp));} + _LIBCUDACXX_INLINE_VISIBILITY + bool is_lock_free() const noexcept + {return static_cast<__atomic_base_core const volatile*>(this)->is_lock_free();} + _LIBCUDACXX_INLINE_VISIBILITY + + void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept + _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) + {__cxx_atomic_store(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void store(_Tp __d, memory_order __m = memory_order_seq_cst) noexcept + _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) + {__cxx_atomic_store(&this->__a_, __d, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp load(memory_order __m = memory_order_seq_cst) const volatile noexcept + _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) + {return __cxx_atomic_load(&this->__a_, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp load(memory_order __m = memory_order_seq_cst) const noexcept + _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) + {return __cxx_atomic_load(&this->__a_, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + operator _Tp() const volatile noexcept {return load();} + _LIBCUDACXX_INLINE_VISIBILITY + operator _Tp() const noexcept {return load();} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_exchange(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_exchange(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) volatile noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) volatile noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return 
__cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) volatile noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); } - else - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); - } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) noexcept { + if(memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if(memory_order_release == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) volatile noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); } - else - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); } - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - __cxx_atomic_wait(&this->__a_, __v, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const noexcept - { - __cxx_atomic_wait(&this->__a_, __v, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() volatile noexcept - { - __cxx_atomic_notify_one(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() noexcept - { - __cxx_atomic_notify_one(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() volatile noexcept 
- { - __cxx_atomic_notify_all(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() noexcept - { - __cxx_atomic_notify_all(&this->__a_); - } + _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile noexcept + {__cxx_atomic_wait(&this->__a_, __v, __m);} + _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const noexcept + {__cxx_atomic_wait(&this->__a_, __v, __m);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_one() volatile noexcept + {__cxx_atomic_notify_one(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_one() noexcept + {__cxx_atomic_notify_one(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_all() volatile noexcept + {__cxx_atomic_notify_all(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_all() noexcept + {__cxx_atomic_notify_all(&this->__a_);} }; template -struct __atomic_base_core<_Tp, true, _Storage> : public __atomic_base_storage<_Tp, _Storage> -{ - __atomic_base_core() = default; - __atomic_base_core(const __atomic_base_core&) = default; - __atomic_base_core(__atomic_base_core&&) = default; +struct __atomic_base_core<_Tp, true, _Storage> : public __atomic_base_storage<_Tp, _Storage>{ + __atomic_base_core() = default; + __atomic_base_core(const __atomic_base_core&) = default; + __atomic_base_core(__atomic_base_core&&) = default; - __atomic_base_core& operator=(const __atomic_base_core&) = default; - __atomic_base_core& operator=(__atomic_base_core&&) = default; + __atomic_base_core& operator=(const __atomic_base_core&) = default; + __atomic_base_core& operator=(__atomic_base_core&&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_core(_Storage&& __a) noexcept - : __atomic_base_storage<_Tp, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_core(_Storage&& __a) noexcept : __atomic_base_storage<_Tp, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} #if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) - static constexpr bool is_always_lock_free = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0); + static constexpr bool is_always_lock_free = _LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE(sizeof(_Tp), 0); #endif // defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) - _LIBCUDACXX_INLINE_VISIBILITY bool is_lock_free() const volatile noexcept - { - return _LIBCUDACXX_ATOMIC_IS_LOCK_FREE(sizeof(_Tp)); - } - _LIBCUDACXX_INLINE_VISIBILITY bool is_lock_free() const noexcept - { - return static_cast<__atomic_base_core const volatile*>(this)->is_lock_free(); - } - _LIBCUDACXX_INLINE_VISIBILITY - - void - store(_Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept - _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) - { - __cxx_atomic_store(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void store(_Tp __d, memory_order __m = memory_order_seq_cst) const noexcept - _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) - { - __cxx_atomic_store(&this->__a_, __d, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp load(memory_order __m = memory_order_seq_cst) const volatile noexcept - _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) - { - return __cxx_atomic_load(&this->__a_, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp load(memory_order __m = memory_order_seq_cst) const noexcept - _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) - { - return __cxx_atomic_load(&this->__a_, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY operator _Tp() const volatile noexcept - { - return load(); - } - _LIBCUDACXX_INLINE_VISIBILITY operator 
_Tp() const noexcept - { - return load(); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_exchange(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_exchange(&this->__a_, __d, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY bool compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) const - volatile noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) const noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) const - volatile noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __s, memory_order __f) const noexcept - _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f); - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); + _LIBCUDACXX_INLINE_VISIBILITY + bool is_lock_free() const volatile noexcept + {return _LIBCUDACXX_ATOMIC_IS_LOCK_FREE(sizeof(_Tp));} + _LIBCUDACXX_INLINE_VISIBILITY + bool is_lock_free() const noexcept + {return static_cast<__atomic_base_core const volatile*>(this)->is_lock_free();} + _LIBCUDACXX_INLINE_VISIBILITY + + void store(_Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept + _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) + {__cxx_atomic_store(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void store(_Tp __d, memory_order __m = memory_order_seq_cst) const noexcept + _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) + {__cxx_atomic_store(&this->__a_, __d, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp load(memory_order __m = memory_order_seq_cst) const volatile noexcept + _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) + {return __cxx_atomic_load(&this->__a_, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp load(memory_order __m = memory_order_seq_cst) const noexcept + _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) + {return __cxx_atomic_load(&this->__a_, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + operator _Tp() const volatile noexcept {return load();} + _LIBCUDACXX_INLINE_VISIBILITY + operator _Tp() const noexcept {return load();} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_exchange(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_exchange(&this->__a_, __d, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const volatile noexcept + 
_LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const volatile noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const noexcept + _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __s, __f);} + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const volatile noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const noexcept { + if(memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if(memory_order_release == __m) + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); } - else - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const volatile noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_weak(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) const noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, memory_order_relaxed); - } - else - { - return __cxx_atomic_compare_exchange_weak(&this->__a_, &__e, __d, __m, __m); + _LIBCUDACXX_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const noexcept { + if (memory_order_acq_rel == __m) + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); + else if (memory_order_release == __m) + return 
__cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); + else + return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); - } - else - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); - } - } - _LIBCUDACXX_INLINE_VISIBILITY bool - compare_exchange_strong(_Tp& __e, _Tp __d, memory_order __m = memory_order_seq_cst) const noexcept - { - if (memory_order_acq_rel == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_acquire); - } - else if (memory_order_release == __m) - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, memory_order_relaxed); - } - else - { - return __cxx_atomic_compare_exchange_strong(&this->__a_, &__e, __d, __m, __m); - } - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - __cxx_atomic_wait(&this->__a_, __v, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const noexcept - { - __cxx_atomic_wait(&this->__a_, __v, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() const volatile noexcept - { - __cxx_atomic_notify_one(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() const noexcept - { - __cxx_atomic_notify_one(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() const volatile noexcept - { - __cxx_atomic_notify_all(&this->__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() const noexcept - { - __cxx_atomic_notify_all(&this->__a_); - } + _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile noexcept + {__cxx_atomic_wait(&this->__a_, __v, __m);} + _LIBCUDACXX_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const noexcept + {__cxx_atomic_wait(&this->__a_, __v, __m);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_one() const volatile noexcept + {__cxx_atomic_notify_one(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_one() const noexcept + {__cxx_atomic_notify_one(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_all() const volatile noexcept + {__cxx_atomic_notify_all(&this->__a_);} + _LIBCUDACXX_INLINE_VISIBILITY void notify_all() const noexcept + {__cxx_atomic_notify_all(&this->__a_);} }; template -struct __atomic_base_arithmetic : public __atomic_base_core<_Tp, _Cq, _Storage> -{ - __atomic_base_arithmetic() = default; - __atomic_base_arithmetic(const __atomic_base_arithmetic&) = delete; - __atomic_base_arithmetic(__atomic_base_arithmetic&&) = delete; - - __atomic_base_arithmetic& operator=(const __atomic_base_arithmetic&) = delete; - __atomic_base_arithmetic& operator=(__atomic_base_arithmetic&&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_arithmetic(_Storage&& __a) noexcept - : __atomic_base_core<_Tp, _Cq, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} - - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return 
__cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++(int) volatile noexcept - { - return fetch_add(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++(int) noexcept - { - return fetch_add(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--(int) volatile noexcept - { - return fetch_sub(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--(int) noexcept - { - return fetch_sub(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++() volatile noexcept - { - return fetch_add(_Tp(1)) + _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++() noexcept - { - return fetch_add(_Tp(1)) + _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--() volatile noexcept - { - return fetch_sub(_Tp(1)) - _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--() noexcept - { - return fetch_sub(_Tp(1)) - _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator+=(_Tp __op) volatile noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator+=(_Tp __op) noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator-=(_Tp __op) volatile noexcept - { - return fetch_sub(__op) - __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator-=(_Tp __op) noexcept - { - return fetch_sub(__op) - __op; - } +struct __atomic_base_arithmetic : public __atomic_base_core<_Tp, _Cq, _Storage> { + __atomic_base_arithmetic() = default; + __atomic_base_arithmetic(const __atomic_base_arithmetic&) = delete; + __atomic_base_arithmetic(__atomic_base_arithmetic&&) = delete; + + __atomic_base_arithmetic& operator=(const __atomic_base_arithmetic&) = delete; + __atomic_base_arithmetic& operator=(__atomic_base_arithmetic&&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_arithmetic(_Storage&& __a) noexcept : __atomic_base_core<_Tp, _Cq, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++(int) volatile noexcept {return fetch_add(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++(int) noexcept {return fetch_add(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--(int) volatile noexcept {return fetch_sub(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--(int) noexcept {return fetch_sub(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY 
+ _Tp operator++() volatile noexcept {return fetch_add(_Tp(1)) + _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++() noexcept {return fetch_add(_Tp(1)) + _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--() volatile noexcept {return fetch_sub(_Tp(1)) - _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--() noexcept {return fetch_sub(_Tp(1)) - _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator+=(_Tp __op) volatile noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator+=(_Tp __op) noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator-=(_Tp __op) volatile noexcept {return fetch_sub(__op) - __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator-=(_Tp __op) noexcept {return fetch_sub(__op) - __op;} }; template -struct __atomic_base_arithmetic<_Tp, true, _Storage> : public __atomic_base_core<_Tp, true, _Storage> -{ - __atomic_base_arithmetic() = default; - __atomic_base_arithmetic(const __atomic_base_arithmetic&) = default; - __atomic_base_arithmetic(__atomic_base_arithmetic&&) = default; - - __atomic_base_arithmetic& operator=(const __atomic_base_arithmetic&) = default; - __atomic_base_arithmetic& operator=(__atomic_base_arithmetic&&) = default; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_arithmetic(_Storage&& __a) noexcept - : __atomic_base_core<_Tp, true, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} - - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++(int) const volatile noexcept - { - return fetch_add(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++(int) const noexcept - { - return fetch_add(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--(int) const volatile noexcept - { - return fetch_sub(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--(int) const noexcept - { - return fetch_sub(_Tp(1)); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++() const volatile noexcept - { - return fetch_add(_Tp(1)) + _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator++() const noexcept - { - return fetch_add(_Tp(1)) + _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--() const volatile noexcept - { - return fetch_sub(_Tp(1)) - _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator--() const noexcept - { - return fetch_sub(_Tp(1)) - _Tp(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator+=(_Tp __op) const volatile noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator+=(_Tp __op) const noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator-=(_Tp __op) const volatile noexcept - { - return fetch_sub(__op) - __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator-=(_Tp __op) const noexcept - { - return fetch_sub(__op) - __op; - } +struct __atomic_base_arithmetic<_Tp, 
true, _Storage> : public __atomic_base_core<_Tp, true, _Storage> { + __atomic_base_arithmetic() = default; + __atomic_base_arithmetic(const __atomic_base_arithmetic&) = default; + __atomic_base_arithmetic(__atomic_base_arithmetic&&) = default; + + __atomic_base_arithmetic& operator=(const __atomic_base_arithmetic&) = default; + __atomic_base_arithmetic& operator=(__atomic_base_arithmetic&&) = default; + + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_arithmetic(_Storage&& __a) noexcept : __atomic_base_core<_Tp, true, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++(int) const volatile noexcept {return fetch_add(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++(int) const noexcept {return fetch_add(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--(int) const volatile noexcept {return fetch_sub(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--(int) const noexcept {return fetch_sub(_Tp(1));} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++() const volatile noexcept {return fetch_add(_Tp(1)) + _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator++() const noexcept {return fetch_add(_Tp(1)) + _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--() const volatile noexcept {return fetch_sub(_Tp(1)) - _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator--() const noexcept {return fetch_sub(_Tp(1)) - _Tp(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator+=(_Tp __op) const volatile noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator+=(_Tp __op) const noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator-=(_Tp __op) const volatile noexcept {return fetch_sub(__op) - __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator-=(_Tp __op) const noexcept {return fetch_sub(__op) - __op;} }; template -struct __atomic_base_bitwise : public __atomic_base_arithmetic<_Tp, _Cq, _Storage> -{ - __atomic_base_bitwise() = default; - __atomic_base_bitwise(const __atomic_base_bitwise&) = delete; - __atomic_base_bitwise(__atomic_base_bitwise&&) = delete; - - __atomic_base_bitwise& operator=(const __atomic_base_bitwise&) = delete; - __atomic_base_bitwise& operator=(__atomic_base_bitwise&&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_bitwise(_Storage&& __a) noexcept - : __atomic_base_arithmetic<_Tp, _Cq, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} - - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_and(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_and(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp 
fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_or(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_or(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_xor(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_xor(&this->__a_, __op, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator&=(_Tp __op) volatile noexcept - { - return fetch_and(__op) & __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator&=(_Tp __op) noexcept - { - return fetch_and(__op) & __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator|=(_Tp __op) volatile noexcept - { - return fetch_or(__op) | __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator|=(_Tp __op) noexcept - { - return fetch_or(__op) | __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator^=(_Tp __op) volatile noexcept - { - return fetch_xor(__op) ^ __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator^=(_Tp __op) noexcept - { - return fetch_xor(__op) ^ __op; - } +struct __atomic_base_bitwise : public __atomic_base_arithmetic<_Tp, _Cq, _Storage> { + __atomic_base_bitwise() = default; + __atomic_base_bitwise(const __atomic_base_bitwise&) = delete; + __atomic_base_bitwise(__atomic_base_bitwise&&) = delete; + + __atomic_base_bitwise& operator=(const __atomic_base_bitwise&) = delete; + __atomic_base_bitwise& operator=(__atomic_base_bitwise&&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_bitwise(_Storage&& __a) noexcept : __atomic_base_arithmetic<_Tp, _Cq, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator&=(_Tp __op) volatile noexcept {return fetch_and(__op) & __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator&=(_Tp __op) noexcept {return fetch_and(__op) & __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator|=(_Tp __op) volatile noexcept {return fetch_or(__op) | __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator|=(_Tp __op) noexcept {return fetch_or(__op) | __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator^=(_Tp __op) volatile noexcept {return fetch_xor(__op) ^ __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator^=(_Tp __op) noexcept {return fetch_xor(__op) 
^ __op;} }; template -struct __atomic_base_bitwise<_Tp, true, _Storage> : public __atomic_base_arithmetic<_Tp, true, _Storage> -{ - __atomic_base_bitwise() = default; - __atomic_base_bitwise(const __atomic_base_bitwise&) = default; - __atomic_base_bitwise(__atomic_base_bitwise&&) = default; - - __atomic_base_bitwise& operator=(const __atomic_base_bitwise&) = default; - __atomic_base_bitwise& operator=(__atomic_base_bitwise&&) = default; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_bitwise(_Storage&& __a) noexcept - : __atomic_base_arithmetic<_Tp, true, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) - {} - - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_fetch_and(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_and(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_fetch_or(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_or(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return __cxx_atomic_fetch_xor(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_xor(&this->__a_, __op, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator&=(_Tp __op) const volatile noexcept - { - return fetch_and(__op) & __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator&=(_Tp __op) const noexcept - { - return fetch_and(__op) & __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator|=(_Tp __op) const volatile noexcept - { - return fetch_or(__op) | __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator|=(_Tp __op) const noexcept - { - return fetch_or(__op) | __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator^=(_Tp __op) const volatile noexcept - { - return fetch_xor(__op) ^ __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator^=(_Tp __op) const noexcept - { - return fetch_xor(__op) ^ __op; - } +struct __atomic_base_bitwise<_Tp, true, _Storage> : public __atomic_base_arithmetic<_Tp, true, _Storage> { + __atomic_base_bitwise() = default; + __atomic_base_bitwise(const __atomic_base_bitwise&) = default; + __atomic_base_bitwise(__atomic_base_bitwise&&) = default; + + __atomic_base_bitwise& operator=(const __atomic_base_bitwise&) = default; + __atomic_base_bitwise& operator=(__atomic_base_bitwise&&) = default; + + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_bitwise(_Storage&& __a) noexcept : __atomic_base_arithmetic<_Tp, true, _Storage>(_CUDA_VSTD::forward<_Storage>(__a)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);} + 
_LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const volatile noexcept + {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const noexcept + {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator&=(_Tp __op) const volatile noexcept {return fetch_and(__op) & __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator&=(_Tp __op) const noexcept {return fetch_and(__op) & __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator|=(_Tp __op) const volatile noexcept {return fetch_or(__op) | __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator|=(_Tp __op) const noexcept {return fetch_or(__op) | __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator^=(_Tp __op) const volatile noexcept {return fetch_xor(__op) ^ __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator^=(_Tp __op) const noexcept {return fetch_xor(__op) ^ __op;} }; template -using __atomic_select_base = - __conditional_t::value, - __atomic_base_arithmetic<_Tp, _Cq, _Storage>, - __conditional_t::value, - __atomic_base_bitwise<_Tp, _Cq, _Storage>, - __atomic_base_core<_Tp, _Cq, _Storage>>>; +using __atomic_select_base = __conditional_t::value, + __atomic_base_arithmetic<_Tp, _Cq, _Storage>, + __conditional_t::value, + __atomic_base_bitwise<_Tp, _Cq, _Storage>, + __atomic_base_core<_Tp, _Cq, _Storage> >>; template >> -struct __atomic_base : public _Base -{ - __atomic_base() = default; - __atomic_base(const __atomic_base&) = delete; - __atomic_base(__atomic_base&&) = delete; +struct __atomic_base : public _Base { + __atomic_base() = default; + __atomic_base(const __atomic_base&) = delete; + __atomic_base(__atomic_base&&) = delete; - __atomic_base& operator=(const __atomic_base&) = delete; - __atomic_base& operator=(__atomic_base&&) = delete; + __atomic_base& operator=(const __atomic_base&) = delete; + __atomic_base& operator=(__atomic_base&&) = delete; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base(const _Tp& __a) noexcept - : _Base(__cxx_atomic_impl<_Tp, _Sco>(__a)) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base(const _Tp& __a) noexcept : + _Base(__cxx_atomic_impl<_Tp, _Sco>(__a)) {} }; template >> -struct __atomic_base_ref : public _Base -{ - __atomic_base_ref() = default; - __atomic_base_ref(const __atomic_base_ref&) = default; - __atomic_base_ref(__atomic_base_ref&&) = default; +struct __atomic_base_ref : public _Base { + __atomic_base_ref() = default; + __atomic_base_ref(const __atomic_base_ref&) = default; + __atomic_base_ref(__atomic_base_ref&&) = default; - __atomic_base_ref& operator=(const __atomic_base_ref&) = default; - __atomic_base_ref& operator=(__atomic_base_ref&&) = default; + __atomic_base_ref& operator=(const __atomic_base_ref&) = default; + __atomic_base_ref& operator=(__atomic_base_ref&&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_base_ref(_Tp& __a) noexcept - : _Base(__cxx_atomic_ref_impl<_Tp, _Sco>(__a)) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_base_ref(_Tp& __a) noexcept : + _Base(__cxx_atomic_ref_impl<_Tp, _Sco>(__a)) {} }; #if defined(_LIBCUDACXX_ATOMIC_ALWAYS_LOCK_FREE) @@ -2081,918 +1779,1059 @@ constexpr bool __atomic_base_core<_Tp, _Cq, _Storage>::is_always_lock_free; // atomic template 
-struct atomic : public __atomic_base<_Tp> +struct atomic + : public __atomic_base<_Tp> { - typedef __atomic_base<_Tp> __base; - using value_type = _Tp; - - atomic() noexcept = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr atomic(_Tp __d) noexcept - : __base(__d) - {} + typedef __atomic_base<_Tp> __base; + using value_type = _Tp; - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator=(_Tp __d) volatile noexcept - { - __base::store(__d); - return __d; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator=(_Tp __d) noexcept - { - __base::store(__d); - return __d; - } + atomic() noexcept = default; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr atomic(_Tp __d) noexcept : __base(__d) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator=(_Tp __d) volatile noexcept + {__base::store(__d); return __d;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator=(_Tp __d) noexcept + {__base::store(__d); return __d;} }; // atomic template -struct atomic<_Tp*> : public __atomic_base<_Tp*> +struct atomic<_Tp*> + : public __atomic_base<_Tp*> { - typedef __atomic_base<_Tp*> __base; - using value_type = _Tp*; - - atomic() noexcept = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr atomic(_Tp* __d) noexcept - : __base(__d) - {} + typedef __atomic_base<_Tp*> __base; + using value_type = _Tp*; - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator=(_Tp* __d) volatile noexcept - { - __base::store(__d); - return __d; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator=(_Tp* __d) noexcept - { - __base::store(__d); - return __d; - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++(int) volatile noexcept - { - return fetch_add(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++(int) noexcept - { - return fetch_add(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--(int) volatile noexcept - { - return fetch_sub(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--(int) noexcept - { - return fetch_sub(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++() volatile noexcept - { - return fetch_add(1) + 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++() noexcept - { - return fetch_add(1) + 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--() volatile noexcept - { - return fetch_sub(1) - 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--() noexcept - { - return fetch_sub(1) - 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator+=(ptrdiff_t __op) volatile noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator+=(ptrdiff_t __op) noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator-=(ptrdiff_t __op) volatile noexcept - { - return fetch_sub(__op) - __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator-=(ptrdiff_t __op) noexcept - { - return fetch_sub(__op) - __op; - } + atomic() noexcept = default; + 
_LIBCUDACXX_INLINE_VISIBILITY + constexpr atomic(_Tp* __d) noexcept : __base(__d) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator=(_Tp* __d) volatile noexcept + {__base::store(__d); return __d;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator=(_Tp* __d) noexcept + {__base::store(__d); return __d;} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + volatile noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + volatile noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++(int) volatile noexcept {return fetch_add(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++(int) noexcept {return fetch_add(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--(int) volatile noexcept {return fetch_sub(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--(int) noexcept {return fetch_sub(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++() volatile noexcept {return fetch_add(1) + 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++() noexcept {return fetch_add(1) + 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--() volatile noexcept {return fetch_sub(1) - 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--() noexcept {return fetch_sub(1) - 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator+=(ptrdiff_t __op) volatile noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator+=(ptrdiff_t __op) noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator-=(ptrdiff_t __op) volatile noexcept {return fetch_sub(__op) - __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator-=(ptrdiff_t __op) noexcept {return fetch_sub(__op) - __op;} }; // atomic_ref template -struct atomic_ref : public __atomic_base_ref<_Tp> + struct atomic_ref + : public __atomic_base_ref<_Tp> { - typedef __atomic_base_ref<_Tp> __base; - using value_type = _Tp; + typedef __atomic_base_ref<_Tp> __base; + using value_type = _Tp; - static constexpr size_t required_alignment = sizeof(_Tp); + static constexpr size_t required_alignment = sizeof(_Tp); - static constexpr bool is_always_lock_free = sizeof(_Tp) <= 8; + static constexpr bool is_always_lock_free = sizeof(_Tp) <= 8; - _LIBCUDACXX_INLINE_VISIBILITY explicit atomic_ref(_Tp& __ref) - : __base(__ref) - {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit atomic_ref(_Tp& __ref) : __base(__ref) {} - _LIBCUDACXX_INLINE_VISIBILITY _Tp operator=(_Tp __v) const volatile noexcept - { - __base::store(__v); - return __v; - } + _LIBCUDACXX_INLINE_VISIBILITY + _Tp operator=(_Tp __v) const volatile noexcept {__base::store(__v); return __v;} }; // atomic_ref template -struct atomic_ref<_Tp*> : public __atomic_base_ref<_Tp*> + struct atomic_ref<_Tp*> + : public __atomic_base_ref<_Tp*> { - typedef __atomic_base_ref<_Tp*> __base; - using value_type = _Tp*; + typedef __atomic_base_ref<_Tp*> __base; + using value_type = _Tp*; - static constexpr size_t required_alignment = sizeof(_Tp*); + static constexpr size_t required_alignment = sizeof(_Tp*); - 
static constexpr bool is_always_lock_free = sizeof(_Tp*) <= 8; + static constexpr bool is_always_lock_free = sizeof(_Tp*) <= 8; - _LIBCUDACXX_INLINE_VISIBILITY explicit atomic_ref(_Tp*& __ref) - : __base(__ref) - {} + _LIBCUDACXX_INLINE_VISIBILITY + explicit atomic_ref(_Tp*& __ref) : __base(__ref) {} - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator=(_Tp* __v) const noexcept - { - __base::store(__v); - return __v; - } + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator=(_Tp* __v) const noexcept {__base::store(__v); return __v;} - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_add(&this->__a_, __op, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const noexcept - { - return __cxx_atomic_fetch_sub(&this->__a_, __op, __m); - } + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + const noexcept + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) + const noexcept + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++(int) const noexcept - { - return fetch_add(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--(int) const noexcept - { - return fetch_sub(1); - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator++() const noexcept - { - return fetch_add(1) + 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator--() const noexcept - { - return fetch_sub(1) - 1; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator+=(ptrdiff_t __op) const noexcept - { - return fetch_add(__op) + __op; - } - _LIBCUDACXX_INLINE_VISIBILITY _Tp* operator-=(ptrdiff_t __op) const noexcept - { - return fetch_sub(__op) - __op; - } + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++(int) const noexcept {return fetch_add(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--(int) const noexcept {return fetch_sub(1);} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator++() const noexcept {return fetch_add(1) + 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator--() const noexcept {return fetch_sub(1) - 1;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator+=(ptrdiff_t __op) const noexcept {return fetch_add(__op) + __op;} + _LIBCUDACXX_INLINE_VISIBILITY + _Tp* operator-=(ptrdiff_t __op) const noexcept {return fetch_sub(__op) - __op;} }; // atomic_is_lock_free template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_is_lock_free(const volatile atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_is_lock_free(const volatile atomic<_Tp>* __o) noexcept { - return __o->is_lock_free(); + return __o->is_lock_free(); } template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_is_lock_free(const atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_is_lock_free(const atomic<_Tp>* __o) noexcept { - return __o->is_lock_free(); + return __o->is_lock_free(); } // atomic_init template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_init(volatile atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_init(volatile atomic<_Tp>* __o, _Tp __d) noexcept { - __cxx_atomic_init(&__o->__a_, __d); + __cxx_atomic_init(&__o->__a_, __d); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_init(atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_init(atomic<_Tp>* __o, _Tp __d) noexcept { - __cxx_atomic_init(&__o->__a_, 
__d); + __cxx_atomic_init(&__o->__a_, __d); } // atomic_store template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_store(volatile atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_store(volatile atomic<_Tp>* __o, _Tp __d) noexcept { - __o->store(__d); + __o->store(__d); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_store(atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_store(atomic<_Tp>* __o, _Tp __d) noexcept { - __o->store(__d); + __o->store(__d); } // atomic_store_explicit template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_store_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_store_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) { - __o->store(__d, __m); + __o->store(__d, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_store_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void +atomic_store_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept _LIBCUDACXX_CHECK_STORE_MEMORY_ORDER(__m) { - __o->store(__d, __m); + __o->store(__d, __m); } // atomic_load template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_load(const volatile atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_load(const volatile atomic<_Tp>* __o) noexcept { - return __o->load(); + return __o->load(); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_load(const atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_load(const atomic<_Tp>* __o) noexcept { - return __o->load(); + return __o->load(); } // atomic_load_explicit template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_load_explicit(const volatile atomic<_Tp>* __o, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_load_explicit(const volatile atomic<_Tp>* __o, memory_order __m) noexcept _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) { - return __o->load(__m); + return __o->load(__m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) noexcept _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) { - return __o->load(__m); + return __o->load(__m); } // atomic_exchange template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_exchange(volatile atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_exchange(volatile atomic<_Tp>* __o, _Tp __d) noexcept { - return __o->exchange(__d); + return __o->exchange(__d); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_exchange(atomic<_Tp>* __o, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_exchange(atomic<_Tp>* __o, _Tp __d) noexcept { - return __o->exchange(__d); + return __o->exchange(__d); } // atomic_exchange_explicit template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_exchange_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_exchange_explicit(volatile atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept { - return __o->exchange(__d, __m); + return __o->exchange(__d, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp atomic_exchange_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp +atomic_exchange_explicit(atomic<_Tp>* __o, _Tp __d, memory_order __m) noexcept { - return __o->exchange(__d, __m); + return 
__o->exchange(__d, __m); } // atomic_compare_exchange_weak template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_weak(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_weak(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept { - return __o->compare_exchange_weak(*__e, __d); + return __o->compare_exchange_weak(*__e, __d); } template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_weak(atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_weak(atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept { - return __o->compare_exchange_weak(*__e, __d); + return __o->compare_exchange_weak(*__e, __d); } // atomic_compare_exchange_strong template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_strong(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_strong(volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept { - return __o->compare_exchange_strong(*__e, __d); + return __o->compare_exchange_strong(*__e, __d); } template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_strong(atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_strong(atomic<_Tp>* __o, _Tp* __e, _Tp __d) noexcept { - return __o->compare_exchange_strong(*__e, __d); + return __o->compare_exchange_strong(*__e, __d); } // atomic_compare_exchange_weak_explicit template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_weak_explicit( - volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d, memory_order __s, memory_order __f) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_weak_explicit(volatile atomic<_Tp>* __o, _Tp* __e, + _Tp __d, + memory_order __s, memory_order __f) noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) { - return __o->compare_exchange_weak(*__e, __d, __s, __f); + return __o->compare_exchange_weak(*__e, __d, __s, __f); } template -_LIBCUDACXX_INLINE_VISIBILITY bool -atomic_compare_exchange_weak_explicit(atomic<_Tp>* __o, _Tp* __e, _Tp __d, memory_order __s, memory_order __f) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_weak_explicit(atomic<_Tp>* __o, _Tp* __e, _Tp __d, + memory_order __s, memory_order __f) noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) { - return __o->compare_exchange_weak(*__e, __d, __s, __f); + return __o->compare_exchange_weak(*__e, __d, __s, __f); } // atomic_compare_exchange_strong_explicit template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_strong_explicit( - volatile atomic<_Tp>* __o, _Tp* __e, _Tp __d, memory_order __s, memory_order __f) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_strong_explicit(volatile atomic<_Tp>* __o, + _Tp* __e, _Tp __d, + memory_order __s, memory_order __f) noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) { - return __o->compare_exchange_strong(*__e, __d, __s, __f); + return __o->compare_exchange_strong(*__e, __d, __s, __f); } template -_LIBCUDACXX_INLINE_VISIBILITY bool atomic_compare_exchange_strong_explicit( - atomic<_Tp>* __o, _Tp* __e, _Tp __d, memory_order __s, memory_order __f) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_compare_exchange_strong_explicit(atomic<_Tp>* __o, _Tp* __e, + _Tp __d, + memory_order __s, memory_order __f) noexcept _LIBCUDACXX_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) { - return __o->compare_exchange_strong(*__e, __d, __s, __f); + 
return __o->compare_exchange_strong(*__e, __d, __s, __f); } // atomic_wait template -_LIBCUDACXX_INLINE_VISIBILITY void -atomic_wait(const volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_wait(const volatile atomic<_Tp>* __o, + typename atomic<_Tp>::value_type __v) noexcept { - return __o->wait(__v); + return __o->wait(__v); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_wait(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_wait(const atomic<_Tp>* __o, + typename atomic<_Tp>::value_type __v) noexcept { - return __o->wait(__v); + return __o->wait(__v); } // atomic_wait_explicit template -_LIBCUDACXX_INLINE_VISIBILITY void -atomic_wait_explicit(const volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_wait_explicit(const volatile atomic<_Tp>* __o, + typename atomic<_Tp>::value_type __v, + memory_order __m) noexcept _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) { - return __o->wait(__v, __m); + return __o->wait(__v, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY void -atomic_wait_explicit(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __v, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_wait_explicit(const atomic<_Tp>* __o, + typename atomic<_Tp>::value_type __v, + memory_order __m) noexcept _LIBCUDACXX_CHECK_LOAD_MEMORY_ORDER(__m) { - return __o->wait(__v, __m); + return __o->wait(__v, __m); } // atomic_notify_one template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_notify_one(volatile atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_notify_one(volatile atomic<_Tp>* __o) noexcept { - __o->notify_one(); + __o->notify_one(); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_notify_one(atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_notify_one(atomic<_Tp>* __o) noexcept { - __o->notify_one(); + __o->notify_one(); } // atomic_notify_one template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_notify_all(volatile atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_notify_all(volatile atomic<_Tp>* __o) noexcept { - __o->notify_all(); + __o->notify_all(); } template -_LIBCUDACXX_INLINE_VISIBILITY void atomic_notify_all(atomic<_Tp>* __o) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +void atomic_notify_all(atomic<_Tp>* __o) noexcept { - __o->notify_all(); + __o->notify_all(); } // atomic_fetch_add template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_add(volatile atomic<_Tp>* __o, _Tp __op) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_add(volatile atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_add(__op); + return __o->fetch_add(__op); } template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_add(atomic<_Tp>* __o, _Tp __op) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_add(atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_add(__op); + return __o->fetch_add(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* atomic_fetch_add(volatile atomic<_Tp*>* __o, ptrdiff_t __op) 
noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_add(volatile atomic<_Tp*>* __o, ptrdiff_t __op) noexcept { - return __o->fetch_add(__op); + return __o->fetch_add(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* atomic_fetch_add(atomic<_Tp*>* __o, ptrdiff_t __op) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_add(atomic<_Tp*>* __o, ptrdiff_t __op) noexcept { - return __o->fetch_add(__op); + return __o->fetch_add(__op); } // atomic_fetch_add_explicit template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_add_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_add_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_add(__op, __m); + return __o->fetch_add(__op, __m); } template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_add_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_add_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_add(__op, __m); + return __o->fetch_add(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* -atomic_fetch_add_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_add_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, + memory_order __m) noexcept { - return __o->fetch_add(__op, __m); + return __o->fetch_add(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* atomic_fetch_add_explicit(atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) noexcept { - return __o->fetch_add(__op, __m); + return __o->fetch_add(__op, __m); } // atomic_fetch_sub template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_sub(volatile atomic<_Tp>* __o, _Tp __op) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_sub(volatile atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_sub(__op); + return __o->fetch_sub(__op); } template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_sub(atomic<_Tp>* __o, _Tp __op) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_sub(atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_sub(__op); + return __o->fetch_sub(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* atomic_fetch_sub(volatile atomic<_Tp*>* __o, ptrdiff_t __op) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_sub(volatile atomic<_Tp*>* __o, ptrdiff_t __op) noexcept { - return __o->fetch_sub(__op); + return __o->fetch_sub(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* atomic_fetch_sub(atomic<_Tp*>* __o, ptrdiff_t __op) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_sub(atomic<_Tp*>* __o, ptrdiff_t __op) noexcept { - 
return __o->fetch_sub(__op); + return __o->fetch_sub(__op); } // atomic_fetch_sub_explicit template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_sub_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_sub_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_sub(__op, __m); + return __o->fetch_sub(__op, __m); } template _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t<(is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, _Tp> - atomic_fetch_sub_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept +__enable_if_t +< + (is_integral<_Tp>::value && !is_same<_Tp, bool>::value) || is_floating_point<_Tp>::value, + _Tp +> +atomic_fetch_sub_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_sub(__op, __m); + return __o->fetch_sub(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* -atomic_fetch_sub_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* +atomic_fetch_sub_explicit(volatile atomic<_Tp*>* __o, ptrdiff_t __op, + memory_order __m) noexcept { - return __o->fetch_sub(__op, __m); + return __o->fetch_sub(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY _Tp* +_LIBCUDACXX_INLINE_VISIBILITY +_Tp* atomic_fetch_sub_explicit(atomic<_Tp*>* __o, ptrdiff_t __op, memory_order __m) noexcept { - return __o->fetch_sub(__op, __m); + return __o->fetch_sub(__op, __m); } // atomic_fetch_and template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_and(volatile atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_and(__op); + return __o->fetch_and(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_and(atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_and(__op); + return __o->fetch_and(__op); } // atomic_fetch_and_explicit template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_and_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_and(__op, __m); + return __o->fetch_and(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_and_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_and(__op, __m); + return __o->fetch_and(__op, __m); } // atomic_fetch_or template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_or(volatile atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_or(__op); + return __o->fetch_or(__op); 
} template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_or(atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_or(__op); + return __o->fetch_or(__op); } // atomic_fetch_or_explicit template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_or_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_or(__op, __m); + return __o->fetch_or(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_or_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_or(__op, __m); + return __o->fetch_or(__op, __m); } // atomic_fetch_xor template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_xor(volatile atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_xor(__op); + return __o->fetch_xor(__op); } template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_xor(atomic<_Tp>* __o, _Tp __op) noexcept { - return __o->fetch_xor(__op); + return __o->fetch_xor(__op); } // atomic_fetch_xor_explicit template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_xor_explicit(volatile atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_xor(__op, __m); + return __o->fetch_xor(__op, __m); } template -_LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value && !is_same<_Tp, bool>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY +__enable_if_t +< + is_integral<_Tp>::value && !is_same<_Tp, bool>::value, + _Tp +> atomic_fetch_xor_explicit(atomic<_Tp>* __o, _Tp __op, memory_order __m) noexcept { - return __o->fetch_xor(__op, __m); + return __o->fetch_xor(__op, __m); } // flag type and operations typedef struct atomic_flag { - __cxx_atomic_impl<_LIBCUDACXX_ATOMIC_FLAG_TYPE, 0> __a_; - - _LIBCUDACXX_INLINE_VISIBILITY bool test(memory_order __m = memory_order_seq_cst) const volatile noexcept - { - return _LIBCUDACXX_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m); - } - _LIBCUDACXX_INLINE_VISIBILITY bool test(memory_order __m = memory_order_seq_cst) const noexcept - { - return _LIBCUDACXX_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m); - } - - _LIBCUDACXX_INLINE_VISIBILITY bool test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept - { - return __cxx_atomic_exchange(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), __m); - } - _LIBCUDACXX_INLINE_VISIBILITY bool test_and_set(memory_order __m = memory_order_seq_cst) noexcept - { - return __cxx_atomic_exchange(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void clear(memory_order __m = memory_order_seq_cst) volatile noexcept - 
{ - __cxx_atomic_store(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void clear(memory_order __m = memory_order_seq_cst) noexcept - { - __cxx_atomic_store(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), __m); - } + __cxx_atomic_impl<_LIBCUDACXX_ATOMIC_FLAG_TYPE, 0> __a_; + + _LIBCUDACXX_INLINE_VISIBILITY + bool test(memory_order __m = memory_order_seq_cst) const volatile noexcept + {return _LIBCUDACXX_ATOMIC_FLAG_TYPE(true)==__cxx_atomic_load(&__a_, __m);} + _LIBCUDACXX_INLINE_VISIBILITY + bool test(memory_order __m = memory_order_seq_cst) const noexcept + {return _LIBCUDACXX_ATOMIC_FLAG_TYPE(true)==__cxx_atomic_load(&__a_, __m);} + + _LIBCUDACXX_INLINE_VISIBILITY + bool test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept + {return __cxx_atomic_exchange(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), __m);} + _LIBCUDACXX_INLINE_VISIBILITY + bool test_and_set(memory_order __m = memory_order_seq_cst) noexcept + {return __cxx_atomic_exchange(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(true), __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void clear(memory_order __m = memory_order_seq_cst) volatile noexcept + {__cxx_atomic_store(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void clear(memory_order __m = memory_order_seq_cst) noexcept + {__cxx_atomic_store(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(false), __m);} #if !defined(__CUDA_MINIMUM_ARCH__) || __CUDA_MINIMUM_ARCH__ >= 700 - _LIBCUDACXX_INLINE_VISIBILITY void wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile noexcept - { - __cxx_atomic_wait(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(__v), __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(bool __v, memory_order __m = memory_order_seq_cst) const noexcept - { - __cxx_atomic_wait(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(__v), __m); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() volatile noexcept - { - __cxx_atomic_notify_one(&__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_one() noexcept - { - __cxx_atomic_notify_one(&__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() volatile noexcept - { - __cxx_atomic_notify_all(&__a_); - } - _LIBCUDACXX_INLINE_VISIBILITY void notify_all() noexcept - { - __cxx_atomic_notify_all(&__a_); - } + _LIBCUDACXX_INLINE_VISIBILITY + void wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile noexcept + {__cxx_atomic_wait(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(__v), __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void wait(bool __v, memory_order __m = memory_order_seq_cst) const noexcept + {__cxx_atomic_wait(&__a_, _LIBCUDACXX_ATOMIC_FLAG_TYPE(__v), __m);} + _LIBCUDACXX_INLINE_VISIBILITY + void notify_one() volatile noexcept + {__cxx_atomic_notify_one(&__a_);} + _LIBCUDACXX_INLINE_VISIBILITY + void notify_one() noexcept + {__cxx_atomic_notify_one(&__a_);} + _LIBCUDACXX_INLINE_VISIBILITY + void notify_all() volatile noexcept + {__cxx_atomic_notify_all(&__a_);} + _LIBCUDACXX_INLINE_VISIBILITY + void notify_all() noexcept + {__cxx_atomic_notify_all(&__a_);} #endif - atomic_flag() noexcept = default; + atomic_flag() noexcept = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr atomic_flag(bool __b) noexcept - : __a_(__b) - {} // EXTENSION + _LIBCUDACXX_INLINE_VISIBILITY constexpr + atomic_flag(bool __b) noexcept : __a_(__b) {} // EXTENSION - atomic_flag(const atomic_flag&) = delete; - atomic_flag& operator=(const atomic_flag&) = delete; - atomic_flag& operator=(const atomic_flag&) volatile = delete; + atomic_flag(const atomic_flag&) = delete; + atomic_flag& 
operator=(const atomic_flag&) = delete; + atomic_flag& operator=(const atomic_flag&) volatile = delete; } atomic_flag; -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test(const volatile atomic_flag* __o) noexcept + +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test(const volatile atomic_flag* __o) noexcept { - return __o->test(); + return __o->test(); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test(const atomic_flag* __o) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test(const atomic_flag* __o) noexcept { - return __o->test(); + return __o->test(); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool +inline _LIBCUDACXX_INLINE_VISIBILITY +bool atomic_flag_test_explicit(const volatile atomic_flag* __o, memory_order __m) noexcept { - return __o->test(__m); + return __o->test(__m); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test_explicit(const atomic_flag* __o, memory_order __m) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test_explicit(const atomic_flag* __o, memory_order __m) noexcept { - return __o->test(__m); + return __o->test(__m); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test_and_set(volatile atomic_flag* __o) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test_and_set(volatile atomic_flag* __o) noexcept { - return __o->test_and_set(); + return __o->test_and_set(); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test_and_set(atomic_flag* __o) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test_and_set(atomic_flag* __o) noexcept { - return __o->test_and_set(); + return __o->test_and_set(); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool +inline _LIBCUDACXX_INLINE_VISIBILITY +bool atomic_flag_test_and_set_explicit(volatile atomic_flag* __o, memory_order __m) noexcept { - return __o->test_and_set(__m); + return __o->test_and_set(__m); } -inline _LIBCUDACXX_INLINE_VISIBILITY bool atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bool +atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) noexcept { - return __o->test_and_set(__m); + return __o->test_and_set(__m); } -inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_clear(volatile atomic_flag* __o) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +atomic_flag_clear(volatile atomic_flag* __o) noexcept { - __o->clear(); + __o->clear(); } -inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_clear(atomic_flag* __o) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +atomic_flag_clear(atomic_flag* __o) noexcept { - __o->clear(); + __o->clear(); } -inline _LIBCUDACXX_INLINE_VISIBILITY void +inline _LIBCUDACXX_INLINE_VISIBILITY +void atomic_flag_clear_explicit(volatile atomic_flag* __o, memory_order __m) noexcept { - __o->clear(__m); + __o->clear(__m); } -inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) noexcept { - __o->clear(__m); + __o->clear(__m); } #if !defined(__CUDA_MINIMUM_ARCH__) || __CUDA_MINIMUM_ARCH__ >= 700 -inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_wait(const volatile atomic_flag* __o, bool __v) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +void +atomic_flag_wait(const volatile atomic_flag* __o, bool __v) noexcept { - __o->wait(__v); + __o->wait(__v); } -inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_wait(const 
atomic_flag* __o, bool __v) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_wait(const atomic_flag* __o, bool __v) noexcept
 {
-  __o->wait(__v);
+    __o->wait(__v);
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void
-atomic_flag_wait_explicit(const volatile atomic_flag* __o, bool __v, memory_order __m) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_wait_explicit(const volatile atomic_flag* __o,
+                          bool __v, memory_order __m) noexcept
 {
-  __o->wait(__v, __m);
+    __o->wait(__v, __m);
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void
-atomic_flag_wait_explicit(const atomic_flag* __o, bool __v, memory_order __m) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_wait_explicit(const atomic_flag* __o,
+                          bool __v, memory_order __m) noexcept
 {
-  __o->wait(__v, __m);
+    __o->wait(__v, __m);
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_notify_one(volatile atomic_flag* __o) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_notify_one(volatile atomic_flag* __o) noexcept
 {
-  __o->notify_one();
+    __o->notify_one();
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_notify_one(atomic_flag* __o) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_notify_one(atomic_flag* __o) noexcept
 {
-  __o->notify_one();
+    __o->notify_one();
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_notify_all(volatile atomic_flag* __o) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_notify_all(volatile atomic_flag* __o) noexcept
 {
-  __o->notify_all();
+    __o->notify_all();
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_flag_notify_all(atomic_flag* __o) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_flag_notify_all(atomic_flag* __o) noexcept
 {
-  __o->notify_all();
+    __o->notify_all();
 }
 #endif
 // fences
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_thread_fence(memory_order __m) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_thread_fence(memory_order __m) noexcept
 {
-  __cxx_atomic_thread_fence(__m);
+    __cxx_atomic_thread_fence(__m);
 }
-inline _LIBCUDACXX_INLINE_VISIBILITY void atomic_signal_fence(memory_order __m) noexcept
+inline _LIBCUDACXX_INLINE_VISIBILITY
+void
+atomic_signal_fence(memory_order __m) noexcept
 {
-  __cxx_atomic_signal_fence(__m);
+    __cxx_atomic_signal_fence(__m);
 }
 // Atomics for standard typedef types
-typedef atomic<bool> atomic_bool;
-typedef atomic<char> atomic_char;
-typedef atomic<signed char> atomic_schar;
-typedef atomic<unsigned char> atomic_uchar;
-typedef atomic<short> atomic_short;
-typedef atomic<unsigned short> atomic_ushort;
-typedef atomic<int> atomic_int;
-typedef atomic<unsigned int> atomic_uint;
-typedef atomic<long> atomic_long;
-typedef atomic<unsigned long> atomic_ulong;
-typedef atomic<long long> atomic_llong;
+typedef atomic<bool>               atomic_bool;
+typedef atomic<char>               atomic_char;
+typedef atomic<signed char>        atomic_schar;
+typedef atomic<unsigned char>      atomic_uchar;
+typedef atomic<short>              atomic_short;
+typedef atomic<unsigned short>     atomic_ushort;
+typedef atomic<int>                atomic_int;
+typedef atomic<unsigned int>       atomic_uint;
+typedef atomic<long>               atomic_long;
+typedef atomic<unsigned long>      atomic_ulong;
+typedef atomic<long long>          atomic_llong;
 typedef atomic<unsigned long long> atomic_ullong;
-typedef atomic<char16_t> atomic_char16_t;
-typedef atomic<char32_t> atomic_char32_t;
-typedef atomic<wchar_t> atomic_wchar_t;
+typedef atomic<char16_t>           atomic_char16_t;
+typedef atomic<char32_t>           atomic_char32_t;
+typedef atomic<wchar_t>            atomic_wchar_t;
-typedef atomic<int_least8_t> atomic_int_least8_t;
-typedef atomic<uint_least8_t> atomic_uint_least8_t;
-typedef atomic<int_least16_t> atomic_int_least16_t;
+typedef atomic< int_least8_t>   atomic_int_least8_t;
+typedef atomic<uint_least8_t>   atomic_uint_least8_t;
+typedef atomic< int_least16_t>  atomic_int_least16_t;
 typedef atomic<uint_least16_t>  atomic_uint_least16_t;
-typedef atomic<int_least32_t> atomic_int_least32_t;
+typedef atomic< int_least32_t>  atomic_int_least32_t;
 typedef atomic<uint_least32_t>  atomic_uint_least32_t;
-typedef atomic<int_least64_t> atomic_int_least64_t;
+typedef atomic< int_least64_t>  atomic_int_least64_t;
 typedef atomic<uint_least64_t>  atomic_uint_least64_t;
-typedef atomic<int_fast8_t> atomic_int_fast8_t;
-typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
-typedef atomic<int_fast16_t> atomic_int_fast16_t;
+typedef atomic< int_fast8_t>   atomic_int_fast8_t;
+typedef atomic<uint_fast8_t>   atomic_uint_fast8_t;
+typedef atomic< int_fast16_t>  atomic_int_fast16_t;
 typedef atomic<uint_fast16_t>  atomic_uint_fast16_t;
-typedef atomic<int_fast32_t> atomic_int_fast32_t;
+typedef atomic< int_fast32_t>  atomic_int_fast32_t;
 typedef atomic<uint_fast32_t>  atomic_uint_fast32_t;
-typedef atomic<int_fast64_t> atomic_int_fast64_t;
+typedef atomic< int_fast64_t>  atomic_int_fast64_t;
 typedef atomic<uint_fast64_t>  atomic_uint_fast64_t;
-typedef atomic<int8_t> atomic_int8_t;
-typedef atomic<uint8_t> atomic_uint8_t;
-typedef atomic<int16_t> atomic_int16_t;
+typedef atomic< int8_t>   atomic_int8_t;
+typedef atomic<uint8_t>   atomic_uint8_t;
+typedef atomic< int16_t>  atomic_int16_t;
 typedef atomic<uint16_t>  atomic_uint16_t;
-typedef atomic<int32_t> atomic_int32_t;
+typedef atomic< int32_t>  atomic_int32_t;
 typedef atomic<uint32_t>  atomic_uint32_t;
-typedef atomic<int64_t> atomic_int64_t;
+typedef atomic< int64_t>  atomic_int64_t;
 typedef atomic<uint64_t>  atomic_uint64_t;
-typedef atomic<intptr_t> atomic_intptr_t;
+typedef atomic< intptr_t>  atomic_intptr_t;
 typedef atomic<uintptr_t>  atomic_uintptr_t;
-typedef atomic<size_t> atomic_size_t;
+typedef atomic<size_t>     atomic_size_t;
 typedef atomic<ptrdiff_t>  atomic_ptrdiff_t;
-typedef atomic<intmax_t> atomic_intmax_t;
+typedef atomic< intmax_t>  atomic_intmax_t;
 typedef atomic<uintmax_t>  atomic_uintmax_t;
 static_assert(ATOMIC_INT_LOCK_FREE, "This library assumes atomic<int> is lock-free.");
-typedef atomic<int> atomic_signed_lock_free;
-typedef atomic<unsigned int> atomic_unsigned_lock_free;
+typedef atomic<int>          atomic_signed_lock_free;
+typedef atomic<unsigned int> atomic_unsigned_lock_free;
-#define ATOMIC_FLAG_INIT \
-  {                      \
-    false                \
-  }
-#define ATOMIC_VAR_INIT(__v) \
-  {                          \
-    __v                      \
-  }
+#define ATOMIC_FLAG_INIT {false}
+#define ATOMIC_VAR_INIT(__v) {__v}
 _LIBCUDACXX_END_NAMESPACE_STD
 #include
 #include
-#endif // _LIBCUDACXX_ATOMIC
+#endif // _LIBCUDACXX_ATOMIC
diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/barrier b/libcudacxx/include/cuda/std/detail/libcxx/include/barrier
index a2a9b8f3516..4127fe75266 100644
--- a/libcudacxx/include/cuda/std/detail/libcxx/include/barrier
+++ b/libcudacxx/include/cuda/std/detail/libcxx/include/barrier
@@ -53,404 +53,416 @@ namespace std
 # pragma system_header
 #endif // no system header
+#include // all public C++ headers provide the assertion handler
+#include
+#include
 #include
 #include
 #include
-#include // all public C++ headers provide the assertion handler
-#include
+
 #include
-#include
 #ifdef _LIBCUDACXX_HAS_NO_THREADS
-# error <barrier> is not supported on this single threaded system
+# error <barrier> is not supported on this single threaded system
 #endif
 _LIBCUDACXX_BEGIN_NAMESPACE_STD
 struct __empty_completion
 {
-  inline _LIBCUDACXX_INLINE_VISIBILITY void operator()() noexcept {}
+    inline _LIBCUDACXX_INLINE_VISIBILITY
+    void operator()() noexcept { }
 };
 #ifndef _LIBCUDACXX_HAS_NO_TREE_BARRIER
-template
-class alignas(64) __barrier_base
-{
-  ptrdiff_t __expected;
-  __atomic_base __expected_adjustment;
-  _CompletionF __completion;
+template
+class alignas(64) __barrier_base {
-  using __phase_t = uint8_t;
-  __atomic_base<__phase_t, _Sco> __phase;
-  struct alignas(64) __state_t
-  {
-    struct
-    {
-      __atomic_base<__phase_t, _Sco> __phase = ATOMIC_VAR_INIT(0);
-    } __tickets[64];
-  };
-  ::std::vector<__state_t> __state;
-  inline _LIBCUDACXX_INLINE_VISIBILITY
bool __arrive(__phase_t const __old_phase) - { - __phase_t const __half_step = __old_phase + 1, __full_step = __old_phase + 2; -# ifndef _LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX - ptrdiff_t __current = __libcpp_thread_favorite_barrier_index, -# else - ptrdiff_t __current = 0, -# endif - __current_expected = __expected, __last_node = (__current_expected >> 1); - for (size_t __round = 0;; ++__round) + struct alignas(64) __state_t { - _LIBCUDACXX_ASSERT(__round <= 63, ""); - if (__current_expected == 1) - { - return true; - } - for (;; ++__current) - { -# ifndef _LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX - if (0 == __round) - { - if (__current >= __current_expected) - { - __current = 0; - } - __libcpp_thread_favorite_barrier_index = __current; - } -# endif - _LIBCUDACXX_ASSERT(__current <= __last_node, ""); - __phase_t expect = __old_phase; - if (__current == __last_node && (__current_expected & 1)) - { - if (__state[__current].__tickets[__round].__phase.compare_exchange_strong( - expect, __full_step, memory_order_acq_rel)) - { - break; // I'm 1 in 1, go to next __round - } - _LIBCUDACXX_ASSERT(expect == __full_step, ""); - } - else if (__state[__current].__tickets[__round].__phase.compare_exchange_strong( - expect, __half_step, memory_order_acq_rel)) - { - return false; // I'm 1 in 2, done with arrival - } - else if (expect == __half_step) - { - if (__state[__current].__tickets[__round].__phase.compare_exchange_strong( - expect, __full_step, memory_order_acq_rel)) - { - break; // I'm 2 in 2, go to next __round - } - _LIBCUDACXX_ASSERT(expect == __full_step, ""); + struct { + __atomic_base<__phase_t, _Sco> __phase = ATOMIC_VAR_INIT(0); + } __tickets[64]; + }; + ::std::vector<__state_t> __state; + + inline _LIBCUDACXX_INLINE_VISIBILITY + bool __arrive(__phase_t const __old_phase) + { + __phase_t const __half_step = __old_phase + 1, __full_step = __old_phase + 2; +#ifndef _LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX + ptrdiff_t __current = __libcpp_thread_favorite_barrier_index, +#else + ptrdiff_t __current = 0, +#endif + __current_expected = __expected, + __last_node = (__current_expected >> 1); + for(size_t __round = 0;; ++__round) { + _LIBCUDACXX_ASSERT(__round <= 63, ""); + if(__current_expected == 1) + return true; + for(;;++__current) { +#ifndef _LIBCUDACXX_HAS_NO_THREAD_FAVORITE_BARRIER_INDEX + if(0 == __round) { + if(__current >= __current_expected) + __current = 0; + __libcpp_thread_favorite_barrier_index = __current; + } +#endif + _LIBCUDACXX_ASSERT(__current <= __last_node, ""); + __phase_t expect = __old_phase; + if(__current == __last_node && (__current_expected & 1)) + { + if(__state[__current].__tickets[__round].__phase.compare_exchange_strong(expect, __full_step, memory_order_acq_rel)) + break; // I'm 1 in 1, go to next __round + _LIBCUDACXX_ASSERT(expect == __full_step, ""); + } + else if(__state[__current].__tickets[__round].__phase.compare_exchange_strong(expect, __half_step, memory_order_acq_rel)) + { + return false; // I'm 1 in 2, done with arrival + } + else if(expect == __half_step) + { + if(__state[__current].__tickets[__round].__phase.compare_exchange_strong(expect, __full_step, memory_order_acq_rel)) + break; // I'm 2 in 2, go to next __round + _LIBCUDACXX_ASSERT(expect == __full_step, ""); + } + _LIBCUDACXX_ASSERT(__round == 0 && expect == __full_step, ""); + } + __current_expected = (__current_expected >> 1) + (__current_expected & 1); + __current &= ~( 1 << __round ); + __last_node &= ~( 1 << __round ); } - _LIBCUDACXX_ASSERT(__round == 0 && expect 
== __full_step, ""); - } - __current_expected = (__current_expected >> 1) + (__current_expected & 1); - __current &= ~(1 << __round); - __last_node &= ~(1 << __round); } - } public: - using arrival_token = __phase_t; - - inline _LIBCUDACXX_INLINE_VISIBILITY __barrier_base(ptrdiff_t __expected, _CompletionF __completion = _CompletionF()) - : __expected(__expected) - , __expected_adjustment(0) - , __completion(__completion) - , __phase(0) - , __state((__expected + 1) >> 1) - { - _LIBCUDACXX_ASSERT(__expected >= 0, ""); - } + using arrival_token = __phase_t; - inline _LIBCUDACXX_INLINE_VISIBILITY ~__barrier_base() = default; + inline _LIBCUDACXX_INLINE_VISIBILITY + __barrier_base(ptrdiff_t __expected, _CompletionF __completion = _CompletionF()) + : __expected(__expected), __expected_adjustment(0), __completion(__completion), + __phase(0), __state((__expected+1) >> 1) + { + _LIBCUDACXX_ASSERT(__expected >= 0, ""); + } - __barrier_base(__barrier_base const&) = delete; - __barrier_base& operator=(__barrier_base const&) = delete; + inline _LIBCUDACXX_INLINE_VISIBILITY + ~__barrier_base() = default; - _CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY arrival_token arrive(ptrdiff_t update = 1) - { - _LIBCUDACXX_ASSERT(update > 0, ""); - auto __old_phase = __phase.load(memory_order_relaxed); - for (; update; --update) + __barrier_base(__barrier_base const&) = delete; + __barrier_base& operator=(__barrier_base const&) = delete; + + _CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY + arrival_token arrive(ptrdiff_t update = 1) { - if (__arrive(__old_phase)) - { - __completion(); - __expected += __expected_adjustment.load(memory_order_relaxed); - __expected_adjustment.store(0, memory_order_relaxed); - __phase.store(__old_phase + 2, memory_order_release); - } + _LIBCUDACXX_ASSERT(update > 0, ""); + auto __old_phase = __phase.load(memory_order_relaxed); + for(; update; --update) + if(__arrive(__old_phase)) { + __completion(); + __expected += __expected_adjustment.load(memory_order_relaxed); + __expected_adjustment.store(0, memory_order_relaxed); + __phase.store(__old_phase + 2, memory_order_release); + } + return __old_phase; + } + inline _LIBCUDACXX_INLINE_VISIBILITY + void wait(arrival_token&& __old_phase) const + { + __libcpp_thread_poll_with_backoff([=]() -> bool { + return __phase.load(memory_order_acquire) != __old_phase; + }); + } + inline _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_wait() + { + wait(arrive()); + } + inline _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_drop() + { + __expected_adjustment.fetch_sub(1, memory_order_relaxed); + (void)arrive(); } - return __old_phase; - } - inline _LIBCUDACXX_INLINE_VISIBILITY void wait(arrival_token&& __old_phase) const - { - __libcpp_thread_poll_with_backoff([=]() -> bool { - return __phase.load(memory_order_acquire) != __old_phase; - }); - } - inline _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_wait() - { - wait(arrive()); - } - inline _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_drop() - { - __expected_adjustment.fetch_sub(1, memory_order_relaxed); - (void) arrive(); - } }; #else -# if _LIBCUDACXX_CUDA_ABI_VERSION < 3 -# define _LIBCUDACXX_BARRIER_ALIGNMENTS alignas(64) -# else -# define _LIBCUDACXX_BARRIER_ALIGNMENTS -# endif +# if _LIBCUDACXX_CUDA_ABI_VERSION < 3 +# define _LIBCUDACXX_BARRIER_ALIGNMENTS alignas(64) +# else +# define _LIBCUDACXX_BARRIER_ALIGNMENTS +# endif -template -class __barrier_poll_tester_phase -{ - _Barrier const* __this; - typename _Barrier::arrival_token __phase; +template +class __barrier_poll_tester_phase { 
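// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the library sources: the token-based
// arrive()/wait() protocol implemented by __barrier_base above is what is
// exposed publicly as cuda::std::barrier. A minimal host-side usage, assuming
// <cuda/std/barrier> is included and exactly two threads call participant();
// the names `b` and `participant` are made up for the example.
cuda::std::barrier<> b(2); // two expected arrivals per phase

void participant()
{
  auto token = b.arrive();        // count this arrival, keep the phase token
  /* ... work that may overlap with the other thread's arrival ... */
  b.wait(cuda::std::move(token)); // block until both arrivals of this phase
  b.arrive_and_wait();            // shorthand for wait(arrive())
  b.arrive_and_drop();            // arrive, and lower the expected count for later phases
}
// ---------------------------------------------------------------------------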
+ _Barrier const* __this; + typename _Barrier::arrival_token __phase; public: - _LIBCUDACXX_INLINE_VISIBILITY - __barrier_poll_tester_phase(_Barrier const* __this_, typename _Barrier::arrival_token&& __phase_) - : __this(__this_) - , __phase(_CUDA_VSTD::move(__phase_)) - {} - - _LIBCUDACXX_INLINE_VISIBILITY bool operator()() const - { - return __this->__try_wait(__phase); - } + _LIBCUDACXX_INLINE_VISIBILITY + __barrier_poll_tester_phase(_Barrier const* __this_, + typename _Barrier::arrival_token&& __phase_) + : __this(__this_) + , __phase(_CUDA_VSTD::move(__phase_)) + {} + + _LIBCUDACXX_INLINE_VISIBILITY + bool operator()() const + { + return __this->__try_wait(__phase); + } }; -template -class __barrier_poll_tester_parity -{ - _Barrier const* __this; - bool __parity; +template +class __barrier_poll_tester_parity { + _Barrier const* __this; + bool __parity; public: - _LIBCUDACXX_INLINE_VISIBILITY __barrier_poll_tester_parity(_Barrier const* __this_, bool __parity_) - : __this(__this_) - , __parity(__parity_) - {} - - inline _LIBCUDACXX_INLINE_VISIBILITY bool operator()() const - { - return __this->__try_wait_parity(__parity); - } + _LIBCUDACXX_INLINE_VISIBILITY + __barrier_poll_tester_parity(_Barrier const* __this_, bool __parity_) + : __this(__this_) + , __parity(__parity_) + {} + + inline _LIBCUDACXX_INLINE_VISIBILITY + bool operator()() const + { + return __this->__try_wait_parity(__parity); + } }; -template -_LIBCUDACXX_INLINE_VISIBILITY bool __call_try_wait(const _Barrier& __b, typename _Barrier::arrival_token&& __phase) +template +_LIBCUDACXX_INLINE_VISIBILITY +bool __call_try_wait(const _Barrier& __b, typename _Barrier::arrival_token&& __phase) { - return __b.__try_wait(_CUDA_VSTD::move(__phase)); + return __b.__try_wait(_CUDA_VSTD::move(__phase)); } -template -_LIBCUDACXX_INLINE_VISIBILITY bool __call_try_wait_parity(const _Barrier& __b, bool __parity) +template +_LIBCUDACXX_INLINE_VISIBILITY +bool __call_try_wait_parity(const _Barrier& __b, bool __parity) { - return __b.__try_wait_parity(__parity); + return __b.__try_wait_parity(__parity); } -template -class __barrier_base -{ - _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __expected, __arrived; - _LIBCUDACXX_BARRIER_ALIGNMENTS _CompletionF __completion; - _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __phase; + +template +class __barrier_base { + + _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __expected, __arrived; + _LIBCUDACXX_BARRIER_ALIGNMENTS _CompletionF __completion; + _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __phase; public: - using arrival_token = bool; + using arrival_token = bool; private: - template - friend class __barrier_poll_tester_phase; - template - friend class __barrier_poll_tester_parity; - template - _LIBCUDACXX_INLINE_VISIBILITY friend bool - __call_try_wait(const _Barrier& __b, typename _Barrier::arrival_token&& __phase); - template - _LIBCUDACXX_INLINE_VISIBILITY friend bool __call_try_wait_parity(const _Barrier& __b, bool __parity); - - _LIBCUDACXX_INLINE_VISIBILITY bool __try_wait(arrival_token __old) const - { - return __phase.load(memory_order_acquire) != __old; - } - _LIBCUDACXX_INLINE_VISIBILITY bool __try_wait_parity(bool __parity) const - { - return __try_wait(__parity); - } + template + friend class __barrier_poll_tester_phase; + template + friend class __barrier_poll_tester_parity; + template + _LIBCUDACXX_INLINE_VISIBILITY + friend bool __call_try_wait(const _Barrier& __b, + typename _Barrier::arrival_token&& __phase); + template + _LIBCUDACXX_INLINE_VISIBILITY + friend bool 
__call_try_wait_parity(const _Barrier& __b, bool __parity); + + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait(arrival_token __old) const + { + return __phase.load(memory_order_acquire) != __old; + } + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait_parity(bool __parity) const + { + return __try_wait(__parity); + } public: - __barrier_base() = default; + __barrier_base() = default; - _LIBCUDACXX_INLINE_VISIBILITY __barrier_base(ptrdiff_t __expected, _CompletionF __completion = _CompletionF()) - : __expected(__expected) - , __arrived(__expected) - , __completion(__completion) - , __phase(false) - {} + _LIBCUDACXX_INLINE_VISIBILITY + __barrier_base(ptrdiff_t __expected, _CompletionF __completion = _CompletionF()) + : __expected(__expected), __arrived(__expected), __completion(__completion), __phase(false) + { + } - ~__barrier_base() = default; + ~__barrier_base() = default; - __barrier_base(__barrier_base const&) = delete; - __barrier_base& operator=(__barrier_base const&) = delete; + __barrier_base(__barrier_base const&) = delete; + __barrier_base& operator=(__barrier_base const&) = delete; - _CCCL_NODISCARD _LIBCUDACXX_INLINE_VISIBILITY arrival_token arrive(ptrdiff_t __update = 1) - { - auto const __old_phase = __phase.load(memory_order_relaxed); - auto const __result = __arrived.fetch_sub(__update, memory_order_acq_rel) - __update; - auto const __new_expected = __expected.load(memory_order_relaxed); + _CCCL_NODISCARD _LIBCUDACXX_INLINE_VISIBILITY + arrival_token arrive(ptrdiff_t __update = 1) + { + auto const __old_phase = __phase.load(memory_order_relaxed); + auto const __result = __arrived.fetch_sub(__update, memory_order_acq_rel) - __update; + auto const __new_expected = __expected.load(memory_order_relaxed); - _LIBCUDACXX_DEBUG_ASSERT(__result >= 0, ""); + _LIBCUDACXX_DEBUG_ASSERT(__result >= 0, ""); - if (0 == __result) + if(0 == __result) { + __completion(); + __arrived.store(__new_expected, memory_order_relaxed); + __phase.store(!__old_phase, memory_order_release); + __cxx_atomic_notify_all(&__phase.__a_); + } + return __old_phase; + } + _LIBCUDACXX_INLINE_VISIBILITY + void wait(arrival_token&& __old_phase) const { - __completion(); - __arrived.store(__new_expected, memory_order_relaxed); - __phase.store(!__old_phase, memory_order_release); - __cxx_atomic_notify_all(&__phase.__a_); + __phase.wait(__old_phase, memory_order_acquire); + } + _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_wait() + { + wait(arrive()); + } + _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_drop() + { + __expected.fetch_sub(1, memory_order_relaxed); + (void)arrive(); } - return __old_phase; - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(arrival_token&& __old_phase) const - { - __phase.wait(__old_phase, memory_order_acquire); - } - _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_wait() - { - wait(arrive()); - } - _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_drop() - { - __expected.fetch_sub(1, memory_order_relaxed); - (void) arrive(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr ptrdiff_t max() noexcept - { - return numeric_limits::max(); - } + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept + { + return numeric_limits::max(); + } }; -template -class __barrier_base<__empty_completion, _Sco> -{ - static constexpr uint64_t __expected_unit = 1ull; - static constexpr uint64_t __arrived_unit = 1ull << 32; - static constexpr uint64_t __expected_mask = __arrived_unit - 1; - static constexpr uint64_t __phase_bit = 1ull << 63; - static constexpr uint64_t __arrived_mask = (__phase_bit 
- 1) & ~__expected_mask; +template +class __barrier_base<__empty_completion, _Sco> { - _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __phase_arrived_expected; + static constexpr uint64_t __expected_unit = 1ull; + static constexpr uint64_t __arrived_unit = 1ull << 32; + static constexpr uint64_t __expected_mask = __arrived_unit - 1; + static constexpr uint64_t __phase_bit = 1ull << 63; + static constexpr uint64_t __arrived_mask = (__phase_bit - 1) & ~__expected_mask; + + _LIBCUDACXX_BARRIER_ALIGNMENTS __atomic_base __phase_arrived_expected; public: - using arrival_token = uint64_t; + using arrival_token = uint64_t; private: - template - friend class __barrier_poll_tester_phase; - template - friend class __barrier_poll_tester_parity; - template - _LIBCUDACXX_INLINE_VISIBILITY friend bool - __call_try_wait(const _Barrier& __b, typename _Barrier::arrival_token&& __phase); - template - _LIBCUDACXX_INLINE_VISIBILITY friend bool __call_try_wait_parity(const _Barrier& __b, bool __parity); - - static _LIBCUDACXX_INLINE_VISIBILITY constexpr uint64_t __init(ptrdiff_t __count) noexcept - { -# if _CCCL_STD_VER > 2011 - // This debug assert is not supported in C++11 due to resulting in a - // multi-statement constexpr function. - _LIBCUDACXX_DEBUG_ASSERT(__count >= 0, "Count must be non-negative."); -# endif // _CCCL_STD_VER > 2011 - return (((1u << 31) - __count) << 32) | ((1u << 31) - __count); - } - _LIBCUDACXX_INLINE_VISIBILITY bool __try_wait_phase(uint64_t __phase) const - { - uint64_t const __current = __phase_arrived_expected.load(memory_order_acquire); - return ((__current & __phase_bit) != __phase); - } - _LIBCUDACXX_INLINE_VISIBILITY bool __try_wait(arrival_token __old) const - { - return __try_wait_phase(__old & __phase_bit); - } - _LIBCUDACXX_INLINE_VISIBILITY bool __try_wait_parity(bool __parity) const - { - return __try_wait_phase(__parity ? __phase_bit : 0); - } + template + friend class __barrier_poll_tester_phase; + template + friend class __barrier_poll_tester_parity; + template + _LIBCUDACXX_INLINE_VISIBILITY + friend bool __call_try_wait(const _Barrier& __b, + typename _Barrier::arrival_token&& __phase); + template + _LIBCUDACXX_INLINE_VISIBILITY + friend bool __call_try_wait_parity(const _Barrier& __b, bool __parity); + + static _LIBCUDACXX_INLINE_VISIBILITY constexpr + uint64_t __init(ptrdiff_t __count) noexcept + { +#if _CCCL_STD_VER > 2011 + // This debug assert is not supported in C++11 due to resulting in a + // multi-statement constexpr function. + _LIBCUDACXX_DEBUG_ASSERT(__count >= 0, "Count must be non-negative."); +#endif // _CCCL_STD_VER > 2011 + return (((1u << 31) - __count) << 32) + | ((1u << 31) - __count); + } + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait_phase(uint64_t __phase) const + { + uint64_t const __current = __phase_arrived_expected.load(memory_order_acquire); + return ((__current & __phase_bit) != __phase); + } + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait(arrival_token __old) const + { + return __try_wait_phase(__old & __phase_bit); + } + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_wait_parity(bool __parity) const + { + return __try_wait_phase(__parity ? 
__phase_bit : 0); + } public: - __barrier_base() = default; + __barrier_base() = default; - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - __barrier_base(ptrdiff_t __count, __empty_completion = __empty_completion()) - : __phase_arrived_expected(__init(__count)) - { - _LIBCUDACXX_DEBUG_ASSERT(__count >= 0, ""); - } + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + __barrier_base(ptrdiff_t __count, __empty_completion = __empty_completion()) + : __phase_arrived_expected(__init(__count)) { + _LIBCUDACXX_DEBUG_ASSERT(__count >= 0, ""); + } - ~__barrier_base() = default; + ~__barrier_base() = default; - __barrier_base(__barrier_base const&) = delete; - __barrier_base& operator=(__barrier_base const&) = delete; + __barrier_base(__barrier_base const&) = delete; + __barrier_base& operator=(__barrier_base const&) = delete; - _CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY arrival_token arrive(ptrdiff_t __update = 1) - { - auto const __inc = __arrived_unit * __update; - auto const __old = __phase_arrived_expected.fetch_add(__inc, memory_order_acq_rel); - if ((__old ^ (__old + __inc)) & __phase_bit) + _CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY + arrival_token arrive(ptrdiff_t __update = 1) { - __phase_arrived_expected.fetch_add((__old & __expected_mask) << 32, memory_order_relaxed); - __phase_arrived_expected.notify_all(); + auto const __inc = __arrived_unit * __update; + auto const __old = __phase_arrived_expected.fetch_add(__inc, memory_order_acq_rel); + if((__old ^ (__old + __inc)) & __phase_bit) { + __phase_arrived_expected.fetch_add((__old & __expected_mask) << 32, memory_order_relaxed); + __phase_arrived_expected.notify_all(); + } + return __old & __phase_bit; + } + _LIBCUDACXX_INLINE_VISIBILITY + void wait(arrival_token&& __phase) const + { + __libcpp_thread_poll_with_backoff(__barrier_poll_tester_phase<__barrier_base>(this, _CUDA_VSTD::move(__phase))); + } + _LIBCUDACXX_INLINE_VISIBILITY + void wait_parity(bool __parity) const + { + __libcpp_thread_poll_with_backoff(__barrier_poll_tester_parity<__barrier_base>(this, __parity)); + } + _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_wait() + { + wait(arrive()); + } + _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_drop() + { + __phase_arrived_expected.fetch_add(__expected_unit, memory_order_relaxed); + (void)arrive(); } - return __old & __phase_bit; - } - _LIBCUDACXX_INLINE_VISIBILITY void wait(arrival_token&& __phase) const - { - __libcpp_thread_poll_with_backoff(__barrier_poll_tester_phase<__barrier_base>(this, _CUDA_VSTD::move(__phase))); - } - _LIBCUDACXX_INLINE_VISIBILITY void wait_parity(bool __parity) const - { - __libcpp_thread_poll_with_backoff(__barrier_poll_tester_parity<__barrier_base>(this, __parity)); - } - _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_wait() - { - wait(arrive()); - } - _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_drop() - { - __phase_arrived_expected.fetch_add(__expected_unit, memory_order_relaxed); - (void) arrive(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr ptrdiff_t max() noexcept - { - return numeric_limits::max(); - } + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept + { + return numeric_limits::max(); + } }; #endif //_LIBCUDACXX_HAS_NO_TREE_BARRIER -template -class barrier : public __barrier_base<_CompletionF> -{ +template +class barrier : public __barrier_base<_CompletionF> { public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF()) - : __barrier_base<_CompletionF>(__count, 
__completion) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + barrier(ptrdiff_t __count, _CompletionF __completion = _CompletionF()) + : __barrier_base<_CompletionF>(__count, __completion) { + } }; _LIBCUDACXX_END_NAMESPACE_STD diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/bit b/libcudacxx/include/cuda/std/detail/libcxx/include/bit index b562af71f79..ae31988c7c6 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/bit +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/bit @@ -63,750 +63,810 @@ namespace std { # pragma system_header #endif // no system header -#include #include // all public C++ headers provide the assertion handler #include -#include +#include #include #include #include +#include + #if defined(_CCCL_COMPILER_MSVC) -# include +#include #endif // _CCCL_COMPILER_MSVC #if defined(_CCCL_COMPILER_IBM) -# include +#include #endif // _CCCL_COMPILER_IBM _LIBCUDACXX_BEGIN_NAMESPACE_STD #define _LIBCUDACXX_BIT_CONSTEXPR constexpr -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz2(uint64_t __x, int __c) noexcept -{ +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz2(uint64_t __x, int __c) noexcept { return (__x & 0x1) ? __c : __c + 1; } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz4(uint64_t __x, int __c) noexcept -{ - return __binary_ctz2(__x >> 2 * !(__x & 0x3), __c + 2 * !(__x & 0x3)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz4(uint64_t __x, int __c) noexcept { + return __binary_ctz2( + __x >> 2*!(__x & 0x3), + __c + 2*!(__x & 0x3)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz8(uint64_t __x, int __c) noexcept -{ - return __binary_ctz4(__x >> 4 * !(__x & 0x0F), __c + 4 * !(__x & 0x0F)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz8(uint64_t __x, int __c) noexcept { + return __binary_ctz4( + __x >> 4*!(__x & 0x0F), + __c + 4*!(__x & 0x0F)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz16(uint64_t __x, int __c) noexcept -{ - return __binary_ctz8(__x >> 8 * !(__x & 0x00FF), __c + 8 * !(__x & 0x00FF)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz16(uint64_t __x, int __c) noexcept { + return __binary_ctz8( + __x >> 8*!(__x & 0x00FF), + __c + 8*!(__x & 0x00FF)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz32(uint64_t __x, int __c) noexcept -{ - return __binary_ctz16(__x >> 16 * !(__x & 0x0000FFFF), __c + 16 * !(__x & 0x0000FFFF)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz32(uint64_t __x, int __c) noexcept { + return __binary_ctz16( + __x >> 16*!(__x & 0x0000FFFF), + __c + 16*!(__x & 0x0000FFFF)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_ctz64(uint64_t __x) noexcept -{ - return __binary_ctz32(__x >> 32 * !(__x & 0x00000000FFFFFFFF), 32 * !(__x & 0x00000000FFFFFFFF)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_ctz64(uint64_t __x) noexcept { + return __binary_ctz32( + __x >> 32*!(__x & 0x00000000FFFFFFFF), + 32*!(__x & 0x00000000FFFFFFFF)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_clz2(uint64_t __x, int __c) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz2(uint64_t __x, int __c) { return !!(~__x & 0x2) ^ __c; } -inline _LIBCUDACXX_INLINE_VISIBILITY 
_LIBCUDACXX_BIT_CONSTEXPR int __binary_clz4(uint64_t __x, int __c) -{ - return __binary_clz2(__x >> 2 * !!(__x & 0xC), __c + 2 * !(__x & 0xC)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz4(uint64_t __x, int __c) { + return __binary_clz2( + __x >> 2*!!(__x & 0xC), + __c + 2*!(__x & 0xC)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_clz8(uint64_t __x, int __c) -{ - return __binary_clz4(__x >> 4 * !!(__x & 0xF0), __c + 4 * !(__x & 0xF0)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz8(uint64_t __x, int __c) { + return __binary_clz4( + __x >> 4*!!(__x & 0xF0), + __c + 4*!(__x & 0xF0)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_clz16(uint64_t __x, int __c) -{ - return __binary_clz8(__x >> 8 * !!(__x & 0xFF00), __c + 8 * !(__x & 0xFF00)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz16(uint64_t __x, int __c) { + return __binary_clz8( + __x >> 8*!!(__x & 0xFF00), + __c + 8*!(__x & 0xFF00)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_clz32(uint64_t __x, int __c) -{ - return __binary_clz16(__x >> 16 * !!(__x & 0xFFFF0000), __c + 16 * !(__x & 0xFFFF0000)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz32(uint64_t __x, int __c) { + return __binary_clz16( + __x >> 16*!!(__x & 0xFFFF0000), + __c + 16*!(__x & 0xFFFF0000)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __binary_clz64(uint64_t __x) -{ - return __binary_clz32(__x >> 32 * !!(__x & 0xFFFFFFFF00000000), 32 * !(__x & 0xFFFFFFFF00000000)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __binary_clz64(uint64_t __x) { + return __binary_clz32( + __x >> 32*!!(__x & 0xFFFFFFFF00000000), + 32*!(__x & 0xFFFFFFFF00000000)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __fallback_popc8(uint64_t __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __fallback_popc8(uint64_t __x) { return static_cast((__x * 0x0101010101010101) >> 56); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __fallback_popc16(uint64_t __x) -{ - return __fallback_popc8((__x + (__x >> 4)) & 0x0f0f0f0f0f0f0f0f); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __fallback_popc16(uint64_t __x) { + return __fallback_popc8( + (__x + (__x >> 4)) & 0x0f0f0f0f0f0f0f0f); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __fallback_popc32(uint64_t __x) -{ - return __fallback_popc16((__x & 0x3333333333333333) + ((__x >> 2) & 0x3333333333333333)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __fallback_popc32(uint64_t __x) { + return __fallback_popc16( + (__x & 0x3333333333333333) + ((__x >> 2) & 0x3333333333333333)); } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __fallback_popc64(uint64_t __x) -{ - return __fallback_popc32(__x - ((__x >> 1) & 0x5555555555555555)); +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __fallback_popc64(uint64_t __x) { + return __fallback_popc32( + __x - ((__x >> 1) & 0x5555555555555555)); } #ifndef _CCCL_COMPILER_MSVC -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_ctz(unsigned __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { 
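// ---------------------------------------------------------------------------
// Annotation, not part of the library sources: the constexpr __binary_ctz*
// fallbacks above count trailing zeros by repeatedly halving the search
// window, shifting right only when the low half is all zeros. For example,
// for __x = 0x50 (binary 0101'0000, four trailing zeros):
//
//   __binary_ctz32(0x50, 0): low 16 bits non-zero -> no shift -> __binary_ctz16(0x50, 0)
//   __binary_ctz16(0x50, 0): low  8 bits non-zero -> no shift -> __binary_ctz8 (0x50, 0)
//   __binary_ctz8 (0x50, 0): low  4 bits all zero -> shift 4  -> __binary_ctz4 (0x05, 4)
//   __binary_ctz4 (0x05, 4): low  2 bits non-zero -> no shift -> __binary_ctz2 (0x05, 4)
//   __binary_ctz2 (0x05, 4): bit 0 is set         -> returns 4
//
// i.e. __binary_ctz32(0x50, 0) == 4, matching __builtin_ctz(0x50). The
// __binary_clz* helpers mirror the same halving from the high end, and the
// __fallback_popc* chain is the classic SWAR population-count reduction.
// ---------------------------------------------------------------------------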
- NV_IF_ELSE_TARGET( - NV_IS_DEVICE, (return (!__x) ? sizeof(unsigned) * 8 : __ffs(__x) - 1;), (return __builtin_ctz(__x);)) +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +int __libcpp_ctz(unsigned __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return (!__x) ? sizeof(unsigned) * 8 : __ffs(__x) - 1; + ), ( + return __builtin_ctz(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - - return __binary_ctz32(static_cast(__x), 0); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv - return __builtin_ctz(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_ctz(unsigned long __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET( - NV_IS_DEVICE, (return (!__x) ? sizeof(unsigned long) * 8 : __ffsll(__x) - 1;), (return __builtin_ctzl(__x);)) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + + return __binary_ctz32(static_cast(__x), 0); +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv + return __builtin_ctz(__x); +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_ctz(unsigned long __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return (!__x) ? sizeof(unsigned long) * 8 : __ffsll(__x) - 1; + ), ( + return __builtin_ctzl(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - return __binary_ctz64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv - return __builtin_ctzl(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 + return __binary_ctz64(static_cast(__x)); +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv + return __builtin_ctzl(__x); +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 } -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_ctz(unsigned long long __x) noexcept -{ +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_ctz(unsigned long long __x) noexcept { // For whatever reason __builtin_ctzll does not compile although it should -# if 1 // def _CCCL_COMPILER_NVRTC -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, - (return (!__x) ? sizeof(unsigned long long) * 8 : __ffsll(__x) - 1;), - (return __builtin_ctzll(__x);)) +#if 1 //def _CCCL_COMPILER_NVRTC +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return (!__x) ? 
sizeof(unsigned long long) * 8 : __ffsll(__x) - 1; + ), ( + return __builtin_ctzll(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - - return __binary_ctz64(static_cast(__x)); -# else // 0 - return __builtin_ctzll(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_clz(unsigned __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __clz(__x);), (return __builtin_clz(__x);)) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + + return __binary_ctz64(static_cast(__x)); +#else // 0 + return __builtin_ctzll(__x); +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_clz(unsigned __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __clz(__x); + ), ( + return __builtin_clz(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __binary_clz32(static_cast(__x), 0); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_clz(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_clz(unsigned long __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __clzll(__x);), (return __builtin_clzl(__x);)) +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_clz(unsigned long __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __clzll(__x); + ), ( + return __builtin_clzl(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __binary_clz64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_clzl(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_clz(unsigned long long __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __clzll(__x);), 
(return __builtin_clzll(__x);)) +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_clz(unsigned long long __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __clzll(__x); + ), ( + return __builtin_clzll(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __binary_clz64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_clzll(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_popcount(unsigned __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __popc(__x);), (return __builtin_popcount(__x);)) +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_popcount(unsigned __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __popc(__x); + ), ( + return __builtin_popcount(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __fallback_popc64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_popcount(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_popcount(unsigned long __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __popcll(__x);), (return __builtin_popcountl(__x);)) +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_popcount(unsigned long __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __popcll(__x); + ), ( + return __builtin_popcountl(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __fallback_popc64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv 
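// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the library sources: when the compiler
// builtins cannot be used directly (NVRTC, nvcc older than 11.3), the
// overloads above dispatch at run time with NV_IF_ELSE_TARGET to a device
// intrinsic (__clz, __clzll, __popc, __ffs, ...) or a host builtin, and keep
// the constexpr __binary_*/__fallback_* helpers for constant evaluation. The
// same dispatch pattern in user code, assuming nvcc with <nv/target>
// available and a GCC/Clang host compiler; `my_popcount` is an illustrative
// name.
__host__ __device__ inline int my_popcount(unsigned x)
{
  NV_IF_ELSE_TARGET(NV_IS_DEVICE, (
    return __popc(x);             // device: hardware population count
  ), (
    return __builtin_popcount(x); // host: compiler builtin
  ))
}
// ---------------------------------------------------------------------------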
+#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_popcountl(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 -} - -inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __libcpp_popcount(unsigned long long __x) noexcept -{ -# if defined(_CCCL_COMPILER_NVRTC) || (defined(_CCCL_CUDACC_BELOW_11_3)) -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) - if (!__libcpp_is_constant_evaluated()) - { - NV_IF_ELSE_TARGET(NV_IS_DEVICE, (return __popcll(__x);), (return __builtin_popcountll(__x);)) +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +} + +inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __libcpp_popcount(unsigned long long __x) noexcept { +#if defined(_CCCL_COMPILER_NVRTC) \ + || (defined(_CCCL_CUDACC_BELOW_11_3)) +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) + if (!__libcpp_is_constant_evaluated()) { + NV_IF_ELSE_TARGET(NV_IS_DEVICE, ( + return __popcll(__x); + ), ( + return __builtin_popcountll(__x); + )) } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && (_CCCL_STD_VER >= 2014) return __fallback_popc64(static_cast(__x)); -# else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv +#else // ^^^ _CCCL_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 vvv return __builtin_popcountll(__x); -# endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 +#endif // !_CCCL_COMPILER_NVRTC || nvcc >= 11.3 } -#else // _CCCL_COMPILER_MSVC +#else // _CCCL_COMPILER_MSVC // Precondition: __x != 0 -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_ctz(unsigned __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_ctz(unsigned __x) { static_assert(sizeof(unsigned) == sizeof(unsigned long), ""); static_assert(sizeof(unsigned long) == 4, ""); -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { unsigned long __where = 0; if (_BitScanForward(&__where, __x)) - { return static_cast(__where); - } return 32; } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __binary_ctz32(static_cast(__x), 0); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_ctz(unsigned long __x) -{ - static_assert(sizeof(unsigned long) == sizeof(unsigned), ""); - return __libcpp_ctz(static_cast(__x)); +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_ctz(unsigned long __x) { + static_assert(sizeof(unsigned long) == sizeof(unsigned), ""); + return __libcpp_ctz(static_cast(__x)); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_ctz(unsigned long long __x) -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_ctz(unsigned long long __x) { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { unsigned long __where = 0; -# if defined(_LIBCUDACXX_HAS_BITSCAN64) && (defined(_M_AMD64) || defined(__x86_64__)) +# if defined(_LIBCUDACXX_HAS_BITSCAN64) && (defined(_M_AMD64) || defined(__x86_64__)) if 
(_BitScanForward64(&__where, __x)) - { return static_cast(__where); - } -# else +# else // Win32 doesn't have _BitScanForward64 so emulate it with two 32 bit calls. if (_BitScanForward(&__where, static_cast(__x))) - { return static_cast(__where); - } if (_BitScanForward(&__where, static_cast(__x >> 32))) - { return static_cast(__where + 32); - } -# endif +# endif return 64; } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __binary_ctz64(__x); } // Precondition: __x != 0 -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_clz(unsigned __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_clz(unsigned __x) { static_assert(sizeof(unsigned) == sizeof(unsigned long), ""); static_assert(sizeof(unsigned long) == 4, ""); -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { unsigned long __where = 0; if (_BitScanReverse(&__where, __x)) - { return static_cast(31 - __where); - } return 32; // Undefined Behavior. } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __binary_clz32(static_cast(__x), 0); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_clz(unsigned long __x) -{ - static_assert(sizeof(unsigned) == sizeof(unsigned long), ""); - return __libcpp_clz(static_cast(__x)); +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_clz(unsigned long __x) { + static_assert(sizeof(unsigned) == sizeof(unsigned long), ""); + return __libcpp_clz(static_cast(__x)); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_clz(unsigned long long __x) -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_clz(unsigned long long __x) { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { unsigned long __where = 0; -# if defined(_LIBCUDACXX_HAS_BITSCAN64) +# if defined(_LIBCUDACXX_HAS_BITSCAN64) if (_BitScanReverse64(&__where, __x)) - { return static_cast(63 - __where); - } -# else +# else // Win32 doesn't have _BitScanReverse64 so emulate it with two 32 bit calls. if (_BitScanReverse(&__where, static_cast(__x >> 32))) - { return static_cast(63 - (__where + 32)); - } if (_BitScanReverse(&__where, static_cast(__x))) - { return static_cast(63 - __where); - } -# endif +# endif return 64; // Undefined Behavior. 
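// ---------------------------------------------------------------------------
// Annotation, not part of the library sources: MSVC's _BitScanReverse reports
// the index of the highest set bit, so the leading-zero count of a 32-bit
// value is 31 - index (63 - index for 64 bits). For example, for
// __x = 0x00F00000u the highest set bit is bit 23, _BitScanReverse writes
// __where = 23, and __libcpp_clz returns 31 - 23 = 8, matching
// __builtin_clz(0x00F00000u) on other compilers. The 64-bit overload above
// scans the high 32 bits first and falls back to the low 32 bits only when
// the high half is zero.
// ---------------------------------------------------------------------------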
} -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __binary_clz64(static_cast(__x)); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_popcount(unsigned __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_popcount(unsigned __x) { static_assert(sizeof(unsigned) == 4, ""); -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { return static_cast(__popcnt(__x)); } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __fallback_popc64(static_cast(__x)); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_popcount(unsigned long __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_popcount(unsigned long __x) { static_assert(sizeof(unsigned long) == 4, ""); -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { return static_cast(__popcnt(__x)); } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__) return __fallback_popc64(static_cast(__x)); } -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr int __libcpp_popcount(unsigned long long __x) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr int __libcpp_popcount(unsigned long long __x) { static_assert(sizeof(unsigned long long) == 8, ""); -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) - if (!__libcpp_is_constant_evaluated()) - { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(__CUDA_ARCH__) + if (!__libcpp_is_constant_evaluated()) { return static_cast(__popcnt64(__x)); } -# endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__ +#endif // _LIBCUDACXX_IS_CONSTANT_EVALUATED && !defined(__CUDA_ARCH__ return __fallback_popc64(static_cast(__x)); } #endif // _CCCL_COMPILER_MSVC -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR _Tp __rotl(_Tp __t, unsigned int __cnt) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +_Tp __rotl(_Tp __t, unsigned int __cnt) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotl requires unsigned"); - using __nlt = numeric_limits<_Tp>; + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotl requires unsigned"); + using __nlt = numeric_limits<_Tp>; - return ((__cnt % __nlt::digits) == 0) - ? __t - : (__t << (__cnt % __nlt::digits)) | (__t >> (__nlt::digits - (__cnt % __nlt::digits))); + return ((__cnt % __nlt::digits) == 0) ? 
+ __t : + (__t << (__cnt % __nlt::digits)) | (__t >> (__nlt::digits - (__cnt % __nlt::digits))); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR _Tp __rotr(_Tp __t, unsigned int __cnt) noexcept + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +_Tp __rotr(_Tp __t, unsigned int __cnt) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires unsigned"); - using __nlt = numeric_limits<_Tp>; + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires unsigned"); + using __nlt = numeric_limits<_Tp>; - return ((__cnt % __nlt::digits) == 0) - ? __t - : (__t >> (__cnt % __nlt::digits)) | (__t << (__nlt::digits - (__cnt % __nlt::digits))); + return ((__cnt % __nlt::digits) == 0) ? + __t : + (__t >> (__cnt % __nlt::digits)) | (__t << (__nlt::digits - (__cnt % __nlt::digits))); } // Forward decl for recursive use in split word operations -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countr_zero(_Tp __t) noexcept; +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countr_zero(_Tp __t) noexcept; -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__countr_zero_dispatch(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __countr_zero_dispatch(_Tp __t) noexcept { return __libcpp_ctz(static_cast(__t)); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__countr_zero_dispatch(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __countr_zero_dispatch(_Tp __t) noexcept { return __libcpp_ctz(static_cast(__t)); } -template -struct __countr_zero_rsh_impl -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __short_circuit(_Tp __t, int __cur, int __count) - { - // Stops processing early if non-zero - return (__cur == numeric_limits::digits) - ? __countr_zero_rsh_impl<_Tp, _St - 1>::__count(__t, __cur + __count) - : __cur + __count; - } +template +struct __countr_zero_rsh_impl { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __short_circuit(_Tp __t, int __cur, int __count) { + // Stops processing early if non-zero + return (__cur == numeric_limits::digits) ? 
+ __countr_zero_rsh_impl<_Tp, _St-1>::__count(__t, __cur + __count) : + __cur + __count; + } - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t, int __count) - { - return __short_circuit( - __t >> numeric_limits::digits, __countr_zero(static_cast(__t)), __count); - } + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t, int __count) { + return __short_circuit( + __t >> numeric_limits::digits, + __countr_zero(static_cast(__t)), + __count); + } }; template -struct __countr_zero_rsh_impl<_Tp, 1> -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t, int __count) - { - return __count + __countr_zero(static_cast(__t)); - } +struct __countr_zero_rsh_impl<_Tp, 1> { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t, int __count) { + return __count + __countr_zero(static_cast(__t)); + } }; -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> -__countr_zero_dispatch(_Tp __t) noexcept -{ - return __countr_zero_rsh_impl<_Tp>::__count(__t, 0); -} +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> __countr_zero_dispatch(_Tp __t) noexcept +{ return __countr_zero_rsh_impl<_Tp>::__count(__t, 0); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countr_zero(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countr_zero(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countr_zero requires unsigned"); + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countr_zero requires unsigned"); - return __t ? __countr_zero_dispatch(__t) : numeric_limits<_Tp>::digits; + return __t ? __countr_zero_dispatch(__t) : numeric_limits<_Tp>::digits; } // Forward decl for recursive use in split word operations -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countl_zero(_Tp __t) noexcept; +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countl_zero(_Tp __t) noexcept; -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__countl_zero_dispatch(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __countl_zero_dispatch(_Tp __t) noexcept { return __libcpp_clz(static_cast(__t)) - - (numeric_limits::digits - numeric_limits<_Tp>::digits); + - (numeric_limits::digits - numeric_limits<_Tp>::digits); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__countl_zero_dispatch(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __countl_zero_dispatch(_Tp __t) noexcept { return __libcpp_clz(static_cast(__t)) - - (numeric_limits::digits - numeric_limits<_Tp>::digits); + - (numeric_limits::digits - numeric_limits<_Tp>::digits); } -template -struct __countl_zero_rotl_impl -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __short_circuit(_Tp __t, int __cur) - { - // This stops processing early if the current word is not empty - return (__cur == numeric_limits::digits) - ? 
__cur + __countl_zero_rotl_impl<_Tp, _St - 1>::__count(__t) - : __cur; - } +template +struct __countl_zero_rotl_impl { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __short_circuit(_Tp __t, int __cur) { + // This stops processing early if the current word is not empty + return (__cur == numeric_limits::digits) ? + __cur + __countl_zero_rotl_impl<_Tp, _St-1>::__count(__t) : + __cur; + } - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countl_iter(_Tp __t) - { - // After rotating pass result of clz to another step for processing - return __short_circuit(__t, __countl_zero(static_cast(__t))); - } + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __countl_iter(_Tp __t) { + // After rotating pass result of clz to another step for processing + return __short_circuit( + __t, + __countl_zero(static_cast(__t))); + } - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t) - { - return __countl_iter(__rotl(__t, numeric_limits::digits)); - } + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t) { + return __countl_iter( + __rotl(__t, numeric_limits::digits)); + } }; template -struct __countl_zero_rotl_impl<_Tp, 1> -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t) - { - return __countl_zero(static_cast(__rotl(__t, numeric_limits::digits))); - } +struct __countl_zero_rotl_impl<_Tp, 1> { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t) { + return __countl_zero(static_cast(__rotl(__t, numeric_limits::digits))); + } }; -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> -__countl_zero_dispatch(_Tp __t) noexcept -{ - return __countl_zero_rotl_impl<_Tp>::__count(__t); -} +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> __countl_zero_dispatch(_Tp __t) noexcept +{ return __countl_zero_rotl_impl<_Tp>::__count(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countl_zero(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countl_zero(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_zero requires unsigned"); - return __t ? __countl_zero_dispatch(__t) : numeric_limits<_Tp>::digits; + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_zero requires unsigned"); + return __t ? __countl_zero_dispatch(__t) : numeric_limits<_Tp>::digits; } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countl_one(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countl_one(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_one requires unsigned"); - return __t != numeric_limits<_Tp>::max() ? __countl_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_one requires unsigned"); + return __t != numeric_limits<_Tp>::max() + ? __countl_zero(static_cast<_Tp>(~__t)) + : numeric_limits<_Tp>::digits; } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __countr_one(_Tp __t) noexcept -{ - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countr_one requires unsigned"); - return __t != numeric_limits<_Tp>::max() ? 
__countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; -} -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__popcount_dispatch(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __countr_one(_Tp __t) noexcept { - return __libcpp_popcount(static_cast(__t)); + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countr_one requires unsigned"); + return __t != numeric_limits<_Tp>::max() + ? __countr_zero(static_cast<_Tp>(~__t)) + : numeric_limits<_Tp>::digits; } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t -__popcount_dispatch(_Tp __t) noexcept -{ - return __libcpp_popcount(static_cast(__t)); -} +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __popcount_dispatch(_Tp __t) noexcept +{ return __libcpp_popcount(static_cast(__t)); } -template -struct __popcount_rsh_impl -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t) - { - return __popcount_rsh_impl<_Tp, _St - 1>::__count(__t >> numeric_limits::digits) - + __libcpp_popcount(static_cast(__t)); - } +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __popcount_dispatch(_Tp __t) noexcept +{ return __libcpp_popcount(static_cast(__t)); } + +template +struct __popcount_rsh_impl { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t) { + return __popcount_rsh_impl<_Tp, _St-1>::__count( + __t >> numeric_limits::digits) + + __libcpp_popcount(static_cast(__t)); + } }; template -struct __popcount_rsh_impl<_Tp, 1> -{ - static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __count(_Tp __t) - { - return __libcpp_popcount(static_cast(__t)); - } +struct __popcount_rsh_impl<_Tp, 1> { + static _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR + int __count(_Tp __t) { + return __libcpp_popcount(static_cast(__t)); + } }; -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> -__popcount_dispatch(_Tp __t) noexcept -{ - return __popcount_rsh_impl<_Tp>::__count(__t); -} +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<(sizeof(_Tp) > sizeof(unsigned long long)), int> __popcount_dispatch(_Tp __t) noexcept +{ return __popcount_rsh_impl<_Tp>::__count(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR int __popcount(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +int __popcount(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__libcpp_popcount requires unsigned"); + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__libcpp_popcount requires unsigned"); - return __popcount_dispatch(__t); + return __popcount_dispatch(__t); } + // integral log base 2 -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR unsigned __bit_log2(_Tp __t) noexcept +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +unsigned __bit_log2(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__bit_log2 requires unsigned"); - return std::numeric_limits<_Tp>::digits - 1 - __countl_zero(__t); + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__bit_log2 requires unsigned"); + return std::numeric_limits<_Tp>::digits - 1 - __countl_zero(__t); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR bool __has_single_bit(_Tp __t) noexcept 
+_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +bool __has_single_bit(_Tp __t) noexcept { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__has_single_bit requires unsigned"); - return __t != 0 && (((__t & (__t - 1)) == 0)); + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__has_single_bit requires unsigned"); + return __t != 0 && (((__t & (__t - 1)) == 0)); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t= sizeof(unsigned), _Tp> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t= sizeof(unsigned), _Tp> __ceil2(_Tp __t) noexcept { - // const unsigned __n = numeric_limits<_Tp>::digits - countl_zero((_Tp)(__t - 1u)); - // _LIBCUDACXX_DEBUG_ASSERT(__libcpp_is_constant_evaluated() || __n != numeric_limits<_Tp>::digits, "Bad input to - // ceil2"); - return _Tp{1} << (numeric_limits<_Tp>::digits - __countl_zero((_Tp) (__t - 1u))); + // const unsigned __n = numeric_limits<_Tp>::digits - countl_zero((_Tp)(__t - 1u)); + // _LIBCUDACXX_DEBUG_ASSERT(__libcpp_is_constant_evaluated() || __n != numeric_limits<_Tp>::digits, "Bad input to ceil2"); + return _Tp{1} << (numeric_limits<_Tp>::digits - __countl_zero((_Tp)(__t - 1u))); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t __ceil2(_Tp __t) noexcept { - // const unsigned __n = numeric_limits<_Tp>::digits - countl_zero((_Tp)(__t - 1u)); - // _LIBCUDACXX_DEBUG_ASSERT(__libcpp_is_constant_evaluated() || __n != numeric_limits<_Tp>::digits, "Bad input to - // ceil2"); + // const unsigned __n = numeric_limits<_Tp>::digits - countl_zero((_Tp)(__t - 1u)); + // _LIBCUDACXX_DEBUG_ASSERT(__libcpp_is_constant_evaluated() || __n != numeric_limits<_Tp>::digits, "Bad input to ceil2"); - // const unsigned __extra = numeric_limits::digits - numeric_limits<_Tp>::digits; - // const unsigned __retVal = 1u << (__n + __extra); - return (_Tp) ((1u << ((numeric_limits<_Tp>::digits - __countl_zero((_Tp) (__t - 1u))) - + (numeric_limits::digits - numeric_limits<_Tp>::digits))) - >> (numeric_limits::digits - numeric_limits<_Tp>::digits)); + // const unsigned __extra = numeric_limits::digits - numeric_limits<_Tp>::digits; + // const unsigned __retVal = 1u << (__n + __extra); + return (_Tp) + ((1u << ((numeric_limits<_Tp>::digits - __countl_zero((_Tp)(__t - 1u))) + (numeric_limits::digits - numeric_limits<_Tp>::digits))) >> + (numeric_limits::digits - numeric_limits<_Tp>::digits)); } + #if (_CCCL_STD_VER > 2017) || defined(__cuda_std__) -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> rotl(_Tp __t, unsigned int __cnt) noexcept { - return __rotl(__t, __cnt); + return __rotl(__t, __cnt); } + // rotr -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> rotr(_Tp __t, unsigned int __cnt) noexcept { - return __rotr(__t, __cnt); + return __rotr(__t, __cnt); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR 
+__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> countl_zero(_Tp __t) noexcept { - return __countl_zero(__t); + return __countl_zero(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> countl_one(_Tp __t) noexcept { - return __countl_one(__t); + return __countl_one(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> countr_zero(_Tp __t) noexcept { - return __countr_zero(__t); + return __countr_zero(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> countr_one(_Tp __t) noexcept { - return __countr_one(__t); + return __countr_one(__t); } -template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> + +template +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, int> popcount(_Tp __t) noexcept { - return __popcount(__t); + return __popcount(__t); } + template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, bool> has_single_bit(_Tp __t) noexcept { - return __has_single_bit(__t); + return __has_single_bit(__t); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> bit_floor(_Tp __t) noexcept { - return __t == 0 ? 0 : static_cast<_Tp>(_Tp{1} << __bit_log2(__t)); + return __t == 0 ? 0 : static_cast<_Tp>(_Tp{1} << __bit_log2(__t)); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> bit_ceil(_Tp __t) noexcept { - return (__t < 2) ? 1 : static_cast<_Tp>(__ceil2(__t)); + return (__t < 2) ? 1 : static_cast<_Tp>(__ceil2(__t)); } template -_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR __enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> +_LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_BIT_CONSTEXPR +__enable_if_t<__libcpp_is_unsigned_integer<_Tp>::value, _Tp> bit_width(_Tp __t) noexcept { - return __t == 0 ? 0 : static_cast<_Tp>(__bit_log2(__t) + 1); + return __t == 0 ? 
0 : static_cast<_Tp>(__bit_log2(__t) + 1); } + enum class endian { - little = 0xDEAD, - big = 0xFACE, -# if defined(_LIBCUDACXX_LITTLE_ENDIAN) - native = little -# elif defined(_LIBCUDACXX_BIG_ENDIAN) - native = big -# else - native = 0xCAFE -# endif + little = 0xDEAD, + big = 0xFACE, +#if defined(_LIBCUDACXX_LITTLE_ENDIAN) + native = little +#elif defined(_LIBCUDACXX_BIG_ENDIAN) + native = big +#else + native = 0xCAFE +#endif }; #endif // _CCCL_STD_VER > 2017 diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/bitset b/libcudacxx/include/cuda/std/detail/libcxx/include/bitset index ebf17ae02a2..c475bfb7d9f 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/bitset +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/bitset @@ -74,10 +74,14 @@ public: template basic_string > to_string(charT zero = charT('0'), charT one = charT('1')) const; template - basic_string, allocator > to_string(charT zero = charT('0'), charT one = -charT('1')) const; basic_string, allocator > to_string(char zero = '0', char one = '1') -const; size_t count() const noexcept; constexpr size_t size() const noexcept; bool operator==(const bitset& rhs) const -noexcept; bool operator!=(const bitset& rhs) const noexcept; bool test(size_t pos) const; bool all() const noexcept; + basic_string, allocator > to_string(charT zero = charT('0'), charT one = charT('1')) const; + basic_string, allocator > to_string(char zero = '0', char one = '1') const; + size_t count() const noexcept; + constexpr size_t size() const noexcept; + bool operator==(const bitset& rhs) const noexcept; + bool operator!=(const bitset& rhs) const noexcept; + bool test(size_t pos) const; + bool all() const noexcept; bool any() const noexcept; bool none() const noexcept; bitset operator<<(size_t pos) const noexcept; @@ -108,14 +112,14 @@ template struct hash>; */ -#include <__bit_reference> #include <__config> -#include <__functional_base> -#include +#include <__bit_reference> #include -#include -#include +#include #include +#include +#include +#include <__functional_base> #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) # pragma GCC system_header @@ -128,901 +132,929 @@ template struct hash>; _LIBCUDACXX_PUSH_MACROS #include <__undef_macros> + _LIBCUDACXX_BEGIN_NAMESPACE_STD template class __bitset; template -struct __has_storage_type<__bitset<_N_words, _Size>> +struct __has_storage_type<__bitset<_N_words, _Size> > { - static const bool value = true; + static const bool value = true; }; template class __bitset { public: - typedef ptrdiff_t difference_type; - typedef size_t size_type; - typedef size_type __storage_type; - + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef size_type __storage_type; protected: - typedef __bitset __self; - typedef __storage_type* __storage_pointer; - typedef const __storage_type* __const_storage_pointer; - static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); - - friend class __bit_reference<__bitset>; - friend class __bit_const_reference<__bitset>; - friend class __bit_iterator<__bitset, false>; - friend class __bit_iterator<__bitset, true>; - friend struct __bit_array<__bitset>; - - __storage_type __first_[_N_words]; - - typedef __bit_reference<__bitset> reference; - typedef __bit_const_reference<__bitset> const_reference; - typedef __bit_iterator<__bitset, false> iterator; - typedef __bit_iterator<__bitset, true> const_iterator; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __bitset() noexcept; - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr 
__bitset(unsigned long long __v) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t __pos) noexcept - { - return reference(__first_ + __pos / __bits_per_word, __storage_type(1) << __pos % __bits_per_word); - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t __pos) const noexcept - { - return const_reference(__first_ + __pos / __bits_per_word, __storage_type(1) << __pos % __bits_per_word); - } - _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t __pos) noexcept - { - return iterator(__first_ + __pos / __bits_per_word, __pos % __bits_per_word); - } - _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t __pos) const noexcept - { - return const_iterator(__first_ + __pos / __bits_per_word, __pos % __bits_per_word); - } - - _LIBCUDACXX_INLINE_VISIBILITY void operator&=(const __bitset& __v) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY void operator|=(const __bitset& __v) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY void operator^=(const __bitset& __v) noexcept; - - void flip() noexcept; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const - { - return to_ulong(integral_constant < bool, _Size()); - } - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const - { - return to_ullong(integral_constant < bool, _Size()); - } - - bool all() const noexcept; - bool any() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY size_t __hash_code() const noexcept; - + typedef __bitset __self; + typedef __storage_type* __storage_pointer; + typedef const __storage_type* __const_storage_pointer; + static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); + + friend class __bit_reference<__bitset>; + friend class __bit_const_reference<__bitset>; + friend class __bit_iterator<__bitset, false>; + friend class __bit_iterator<__bitset, true>; + friend struct __bit_array<__bitset>; + + __storage_type __first_[_N_words]; + + typedef __bit_reference<__bitset> reference; + typedef __bit_const_reference<__bitset> const_reference; + typedef __bit_iterator<__bitset, false> iterator; + typedef __bit_iterator<__bitset, true> const_iterator; + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __bitset() noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + explicit constexpr __bitset(unsigned long long __v) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t __pos) noexcept + {return reference(__first_ + __pos / __bits_per_word, __storage_type(1) << __pos % __bits_per_word);} + _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t __pos) const noexcept + {return const_reference(__first_ + __pos / __bits_per_word, __storage_type(1) << __pos % __bits_per_word);} + _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t __pos) noexcept + {return iterator(__first_ + __pos / __bits_per_word, __pos % __bits_per_word);} + _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t __pos) const noexcept + {return const_iterator(__first_ + __pos / __bits_per_word, __pos % __bits_per_word);} + + _LIBCUDACXX_INLINE_VISIBILITY + void operator&=(const __bitset& __v) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + void operator|=(const __bitset& __v) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + void operator^=(const __bitset& __v) noexcept; + + void flip() noexcept; + _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const + {return to_ulong(integral_constant());} + _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const + {return to_ullong(integral_constant());} + + bool all() const noexcept; + 
bool any() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + size_t __hash_code() const noexcept; private: - unsigned long to_ulong(false_type) const; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong(true_type) const; - unsigned long long to_ullong(false_type) const; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong(true_type) const; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong(true_type, false_type) const; - unsigned long long to_ullong(true_type, true_type) const; + unsigned long to_ulong(false_type) const; + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long to_ulong(true_type) const; + unsigned long long to_ullong(false_type) const; + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long long to_ullong(true_type) const; + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long long to_ullong(true_type, false_type) const; + unsigned long long to_ullong(true_type, true_type) const; }; template -inline constexpr __bitset<_N_words, _Size>::__bitset() noexcept +inline constexpr +__bitset<_N_words, _Size>::__bitset() noexcept : __first_{0} {} template -inline constexpr __bitset<_N_words, _Size>::__bitset(unsigned long long __v) noexcept +inline +constexpr +__bitset<_N_words, _Size>::__bitset(unsigned long long __v) noexcept #if __SIZEOF_SIZE_T__ == 8 : __first_{__v} #elif __SIZEOF_SIZE_T__ == 4 - : __first_{ - static_cast<__storage_type>(__v), - _Size >= 2 * __bits_per_word - ? static_cast<__storage_type>(__v >> __bits_per_word) - : static_cast<__storage_type>((__v >> __bits_per_word) & (__storage_type(1) << (_Size - __bits_per_word)) - 1)} + : __first_{static_cast<__storage_type>(__v), + _Size >= 2 * __bits_per_word ? static_cast<__storage_type>(__v >> __bits_per_word) + : static_cast<__storage_type>((__v >> __bits_per_word) & (__storage_type(1) << (_Size - __bits_per_word)) - 1)} #else -# error This constructor has not been ported to this platform +#error This constructor has not been ported to this platform #endif {} template -inline void __bitset<_N_words, _Size>::operator&=(const __bitset& __v) noexcept +inline +void +__bitset<_N_words, _Size>::operator&=(const __bitset& __v) noexcept { - for (size_type __i = 0; __i < _N_words; ++__i) - { - __first_[__i] &= __v.__first_[__i]; - } + for (size_type __i = 0; __i < _N_words; ++__i) + __first_[__i] &= __v.__first_[__i]; } template -inline void __bitset<_N_words, _Size>::operator|=(const __bitset& __v) noexcept +inline +void +__bitset<_N_words, _Size>::operator|=(const __bitset& __v) noexcept { - for (size_type __i = 0; __i < _N_words; ++__i) - { - __first_[__i] |= __v.__first_[__i]; - } + for (size_type __i = 0; __i < _N_words; ++__i) + __first_[__i] |= __v.__first_[__i]; } template -inline void __bitset<_N_words, _Size>::operator^=(const __bitset& __v) noexcept +inline +void +__bitset<_N_words, _Size>::operator^=(const __bitset& __v) noexcept { - for (size_type __i = 0; __i < _N_words; ++__i) - { - __first_[__i] ^= __v.__first_[__i]; - } + for (size_type __i = 0; __i < _N_words; ++__i) + __first_[__i] ^= __v.__first_[__i]; } template -void __bitset<_N_words, _Size>::flip() noexcept -{ - // do middle whole words - size_type __n = _Size; - __storage_pointer __p = __first_; - for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) - { - *__p = ~*__p; - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - __storage_type __b = *__p & __m; - *__p &= ~__m; - *__p |= ~__b & __m; - } +void +__bitset<_N_words, _Size>::flip() noexcept +{ + // do middle whole words + 
size_type __n = _Size; + __storage_pointer __p = __first_; + for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) + *__p = ~*__p; + // do last partial word + if (__n > 0) + { + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + __storage_type __b = *__p & __m; + *__p &= ~__m; + *__p |= ~__b & __m; + } } template -unsigned long __bitset<_N_words, _Size>::to_ulong(false_type) const +unsigned long +__bitset<_N_words, _Size>::to_ulong(false_type) const { - const_iterator __e = __make_iter(_Size); - const_iterator __i = _CUDA_VSTD::find(__make_iter(sizeof(unsigned long) * CHAR_BIT), __e, true); - if (__i != __e) - { - __throw_overflow_error("bitset to_ulong overflow error"); - } + const_iterator __e = __make_iter(_Size); + const_iterator __i = _CUDA_VSTD::find(__make_iter(sizeof(unsigned long) * CHAR_BIT), __e, true); + if (__i != __e) + __throw_overflow_error("bitset to_ulong overflow error"); - return __first_[0]; + return __first_[0]; } template -inline unsigned long __bitset<_N_words, _Size>::to_ulong(true_type) const +inline +unsigned long +__bitset<_N_words, _Size>::to_ulong(true_type) const { - return __first_[0]; + return __first_[0]; } template -unsigned long long __bitset<_N_words, _Size>::to_ullong(false_type) const +unsigned long long +__bitset<_N_words, _Size>::to_ullong(false_type) const { - const_iterator __e = __make_iter(_Size); - const_iterator __i = _CUDA_VSTD::find(__make_iter(sizeof(unsigned long long) * CHAR_BIT), __e, true); - if (__i != __e) - { - __throw_overflow_error("bitset to_ullong overflow error"); - } + const_iterator __e = __make_iter(_Size); + const_iterator __i = _CUDA_VSTD::find(__make_iter(sizeof(unsigned long long) * CHAR_BIT), __e, true); + if (__i != __e) + __throw_overflow_error("bitset to_ullong overflow error"); - return to_ullong(true_type()); + return to_ullong(true_type()); } template -inline unsigned long long __bitset<_N_words, _Size>::to_ullong(true_type) const +inline +unsigned long long +__bitset<_N_words, _Size>::to_ullong(true_type) const { - return to_ullong(true_type(), integral_constant()); + return to_ullong(true_type(), integral_constant()); } template -inline unsigned long long __bitset<_N_words, _Size>::to_ullong(true_type, false_type) const +inline +unsigned long long +__bitset<_N_words, _Size>::to_ullong(true_type, false_type) const { - return __first_[0]; + return __first_[0]; } template -unsigned long long __bitset<_N_words, _Size>::to_ullong(true_type, true_type) const -{ - unsigned long long __r = __first_[0]; - for (std::size_t __i = 1; __i < sizeof(unsigned long long) / sizeof(__storage_type); ++__i) - { - __r |= static_cast(__first_[__i]) << (sizeof(__storage_type) * CHAR_BIT); - } - return __r; +unsigned long long +__bitset<_N_words, _Size>::to_ullong(true_type, true_type) const +{ + unsigned long long __r = __first_[0]; + for (std::size_t __i = 1; __i < sizeof(unsigned long long) / sizeof(__storage_type); ++__i) + __r |= static_cast(__first_[__i]) << (sizeof(__storage_type) * CHAR_BIT); + return __r; } template -bool __bitset<_N_words, _Size>::all() const noexcept -{ - // do middle whole words - size_type __n = _Size; - __const_storage_pointer __p = __first_; - for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) - { - if (~*__p) - { - return false; - } - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - if (~*__p & __m) +bool +__bitset<_N_words, _Size>::all() const noexcept +{ + // do middle whole words + size_type __n 
= _Size; + __const_storage_pointer __p = __first_; + for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) + if (~*__p) + return false; + // do last partial word + if (__n > 0) { - return false; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + if (~*__p & __m) + return false; } - } - return true; + return true; } template -bool __bitset<_N_words, _Size>::any() const noexcept -{ - // do middle whole words - size_type __n = _Size; - __const_storage_pointer __p = __first_; - for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) - { - if (*__p) - { - return true; - } - } - // do last partial word - if (__n > 0) - { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - if (*__p & __m) +bool +__bitset<_N_words, _Size>::any() const noexcept +{ + // do middle whole words + size_type __n = _Size; + __const_storage_pointer __p = __first_; + for (; __n >= __bits_per_word; ++__p, __n -= __bits_per_word) + if (*__p) + return true; + // do last partial word + if (__n > 0) { - return true; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + if (*__p & __m) + return true; } - } - return false; + return false; } template -inline size_t __bitset<_N_words, _Size>::__hash_code() const noexcept -{ - size_t __h = 0; - for (size_type __i = 0; __i < _N_words; ++__i) - { - __h ^= __first_[__i]; - } - return __h; +inline +size_t +__bitset<_N_words, _Size>::__hash_code() const noexcept +{ + size_t __h = 0; + for (size_type __i = 0; __i < _N_words; ++__i) + __h ^= __first_[__i]; + return __h; } template class __bitset<1, _Size> { public: - typedef ptrdiff_t difference_type; - typedef size_t size_type; - typedef size_type __storage_type; - + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef size_type __storage_type; protected: - typedef __bitset __self; - typedef __storage_type* __storage_pointer; - typedef const __storage_type* __const_storage_pointer; - static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); - - friend class __bit_reference<__bitset>; - friend class __bit_const_reference<__bitset>; - friend class __bit_iterator<__bitset, false>; - friend class __bit_iterator<__bitset, true>; - friend struct __bit_array<__bitset>; - - __storage_type __first_; - - typedef __bit_reference<__bitset> reference; - typedef __bit_const_reference<__bitset> const_reference; - typedef __bit_iterator<__bitset, false> iterator; - typedef __bit_iterator<__bitset, true> const_iterator; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __bitset() noexcept; - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr __bitset(unsigned long long __v) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t __pos) noexcept - { - return reference(&__first_, __storage_type(1) << __pos); - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t __pos) const noexcept - { - return const_reference(&__first_, __storage_type(1) << __pos); - } - _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t __pos) noexcept - { - return iterator(&__first_ + __pos / __bits_per_word, __pos % __bits_per_word); - } - _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t __pos) const noexcept - { - return const_iterator(&__first_ + __pos / __bits_per_word, __pos % __bits_per_word); - } - - _LIBCUDACXX_INLINE_VISIBILITY void operator&=(const __bitset& __v) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY void operator|=(const __bitset& __v) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY void 
operator^=(const __bitset& __v) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY void flip() noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const; - - _LIBCUDACXX_INLINE_VISIBILITY bool all() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bool any() const noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY size_t __hash_code() const noexcept; + typedef __bitset __self; + typedef __storage_type* __storage_pointer; + typedef const __storage_type* __const_storage_pointer; + static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); + + friend class __bit_reference<__bitset>; + friend class __bit_const_reference<__bitset>; + friend class __bit_iterator<__bitset, false>; + friend class __bit_iterator<__bitset, true>; + friend struct __bit_array<__bitset>; + + __storage_type __first_; + + typedef __bit_reference<__bitset> reference; + typedef __bit_const_reference<__bitset> const_reference; + typedef __bit_iterator<__bitset, false> iterator; + typedef __bit_iterator<__bitset, true> const_iterator; + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __bitset() noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + explicit constexpr __bitset(unsigned long long __v) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t __pos) noexcept + {return reference(&__first_, __storage_type(1) << __pos);} + _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t __pos) const noexcept + {return const_reference(&__first_, __storage_type(1) << __pos);} + _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t __pos) noexcept + {return iterator(&__first_ + __pos / __bits_per_word, __pos % __bits_per_word);} + _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t __pos) const noexcept + {return const_iterator(&__first_ + __pos / __bits_per_word, __pos % __bits_per_word);} + + _LIBCUDACXX_INLINE_VISIBILITY + void operator&=(const __bitset& __v) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + void operator|=(const __bitset& __v) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + void operator^=(const __bitset& __v) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + void flip() noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long to_ulong() const; + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long long to_ullong() const; + + _LIBCUDACXX_INLINE_VISIBILITY + bool all() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bool any() const noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + size_t __hash_code() const noexcept; }; template -inline constexpr __bitset<1, _Size>::__bitset() noexcept +inline constexpr +__bitset<1, _Size>::__bitset() noexcept : __first_(0) -{} +{ +} template -inline constexpr __bitset<1, _Size>::__bitset(unsigned long long __v) noexcept - : __first_(_Size == __bits_per_word ? static_cast<__storage_type>(__v) - : static_cast<__storage_type>(__v) & ((__storage_type(1) << _Size) - 1)) -{} +inline constexpr +__bitset<1, _Size>::__bitset(unsigned long long __v) noexcept + : __first_( + _Size == __bits_per_word ? 
static_cast<__storage_type>(__v) + : static_cast<__storage_type>(__v) & ((__storage_type(1) << _Size) - 1) + ) +{ +} template -inline void __bitset<1, _Size>::operator&=(const __bitset& __v) noexcept +inline +void +__bitset<1, _Size>::operator&=(const __bitset& __v) noexcept { - __first_ &= __v.__first_; + __first_ &= __v.__first_; } template -inline void __bitset<1, _Size>::operator|=(const __bitset& __v) noexcept +inline +void +__bitset<1, _Size>::operator|=(const __bitset& __v) noexcept { - __first_ |= __v.__first_; + __first_ |= __v.__first_; } template -inline void __bitset<1, _Size>::operator^=(const __bitset& __v) noexcept +inline +void +__bitset<1, _Size>::operator^=(const __bitset& __v) noexcept { - __first_ ^= __v.__first_; + __first_ ^= __v.__first_; } template -inline void __bitset<1, _Size>::flip() noexcept +inline +void +__bitset<1, _Size>::flip() noexcept { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); - __first_ = ~__first_; - __first_ &= __m; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); + __first_ = ~__first_; + __first_ &= __m; } template -inline unsigned long __bitset<1, _Size>::to_ulong() const +inline +unsigned long +__bitset<1, _Size>::to_ulong() const { - return __first_; + return __first_; } template -inline unsigned long long __bitset<1, _Size>::to_ullong() const +inline +unsigned long long +__bitset<1, _Size>::to_ullong() const { - return __first_; + return __first_; } template -inline bool __bitset<1, _Size>::all() const noexcept +inline +bool +__bitset<1, _Size>::all() const noexcept { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); - return !(~__first_ & __m); + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); + return !(~__first_ & __m); } template -inline bool __bitset<1, _Size>::any() const noexcept +inline +bool +__bitset<1, _Size>::any() const noexcept { - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); - return __first_ & __m; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - _Size); + return __first_ & __m; } template -inline size_t __bitset<1, _Size>::__hash_code() const noexcept +inline +size_t +__bitset<1, _Size>::__hash_code() const noexcept { - return __first_; + return __first_; } template <> class __bitset<0, 0> { public: - typedef ptrdiff_t difference_type; - typedef size_t size_type; - typedef size_type __storage_type; - + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef size_type __storage_type; protected: - typedef __bitset __self; - typedef __storage_type* __storage_pointer; - typedef const __storage_type* __const_storage_pointer; - static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); - - friend class __bit_reference<__bitset>; - friend class __bit_const_reference<__bitset>; - friend class __bit_iterator<__bitset, false>; - friend class __bit_iterator<__bitset, true>; - friend struct __bit_array<__bitset>; - - typedef __bit_reference<__bitset> reference; - typedef __bit_const_reference<__bitset> const_reference; - typedef __bit_iterator<__bitset, false> iterator; - typedef __bit_iterator<__bitset, true> const_iterator; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __bitset() noexcept; - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr __bitset(unsigned long long) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t) noexcept - { - return reference(0, 1); - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t) 
const noexcept - { - return const_reference(0, 1); - } - _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t) noexcept - { - return iterator(0, 0); - } - _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t) const noexcept - { - return const_iterator(0, 0); - } - - _LIBCUDACXX_INLINE_VISIBILITY void operator&=(const __bitset&) noexcept {} - _LIBCUDACXX_INLINE_VISIBILITY void operator|=(const __bitset&) noexcept {} - _LIBCUDACXX_INLINE_VISIBILITY void operator^=(const __bitset&) noexcept {} - - _LIBCUDACXX_INLINE_VISIBILITY void flip() noexcept {} - - _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const - { - return 0; - } - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const - { - return 0; - } - - _LIBCUDACXX_INLINE_VISIBILITY bool all() const noexcept - { - return true; - } - _LIBCUDACXX_INLINE_VISIBILITY bool any() const noexcept - { - return false; - } - - _LIBCUDACXX_INLINE_VISIBILITY size_t __hash_code() const noexcept - { - return 0; - } + typedef __bitset __self; + typedef __storage_type* __storage_pointer; + typedef const __storage_type* __const_storage_pointer; + static const unsigned __bits_per_word = static_cast(sizeof(__storage_type) * CHAR_BIT); + + friend class __bit_reference<__bitset>; + friend class __bit_const_reference<__bitset>; + friend class __bit_iterator<__bitset, false>; + friend class __bit_iterator<__bitset, true>; + friend struct __bit_array<__bitset>; + + typedef __bit_reference<__bitset> reference; + typedef __bit_const_reference<__bitset> const_reference; + typedef __bit_iterator<__bitset, false> iterator; + typedef __bit_iterator<__bitset, true> const_iterator; + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __bitset() noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + explicit constexpr __bitset(unsigned long long) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY reference __make_ref(size_t) noexcept + {return reference(0, 1);} + _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference __make_ref(size_t) const noexcept + {return const_reference(0, 1);} + _LIBCUDACXX_INLINE_VISIBILITY iterator __make_iter(size_t) noexcept + {return iterator(0, 0);} + _LIBCUDACXX_INLINE_VISIBILITY const_iterator __make_iter(size_t) const noexcept + {return const_iterator(0, 0);} + + _LIBCUDACXX_INLINE_VISIBILITY void operator&=(const __bitset&) noexcept {} + _LIBCUDACXX_INLINE_VISIBILITY void operator|=(const __bitset&) noexcept {} + _LIBCUDACXX_INLINE_VISIBILITY void operator^=(const __bitset&) noexcept {} + + _LIBCUDACXX_INLINE_VISIBILITY void flip() noexcept {} + + _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const {return 0;} + _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const {return 0;} + + _LIBCUDACXX_INLINE_VISIBILITY bool all() const noexcept {return true;} + _LIBCUDACXX_INLINE_VISIBILITY bool any() const noexcept {return false;} + + _LIBCUDACXX_INLINE_VISIBILITY size_t __hash_code() const noexcept {return 0;} }; -inline constexpr __bitset<0, 0>::__bitset() noexcept {} +inline +constexpr +__bitset<0, 0>::__bitset() noexcept +{ +} -inline constexpr __bitset<0, 0>::__bitset(unsigned long long) noexcept {} +inline +constexpr +__bitset<0, 0>::__bitset(unsigned long long) noexcept +{ +} -template -class _LIBCUDACXX_TEMPLATE_VIS bitset; -template -struct hash>; +template class _LIBCUDACXX_TEMPLATE_VIS bitset; +template struct hash >; template class _LIBCUDACXX_TEMPLATE_VIS bitset : private __bitset<_Size == 0 ? 
0 : (_Size - 1) / (sizeof(size_t) * CHAR_BIT) + 1, _Size> { public: - static const unsigned __n_words = _Size == 0 ? 0 : (_Size - 1) / (sizeof(size_t) * CHAR_BIT) + 1; - typedef __bitset<__n_words, _Size> base; + static const unsigned __n_words = _Size == 0 ? 0 : (_Size - 1) / (sizeof(size_t) * CHAR_BIT) + 1; + typedef __bitset<__n_words, _Size> base; public: - typedef typename base::reference reference; - typedef typename base::const_reference const_reference; - - // 23.3.5.1 constructors: - _LIBCUDACXX_INLINE_VISIBILITY constexpr bitset() noexcept {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr bitset(unsigned long long __v) noexcept - : base(__v) - {} - template ::value>> - explicit bitset(const _CharT* __str, - typename basic_string<_CharT>::size_type __n = basic_string<_CharT>::npos, - _CharT __zero = _CharT('0'), - _CharT __one = _CharT('1')); - template - explicit bitset(const basic_string<_CharT, _Traits, _Allocator>& __str, - typename basic_string<_CharT, _Traits, _Allocator>::size_type __pos = 0, - typename basic_string<_CharT, _Traits, _Allocator>::size_type __n = - (basic_string<_CharT, _Traits, _Allocator>::npos), - _CharT __zero = _CharT('0'), - _CharT __one = _CharT('1')); - - // 23.3.5.2 bitset operations: - _LIBCUDACXX_INLINE_VISIBILITY bitset& operator&=(const bitset& __rhs) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bitset& operator|=(const bitset& __rhs) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bitset& operator^=(const bitset& __rhs) noexcept; - bitset& operator<<=(size_t __pos) noexcept; - bitset& operator>>=(size_t __pos) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bitset& set() noexcept; - bitset& set(size_t __pos, bool __val = true); - _LIBCUDACXX_INLINE_VISIBILITY bitset& reset() noexcept; - bitset& reset(size_t __pos); - _LIBCUDACXX_INLINE_VISIBILITY bitset operator~() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bitset& flip() noexcept; - bitset& flip(size_t __pos); - - // element access: - _LIBCUDACXX_INLINE_VISIBILITY constexpr const_reference operator[](size_t __p) const - { - return base::__make_ref(__p); - } - _LIBCUDACXX_INLINE_VISIBILITY reference operator[](size_t __p) - { - return base::__make_ref(__p); - } - _LIBCUDACXX_INLINE_VISIBILITY unsigned long to_ulong() const; - _LIBCUDACXX_INLINE_VISIBILITY unsigned long long to_ullong() const; - template - basic_string<_CharT, _Traits, _Allocator> to_string(_CharT __zero = _CharT('0'), _CharT __one = _CharT('1')) const; - template - _LIBCUDACXX_INLINE_VISIBILITY basic_string<_CharT, _Traits, allocator<_CharT>> - to_string(_CharT __zero = _CharT('0'), _CharT __one = _CharT('1')) const; - template - _LIBCUDACXX_INLINE_VISIBILITY basic_string<_CharT, char_traits<_CharT>, allocator<_CharT>> - to_string(_CharT __zero = _CharT('0'), _CharT __one = _CharT('1')) const; - _LIBCUDACXX_INLINE_VISIBILITY basic_string, allocator> - to_string(char __zero = '0', char __one = '1') const; - _LIBCUDACXX_INLINE_VISIBILITY size_t count() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t size() const noexcept - { - return _Size; - } - _LIBCUDACXX_INLINE_VISIBILITY bool operator==(const bitset& __rhs) const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bool operator!=(const bitset& __rhs) const noexcept; - bool test(size_t __pos) const; - _LIBCUDACXX_INLINE_VISIBILITY bool all() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bool any() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY bool none() const noexcept - { - return !any(); - } - _LIBCUDACXX_INLINE_VISIBILITY bitset operator<<(size_t __pos) const noexcept; - 
_LIBCUDACXX_INLINE_VISIBILITY bitset operator>>(size_t __pos) const noexcept; + typedef typename base::reference reference; + typedef typename base::const_reference const_reference; + + // 23.3.5.1 constructors: + _LIBCUDACXX_INLINE_VISIBILITY constexpr bitset() noexcept {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + bitset(unsigned long long __v) noexcept : base(__v) {} + template::value> > + explicit bitset(const _CharT* __str, + typename basic_string<_CharT>::size_type __n = basic_string<_CharT>::npos, + _CharT __zero = _CharT('0'), _CharT __one = _CharT('1')); + template + explicit bitset(const basic_string<_CharT,_Traits,_Allocator>& __str, + typename basic_string<_CharT,_Traits,_Allocator>::size_type __pos = 0, + typename basic_string<_CharT,_Traits,_Allocator>::size_type __n = + (basic_string<_CharT,_Traits,_Allocator>::npos), + _CharT __zero = _CharT('0'), _CharT __one = _CharT('1')); + + // 23.3.5.2 bitset operations: + _LIBCUDACXX_INLINE_VISIBILITY + bitset& operator&=(const bitset& __rhs) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bitset& operator|=(const bitset& __rhs) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bitset& operator^=(const bitset& __rhs) noexcept; + bitset& operator<<=(size_t __pos) noexcept; + bitset& operator>>=(size_t __pos) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bitset& set() noexcept; + bitset& set(size_t __pos, bool __val = true); + _LIBCUDACXX_INLINE_VISIBILITY + bitset& reset() noexcept; + bitset& reset(size_t __pos); + _LIBCUDACXX_INLINE_VISIBILITY + bitset operator~() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bitset& flip() noexcept; + bitset& flip(size_t __pos); + + // element access: + _LIBCUDACXX_INLINE_VISIBILITY constexpr + const_reference operator[](size_t __p) const {return base::__make_ref(__p);} + _LIBCUDACXX_INLINE_VISIBILITY reference operator[](size_t __p) {return base::__make_ref(__p);} + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long to_ulong() const; + _LIBCUDACXX_INLINE_VISIBILITY + unsigned long long to_ullong() const; + template + basic_string<_CharT, _Traits, _Allocator> to_string(_CharT __zero = _CharT('0'), + _CharT __one = _CharT('1')) const; + template + _LIBCUDACXX_INLINE_VISIBILITY + basic_string<_CharT, _Traits, allocator<_CharT> > to_string(_CharT __zero = _CharT('0'), + _CharT __one = _CharT('1')) const; + template + _LIBCUDACXX_INLINE_VISIBILITY + basic_string<_CharT, char_traits<_CharT>, allocator<_CharT> > to_string(_CharT __zero = _CharT('0'), + _CharT __one = _CharT('1')) const; + _LIBCUDACXX_INLINE_VISIBILITY + basic_string, allocator > to_string(char __zero = '0', + char __one = '1') const; + _LIBCUDACXX_INLINE_VISIBILITY + size_t count() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t size() const noexcept {return _Size;} + _LIBCUDACXX_INLINE_VISIBILITY + bool operator==(const bitset& __rhs) const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bool operator!=(const bitset& __rhs) const noexcept; + bool test(size_t __pos) const; + _LIBCUDACXX_INLINE_VISIBILITY + bool all() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bool any() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY bool none() const noexcept {return !any();} + _LIBCUDACXX_INLINE_VISIBILITY + bitset operator<<(size_t __pos) const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + bitset operator>>(size_t __pos) const noexcept; private: - _LIBCUDACXX_INLINE_VISIBILITY size_t __hash_code() const noexcept - { - return base::__hash_code(); - } - friend struct hash; + _LIBCUDACXX_INLINE_VISIBILITY + size_t __hash_code() const noexcept 
{return base::__hash_code();} + + friend struct hash; }; template -template -bitset<_Size>::bitset(const _CharT* __str, typename basic_string<_CharT>::size_type __n, _CharT __zero, _CharT __one) +template +bitset<_Size>::bitset(const _CharT* __str, + typename basic_string<_CharT>::size_type __n, + _CharT __zero, _CharT __one) { - size_t __rlen = _CUDA_VSTD::min(__n, char_traits<_CharT>::length(__str)); - for (size_t __i = 0; __i < __rlen; ++__i) - { - if (__str[__i] != __zero && __str[__i] != __one) + size_t __rlen = _CUDA_VSTD::min(__n, char_traits<_CharT>::length(__str)); + for (size_t __i = 0; __i < __rlen; ++__i) + if (__str[__i] != __zero && __str[__i] != __one) + __throw_invalid_argument("bitset string ctor has invalid argument"); + + size_t _Mp = _CUDA_VSTD::min(__rlen, _Size); + size_t __i = 0; + for (; __i < _Mp; ++__i) { - __throw_invalid_argument("bitset string ctor has invalid argument"); + _CharT __c = __str[_Mp - 1 - __i]; + if (__c == __zero) + (*this)[__i] = false; + else + (*this)[__i] = true; } - } - - size_t _Mp = _CUDA_VSTD::min(__rlen, _Size); - size_t __i = 0; - for (; __i < _Mp; ++__i) - { - _CharT __c = __str[_Mp - 1 - __i]; - if (__c == __zero) - { - (*this)[__i] = false; - } - else - { - (*this)[__i] = true; - } - } - _CUDA_VSTD::fill(base::__make_iter(__i), base::__make_iter(_Size), false); + _CUDA_VSTD::fill(base::__make_iter(__i), base::__make_iter(_Size), false); } template -template -bitset<_Size>::bitset( - const basic_string<_CharT, _Traits, _Allocator>& __str, - typename basic_string<_CharT, _Traits, _Allocator>::size_type __pos, - typename basic_string<_CharT, _Traits, _Allocator>::size_type __n, - _CharT __zero, - _CharT __one) -{ - if (__pos > __str.size()) - { - __throw_out_of_range("bitset string pos out of range"); - } - - size_t __rlen = _CUDA_VSTD::min(__n, __str.size() - __pos); - for (size_t __i = __pos; __i < __pos + __rlen; ++__i) - { - if (!_Traits::eq(__str[__i], __zero) && !_Traits::eq(__str[__i], __one)) - { - __throw_invalid_argument("bitset string ctor has invalid argument"); - } - } - - size_t _Mp = _CUDA_VSTD::min(__rlen, _Size); - size_t __i = 0; - for (; __i < _Mp; ++__i) - { - _CharT __c = __str[__pos + _Mp - 1 - __i]; - if (_Traits::eq(__c, __zero)) - { - (*this)[__i] = false; - } - else +template +bitset<_Size>::bitset(const basic_string<_CharT,_Traits,_Allocator>& __str, + typename basic_string<_CharT,_Traits,_Allocator>::size_type __pos, + typename basic_string<_CharT,_Traits,_Allocator>::size_type __n, + _CharT __zero, _CharT __one) +{ + if (__pos > __str.size()) + __throw_out_of_range("bitset string pos out of range"); + + size_t __rlen = _CUDA_VSTD::min(__n, __str.size() - __pos); + for (size_t __i = __pos; __i < __pos + __rlen; ++__i) + if (!_Traits::eq(__str[__i], __zero) && !_Traits::eq(__str[__i], __one)) + __throw_invalid_argument("bitset string ctor has invalid argument"); + + size_t _Mp = _CUDA_VSTD::min(__rlen, _Size); + size_t __i = 0; + for (; __i < _Mp; ++__i) { - (*this)[__i] = true; + _CharT __c = __str[__pos + _Mp - 1 - __i]; + if (_Traits::eq(__c, __zero)) + (*this)[__i] = false; + else + (*this)[__i] = true; } - } - _CUDA_VSTD::fill(base::__make_iter(__i), base::__make_iter(_Size), false); + _CUDA_VSTD::fill(base::__make_iter(__i), base::__make_iter(_Size), false); } template -inline bitset<_Size>& bitset<_Size>::operator&=(const bitset& __rhs) noexcept +inline +bitset<_Size>& +bitset<_Size>::operator&=(const bitset& __rhs) noexcept { - base::operator&=(__rhs); - return *this; + base::operator&=(__rhs); + 
return *this; } template -inline bitset<_Size>& bitset<_Size>::operator|=(const bitset& __rhs) noexcept +inline +bitset<_Size>& +bitset<_Size>::operator|=(const bitset& __rhs) noexcept { - base::operator|=(__rhs); - return *this; + base::operator|=(__rhs); + return *this; } template -inline bitset<_Size>& bitset<_Size>::operator^=(const bitset& __rhs) noexcept +inline +bitset<_Size>& +bitset<_Size>::operator^=(const bitset& __rhs) noexcept { - base::operator^=(__rhs); - return *this; + base::operator^=(__rhs); + return *this; } template -bitset<_Size>& bitset<_Size>::operator<<=(size_t __pos) noexcept +bitset<_Size>& +bitset<_Size>::operator<<=(size_t __pos) noexcept { - __pos = _CUDA_VSTD::min(__pos, _Size); - _CUDA_VSTD::copy_backward(base::__make_iter(0), base::__make_iter(_Size - __pos), base::__make_iter(_Size)); - _CUDA_VSTD::fill_n(base::__make_iter(0), __pos, false); - return *this; + __pos = _CUDA_VSTD::min(__pos, _Size); + _CUDA_VSTD::copy_backward(base::__make_iter(0), base::__make_iter(_Size - __pos), base::__make_iter(_Size)); + _CUDA_VSTD::fill_n(base::__make_iter(0), __pos, false); + return *this; } template -bitset<_Size>& bitset<_Size>::operator>>=(size_t __pos) noexcept +bitset<_Size>& +bitset<_Size>::operator>>=(size_t __pos) noexcept { - __pos = _CUDA_VSTD::min(__pos, _Size); - _CUDA_VSTD::copy(base::__make_iter(__pos), base::__make_iter(_Size), base::__make_iter(0)); - _CUDA_VSTD::fill_n(base::__make_iter(_Size - __pos), __pos, false); - return *this; + __pos = _CUDA_VSTD::min(__pos, _Size); + _CUDA_VSTD::copy(base::__make_iter(__pos), base::__make_iter(_Size), base::__make_iter(0)); + _CUDA_VSTD::fill_n(base::__make_iter(_Size - __pos), __pos, false); + return *this; } template -inline bitset<_Size>& bitset<_Size>::set() noexcept +inline +bitset<_Size>& +bitset<_Size>::set() noexcept { - _CUDA_VSTD::fill_n(base::__make_iter(0), _Size, true); - return *this; + _CUDA_VSTD::fill_n(base::__make_iter(0), _Size, true); + return *this; } template -bitset<_Size>& bitset<_Size>::set(size_t __pos, bool __val) +bitset<_Size>& +bitset<_Size>::set(size_t __pos, bool __val) { - if (__pos >= _Size) - { - __throw_out_of_range("bitset set argument out of range"); - } + if (__pos >= _Size) + __throw_out_of_range("bitset set argument out of range"); - (*this)[__pos] = __val; - return *this; + (*this)[__pos] = __val; + return *this; } template -inline bitset<_Size>& bitset<_Size>::reset() noexcept +inline +bitset<_Size>& +bitset<_Size>::reset() noexcept { - _CUDA_VSTD::fill_n(base::__make_iter(0), _Size, false); - return *this; + _CUDA_VSTD::fill_n(base::__make_iter(0), _Size, false); + return *this; } template -bitset<_Size>& bitset<_Size>::reset(size_t __pos) +bitset<_Size>& +bitset<_Size>::reset(size_t __pos) { - if (__pos >= _Size) - { - __throw_out_of_range("bitset reset argument out of range"); - } + if (__pos >= _Size) + __throw_out_of_range("bitset reset argument out of range"); - (*this)[__pos] = false; - return *this; + (*this)[__pos] = false; + return *this; } template -inline bitset<_Size> bitset<_Size>::operator~() const noexcept +inline +bitset<_Size> +bitset<_Size>::operator~() const noexcept { - bitset __x(*this); - __x.flip(); - return __x; + bitset __x(*this); + __x.flip(); + return __x; } template -inline bitset<_Size>& bitset<_Size>::flip() noexcept +inline +bitset<_Size>& +bitset<_Size>::flip() noexcept { - base::flip(); - return *this; + base::flip(); + return *this; } template -bitset<_Size>& bitset<_Size>::flip(size_t __pos) +bitset<_Size>& 
+bitset<_Size>::flip(size_t __pos) { - if (__pos >= _Size) - { - __throw_out_of_range("bitset flip argument out of range"); - } + if (__pos >= _Size) + __throw_out_of_range("bitset flip argument out of range"); - reference r = base::__make_ref(__pos); - r = ~r; - return *this; + reference r = base::__make_ref(__pos); + r = ~r; + return *this; } template -inline unsigned long bitset<_Size>::to_ulong() const +inline +unsigned long +bitset<_Size>::to_ulong() const { - return base::to_ulong(); + return base::to_ulong(); } template -inline unsigned long long bitset<_Size>::to_ullong() const +inline +unsigned long long +bitset<_Size>::to_ullong() const { - return base::to_ullong(); + return base::to_ullong(); } template template -basic_string<_CharT, _Traits, _Allocator> bitset<_Size>::to_string(_CharT __zero, _CharT __one) const +basic_string<_CharT, _Traits, _Allocator> +bitset<_Size>::to_string(_CharT __zero, _CharT __one) const { - basic_string<_CharT, _Traits, _Allocator> __r(_Size, __zero); - for (size_t __i = 0; __i < _Size; ++__i) - { - if ((*this)[__i]) + basic_string<_CharT, _Traits, _Allocator> __r(_Size, __zero); + for (size_t __i = 0; __i < _Size; ++__i) { - __r[_Size - 1 - __i] = __one; + if ((*this)[__i]) + __r[_Size - 1 - __i] = __one; } - } - return __r; + return __r; } template template -inline basic_string<_CharT, _Traits, allocator<_CharT>> bitset<_Size>::to_string(_CharT __zero, _CharT __one) const +inline +basic_string<_CharT, _Traits, allocator<_CharT> > +bitset<_Size>::to_string(_CharT __zero, _CharT __one) const { - return to_string<_CharT, _Traits, allocator<_CharT>>(__zero, __one); + return to_string<_CharT, _Traits, allocator<_CharT> >(__zero, __one); } template template -inline basic_string<_CharT, char_traits<_CharT>, allocator<_CharT>> +inline +basic_string<_CharT, char_traits<_CharT>, allocator<_CharT> > bitset<_Size>::to_string(_CharT __zero, _CharT __one) const { - return to_string<_CharT, char_traits<_CharT>, allocator<_CharT>>(__zero, __one); + return to_string<_CharT, char_traits<_CharT>, allocator<_CharT> >(__zero, __one); } template -inline basic_string, allocator> bitset<_Size>::to_string(char __zero, char __one) const +inline +basic_string, allocator > +bitset<_Size>::to_string(char __zero, char __one) const { - return to_string, allocator>(__zero, __one); + return to_string, allocator >(__zero, __one); } template -inline size_t bitset<_Size>::count() const noexcept +inline +size_t +bitset<_Size>::count() const noexcept { - return static_cast(__count_bool_true(base::__make_iter(0), _Size)); + return static_cast(__count_bool_true(base::__make_iter(0), _Size)); } template -inline bool bitset<_Size>::operator==(const bitset& __rhs) const noexcept +inline +bool +bitset<_Size>::operator==(const bitset& __rhs) const noexcept { - return _CUDA_VSTD::equal(base::__make_iter(0), base::__make_iter(_Size), __rhs.__make_iter(0)); + return _CUDA_VSTD::equal(base::__make_iter(0), base::__make_iter(_Size), __rhs.__make_iter(0)); } template -inline bool bitset<_Size>::operator!=(const bitset& __rhs) const noexcept +inline +bool +bitset<_Size>::operator!=(const bitset& __rhs) const noexcept { - return !(*this == __rhs); + return !(*this == __rhs); } template -bool bitset<_Size>::test(size_t __pos) const +bool +bitset<_Size>::test(size_t __pos) const { - if (__pos >= _Size) - { - __throw_out_of_range("bitset test argument out of range"); - } + if (__pos >= _Size) + __throw_out_of_range("bitset test argument out of range"); - return (*this)[__pos]; + return (*this)[__pos]; } 
template -inline bool bitset<_Size>::all() const noexcept +inline +bool +bitset<_Size>::all() const noexcept { - return base::all(); + return base::all(); } template -inline bool bitset<_Size>::any() const noexcept +inline +bool +bitset<_Size>::any() const noexcept { - return base::any(); + return base::any(); } template -inline bitset<_Size> bitset<_Size>::operator<<(size_t __pos) const noexcept +inline +bitset<_Size> +bitset<_Size>::operator<<(size_t __pos) const noexcept { - bitset __r = *this; - __r <<= __pos; - return __r; + bitset __r = *this; + __r <<= __pos; + return __r; } template -inline bitset<_Size> bitset<_Size>::operator>>(size_t __pos) const noexcept +inline +bitset<_Size> +bitset<_Size>::operator>>(size_t __pos) const noexcept { - bitset __r = *this; - __r >>= __pos; - return __r; + bitset __r = *this; + __r >>= __pos; + return __r; } template -inline _LIBCUDACXX_INLINE_VISIBILITY bitset<_Size> operator&(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bitset<_Size> +operator&(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept { - bitset<_Size> __r = __x; - __r &= __y; - return __r; + bitset<_Size> __r = __x; + __r &= __y; + return __r; } template -inline _LIBCUDACXX_INLINE_VISIBILITY bitset<_Size> operator|(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bitset<_Size> +operator|(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept { - bitset<_Size> __r = __x; - __r |= __y; - return __r; + bitset<_Size> __r = __x; + __r |= __y; + return __r; } template -inline _LIBCUDACXX_INLINE_VISIBILITY bitset<_Size> operator^(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept +inline _LIBCUDACXX_INLINE_VISIBILITY +bitset<_Size> +operator^(const bitset<_Size>& __x, const bitset<_Size>& __y) noexcept { - bitset<_Size> __r = __x; - __r ^= __y; - return __r; + bitset<_Size> __r = __x; + __r ^= __y; + return __r; } template -struct _LIBCUDACXX_TEMPLATE_VIS hash> : public __unary_function, size_t> +struct _LIBCUDACXX_TEMPLATE_VIS hash > + : public __unary_function, size_t> { - _LIBCUDACXX_INLINE_VISIBILITY size_t operator()(const bitset<_Size>& __bs) const noexcept - { - return __bs.__hash_code(); - } + _LIBCUDACXX_INLINE_VISIBILITY + size_t operator()(const bitset<_Size>& __bs) const noexcept + {return __bs.__hash_code();} }; template -basic_istream<_CharT, _Traits>& operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x); +basic_istream<_CharT, _Traits>& +operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x); template -basic_ostream<_CharT, _Traits>& operator<<(basic_ostream<_CharT, _Traits>& __os, const bitset<_Size>& __x); +basic_ostream<_CharT, _Traits>& +operator<<(basic_ostream<_CharT, _Traits>& __os, const bitset<_Size>& __x); _LIBCUDACXX_END_NAMESPACE_STD _LIBCUDACXX_POP_MACROS -#endif // _LIBCUDACXX_BITSET +#endif // _LIBCUDACXX_BITSET diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cassert b/libcudacxx/include/cuda/std/detail/libcxx/include/cassert index 0a66560dc38..f995ee8e79b 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cassert +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cassert @@ -20,9 +20,8 @@ Macros: #define _LIBCUDACXX_CASSERT #ifndef _CCCL_COMPILER_NVRTC -# include - -# include +#include +#include #endif //_CCCL_COMPILER_NVRTC -#endif // _LIBCUDACXX_CASSERT +#endif // _LIBCUDACXX_CASSERT diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/ccomplex 
b/libcudacxx/include/cuda/std/detail/libcxx/include/ccomplex index 563e5c2e8e4..fca1dcf1edb 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/ccomplex +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/ccomplex @@ -29,4 +29,4 @@ // hh 080623 Created -#endif // _LIBCUDACXX_CCOMPLEX +#endif // _LIBCUDACXX_CCOMPLEX diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cfloat b/libcudacxx/include/cuda/std/detail/libcxx/include/cfloat index 9480e45cdf6..142e40313e3 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cfloat +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cfloat @@ -70,9 +70,8 @@ Macros: */ #ifndef _CCCL_COMPILER_NVRTC -# include - -# include +#include +#include #endif // _CCCL_COMPILER_NVRTC -#endif // _LIBCUDACXX_CFLOAT +#endif // _LIBCUDACXX_CFLOAT diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/chrono b/libcudacxx/include/cuda/std/detail/libcxx/include/chrono index f36d6cfcc7f..43b22135e6d 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/chrono +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/chrono @@ -834,27 +834,28 @@ constexpr chrono::year operator ""y(unsigned lo #endif // no system header #ifdef __cuda_std__ -# ifndef _CCCL_COMPILER_NVRTC -# include -# endif // _CCCL_COMPILER_NVRTC +#ifndef _CCCL_COMPILER_NVRTC +#include +#endif // _CCCL_COMPILER_NVRTC #endif // __cuda_std__ +#include // all public C++ headers provide the assertion handler #include #include #include #include #include #include -#include // all public C++ headers provide the assertion handler #include #include // standard-mandated includes // TODO: Fix CPOs in split H/D compilation or inform users of what may happen // #include -#include #include +#include + // Silence NVCC warnings `long double` arising from chrono floating pointer // user-defined literals which are defined in terms of `long double`. 
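The cassert, cfloat and chrono hunks above all reshuffle the same guarded-include pattern: host-only C library headers are pulled in only when the file is not being compiled by NVRTC, which ships no host standard library. A small sketch of that guard; the concrete header names are elided in the hunks above, so <float.h> and <ctime> below are placeholders, not taken from the patch:

// Guarded-include pattern from the hunks above: host-only headers are
// skipped when the CCCL configuration detects NVRTC via _CCCL_COMPILER_NVRTC.
#ifndef _CCCL_COMPILER_NVRTC
#  include <float.h> // placeholder header name
#endif // _CCCL_COMPILER_NVRTC

#ifdef __cuda_std__
#  ifndef _CCCL_COMPILER_NVRTC
#    include <ctime> // placeholder header name
#  endif // _CCCL_COMPILER_NVRTC
#endif // __cuda_std__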
@@ -868,179 +869,181 @@ _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM struct _FilesystemClock; _LIBCUDACXX_END_NAMESPACE_FILESYSTEM -#if _LIBCUDACXX_CUDA_ABI_VERSION > 3 +# if _LIBCUDACXX_CUDA_ABI_VERSION > 3 # define _LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T double -#else +# else # define _LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T long double -#endif +# endif _LIBCUDACXX_BEGIN_NAMESPACE_STD namespace chrono { -template > -class _LIBCUDACXX_TEMPLATE_VIS duration; +template > class _LIBCUDACXX_TEMPLATE_VIS duration; template -struct __is_duration : false_type -{}; +struct __is_duration : false_type {}; template -struct __is_duration> : true_type -{}; +struct __is_duration > : true_type {}; template -struct __is_duration> : true_type -{}; +struct __is_duration > : true_type {}; template -struct __is_duration> : true_type -{}; +struct __is_duration > : true_type {}; template -struct __is_duration> : true_type -{}; +struct __is_duration > : true_type {}; -} // namespace chrono +} // chrono template -struct _LIBCUDACXX_TEMPLATE_VIS common_type, chrono::duration<_Rep2, _Period2>> +struct _LIBCUDACXX_TEMPLATE_VIS common_type, + chrono::duration<_Rep2, _Period2> > { - typedef chrono::duration::type, typename __ratio_gcd<_Period1, _Period2>::type> - type; + typedef chrono::duration::type, + typename __ratio_gcd<_Period1, _Period2>::type> type; }; -namespace chrono -{ +namespace chrono { // duration_cast -template ::type, - bool = _Period::num == 1, - bool = _Period::den == 1> + bool = _Period::num == 1, + bool = _Period::den == 1> struct __duration_cast; template struct __duration_cast<_FromDuration, _ToDuration, _Period, true, true> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr _ToDuration operator()(const _FromDuration& __fd) const - { - return _ToDuration(static_cast(__fd.count())); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + _ToDuration operator()(const _FromDuration& __fd) const + { + return _ToDuration(static_cast(__fd.count())); + } }; template struct __duration_cast<_FromDuration, _ToDuration, _Period, true, false> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr _ToDuration operator()(const _FromDuration& __fd) const - { - typedef typename common_type::type _Ct; - return _ToDuration( - static_cast(static_cast<_Ct>(__fd.count()) / static_cast<_Ct>(_Period::den))); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + _ToDuration operator()(const _FromDuration& __fd) const + { + typedef typename common_type::type _Ct; + return _ToDuration(static_cast( + static_cast<_Ct>(__fd.count()) / static_cast<_Ct>(_Period::den))); + } }; template struct __duration_cast<_FromDuration, _ToDuration, _Period, false, true> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr _ToDuration operator()(const _FromDuration& __fd) const - { - typedef typename common_type::type _Ct; - return _ToDuration( - static_cast(static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num))); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + _ToDuration operator()(const _FromDuration& __fd) const + { + typedef typename common_type::type _Ct; + return _ToDuration(static_cast( + static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num))); + } }; template struct __duration_cast<_FromDuration, _ToDuration, _Period, false, false> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr _ToDuration operator()(const _FromDuration& __fd) const - { - typedef typename common_type::type _Ct; - return _ToDuration(static_cast( - static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num) / static_cast<_Ct>(_Period::den))); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + 
_ToDuration operator()(const _FromDuration& __fd) const + { + typedef typename common_type::type _Ct; + return _ToDuration(static_cast( + static_cast<_Ct>(__fd.count()) * static_cast<_Ct>(_Period::num) + / static_cast<_Ct>(_Period::den))); + } }; template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t<__is_duration<_ToDuration>::value, _ToDuration> +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +__enable_if_t +< + __is_duration<_ToDuration>::value, + _ToDuration +> duration_cast(const duration<_Rep, _Period>& __fd) { - return __duration_cast, _ToDuration>()(__fd); + return __duration_cast, _ToDuration>()(__fd); } template -struct _LIBCUDACXX_TEMPLATE_VIS treat_as_floating_point : is_floating_point<_Rep> -{}; +struct _LIBCUDACXX_TEMPLATE_VIS treat_as_floating_point : is_floating_point<_Rep> {}; #if _CCCL_STD_VER > 2011 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES) template -_LIBCUDACXX_INLINE_VAR constexpr bool treat_as_floating_point_v = treat_as_floating_point<_Rep>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool treat_as_floating_point_v + = treat_as_floating_point<_Rep>::value; #endif // _CCCL_STD_VER > 2011 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES) template struct _LIBCUDACXX_TEMPLATE_VIS duration_values { public: - _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep zero() noexcept - { - return _Rep(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep max() noexcept - { - return numeric_limits<_Rep>::max(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep min() noexcept - { - return numeric_limits<_Rep>::lowest(); - } + _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep zero() noexcept {return _Rep(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep max() noexcept {return numeric_limits<_Rep>::max();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr _Rep min() noexcept {return numeric_limits<_Rep>::lowest();} }; #if _CCCL_STD_VER > 2011 template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __enable_if_t<__is_duration<_ToDuration>::value, _ToDuration> +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +__enable_if_t +< + __is_duration<_ToDuration>::value, + _ToDuration +> floor(const duration<_Rep, _Period>& __d) { - _ToDuration __t = duration_cast<_ToDuration>(__d); - if (__t > __d) - { - __t = __t - _ToDuration{1}; - } - return __t; + _ToDuration __t = duration_cast<_ToDuration>(__d); + if (__t > __d) + __t = __t - _ToDuration{1}; + return __t; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __enable_if_t<__is_duration<_ToDuration>::value, _ToDuration> +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +__enable_if_t +< + __is_duration<_ToDuration>::value, + _ToDuration +> ceil(const duration<_Rep, _Period>& __d) { - _ToDuration __t = duration_cast<_ToDuration>(__d); - if (__t < __d) - { - __t = __t + _ToDuration{1}; - } - return __t; + _ToDuration __t = duration_cast<_ToDuration>(__d); + if (__t < __d) + __t = __t + _ToDuration{1}; + return __t; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __enable_if_t<__is_duration<_ToDuration>::value, _ToDuration> +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +__enable_if_t +< + __is_duration<_ToDuration>::value, + _ToDuration +> round(const duration<_Rep, _Period>& __d) { - _ToDuration __lower = floor<_ToDuration>(__d); - _ToDuration __upper = __lower + _ToDuration{1}; - auto __lowerDiff = __d - __lower; - auto __upperDiff = __upper - __d; - if (__lowerDiff < __upperDiff) - { - return __lower; - 
} - if (__lowerDiff > __upperDiff) - { - return __upper; - } - return __lower.count() & 1 ? __upper : __lower; + _ToDuration __lower = floor<_ToDuration>(__d); + _ToDuration __upper = __lower + _ToDuration{1}; + auto __lowerDiff = __d - __lower; + auto __upperDiff = __upper - __d; + if (__lowerDiff < __upperDiff) + return __lower; + if (__lowerDiff > __upperDiff) + return __upper; + return __lower.count() & 1 ? __upper : __lower; } #endif // _CCCL_STD_VER > 2011 @@ -1049,199 +1052,150 @@ round(const duration<_Rep, _Period>& __d) template class _LIBCUDACXX_TEMPLATE_VIS duration { - static_assert(!__is_duration<_Rep>::value, "A duration representation can not be a duration"); - static_assert(__is_ratio<_Period>::value, "Second template parameter of duration must be a std::ratio"); - static_assert(_Period::num > 0, "duration period must be positive"); + static_assert(!__is_duration<_Rep>::value, "A duration representation can not be a duration"); + static_assert(__is_ratio<_Period>::value, "Second template parameter of duration must be a std::ratio"); + static_assert(_Period::num > 0, "duration period must be positive"); - template - struct __no_overflow - { - private: - static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; - static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; - static const intmax_t __n1 = _R1::num / __gcd_n1_n2; - static const intmax_t __d1 = _R1::den / __gcd_d1_d2; - static const intmax_t __n2 = _R2::num / __gcd_n1_n2; - static const intmax_t __d2 = _R2::den / __gcd_d1_d2; - static const intmax_t max = -((intmax_t(1) << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1); - - template - struct __mul // __overflow == false + template + struct __no_overflow { - static const intmax_t value = _Xp * _Yp; + private: + static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; + static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; + static const intmax_t __n1 = _R1::num / __gcd_n1_n2; + static const intmax_t __d1 = _R1::den / __gcd_d1_d2; + static const intmax_t __n2 = _R2::num / __gcd_n1_n2; + static const intmax_t __d2 = _R2::den / __gcd_d1_d2; + static const intmax_t max = -((intmax_t(1) << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1); + + template + struct __mul // __overflow == false + { + static const intmax_t value = _Xp * _Yp; + }; + + template + struct __mul<_Xp, _Yp, true> + { + static const intmax_t value = 1; + }; + + public: + static const bool value = (__n1 <= max / __d2) && (__n2 <= max / __d1); + typedef ratio<__mul<__n1, __d2, !value>::value, + __mul<__n2, __d1, !value>::value> type; }; - template - struct __mul<_Xp, _Yp, true> - { - static const intmax_t value = 1; - }; +public: + typedef _Rep rep; + typedef typename _Period::type period; +private: + rep __rep_; +public: - public: - static const bool value = (__n1 <= max / __d2) && (__n2 <= max / __d1); - typedef ratio<__mul<__n1, __d2, !value>::value, __mul<__n2, __d1, !value>::value> type; - }; + constexpr duration() = default; -public: - typedef _Rep rep; - typedef typename _Period::type period; + template + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit duration(const _Rep2& __r, + __enable_if_t + < + is_convertible<_Rep2, rep>::value && + (treat_as_floating_point::value || + !treat_as_floating_point<_Rep2>::value) + >* = 0) + : __rep_(static_cast(__r)) {} -private: - rep __rep_; + // conversions + template + _LIBCUDACXX_INLINE_VISIBILITY constexpr + duration(const duration<_Rep2, _Period2>& __d, + __enable_if_t + < + 
__no_overflow<_Period2, period>::value && ( + treat_as_floating_point::value || + (__no_overflow<_Period2, period>::type::den == 1 && + !treat_as_floating_point<_Rep2>::value)) + >* = 0) + : __rep_(_CUDA_VSTD::chrono::duration_cast(__d).count()) {} -public: - constexpr duration() = default; - - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit duration( - const _Rep2& __r, - __enable_if_t::value - && (treat_as_floating_point::value || !treat_as_floating_point<_Rep2>::value)>* = 0) - : __rep_(static_cast(__r)) - {} - - // conversions - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr duration( - const duration<_Rep2, _Period2>& __d, - __enable_if_t<__no_overflow<_Period2, period>::value - && (treat_as_floating_point::value - || (__no_overflow<_Period2, period>::type::den == 1 && !treat_as_floating_point<_Rep2>::value))>* = - 0) - : __rep_(_CUDA_VSTD::chrono::duration_cast(__d).count()) - {} - - // observer - - _LIBCUDACXX_INLINE_VISIBILITY constexpr rep count() const - { - return __rep_; - } + // observer - // arithmetic + _LIBCUDACXX_INLINE_VISIBILITY constexpr rep count() const {return __rep_;} - _LIBCUDACXX_INLINE_VISIBILITY constexpr typename common_type::type operator+() const - { - return typename common_type::type(*this); - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr typename common_type::type operator-() const - { - return typename common_type::type(-__rep_); - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator++() - { - ++__rep_; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration operator++(int) - { - return duration(__rep_++); - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator--() - { - --__rep_; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration operator--(int) - { - return duration(__rep_--); - } + // arithmetic - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator+=(const duration& __d) - { - __rep_ += __d.count(); - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator-=(const duration& __d) - { - __rep_ -= __d.count(); - return *this; - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr typename common_type::type operator+() const {return typename common_type::type(*this);} + _LIBCUDACXX_INLINE_VISIBILITY constexpr typename common_type::type operator-() const {return typename common_type::type(-__rep_);} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator++() {++__rep_; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration operator++(int) {return duration(__rep_++);} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator--() {--__rep_; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration operator--(int) {return duration(__rep_--);} - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator*=(const rep& rhs) - { - __rep_ *= rhs; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator/=(const rep& rhs) - { - __rep_ /= rhs; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator%=(const rep& rhs) - { - __rep_ %= rhs; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator%=(const duration& rhs) - { - __rep_ %= rhs.count(); - return *this; - } + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator+=(const duration& __d) {__rep_ += __d.count(); return *this;} + 
_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator-=(const duration& __d) {__rep_ -= __d.count(); return *this;} - // special values + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator*=(const rep& rhs) {__rep_ *= rhs; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator/=(const rep& rhs) {__rep_ /= rhs; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator%=(const rep& rhs) {__rep_ %= rhs; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 duration& operator%=(const duration& rhs) {__rep_ %= rhs.count(); return *this;} - _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration zero() noexcept - { - return duration(duration_values::zero()); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration min() noexcept - { - return duration(duration_values::min()); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration max() noexcept - { - return duration(duration_values::max()); - } + // special values + + _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration zero() noexcept {return duration(duration_values::zero());} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration min() noexcept {return duration(duration_values::min());} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr duration max() noexcept {return duration(duration_values::max());} }; -typedef duration nanoseconds; -typedef duration microseconds; -typedef duration milliseconds; -typedef duration seconds; -typedef duration> minutes; -typedef duration> hours; +typedef duration nanoseconds; +typedef duration microseconds; +typedef duration milliseconds; +typedef duration seconds; +typedef duration< long, ratio< 60> > minutes; +typedef duration< long, ratio<3600> > hours; #if _CCCL_STD_VER > 2011 -typedef duration, hours::period>> days; -typedef duration, days::period>> weeks; -typedef duration, days::period>> years; -typedef duration>> months; +typedef duration< int, ratio_multiply, hours::period>> days; +typedef duration< int, ratio_multiply, days::period>> weeks; +typedef duration< int, ratio_multiply, days::period>> years; +typedef duration< int, ratio_divide>> months; #endif // _CCCL_STD_VER > 2011 // Duration == template struct __duration_eq { - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const - { - typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct; - return _Ct(__lhs).count() == _Ct(__rhs).count(); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const + { + typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct; + return _Ct(__lhs).count() == _Ct(__rhs).count(); + } }; template struct __duration_eq<_LhsDuration, _LhsDuration> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const - { - return __lhs.count() == __rhs.count(); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const + {return __lhs.count() == __rhs.count();} }; template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr bool +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool operator==(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return __duration_eq, duration<_Rep2, _Period2>>()(__lhs, __rhs); + return __duration_eq, duration<_Rep2, _Period2> >()(__lhs, __rhs); } // Duration != template -inline 
_LIBCUDACXX_INLINE_VISIBILITY constexpr bool +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool operator!=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return !(__lhs == __rhs); + return !(__lhs == __rhs); } // Duration < @@ -1249,142 +1203,169 @@ operator!=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period template struct __duration_lt { - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const - { - typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct; - return _Ct(__lhs).count() < _Ct(__rhs).count(); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + bool operator()(const _LhsDuration& __lhs, const _RhsDuration& __rhs) const + { + typedef typename common_type<_LhsDuration, _RhsDuration>::type _Ct; + return _Ct(__lhs).count() < _Ct(__rhs).count(); + } }; template struct __duration_lt<_LhsDuration, _LhsDuration> { - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const - { - return __lhs.count() < __rhs.count(); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + bool operator()(const _LhsDuration& __lhs, const _LhsDuration& __rhs) const + {return __lhs.count() < __rhs.count();} }; template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr bool -operator<(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool +operator< (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return __duration_lt, duration<_Rep2, _Period2>>()(__lhs, __rhs); + return __duration_lt, duration<_Rep2, _Period2> >()(__lhs, __rhs); } // Duration > template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr bool -operator>(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool +operator> (const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return __rhs < __lhs; + return __rhs < __lhs; } // Duration <= template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr bool +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool operator<=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return !(__rhs < __lhs); + return !(__rhs < __lhs); } // Duration >= template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr bool +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr bool operator>=(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - return !(__lhs < __rhs); + return !(__lhs < __rhs); } // Duration + template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr - typename common_type, duration<_Rep2, _Period2>>::type - operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +typename common_type, duration<_Rep2, _Period2> >::type +operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef typename common_type, duration<_Rep2, _Period2>>::type _Cd; - return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count()); + typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; + return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count()); } // Duration - template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr - typename common_type, duration<_Rep2, _Period2>>::type - operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) 
+inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +typename common_type, duration<_Rep2, _Period2> >::type +operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef typename common_type, duration<_Rep2, _Period2>>::type _Cd; - return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count()); + typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; + return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count()); } // Duration * template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, - duration::type, _Period>> +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +__enable_if_t +< + is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + duration::type, _Period> +> operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { - typedef typename common_type<_Rep1, _Rep2>::type _Cr; - typedef duration<_Cr, _Period> _Cd; - return _Cd(_Cd(__d).count() * static_cast<_Cr>(__s)); + typedef typename common_type<_Rep1, _Rep2>::type _Cr; + typedef duration<_Cr, _Period> _Cd; + return _Cd(_Cd(__d).count() * static_cast<_Cr>(__s)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - is_convertible<_Rep1, typename common_type<_Rep1, _Rep2>::type>::value, - duration::type, _Period>> +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +__enable_if_t +< + is_convertible<_Rep1, typename common_type<_Rep1, _Rep2>::type>::value, + duration::type, _Period> +> operator*(const _Rep1& __s, const duration<_Rep2, _Period>& __d) { - return __d * __s; + return __d * __s; } // Duration / template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - !__is_duration<_Rep2>::value && is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, - duration::type, _Period>> +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +__enable_if_t +< + !__is_duration<_Rep2>::value && + is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + duration::type, _Period> +> operator/(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { - typedef typename common_type<_Rep1, _Rep2>::type _Cr; - typedef duration<_Cr, _Period> _Cd; - return _Cd(_Cd(__d).count() / static_cast<_Cr>(__s)); + typedef typename common_type<_Rep1, _Rep2>::type _Cr; + typedef duration<_Cr, _Period> _Cd; + return _Cd(_Cd(__d).count() / static_cast<_Cr>(__s)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr typename common_type<_Rep1, _Rep2>::type +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +typename common_type<_Rep1, _Rep2>::type operator/(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef typename common_type, duration<_Rep2, _Period2>>::type _Ct; - return _Ct(__lhs).count() / _Ct(__rhs).count(); + typedef typename common_type, duration<_Rep2, _Period2> >::type _Ct; + return _Ct(__lhs).count() / _Ct(__rhs).count(); } // Duration % template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - !__is_duration<_Rep2>::value && is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, - duration::type, _Period>> +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +__enable_if_t +< + !__is_duration<_Rep2>::value && + is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + duration::type, _Period> +> operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { - typedef typename common_type<_Rep1, _Rep2>::type _Cr; - typedef duration<_Cr, _Period> _Cd; - return 
_Cd(_Cd(__d).count() % static_cast<_Cr>(__s)); + typedef typename common_type<_Rep1, _Rep2>::type _Cr; + typedef duration<_Cr, _Period> _Cd; + return _Cd(_Cd(__d).count() % static_cast<_Cr>(__s)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr - typename common_type, duration<_Rep2, _Period2>>::type - operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr +typename common_type, duration<_Rep2, _Period2> >::type +operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef typename common_type<_Rep1, _Rep2>::type _Cr; - typedef typename common_type, duration<_Rep2, _Period2>>::type _Cd; - return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count())); + typedef typename common_type<_Rep1, _Rep2>::type _Cr; + typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; + return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count())); } ////////////////////////////////////////////////////////// @@ -1394,210 +1375,214 @@ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr template class _LIBCUDACXX_TEMPLATE_VIS time_point { - static_assert(__is_duration<_Duration>::value, - "Second template parameter of time_point must be a std::chrono::duration"); - + static_assert(__is_duration<_Duration>::value, + "Second template parameter of time_point must be a std::chrono::duration"); public: - typedef _Clock clock; - typedef _Duration duration; - typedef typename duration::rep rep; - typedef typename duration::period period; - + typedef _Clock clock; + typedef _Duration duration; + typedef typename duration::rep rep; + typedef typename duration::period period; private: - duration __d_; + duration __d_; public: - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 time_point() - : __d_(duration::zero()) - {} - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit time_point(const duration& __d) - : __d_(__d) - {} - - // conversions - template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - time_point(const time_point& t, __enable_if_t::value>* = 0) - : __d_(t.time_since_epoch()) - {} - - // observer - - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 duration time_since_epoch() const - { - return __d_; - } + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 time_point() : __d_(duration::zero()) {} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit time_point(const duration& __d) : __d_(__d) {} - // arithmetic + // conversions + template + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + time_point(const time_point& t, + __enable_if_t + < + is_convertible<_Duration2, duration>::value + >* = 0) + : __d_(t.time_since_epoch()) {} - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 time_point& operator+=(const duration& __d) - { - __d_ += __d; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 time_point& operator-=(const duration& __d) - { - __d_ -= __d; - return *this; - } + // observer - // special values + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 duration time_since_epoch() const {return __d_;} - _LIBCUDACXX_INLINE_VISIBILITY static constexpr time_point min() noexcept - { - return time_point(duration::min()); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr time_point max() noexcept - { - return time_point(duration::max()); - } + // arithmetic + + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 time_point& operator+=(const duration& __d) 
{__d_ += __d; return *this;} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 time_point& operator-=(const duration& __d) {__d_ -= __d; return *this;} + + // special values + + _LIBCUDACXX_INLINE_VISIBILITY static constexpr time_point min() noexcept {return time_point(duration::min());} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr time_point max() noexcept {return time_point(duration::max());} }; -} // namespace chrono +} // chrono template -struct _LIBCUDACXX_TEMPLATE_VIS - common_type, chrono::time_point<_Clock, _Duration2>> +struct _LIBCUDACXX_TEMPLATE_VIS common_type, + chrono::time_point<_Clock, _Duration2> > { - typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type; + typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type; }; -namespace chrono -{ +namespace chrono { template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 time_point<_Clock, _ToDuration> +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +time_point<_Clock, _ToDuration> time_point_cast(const time_point<_Clock, _Duration>& __t) { - return time_point<_Clock, _ToDuration>(_CUDA_VSTD::chrono::duration_cast<_ToDuration>(__t.time_since_epoch())); + return time_point<_Clock, _ToDuration>(_CUDA_VSTD::chrono::duration_cast<_ToDuration>(__t.time_since_epoch())); } #if _CCCL_STD_VER > 2011 template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t<__is_duration<_ToDuration>::value, - time_point<_Clock, _ToDuration>> +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t +< + __is_duration<_ToDuration>::value, + time_point<_Clock, _ToDuration> +> floor(const time_point<_Clock, _Duration>& __t) { - return time_point<_Clock, _ToDuration>{floor<_ToDuration>(__t.time_since_epoch())}; + return time_point<_Clock, _ToDuration>{floor<_ToDuration>(__t.time_since_epoch())}; } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t<__is_duration<_ToDuration>::value, - time_point<_Clock, _ToDuration>> +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t +< + __is_duration<_ToDuration>::value, + time_point<_Clock, _ToDuration> +> ceil(const time_point<_Clock, _Duration>& __t) { - return time_point<_Clock, _ToDuration>{ceil<_ToDuration>(__t.time_since_epoch())}; + return time_point<_Clock, _ToDuration>{ceil<_ToDuration>(__t.time_since_epoch())}; } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t<__is_duration<_ToDuration>::value, - time_point<_Clock, _ToDuration>> +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t +< + __is_duration<_ToDuration>::value, + time_point<_Clock, _ToDuration> +> round(const time_point<_Clock, _Duration>& __t) { - return time_point<_Clock, _ToDuration>{round<_ToDuration>(__t.time_since_epoch())}; + return time_point<_Clock, _ToDuration>{round<_ToDuration>(__t.time_since_epoch())}; } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::is_signed, duration<_Rep, _Period>> +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t +< + numeric_limits<_Rep>::is_signed, + duration<_Rep, _Period> +> abs(duration<_Rep, _Period> __d) { - return __d >= __d.zero() ? +__d : -__d; + return __d >= __d.zero() ? 
+__d : -__d; } #endif // _CCCL_STD_VER > 2011 // time_point == template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator==(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return __lhs.time_since_epoch() == __rhs.time_since_epoch(); + return __lhs.time_since_epoch() == __rhs.time_since_epoch(); } // time_point != template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator!=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return !(__lhs == __rhs); + return !(__lhs == __rhs); } // time_point < template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator<(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return __lhs.time_since_epoch() < __rhs.time_since_epoch(); + return __lhs.time_since_epoch() < __rhs.time_since_epoch(); } // time_point > template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator>(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return __rhs < __lhs; + return __rhs < __lhs; } // time_point <= template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator<=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return !(__rhs < __lhs); + return !(__rhs < __lhs); } // time_point >= template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +bool operator>=(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return !(__lhs < __rhs); + return !(__lhs < __rhs); } // time_point operator+(time_point x, duration y); template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2>>::type> - operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2>>::type> _Tr; - return _Tr(__lhs.time_since_epoch() + __rhs); + typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Tr; + return _Tr (__lhs.time_since_epoch() + __rhs); } // time_point operator+(duration x, time_point y); template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - time_point<_Clock, typename common_type, _Duration2>::type> - operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) +time_point<_Clock, typename common_type, _Duration2>::type> +operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return __rhs + __lhs; + return __rhs + __lhs; } // time_point operator-(time_point x, duration y); template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, 
_Period2>>::type> - operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) +time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { - typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2>>::type> _Ret; - return _Ret(__lhs.time_since_epoch() - __rhs); + typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Ret; + return _Ret(__lhs.time_since_epoch() -__rhs); } // duration operator-(time_point x, time_point y); template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 typename common_type<_Duration1, _Duration2>::type +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 +typename common_type<_Duration1, _Duration2>::type operator-(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { - return __lhs.time_since_epoch() - __rhs.time_since_epoch(); + return __lhs.time_since_epoch() - __rhs.time_since_epoch(); } ////////////////////////////////////////////////////////// @@ -1606,28 +1591,31 @@ operator-(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock, class _LIBCUDACXX_TYPE_VIS system_clock { public: - typedef _LIBCUDACXX_SYS_CLOCK_DURATION duration; - typedef duration::rep rep; - typedef duration::period period; - typedef chrono::time_point time_point; - static _CCCL_CONSTEXPR_CXX14 const bool is_steady = false; + typedef _LIBCUDACXX_SYS_CLOCK_DURATION duration; + typedef duration::rep rep; + typedef duration::period period; + typedef chrono::time_point time_point; + static _CCCL_CONSTEXPR_CXX14 const bool is_steady = false; - _CCCL_HOST_DEVICE static time_point now() noexcept; - _CCCL_HOST_DEVICE static time_t to_time_t(const time_point& __t) noexcept; - _CCCL_HOST_DEVICE static time_point from_time_t(time_t __t) noexcept; + _CCCL_HOST_DEVICE + static time_point now() noexcept; + _CCCL_HOST_DEVICE + static time_t to_time_t (const time_point& __t) noexcept; + _CCCL_HOST_DEVICE + static time_point from_time_t(time_t __t) noexcept; }; #ifndef _LIBCUDACXX_HAS_NO_MONOTONIC_CLOCK class _LIBCUDACXX_TYPE_VIS steady_clock { public: - typedef nanoseconds duration; - typedef duration::rep rep; - typedef duration::period period; - typedef chrono::time_point time_point; - static _CCCL_CONSTEXPR_CXX14 const bool is_steady = true; + typedef nanoseconds duration; + typedef duration::rep rep; + typedef duration::period period; + typedef chrono::time_point time_point; + static _CCCL_CONSTEXPR_CXX14 const bool is_steady = true; - static time_point now() noexcept; + static time_point now() noexcept; }; typedef steady_clock high_resolution_clock; @@ -1640,594 +1628,489 @@ typedef system_clock high_resolution_clock; // [time.clock.file], type file_clock using file_clock = _CUDA_VSTD_FS::_FilesystemClock; -template +template using file_time = time_point; + template using sys_time = time_point; using sys_seconds = sys_time; using sys_days = sys_time; -struct local_t -{}; -template -using local_time = time_point; +struct local_t {}; +template +using local_time = time_point; using local_seconds = local_time; using local_days = local_time; -struct last_spec -{ - explicit last_spec() = default; -}; +struct last_spec { explicit last_spec() = default; }; -class day -{ +class day { private: - unsigned char __d; - + unsigned char __d; public: - day() = default; - _LIBCUDACXX_INLINE_VISIBILITY explicit inline 
constexpr day(unsigned __val) noexcept - : __d(static_cast(__val)) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr day& operator++() noexcept - { - ++__d; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr day operator++(int) noexcept - { - day __tmp = *this; - ++(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr day& operator--() noexcept - { - --__d; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr day operator--(int) noexcept - { - day __tmp = *this; - --(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr day& operator+=(const days& __dd) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr day& operator-=(const days& __dd) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY explicit inline constexpr operator unsigned() const noexcept - { - return __d; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __d >= 1 && __d <= 31; - } -}; + day() = default; + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr day(unsigned __val) noexcept : __d(static_cast(__val)) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr day& operator++() noexcept { ++__d; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr day operator++(int) noexcept { day __tmp = *this; ++(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr day& operator--() noexcept { --__d; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr day operator--(int) noexcept { day __tmp = *this; --(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr day& operator+=(const days& __dd) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr day& operator-=(const days& __dd) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr operator unsigned() const noexcept { return __d; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __d >= 1 && __d <= 31; } + }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const day& __lhs, const day& __rhs) noexcept -{ - return static_cast(__lhs) == static_cast(__rhs); -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const day& __lhs, const day& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const day& __lhs, const day& __rhs) noexcept +{ return static_cast(__lhs) == static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const day& __lhs, const day& __rhs) noexcept -{ - return static_cast(__lhs) < static_cast(__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const day& __lhs, const day& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const day& __lhs, const day& __rhs) noexcept -{ - return __rhs < __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const day& __lhs, const day& __rhs) noexcept +{ return static_cast(__lhs) < static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const day& __lhs, const day& __rhs) noexcept -{ - return !(__rhs < __lhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const day& __lhs, const day& __rhs) noexcept +{ return __rhs < __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const day& __lhs, const day& __rhs) noexcept -{ - return !(__lhs < __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const day& 
__lhs, const day& __rhs) noexcept +{ return !(__rhs < __lhs);} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr day operator+(const day& __lhs, const days& __rhs) noexcept -{ - return day(static_cast(__lhs) + static_cast(__rhs.count())); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const day& __lhs, const day& __rhs) noexcept +{ return !(__lhs < __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr day operator+(const days& __lhs, const day& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +day operator+ (const day& __lhs, const days& __rhs) noexcept +{ return day(static_cast(__lhs) + static_cast(__rhs.count())); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr day operator-(const day& __lhs, const days& __rhs) noexcept -{ - return __lhs + -__rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +day operator+ (const days& __lhs, const day& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr days operator-(const day& __lhs, const day& __rhs) noexcept -{ - return days(static_cast(static_cast(__lhs)) - static_cast(static_cast(__rhs))); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +day operator- (const day& __lhs, const days& __rhs) noexcept +{ return __lhs + -__rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr day& day::operator+=(const days& __dd) noexcept -{ - *this = *this + __dd; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +days operator-(const day& __lhs, const day& __rhs) noexcept +{ return days(static_cast(static_cast(__lhs)) - + static_cast(static_cast(__rhs))); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr day& day::operator-=(const days& __dd) noexcept -{ - *this = *this - __dd; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr day& day::operator+=(const days& __dd) noexcept +{ *this = *this + __dd; return *this; } -class month -{ -private: - unsigned char __m; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr day& day::operator-=(const days& __dd) noexcept +{ *this = *this - __dd; return *this; } + +class month { +private: + unsigned char __m; public: - month() = default; - _LIBCUDACXX_INLINE_VISIBILITY explicit inline constexpr month(unsigned __val) noexcept - : __m(static_cast(__val)) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr month& operator++() noexcept - { - ++__m; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr month operator++(int) noexcept - { - month __tmp = *this; - ++(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr month& operator--() noexcept - { - --__m; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr month operator--(int) noexcept - { - month __tmp = *this; - --(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr month& operator+=(const months& __m1) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr month& operator-=(const months& __m1) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY explicit inline constexpr operator unsigned() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __m >= 1 && __m <= 12; - } + month() = default; + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr month(unsigned __val) noexcept : __m(static_cast(__val)) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr month& operator++() noexcept { ++__m; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr month operator++(int) noexcept { 
month __tmp = *this; ++(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr month& operator--() noexcept { --__m; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr month operator--(int) noexcept { month __tmp = *this; --(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr month& operator+=(const months& __m1) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr month& operator-=(const months& __m1) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr operator unsigned() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __m >= 1 && __m <= 12; } }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const month& __lhs, const month& __rhs) noexcept -{ - return static_cast(__lhs) == static_cast(__rhs); -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const month& __lhs, const month& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const month& __lhs, const month& __rhs) noexcept +{ return static_cast(__lhs) == static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const month& __lhs, const month& __rhs) noexcept -{ - return static_cast(__lhs) < static_cast(__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const month& __lhs, const month& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const month& __lhs, const month& __rhs) noexcept -{ - return __rhs < __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const month& __lhs, const month& __rhs) noexcept +{ return static_cast(__lhs) < static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const month& __lhs, const month& __rhs) noexcept -{ - return !(__rhs < __lhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const month& __lhs, const month& __rhs) noexcept +{ return __rhs < __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const month& __lhs, const month& __rhs) noexcept -{ - return !(__lhs < __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const month& __lhs, const month& __rhs) noexcept +{ return !(__rhs < __lhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month operator+(const month& __lhs, const months& __rhs) noexcept -{ - auto const __mu = static_cast(static_cast(__lhs)) + (__rhs.count() - 1); - auto const __yr = (__mu >= 0 ? __mu : __mu - 11) / 12; - return month{static_cast(__mu - __yr * 12 + 1)}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const month& __lhs, const month& __rhs) noexcept +{ return !(__lhs < __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month operator+(const months& __lhs, const month& __rhs) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month operator+ (const month& __lhs, const months& __rhs) noexcept { - return __rhs + __lhs; + auto const __mu = static_cast(static_cast(__lhs)) + (__rhs.count() - 1); + auto const __yr = (__mu >= 0 ? 
__mu : __mu - 11) / 12; + return month{static_cast(__mu - __yr * 12 + 1)}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month operator-(const month& __lhs, const months& __rhs) noexcept -{ - return __lhs + -__rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month operator+ (const months& __lhs, const month& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr months operator-(const month& __lhs, const month& __rhs) noexcept -{ - auto const __dm = static_cast(__lhs) - static_cast(__rhs); - return months(__dm <= 11 ? __dm : __dm + 12); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month operator- (const month& __lhs, const months& __rhs) noexcept +{ return __lhs + -__rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month& month::operator+=(const months& __dm) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +months operator-(const month& __lhs, const month& __rhs) noexcept { - *this = *this + __dm; - return *this; + auto const __dm = static_cast(__lhs) - static_cast(__rhs); + return months(__dm <= 11 ? __dm : __dm + 12); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month& month::operator-=(const months& __dm) noexcept -{ - *this = *this - __dm; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr month& month::operator+=(const months& __dm) noexcept +{ *this = *this + __dm; return *this; } -class year -{ -private: - short __y; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr month& month::operator-=(const months& __dm) noexcept +{ *this = *this - __dm; return *this; } + +class year { +private: + short __y; public: - year() = default; - _LIBCUDACXX_INLINE_VISIBILITY explicit inline constexpr year(int __val) noexcept - : __y(static_cast(__val)) - {} + year() = default; + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr year(int __val) noexcept : __y(static_cast(__val)) {} + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year& operator++() noexcept { ++__y; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year operator++(int) noexcept { year __tmp = *this; ++(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year& operator--() noexcept { --__y; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year operator--(int) noexcept { year __tmp = *this; --(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year& operator+=(const years& __dy) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year& operator-=(const years& __dy) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year operator+() const noexcept { return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year operator-() const noexcept { return year{-__y}; } + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool is_leap() const noexcept { return __y % 4 == 0 && (__y % 100 != 0 || __y % 400 == 0); } + _LIBCUDACXX_INLINE_VISIBILITY + explicit inline constexpr operator int() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool ok() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + static inline constexpr year min() noexcept { return year{-32767}; } + _LIBCUDACXX_INLINE_VISIBILITY + static inline constexpr year max() noexcept { return year{ 32767}; } +}; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year& operator++() noexcept - { - ++__y; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator++(int) noexcept - { - year __tmp = *this; - ++(*this); - return __tmp; - } - 
_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year& operator--() noexcept - { - --__y; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator--(int) noexcept - { - year __tmp = *this; - --(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr year& operator+=(const years& __dy) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year& operator-=(const years& __dy) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator+() const noexcept - { - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator-() const noexcept - { - return year{-__y}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool is_leap() const noexcept - { - return __y % 4 == 0 && (__y % 100 != 0 || __y % 400 == 0); - } - _LIBCUDACXX_INLINE_VISIBILITY explicit inline constexpr operator int() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool ok() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY static inline constexpr year min() noexcept - { - return year{-32767}; - } - _LIBCUDACXX_INLINE_VISIBILITY static inline constexpr year max() noexcept - { - return year{32767}; - } -}; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year& __lhs, const year& __rhs) noexcept +{ return static_cast(__lhs) == static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const year& __lhs, const year& __rhs) noexcept -{ - return static_cast(__lhs) == static_cast(__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year& __lhs, const year& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const year& __lhs, const year& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const year& __lhs, const year& __rhs) noexcept +{ return static_cast(__lhs) < static_cast(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const year& __lhs, const year& __rhs) noexcept -{ - return static_cast(__lhs) < static_cast(__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const year& __lhs, const year& __rhs) noexcept +{ return __rhs < __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const year& __lhs, const year& __rhs) noexcept -{ - return __rhs < __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const year& __lhs, const year& __rhs) noexcept +{ return !(__rhs < __lhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const year& __lhs, const year& __rhs) noexcept -{ - return !(__rhs < __lhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const year& __lhs, const year& __rhs) noexcept +{ return !(__lhs < __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const year& __lhs, const year& __rhs) noexcept -{ - return !(__lhs < __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year operator+ (const year& __lhs, const years& __rhs) noexcept +{ return year(static_cast(__lhs) + __rhs.count()); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator+(const year& __lhs, const years& __rhs) noexcept -{ - return year(static_cast(__lhs) + __rhs.count()); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year operator+ (const years& __lhs, const year& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator+(const years& __lhs, 
const year& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year operator- (const year& __lhs, const years& __rhs) noexcept +{ return __lhs + -__rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year operator-(const year& __lhs, const years& __rhs) noexcept -{ - return __lhs + -__rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +years operator-(const year& __lhs, const year& __rhs) noexcept +{ return years{static_cast(__lhs) - static_cast(__rhs)}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr years operator-(const year& __lhs, const year& __rhs) noexcept -{ - return years{static_cast(__lhs) - static_cast(__rhs)}; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year& year::operator+=(const years& __dy) noexcept -{ - *this = *this + __dy; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year& year::operator+=(const years& __dy) noexcept +{ *this = *this + __dy; return *this; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year& year::operator-=(const years& __dy) noexcept -{ - *this = *this - __dy; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year& year::operator-=(const years& __dy) noexcept +{ *this = *this - __dy; return *this; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool year::ok() const noexcept -{ - return static_cast(min()) <= __y && __y <= static_cast(max()); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr bool year::ok() const noexcept +{ return static_cast(min()) <= __y && __y <= static_cast(max()); } class weekday_indexed; class weekday_last; -class weekday -{ +class weekday { private: - unsigned char __wd; - + unsigned char __wd; public: weekday() = default; - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr weekday(unsigned __val) noexcept - : __wd(static_cast(__val == 7 ? 0 : __val)) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday(const sys_days& __sysd) noexcept - : __wd(__weekday_from_days(__sysd.time_since_epoch().count())) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr weekday(const local_days& __locd) noexcept - : __wd(__weekday_from_days(__locd.time_since_epoch().count())) - {} - - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday& operator++() noexcept - { - __wd = (__wd == 6 ? 0 : __wd + 1); - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday operator++(int) noexcept - { - weekday __tmp = *this; - ++(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday& operator--() noexcept - { - __wd = (__wd == 0 ? 6 : __wd - 1); - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday operator--(int) noexcept - { - weekday __tmp = *this; - --(*this); - return __tmp; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr weekday& operator+=(const days& __dd) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr weekday& operator-=(const days& __dd) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr unsigned c_encoding() const noexcept - { - return __wd; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr unsigned iso_encoding() const noexcept - { - return __wd == 0u ? 
7 : __wd; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __wd <= 6; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr weekday_indexed operator[](unsigned __index) const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr weekday_last operator[](last_spec) const noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY static constexpr unsigned char __weekday_from_days(int __days) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr weekday(unsigned __val) noexcept : __wd(static_cast(__val == 7 ? 0 : __val)) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday(const sys_days& __sysd) noexcept + : __wd(__weekday_from_days(__sysd.time_since_epoch().count())) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr weekday(const local_days& __locd) noexcept + : __wd(__weekday_from_days(__locd.time_since_epoch().count())) {} + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday& operator++() noexcept { __wd = (__wd == 6 ? 0 : __wd + 1); return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday operator++(int) noexcept { weekday __tmp = *this; ++(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday& operator--() noexcept { __wd = (__wd == 0 ? 6 : __wd - 1); return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday operator--(int) noexcept { weekday __tmp = *this; --(*this); return __tmp; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr weekday& operator+=(const days& __dd) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr weekday& operator-=(const days& __dd) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr unsigned c_encoding() const noexcept { return __wd; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr unsigned iso_encoding() const noexcept { return __wd == 0u ? 7 : __wd; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __wd <= 6; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr weekday_indexed operator[](unsigned __index) const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr weekday_last operator[](last_spec) const noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr unsigned char __weekday_from_days(int __days) noexcept; }; + // https://howardhinnant.github.io/date_algorithms.html#weekday_from_days -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr unsigned char weekday::__weekday_from_days(int __days) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +unsigned char weekday::__weekday_from_days(int __days) noexcept { - return static_cast(static_cast(__days >= -4 ? (__days + 4) % 7 : (__days + 5) % 7 + 6)); + return static_cast( + static_cast(__days >= -4 ? 
(__days+4) % 7 : (__days+5) % 7 + 6) + ); }
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return __lhs.c_encoding() == __rhs.c_encoding(); -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const weekday& __lhs, const weekday& __rhs) noexcept +{ return __lhs.c_encoding() == __rhs.c_encoding(); }
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return !(__lhs == __rhs); -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const weekday& __lhs, const weekday& __rhs) noexcept +{ return !(__lhs == __rhs); }
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return __lhs.c_encoding() < __rhs.c_encoding(); -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const weekday& __lhs, const weekday& __rhs) noexcept +{ return __lhs.c_encoding() < __rhs.c_encoding(); }
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return __rhs < __lhs; -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const weekday& __lhs, const weekday& __rhs) noexcept +{ return __rhs < __lhs; }
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return !(__rhs < __lhs); -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const weekday& __lhs, const weekday& __rhs) noexcept +{ return !(__rhs < __lhs);}
-_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept -{ - return !(__lhs < __rhs); -}
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept +{ return !(__lhs < __rhs); }
-_LIBCUDACXX_INLINE_VISIBILITY constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept
+_LIBCUDACXX_INLINE_VISIBILITY +constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept { - auto const __mu = static_cast<long long>(__lhs.c_encoding()) + __rhs.count(); - auto const __yr = (__mu >= 0 ? __mu : __mu - 6) / 7; - return weekday{static_cast<unsigned>(__mu - __yr * 7)}; + auto const __mu = static_cast<long long>(__lhs.c_encoding()) + __rhs.count(); + auto const __yr = (__mu >= 0 ? __mu : __mu - 6) / 7; + return weekday{static_cast<unsigned>(__mu - __yr * 7)}; }
-_LIBCUDACXX_INLINE_VISIBILITY constexpr weekday operator+(const days& __lhs, const weekday& __rhs) noexcept -{ - return __rhs + __lhs; -}
+_LIBCUDACXX_INLINE_VISIBILITY +constexpr weekday operator+(const days& __lhs, const weekday& __rhs) noexcept +{ return __rhs + __lhs; }
-_LIBCUDACXX_INLINE_VISIBILITY constexpr weekday operator-(const weekday& __lhs, const days& __rhs) noexcept -{ - return __lhs + -__rhs; -}
+_LIBCUDACXX_INLINE_VISIBILITY +constexpr weekday operator-(const weekday& __lhs, const days& __rhs) noexcept +{ return __lhs + -__rhs; }
-_LIBCUDACXX_INLINE_VISIBILITY constexpr days operator-(const weekday& __lhs, const weekday& __rhs) noexcept
+_LIBCUDACXX_INLINE_VISIBILITY +constexpr days operator-(const weekday& __lhs, const weekday& __rhs) noexcept { - // casts are required to work around nvcc bug 3145483 - const int __wdu = static_cast<int>(__lhs.c_encoding()) - static_cast<int>(__rhs.c_encoding()); - const int __wk = (__wdu >= 0 ?
__wdu : __wdu - 6) / 7; - return days{__wdu - __wk * 7}; + // casts are required to work around nvcc bug 3145483 + const int __wdu = static_cast(__lhs.c_encoding()) - static_cast(__rhs.c_encoding()); + const int __wk = (__wdu >= 0 ? __wdu : __wdu-6) / 7; + return days{__wdu - __wk * 7}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday& weekday::operator+=(const days& __dd) noexcept -{ - *this = *this + __dd; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr weekday& weekday::operator+=(const days& __dd) noexcept +{ *this = *this + __dd; return *this; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday& weekday::operator-=(const days& __dd) noexcept -{ - *this = *this - __dd; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr weekday& weekday::operator-=(const days& __dd) noexcept +{ *this = *this - __dd; return *this; } -class weekday_indexed -{ -private: - _CUDA_VSTD::chrono::weekday __wd; - unsigned char __idx; +class weekday_indexed { +private: + _CUDA_VSTD::chrono::weekday __wd; + unsigned char __idx; public: - weekday_indexed() = default; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday_indexed( - const _CUDA_VSTD::chrono::weekday& __wdval, unsigned __idxval) noexcept - : __wd{__wdval} - , __idx(static_cast(__idxval)) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr _CUDA_VSTD::chrono::weekday weekday() const noexcept - { - return __wd; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr unsigned index() const noexcept - { - return __idx; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __wd.ok() && __idx >= 1 && __idx <= 5; - } + weekday_indexed() = default; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr weekday_indexed(const _CUDA_VSTD::chrono::weekday& __wdval, unsigned __idxval) noexcept + : __wd{__wdval}, __idx(static_cast(__idxval)) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr _CUDA_VSTD::chrono::weekday weekday() const noexcept { return __wd; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr unsigned index() const noexcept { return __idx; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __wd.ok() && __idx >= 1 && __idx <= 5; } }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept -{ - return __lhs.weekday() == __rhs.weekday() && __lhs.index() == __rhs.index(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept +{ return __lhs.weekday() == __rhs.weekday() && __lhs.index() == __rhs.index(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const weekday_indexed& __lhs, const weekday_indexed& __rhs) noexcept +{ return !(__lhs == __rhs); } -class weekday_last -{ -private: - _CUDA_VSTD::chrono::weekday __wd; +class weekday_last { +private: + _CUDA_VSTD::chrono::weekday __wd; public: - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr weekday_last(const _CUDA_VSTD::chrono::weekday& __val) noexcept - : __wd{__val} - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr _CUDA_VSTD::chrono::weekday weekday() const noexcept - { - return __wd; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool ok() const noexcept - { - return __wd.ok(); - } + _LIBCUDACXX_INLINE_VISIBILITY + explicit constexpr 
weekday_last(const _CUDA_VSTD::chrono::weekday& __val) noexcept + : __wd{__val} {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr _CUDA_VSTD::chrono::weekday weekday() const noexcept { return __wd; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool ok() const noexcept { return __wd.ok(); } }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const weekday_last& __lhs, const weekday_last& __rhs) noexcept -{ - return __lhs.weekday() == __rhs.weekday(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const weekday_last& __lhs, const weekday_last& __rhs) noexcept +{ return __lhs.weekday() == __rhs.weekday(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const weekday_last& __lhs, const weekday_last& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const weekday_last& __lhs, const weekday_last& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday_indexed weekday::operator[](unsigned __index) const noexcept -{ - return weekday_indexed{*this, __index}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +weekday_indexed weekday::operator[](unsigned __index) const noexcept { return weekday_indexed{*this, __index}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +weekday_last weekday::operator[](last_spec) const noexcept { return weekday_last{*this}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr weekday_last weekday::operator[](last_spec) const noexcept -{ - return weekday_last{*this}; -} _LIBCUDACXX_INLINE_VAR constexpr last_spec last{}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Sunday{0}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Monday{1}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Tuesday{2}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Wednesday{3}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Thursday{4}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Friday{5}; -_LIBCUDACXX_INLINE_VAR constexpr weekday Saturday{6}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Sunday{0}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Monday{1}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Tuesday{2}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Wednesday{3}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Thursday{4}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Friday{5}; +_LIBCUDACXX_INLINE_VAR constexpr weekday Saturday{6}; _LIBCUDACXX_INLINE_VAR constexpr month January{1}; _LIBCUDACXX_INLINE_VAR constexpr month February{2}; @@ -2242,1363 +2125,1085 @@ _LIBCUDACXX_INLINE_VAR constexpr month October{10}; _LIBCUDACXX_INLINE_VAR constexpr month November{11}; _LIBCUDACXX_INLINE_VAR constexpr month December{12}; -class month_day -{ -private: - chrono::month __m; - chrono::day __d; +class month_day { +private: + chrono::month __m; + chrono::day __d; public: - month_day() = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr month_day(const chrono::month& __mval, const chrono::day& __dval) noexcept - : __m{__mval} - , __d{__dval} - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::day day() const noexcept - { - return __d; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool ok() const noexcept; + month_day() = default; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr month_day(const chrono::month& __mval, const chrono::day& __dval) noexcept + : __m{__mval}, __d{__dval} {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { 
return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::day day() const noexcept { return __d; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool ok() const noexcept; }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool month_day::ok() const noexcept -{ - if (!__m.ok()) - { - return false; - } - const unsigned __dval = static_cast(__d); - if (__dval < 1 || __dval > 31) - { - return false; - } - if (__dval <= 29) - { +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool month_day::ok() const noexcept +{ + if (!__m.ok()) return false; + const unsigned __dval = static_cast(__d); + if (__dval < 1 || __dval > 31) return false; + if (__dval <= 29) return true; +// Now we've got either 30 or 31 + const unsigned __mval = static_cast(__m); + if (__mval == 2) return false; + if (__mval == 4 || __mval == 6 || __mval == 9 || __mval == 11) + return __dval == 30; return true; - } - // Now we've got either 30 or 31 - const unsigned __mval = static_cast(__m); - if (__mval == 2) - { - return false; - } - if (__mval == 4 || __mval == 6 || __mval == 9 || __mval == 11) - { - return __dval == 30; - } - return true; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return !(__lhs == __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day operator/(const month& __lhs, const day& __rhs) noexcept -{ - return month_day{__lhs, __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr month_day operator/(const day& __lhs, const month& __rhs) noexcept -{ - return __rhs / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const month_day& __lhs, const month_day& __rhs) noexcept +{ return __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day operator/(const month& __lhs, int __rhs) noexcept -{ - return __lhs / day(__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const month_day& __lhs, const month_day& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr month_day operator/(int __lhs, const day& __rhs) noexcept -{ - return month(__lhs) / __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_day operator/(const month& __lhs, const day& __rhs) noexcept +{ return month_day{__lhs, __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr month_day operator/(const day& __lhs, int __rhs) noexcept -{ - return month(__rhs) / __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return __lhs.month() != __rhs.month() ? 
__lhs.month() < __rhs.month() : __lhs.day() < __rhs.day(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return __rhs < __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return !(__rhs < __lhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const month_day& __lhs, const month_day& __rhs) noexcept -{ - return !(__lhs < __rhs); -} - -class month_day_last -{ -private: - chrono::month __m; - -public: - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr month_day_last(const chrono::month& __val) noexcept - : __m{__val} - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __m.ok(); - } -}; +_LIBCUDACXX_INLINE_VISIBILITY +constexpr +month_day operator/(const day& __lhs, const month& __rhs) noexcept +{ return __rhs / __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return __lhs.month() == __rhs.month(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_day operator/(const month& __lhs, int __rhs) noexcept +{ return __lhs / day(__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr +month_day operator/(int __lhs, const day& __rhs) noexcept +{ return month(__lhs) / __rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return __lhs.month() < __rhs.month(); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr +month_day operator/(const day& __lhs, int __rhs) noexcept +{ return month(__rhs) / __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return __rhs < __lhs; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return !(__rhs < __lhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const month_day& __lhs, const month_day& __rhs) noexcept +{ return __lhs.month() != __rhs.month() ? 
__lhs.month() < __rhs.month() : __lhs.day() < __rhs.day(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept -{ - return !(__lhs < __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const month_day& __lhs, const month_day& __rhs) noexcept +{ return __rhs < __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day_last operator/(const month& __lhs, last_spec) noexcept -{ - return month_day_last{__lhs}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const month_day& __lhs, const month_day& __rhs) noexcept +{ return !(__rhs < __lhs);} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day_last operator/(last_spec, const month& __rhs) noexcept -{ - return month_day_last{__rhs}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const month_day& __lhs, const month_day& __rhs) noexcept +{ return !(__lhs < __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day_last operator/(int __lhs, last_spec) noexcept -{ - return month_day_last{month(__lhs)}; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_day_last operator/(last_spec, int __rhs) noexcept -{ - return month_day_last{month(__rhs)}; -} -class month_weekday -{ +class month_day_last { private: - chrono::month __m; - chrono::weekday_indexed __wdi; - -public: - month_weekday() = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr month_weekday( - const chrono::month& __mval, const chrono::weekday_indexed& __wdival) noexcept - : __m{__mval} - , __wdi{__wdival} - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept - { - return __wdi; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __m.ok() && __wdi.ok(); - } -}; - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const month_weekday& __lhs, const month_weekday& __rhs) noexcept -{ - return __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const month_weekday& __lhs, const month_weekday& __rhs) noexcept -{ - return !(__lhs == __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday -operator/(const month& __lhs, const weekday_indexed& __rhs) noexcept -{ - return month_weekday{__lhs, __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday operator/(int __lhs, const weekday_indexed& __rhs) noexcept -{ - return month_weekday{month(__lhs), __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday -operator/(const weekday_indexed& __lhs, const month& __rhs) noexcept -{ - return month_weekday{__rhs, __lhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday operator/(const weekday_indexed& __lhs, int __rhs) noexcept -{ - return month_weekday{month(__rhs), __lhs}; -} - -class month_weekday_last -{ - chrono::month __m; - chrono::weekday_last __wdl; - + chrono::month __m; public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr month_weekday_last( - const chrono::month& __mval, const chrono::weekday_last& __wdlval) noexcept - : __m{__mval} - , __wdl{__wdlval} - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday_last 
weekday_last() const noexcept - { - return __wdl; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __m.ok() && __wdl.ok(); - } -}; - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept -{ - return __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept -{ - return !(__lhs == __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday_last -operator/(const month& __lhs, const weekday_last& __rhs) noexcept -{ - return month_weekday_last{__lhs, __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday_last -operator/(int __lhs, const weekday_last& __rhs) noexcept -{ - return month_weekday_last{month(__lhs), __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday_last -operator/(const weekday_last& __lhs, const month& __rhs) noexcept -{ - return month_weekday_last{__rhs, __lhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr month_weekday_last -operator/(const weekday_last& __lhs, int __rhs) noexcept -{ - return month_weekday_last{month(__rhs), __lhs}; -} - -class year_month -{ - chrono::year __y; - chrono::month __m; - -public: - year_month() = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month(const chrono::year& __yval, const chrono::month& __mval) noexcept - : __y{__yval} - , __m{__mval} - {} - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::year year() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month& operator+=(const months& __dm) noexcept - { - this->__m += __dm; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month& operator-=(const months& __dm) noexcept - { - this->__m -= __dm; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month& operator+=(const years& __dy) noexcept - { - this->__y += __dy; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month& operator-=(const years& __dy) noexcept - { - this->__y -= __dy; - return *this; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __y.ok() && __m.ok(); - } + _LIBCUDACXX_INLINE_VISIBILITY + explicit constexpr month_day_last(const chrono::month& __val) noexcept + : __m{__val} {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __m.ok(); } }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month operator/(const year& __y, const month& __m) noexcept -{ - return year_month{__y, __m}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month operator/(const year& __y, int __m) noexcept -{ - return year_month{__y, month(__m)}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator==(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator!=(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return !(__lhs == __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<(const 
year_month& __lhs, const year_month& __rhs) noexcept -{ - return __lhs.year() != __rhs.year() ? __lhs.year() < __rhs.year() : __lhs.month() < __rhs.month(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return __rhs < __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator<=(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return !(__rhs < __lhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool operator>=(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return !(__lhs < __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator+(const year_month& __lhs, const months& __rhs) noexcept -{ - int __dmi = static_cast(static_cast(__lhs.month())) - 1 + __rhs.count(); - const int __dy = (__dmi >= 0 ? __dmi : __dmi - 11) / 12; - __dmi = __dmi - __dy * 12 + 1; - return (__lhs.year() + years(__dy)) / month(static_cast(__dmi)); -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator+(const months& __lhs, const year_month& __rhs) noexcept -{ - return __rhs + __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator+(const year_month& __lhs, const years& __rhs) noexcept -{ - return (__lhs.year() + __rhs) / __lhs.month(); -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator+(const years& __lhs, const year_month& __rhs) noexcept -{ - return __rhs + __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr months operator-(const year_month& __lhs, const year_month& __rhs) noexcept -{ - return (__lhs.year() - __rhs.year()) - + months(static_cast(__lhs.month()) - static_cast(__rhs.month())); -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator-(const year_month& __lhs, const months& __rhs) noexcept -{ - return __lhs + -__rhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY constexpr year_month operator-(const year_month& __lhs, const years& __rhs) noexcept -{ - return __lhs + -__rhs; -} - -class year_month_day_last; - -class year_month_day -{ +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return __lhs.month() == __rhs.month(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return !(__lhs == __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return __lhs.month() < __rhs.month(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return __rhs < __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return !(__rhs < __lhs);} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const month_day_last& __lhs, const month_day_last& __rhs) noexcept +{ return !(__lhs < __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_day_last operator/(const month& __lhs, last_spec) noexcept +{ return month_day_last{__lhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_day_last operator/(last_spec, const month& __rhs) noexcept +{ return month_day_last{__rhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_day_last operator/(int __lhs, last_spec) noexcept +{ return month_day_last{month(__lhs)}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline 
constexpr +month_day_last operator/(last_spec, int __rhs) noexcept +{ return month_day_last{month(__rhs)}; } + + +class month_weekday { private: - chrono::year __y; - chrono::month __m; - chrono::day __d; - + chrono::month __m; + chrono::weekday_indexed __wdi; public: - year_month_day() = default; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day( - const chrono::year& __yval, const chrono::month& __mval, const chrono::day& __dval) noexcept - : __y{__yval} - , __m{__mval} - , __d{__dval} - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day(const year_month_day_last& __ymdl) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day(const sys_days& __sysd) noexcept - : year_month_day(__from_days(__sysd.time_since_epoch())) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr year_month_day(const local_days& __locd) noexcept - : year_month_day(__from_days(__locd.time_since_epoch())) - {} - - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day& operator+=(const months& __dm) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day& operator-=(const months& __dm) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day& operator+=(const years& __dy) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day& operator-=(const years& __dy) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::year year() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::day day() const noexcept - { - return __d; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr operator sys_days() const noexcept - { - return sys_days{__to_days()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr operator local_days() const noexcept - { - return local_days{__to_days()}; - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool ok() const noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY static constexpr year_month_day __from_days(days __d) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr days __to_days() const noexcept; + month_weekday() = default; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr month_weekday(const chrono::month& __mval, const chrono::weekday_indexed& __wdival) noexcept + : __m{__mval}, __wdi{__wdival} {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __m.ok() && __wdi.ok(); } }; -// https://howardhinnant.github.io/date_algorithms.html#civil_from_days -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day year_month_day::__from_days(days __d) noexcept -{ - static_assert(std::numeric_limits::digits >= 18, ""); - static_assert(std::numeric_limits::digits >= 20, ""); - const int __z = __d.count() + 719468; - const int __era = (__z >= 0 ? 
__z : __z - 146096) / 146097; - const unsigned __doe = static_cast(__z - __era * 146097); // [0, 146096] - const unsigned __yoe = (__doe - __doe / 1460 + __doe / 36524 - __doe / 146096) / 365; // [0, 399] - const int __yr = static_cast(__yoe) + __era * 400; - const unsigned __doy = __doe - (365 * __yoe + __yoe / 4 - __yoe / 100); // [0, 365] - const unsigned __mp = (5 * __doy + 2) / 153; // [0, 11] - const unsigned __dy = __doy - (153 * __mp + 2) / 5 + 1; // [1, 31] - const unsigned __mth = __mp + static_cast(__mp < 10 ? 3 : -9); // [1, 12] - return year_month_day{chrono::year{__yr + (__mth <= 2)}, chrono::month{__mth}, chrono::day{__dy}}; -} - -// https://howardhinnant.github.io/date_algorithms.html#days_from_civil -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr days year_month_day::__to_days() const noexcept -{ - static_assert(std::numeric_limits::digits >= 18, ""); - static_assert(std::numeric_limits::digits >= 20, ""); - - // nvcc doesn't allow ODR using constexpr globals. Therefore, - // make a temporary initialized from the global - auto constexpr __Feb = February; - const int __yr = static_cast(__y) - (__m <= __Feb); - const unsigned __mth = static_cast(__m); - const unsigned __dy = static_cast(__d); - - const int __era = (__yr >= 0 ? __yr : __yr - 399) / 400; - const unsigned __yoe = static_cast(__yr - __era * 400); // [0, 399] - const unsigned __doy = - static_cast((153 * (__mth + static_cast(__mth > 2 ? -3 : 9)) + 2) / 5 + __dy - 1); // [0, 365] - const unsigned __doe = __yoe * 365 + __yoe / 4 - __yoe / 100 + __doy; // [0, 146096] - return days{__era * 146097 + static_cast(__doe) - 719468}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - return !(__lhs == __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - if (__lhs.year() < __rhs.year()) - { - return true; - } - if (__lhs.year() > __rhs.year()) - { - return false; - } - if (__lhs.month() < __rhs.month()) - { - return true; - } - if (__lhs.month() > __rhs.month()) - { - return false; - } - return __lhs.day() < __rhs.day(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - return __rhs < __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - return !(__rhs < __lhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept -{ - return !(__lhs < __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator/(const year_month& __lhs, const day& __rhs) noexcept -{ - return year_month_day{__lhs.year(), __lhs.month(), __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day operator/(const year_month& __lhs, int __rhs) noexcept -{ - return __lhs / day(__rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator/(const year& __lhs, const month_day& __rhs) noexcept -{ - return __lhs / __rhs.month() / __rhs.day(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const 
month_weekday& __lhs, const month_weekday& __rhs) noexcept +{ return __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day operator/(int __lhs, const month_day& __rhs) noexcept -{ - return year(__lhs) / __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const month_weekday& __lhs, const month_weekday& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator/(const month_day& __lhs, const year& __rhs) noexcept -{ - return __rhs / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday operator/(const month& __lhs, const weekday_indexed& __rhs) noexcept +{ return month_weekday{__lhs, __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day operator/(const month_day& __lhs, int __rhs) noexcept -{ - return year(__rhs) / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday operator/(int __lhs, const weekday_indexed& __rhs) noexcept +{ return month_weekday{month(__lhs), __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator+(const year_month_day& __lhs, const months& __rhs) noexcept -{ - return (__lhs.year() / __lhs.month() + __rhs) / __lhs.day(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday operator/(const weekday_indexed& __lhs, const month& __rhs) noexcept +{ return month_weekday{__rhs, __lhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator+(const months& __lhs, const year_month_day& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday operator/(const weekday_indexed& __lhs, int __rhs) noexcept +{ return month_weekday{month(__rhs), __lhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator-(const year_month_day& __lhs, const months& __rhs) noexcept -{ - return __lhs + -__rhs; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator+(const year_month_day& __lhs, const years& __rhs) noexcept -{ - return (__lhs.year() + __rhs) / __lhs.month() / __lhs.day(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator+(const years& __lhs, const year_month_day& __rhs) noexcept -{ - return __rhs + __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day -operator-(const year_month_day& __lhs, const years& __rhs) noexcept -{ - return __lhs + -__rhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day& year_month_day::operator+=(const months& __dm) noexcept -{ - *this = *this + __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day& year_month_day::operator-=(const months& __dm) noexcept -{ - *this = *this - __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day& year_month_day::operator+=(const years& __dy) noexcept -{ - *this = *this + __dy; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day& year_month_day::operator-=(const years& __dy) noexcept -{ - *this = *this - __dy; - return *this; -} - -class year_month_day_last -{ -private: - chrono::year __y; - chrono::month_day_last __mdl; - -public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day_last( - const year& __yval, const month_day_last& __mdlval) noexcept - : __y{__yval} - , __mdl{__mdlval} - {} - - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day_last& operator+=(const 
months& __m) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day_last& operator-=(const months& __m) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day_last& operator+=(const years& __y) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_day_last& operator-=(const years& __y) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::year year() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __mdl.month(); - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month_day_last month_day_last() const noexcept - { - return __mdl; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::day day() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr operator sys_days() const noexcept - { - return sys_days{year() / month() / day()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr operator local_days() const noexcept - { - return local_days{year() / month() / day()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __y.ok() && __mdl.ok(); - } +class month_weekday_last { + chrono::month __m; + chrono::weekday_last __wdl; + public: + _LIBCUDACXX_INLINE_VISIBILITY + constexpr month_weekday_last(const chrono::month& __mval, const chrono::weekday_last& __wdlval) noexcept + : __m{__mval}, __wdl{__wdlval} {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __m.ok() && __wdl.ok(); } }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::day year_month_day_last::day() const noexcept -{ - constexpr chrono::day __d[] = { - chrono::day(31), - chrono::day(28), - chrono::day(31), - chrono::day(30), - chrono::day(31), - chrono::day(30), - chrono::day(31), - chrono::day(31), - chrono::day(30), - chrono::day(31), - chrono::day(30), - chrono::day(31)}; - - // nvcc doesn't allow ODR using constexpr globals. Therefore, - // make a temporary initialized from the global - auto constexpr __Feb = February; - return month() != __Feb || !__y.is_leap() ? 
__d[static_cast(month()) - 1] : chrono::day{29}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept +{ return __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const month_weekday_last& __lhs, const month_weekday_last& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - if (__lhs.year() < __rhs.year()) - { - return true; - } - if (__lhs.year() > __rhs.year()) - { - return false; - } - return __lhs.month_day_last() < __rhs.month_day_last(); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - return __rhs < __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - return !(__rhs < __lhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept -{ - return !(__lhs < __rhs); -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept -{ - return year_month_day_last{__lhs.year(), month_day_last{__lhs.month()}}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator/(const year& __lhs, const month_day_last& __rhs) noexcept -{ - return year_month_day_last{__lhs, __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator/(int __lhs, const month_day_last& __rhs) noexcept -{ - return year_month_day_last{year{__lhs}, __rhs}; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator/(const month_day_last& __lhs, const year& __rhs) noexcept -{ - return __rhs / __lhs; -} - -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator/(const month_day_last& __lhs, int __rhs) noexcept -{ - return year{__rhs} / __lhs; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator+(const year_month_day_last& __lhs, const months& __rhs) noexcept -{ - return (__lhs.year() / __lhs.month() + __rhs) / last; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday_last operator/(const month& __lhs, const weekday_last& __rhs) noexcept +{ return month_weekday_last{__lhs, __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator+(const months& __lhs, const year_month_day_last& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday_last operator/(int __lhs, const weekday_last& __rhs) noexcept +{ return month_weekday_last{month(__lhs), __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator-(const year_month_day_last& __lhs, const months& __rhs) noexcept -{ - return __lhs + (-__rhs); -} 
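// [Editor's note - illustrative sketch, not part of the patch] The year_month_day_last::day()
// implementation in the hunk above reads the last day of the month from a per-month table and
// special-cases February of leap years. Assuming these calendar types behave like their
// std::chrono counterparts (as this header intends), the behaviour can be sanity-checked at
// compile time, for example:
//
//   using namespace cuda::std::chrono;
//   static_assert((year{2000} / February / last).day() == day{29}, "leap year");
//   static_assert((year{1900} / February / last).day() == day{28}, "not a leap year");
//   static_assert((year{2021} / April / last).day() == day{30}, "30-day month");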
+_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday_last operator/(const weekday_last& __lhs, const month& __rhs) noexcept +{ return month_weekday_last{__rhs, __lhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator+(const year_month_day_last& __lhs, const years& __rhs) noexcept -{ - return year_month_day_last{__lhs.year() + __rhs, __lhs.month_day_last()}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +month_weekday_last operator/(const weekday_last& __lhs, int __rhs) noexcept +{ return month_weekday_last{month(__rhs), __lhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator+(const years& __lhs, const year_month_day_last& __rhs) noexcept -{ - return __rhs + __lhs; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last -operator-(const year_month_day_last& __lhs, const years& __rhs) noexcept -{ - return __lhs + (-__rhs); -} +class year_month { + chrono::year __y; + chrono::month __m; +public: + year_month() = default; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month(const chrono::year& __yval, const chrono::month& __mval) noexcept + : __y{__yval}, __m{__mval} {} + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::year year() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month& operator+=(const months& __dm) noexcept { this->__m += __dm; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month& operator-=(const months& __dm) noexcept { this->__m -= __dm; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month& operator+=(const years& __dy) noexcept { this->__y += __dy; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month& operator-=(const years& __dy) noexcept { this->__y -= __dy; return *this; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok(); } +}; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last& -year_month_day_last::operator+=(const months& __dm) noexcept -{ - *this = *this + __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last& -year_month_day_last::operator-=(const months& __dm) noexcept -{ - *this = *this - __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last& -year_month_day_last::operator+=(const years& __dy) noexcept -{ - *this = *this + __dy; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day_last& -year_month_day_last::operator-=(const years& __dy) noexcept -{ - *this = *this - __dy; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month operator/(const year& __y, const month& __m) noexcept { return year_month{__y, __m}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_day::year_month_day(const year_month_day_last& __ymdl) noexcept - : __y{__ymdl.year()} - , __m{__ymdl.month()} - , __d{__ymdl.day()} -{} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month operator/(const year& __y, int __m) noexcept { return year_month{__y, month(__m)}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool year_month_day::ok() const noexcept -{ - if (!__y.ok() || !__m.ok()) - { - return false; - } - return chrono::day{1} <= __d && __d <= (__y / __m / last).day(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year_month& 
__lhs, const year_month& __rhs) noexcept +{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month(); } -class year_month_weekday -{ - chrono::year __y; - chrono::month __m; - chrono::weekday_indexed __wdi; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year_month& __lhs, const year_month& __rhs) noexcept +{ return !(__lhs == __rhs); } -public: - year_month_weekday() = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday( - const chrono::year& __yval, const chrono::month& __mval, const chrono::weekday_indexed& __wdival) noexcept - : __y{__yval} - , __m{__mval} - , __wdi{__wdival} - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday(const sys_days& __sysd) noexcept - : year_month_weekday(__from_days(__sysd.time_since_epoch())) - {} - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr year_month_weekday(const local_days& __locd) noexcept - : year_month_weekday(__from_days(__locd.time_since_epoch())) - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday& operator+=(const months& m) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday& operator-=(const months& m) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday& operator+=(const years& y) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday& operator-=(const years& y) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::year year() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday weekday() const noexcept - { - return __wdi.weekday(); - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr unsigned index() const noexcept - { - return __wdi.index(); - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept - { - return __wdi; - } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const year_month& __lhs, const year_month& __rhs) noexcept +{ return __lhs.year() != __rhs.year() ? 
__lhs.year() < __rhs.year() : __lhs.month() < __rhs.month(); } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr operator sys_days() const noexcept - { - return sys_days{__to_days()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr operator local_days() const noexcept - { - return local_days{__to_days()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - if (!__y.ok() || !__m.ok() || !__wdi.ok()) - { - return false; - } - // TODO: make sure it's a valid date - return true; - } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const year_month& __lhs, const year_month& __rhs) noexcept +{ return __rhs < __lhs; } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr year_month_weekday __from_days(days __d) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr days __to_days() const noexcept; -}; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const year_month& __lhs, const year_month& __rhs) noexcept +{ return !(__rhs < __lhs);} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday year_month_weekday::__from_days(days __d) noexcept -{ - const sys_days __sysd{__d}; - const chrono::weekday __wd = chrono::weekday(__sysd); - const year_month_day __ymd = year_month_day(__sysd); - return year_month_weekday{__ymd.year(), __ymd.month(), __wd[(static_cast(__ymd.day()) - 1) / 7 + 1]}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const year_month& __lhs, const year_month& __rhs) noexcept +{ return !(__lhs < __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr days year_month_weekday::__to_days() const noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator+(const year_month& __lhs, const months& __rhs) noexcept { - const sys_days __sysd = sys_days(__y / __m / 1); - return (__sysd + (__wdi.weekday() - chrono::weekday(__sysd) + days{(__wdi.index() - 1) * 7})).time_since_epoch(); + int __dmi = static_cast(static_cast(__lhs.month())) - 1 + __rhs.count(); + const int __dy = (__dmi >= 0 ? 
__dmi : __dmi-11) / 12; + __dmi = __dmi - __dy * 12 + 1; + return (__lhs.year() + years(__dy)) / month(static_cast(__dmi)); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept -{ - return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() - && __lhs.weekday_indexed() == __rhs.weekday_indexed(); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator+(const months& __lhs, const year_month& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator+(const year_month& __lhs, const years& __rhs) noexcept +{ return (__lhs.year() + __rhs) / __lhs.month(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator/(const year_month& __lhs, const weekday_indexed& __rhs) noexcept -{ - return year_month_weekday{__lhs.year(), __lhs.month(), __rhs}; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator+(const years& __lhs, const year_month& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator/(const year& __lhs, const month_weekday& __rhs) noexcept -{ - return year_month_weekday{__lhs, __rhs.month(), __rhs.weekday_indexed()}; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr months operator-(const year_month& __lhs, const year_month& __rhs) noexcept +{ return (__lhs.year() - __rhs.year()) + months(static_cast(__lhs.month()) - static_cast(__rhs.month())); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator/(int __lhs, const month_weekday& __rhs) noexcept -{ - return year(__lhs) / __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator-(const year_month& __lhs, const months& __rhs) noexcept +{ return __lhs + -__rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator/(const month_weekday& __lhs, const year& __rhs) noexcept -{ - return __rhs / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr year_month operator-(const year_month& __lhs, const years& __rhs) noexcept +{ return __lhs + -__rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator/(const month_weekday& __lhs, int __rhs) noexcept -{ - return year(__rhs) / __lhs; -} +class year_month_day_last; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator+(const year_month_weekday& __lhs, const months& __rhs) noexcept -{ - return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_indexed(); -} +class year_month_day { +private: + chrono::year __y; + chrono::month __m; + chrono::day __d; +public: + year_month_day() = default; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month_day( + const chrono::year& __yval, const chrono::month& __mval, const chrono::day& __dval) noexcept + : __y{__yval}, __m{__mval}, __d{__dval} {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day(const year_month_day_last& __ymdl) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr year_month_day(const sys_days& __sysd) noexcept + : year_month_day(__from_days(__sysd.time_since_epoch())) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr year_month_day(const local_days& __locd) noexcept + : year_month_day(__from_days(__locd.time_since_epoch())) {} + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr 
year_month_day& operator+=(const months& __dm) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day& operator-=(const months& __dm) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day& operator+=(const years& __dy) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day& operator-=(const years& __dy) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::year year() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::day day() const noexcept { return __d; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; } + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool ok() const noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr year_month_day __from_days(days __d) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr days __to_days() const noexcept; +}; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator+(const months& __lhs, const year_month_weekday& __rhs) noexcept -{ - return __rhs + __lhs; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator-(const year_month_weekday& __lhs, const months& __rhs) noexcept -{ - return __lhs + (-__rhs); +// https://howardhinnant.github.io/date_algorithms.html#civil_from_days +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day +year_month_day::__from_days(days __d) noexcept +{ + static_assert(std::numeric_limits<unsigned>::digits >= 18, ""); + static_assert(std::numeric_limits<int>::digits >= 20 , ""); + const int __z = __d.count() + 719468; + const int __era = (__z >= 0 ? __z : __z - 146096) / 146097; + const unsigned __doe = static_cast<unsigned>(__z - __era * 146097); // [0, 146096] + const unsigned __yoe = (__doe - __doe/1460 + __doe/36524 - __doe/146096) / 365; // [0, 399] + const int __yr = static_cast<int>(__yoe) + __era * 400; + const unsigned __doy = __doe - (365 * __yoe + __yoe/4 - __yoe/100); // [0, 365] + const unsigned __mp = (5 * __doy + 2)/153; // [0, 11] + const unsigned __dy = __doy - (153 * __mp + 2)/5 + 1; // [1, 31] + const unsigned __mth = __mp + static_cast<unsigned>(__mp < 10 ? 3 : -9); // [1, 12] + return year_month_day{chrono::year{__yr + (__mth <= 2)}, chrono::month{__mth}, chrono::day{__dy}}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator+(const year_month_weekday& __lhs, const years& __rhs) noexcept -{ - return year_month_weekday{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_indexed()}; -} +// https://howardhinnant.github.io/date_algorithms.html#days_from_civil +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr days year_month_day::__to_days() const noexcept +{ + static_assert(std::numeric_limits<unsigned>::digits >= 18, ""); + static_assert(std::numeric_limits<int>::digits >= 20 , ""); + + // nvcc doesn't allow ODR using constexpr globals. Therefore, + // make a temporary initialized from the global + auto constexpr __Feb = February; + const int __yr = static_cast<int>(__y) - (__m <= __Feb); + const unsigned __mth = static_cast<unsigned>(__m); + const unsigned __dy = static_cast<unsigned>(__d); + + const int __era = (__yr >= 0 ? __yr : __yr - 399) / 400; + const unsigned __yoe = static_cast<unsigned>(__yr - __era * 400); // [0, 399] + const unsigned __doy = static_cast<unsigned>( + (153 * (__mth + static_cast<unsigned>(__mth > 2 ?
-3 : 9)) + 2) / 5 + __dy-1); // [0, 365] + const unsigned __doe = __yoe * 365 + __yoe/4 - __yoe/100 + __doy; // [0, 146096] + return days{__era * 146097 + static_cast(__doe) - 719468}; +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.day() == __rhs.day(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ return !(__lhs == __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ + if (__lhs.year() < __rhs.year()) return true; + if (__lhs.year() > __rhs.year()) return false; + if (__lhs.month() < __rhs.month()) return true; + if (__lhs.month() > __rhs.month()) return false; + return __lhs.day() < __rhs.day(); +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ return __rhs < __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ return !(__rhs < __lhs);} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const year_month_day& __lhs, const year_month_day& __rhs) noexcept +{ return !(__lhs < __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(const year_month& __lhs, const day& __rhs) noexcept +{ return year_month_day{__lhs.year(), __lhs.month(), __rhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(const year_month& __lhs, int __rhs) noexcept +{ return __lhs / day(__rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(const year& __lhs, const month_day& __rhs) noexcept +{ return __lhs / __rhs.month() / __rhs.day(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(int __lhs, const month_day& __rhs) noexcept +{ return year(__lhs) / __rhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(const month_day& __lhs, const year& __rhs) noexcept +{ return __rhs / __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator/(const month_day& __lhs, int __rhs) noexcept +{ return year(__rhs) / __lhs; } + + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator+(const year_month_day& __lhs, const months& __rhs) noexcept +{ return (__lhs.year()/__lhs.month() + __rhs)/__lhs.day(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator+(const months& __lhs, const year_month_day& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator-(const year_month_day& __lhs, const months& __rhs) noexcept +{ return __lhs + -__rhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator+(const year_month_day& __lhs, const years& __rhs) noexcept +{ return (__lhs.year() + __rhs) / __lhs.month() / __lhs.day(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator+(const years& __lhs, const year_month_day& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day operator-(const year_month_day& __lhs, const years& __rhs) noexcept +{ return __lhs + -__rhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr 
year_month_day& year_month_day::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day& year_month_day::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day& year_month_day::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day& year_month_day::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; } + +class year_month_day_last { +private: + chrono::year __y; + chrono::month_day_last __mdl; +public: + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day_last(const year& __yval, const month_day_last& __mdlval) noexcept + : __y{__yval}, __mdl{__mdlval} {} + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day_last& operator+=(const months& __m) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day_last& operator-=(const months& __m) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day_last& operator+=(const years& __y) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_day_last& operator-=(const years& __y) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::year year() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __mdl.month(); } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month_day_last month_day_last() const noexcept { return __mdl; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::day day() const noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr operator sys_days() const noexcept { return sys_days{year()/month()/day()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr operator local_days() const noexcept { return local_days{year()/month()/day()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __y.ok() && __mdl.ok(); } +}; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator+(const years& __lhs, const year_month_weekday& __rhs) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +chrono::day year_month_day_last::day() const noexcept { - return __rhs + __lhs; -} + constexpr chrono::day __d[] = + { + chrono::day(31), chrono::day(28), chrono::day(31), + chrono::day(30), chrono::day(31), chrono::day(30), + chrono::day(31), chrono::day(31), chrono::day(30), + chrono::day(31), chrono::day(30), chrono::day(31) + }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday -operator-(const year_month_weekday& __lhs, const years& __rhs) noexcept -{ - return __lhs + (-__rhs); -} + // nvcc doesn't allow ODR using constexpr globals. Therefore, + // make a temporary initialized from the global + auto constexpr __Feb = February; + return month() != __Feb || !__y.is_leap() ? 
+ __d[static_cast(month()) - 1] : chrono::day{29}; +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday& -year_month_weekday::operator+=(const months& __dm) noexcept -{ - *this = *this + __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday& -year_month_weekday::operator-=(const months& __dm) noexcept -{ - *this = *this - __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday& -year_month_weekday::operator+=(const years& __dy) noexcept -{ - *this = *this + __dy; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday& -year_month_weekday::operator-=(const years& __dy) noexcept -{ - *this = *this - __dy; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ return !(__lhs == __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator< (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ + if (__lhs.year() < __rhs.year()) return true; + if (__lhs.year() > __rhs.year()) return false; + return __lhs.month_day_last() < __rhs.month_day_last(); +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator> (const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ return __rhs < __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ return !(__rhs < __lhs);} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept +{ return !(__lhs < __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept +{ return year_month_day_last{__lhs.year(), month_day_last{__lhs.month()}}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last operator/(const year& __lhs, const month_day_last& __rhs) noexcept +{ return year_month_day_last{__lhs, __rhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last operator/(int __lhs, const month_day_last& __rhs) noexcept +{ return year_month_day_last{year{__lhs}, __rhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last operator/(const month_day_last& __lhs, const year& __rhs) noexcept +{ return __rhs / __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last operator/(const month_day_last& __lhs, int __rhs) noexcept +{ return year{__rhs} / __lhs; } + + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator+(const year_month_day_last& __lhs, const months& __rhs) noexcept +{ return (__lhs.year() / __lhs.month() + __rhs) / last; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator+(const months& __lhs, const year_month_day_last& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator-(const year_month_day_last& __lhs, const months& __rhs) noexcept +{ return __lhs + (-__rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator+(const 
year_month_day_last& __lhs, const years& __rhs) noexcept +{ return year_month_day_last{__lhs.year() + __rhs, __lhs.month_day_last()}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator+(const years& __lhs, const year_month_day_last& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_day_last operator-(const year_month_day_last& __lhs, const years& __rhs) noexcept +{ return __lhs + (-__rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last& year_month_day_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last& year_month_day_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last& year_month_day_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day_last& year_month_day_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_day::year_month_day(const year_month_day_last& __ymdl) noexcept + : __y{__ymdl.year()}, __m{__ymdl.month()}, __d{__ymdl.day()} {} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr bool year_month_day::ok() const noexcept +{ + if (!__y.ok() || !__m.ok()) return false; + return chrono::day{1} <= __d && __d <= (__y / __m / last).day(); +} + +class year_month_weekday { + chrono::year __y; + chrono::month __m; + chrono::weekday_indexed __wdi; +public: + year_month_weekday() = default; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday(const chrono::year& __yval, const chrono::month& __mval, + const chrono::weekday_indexed& __wdival) noexcept + : __y{__yval}, __m{__mval}, __wdi{__wdival} {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday(const sys_days& __sysd) noexcept + : year_month_weekday(__from_days(__sysd.time_since_epoch())) {} + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr year_month_weekday(const local_days& __locd) noexcept + : year_month_weekday(__from_days(__locd.time_since_epoch())) {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday& operator+=(const months& m) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday& operator-=(const months& m) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday& operator+=(const years& y) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday& operator-=(const years& y) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::year year() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday weekday() const noexcept { return __wdi.weekday(); } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr unsigned index() const noexcept { return __wdi.index(); } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday_indexed weekday_indexed() const noexcept { return __wdi; } + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr operator sys_days() const noexcept { return sys_days{__to_days()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr 
bool ok() const noexcept + { + if (!__y.ok() || !__m.ok() || !__wdi.ok()) return false; + // TODO: make sure it's a valid date + return true; + } -class year_month_weekday_last -{ -private: - chrono::year __y; - chrono::month __m; - chrono::weekday_last __wdl; + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr year_month_weekday __from_days(days __d) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr days __to_days() const noexcept; +}; +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday year_month_weekday::__from_days(days __d) noexcept +{ + const sys_days __sysd{__d}; + const chrono::weekday __wd = chrono::weekday(__sysd); + const year_month_day __ymd = year_month_day(__sysd); + return year_month_weekday{__ymd.year(), __ymd.month(), + __wd[(static_cast(__ymd.day())-1)/7+1]}; +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +days year_month_weekday::__to_days() const noexcept +{ + const sys_days __sysd = sys_days(__y/__m/1); + return (__sysd + (__wdi.weekday() - chrono::weekday(__sysd) + days{(__wdi.index()-1)*7})) + .time_since_epoch(); +} + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept +{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_indexed() == __rhs.weekday_indexed(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year_month_weekday& __lhs, const year_month_weekday& __rhs) noexcept +{ return !(__lhs == __rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator/(const year_month& __lhs, const weekday_indexed& __rhs) noexcept +{ return year_month_weekday{__lhs.year(), __lhs.month(), __rhs}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator/(const year& __lhs, const month_weekday& __rhs) noexcept +{ return year_month_weekday{__lhs, __rhs.month(), __rhs.weekday_indexed()}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator/(int __lhs, const month_weekday& __rhs) noexcept +{ return year(__lhs) / __rhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator/(const month_weekday& __lhs, const year& __rhs) noexcept +{ return __rhs / __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator/(const month_weekday& __lhs, int __rhs) noexcept +{ return year(__rhs) / __lhs; } + + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator+(const year_month_weekday& __lhs, const months& __rhs) noexcept +{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_indexed(); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator+(const months& __lhs, const year_month_weekday& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator-(const year_month_weekday& __lhs, const months& __rhs) noexcept +{ return __lhs + (-__rhs); } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator+(const year_month_weekday& __lhs, const years& __rhs) noexcept +{ return year_month_weekday{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_indexed()}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator+(const years& __lhs, const year_month_weekday& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday operator-(const year_month_weekday& __lhs, const years& 
__rhs) noexcept +{ return __lhs + (-__rhs); } + + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday& year_month_weekday::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday& year_month_weekday::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday& year_month_weekday::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday& year_month_weekday::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; } + +class year_month_weekday_last { +private: + chrono::year __y; + chrono::month __m; + chrono::weekday_last __wdl; public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday_last( - const chrono::year& __yval, const chrono::month& __mval, const chrono::weekday_last& __wdlval) noexcept - : __y{__yval} - , __m{__mval} - , __wdl{__wdlval} - {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday_last& operator+=(const months& __dm) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday_last& operator-=(const months& __dm) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday_last& operator+=(const years& __dy) noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr year_month_weekday_last& operator-=(const years& __dy) noexcept; - - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::year year() const noexcept - { - return __y; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::month month() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday weekday() const noexcept - { - return __wdl.weekday(); - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr chrono::weekday_last weekday_last() const noexcept - { - return __wdl; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr operator sys_days() const noexcept - { - return sys_days{__to_days()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline explicit constexpr operator local_days() const noexcept - { - return local_days{__to_days()}; - } - _LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool ok() const noexcept - { - return __y.ok() && __m.ok() && __wdl.ok(); - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday_last(const chrono::year& __yval, const chrono::month& __mval, + const chrono::weekday_last& __wdlval) noexcept + : __y{__yval}, __m{__mval}, __wdl{__wdlval} {} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday_last& operator+=(const months& __dm) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday_last& operator-=(const months& __dm) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday_last& operator+=(const years& __dy) noexcept; + _LIBCUDACXX_INLINE_VISIBILITY + constexpr year_month_weekday_last& operator-=(const years& __dy) noexcept; + + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::year year() const noexcept { return __y; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::month month() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday weekday() const noexcept { return __wdl.weekday(); } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr chrono::weekday_last weekday_last() const noexcept { return __wdl; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr operator sys_days() const noexcept 
{ return sys_days{__to_days()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline explicit constexpr operator local_days() const noexcept { return local_days{__to_days()}; } + _LIBCUDACXX_INLINE_VISIBILITY + inline constexpr bool ok() const noexcept { return __y.ok() && __m.ok() && __wdl.ok(); } + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr days __to_days() const noexcept; - _LIBCUDACXX_INLINE_VISIBILITY constexpr days __to_days() const noexcept; }; -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr days year_month_weekday_last::__to_days() const noexcept +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +days year_month_weekday_last::__to_days() const noexcept { - const sys_days __last = sys_days{__y / __m / last}; - return (__last - (chrono::weekday{__last} - __wdl.weekday())).time_since_epoch(); -} + const sys_days __last = sys_days{__y/__m/last}; + return (__last - (chrono::weekday{__last} - __wdl.weekday())).time_since_epoch(); -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator==(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept -{ - return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr bool -operator!=(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept -{ - return !(__lhs == __rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator==(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept +{ return __lhs.year() == __rhs.year() && __lhs.month() == __rhs.month() && __lhs.weekday_last() == __rhs.weekday_last(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator/(const year_month& __lhs, const weekday_last& __rhs) noexcept -{ - return year_month_weekday_last{__lhs.year(), __lhs.month(), __rhs}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +bool operator!=(const year_month_weekday_last& __lhs, const year_month_weekday_last& __rhs) noexcept +{ return !(__lhs == __rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator/(const year& __lhs, const month_weekday_last& __rhs) noexcept -{ - return year_month_weekday_last{__lhs, __rhs.month(), __rhs.weekday_last()}; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator/(int __lhs, const month_weekday_last& __rhs) noexcept -{ - return year(__lhs) / __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator/(const year_month& __lhs, const weekday_last& __rhs) noexcept +{ return year_month_weekday_last{__lhs.year(), __lhs.month(), __rhs}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator/(const month_weekday_last& __lhs, const year& __rhs) noexcept -{ - return __rhs / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator/(const year& __lhs, const month_weekday_last& __rhs) noexcept +{ return year_month_weekday_last{__lhs, __rhs.month(), __rhs.weekday_last()}; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator/(const month_weekday_last& __lhs, int __rhs) noexcept -{ - return year(__rhs) / __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator/(int __lhs, const month_weekday_last& __rhs) noexcept +{ return year(__lhs) / __rhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator+(const year_month_weekday_last& __lhs, 
const months& __rhs) noexcept -{ - return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_last(); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator/(const month_weekday_last& __lhs, const year& __rhs) noexcept +{ return __rhs / __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator+(const months& __lhs, const year_month_weekday_last& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator/(const month_weekday_last& __lhs, int __rhs) noexcept +{ return year(__rhs) / __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator-(const year_month_weekday_last& __lhs, const months& __rhs) noexcept -{ - return __lhs + (-__rhs); -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator+(const year_month_weekday_last& __lhs, const years& __rhs) noexcept -{ - return year_month_weekday_last{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_last()}; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const months& __rhs) noexcept +{ return (__lhs.year() / __lhs.month() + __rhs) / __lhs.weekday_last(); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator+(const years& __lhs, const year_month_weekday_last& __rhs) noexcept -{ - return __rhs + __lhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator+(const months& __lhs, const year_month_weekday_last& __rhs) noexcept +{ return __rhs + __lhs; } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last -operator-(const year_month_weekday_last& __lhs, const years& __rhs) noexcept -{ - return __lhs + (-__rhs); -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const months& __rhs) noexcept +{ return __lhs + (-__rhs); } -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last& -year_month_weekday_last::operator+=(const months& __dm) noexcept -{ - *this = *this + __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last& -year_month_weekday_last::operator-=(const months& __dm) noexcept -{ - *this = *this - __dm; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last& -year_month_weekday_last::operator+=(const years& __dy) noexcept -{ - *this = *this + __dy; - return *this; -} -_LIBCUDACXX_INLINE_VISIBILITY inline constexpr year_month_weekday_last& -year_month_weekday_last::operator-=(const years& __dy) noexcept -{ - *this = *this - __dy; - return *this; -} +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator+(const year_month_weekday_last& __lhs, const years& __rhs) noexcept +{ return year_month_weekday_last{__lhs.year() + __rhs, __lhs.month(), __lhs.weekday_last()}; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator+(const years& __lhs, const year_month_weekday_last& __rhs) noexcept +{ return __rhs + __lhs; } + +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr +year_month_weekday_last operator-(const year_month_weekday_last& __lhs, const years& __rhs) noexcept +{ return __lhs + (-__rhs); } -_CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY constexpr unsigned -__hh_mm_ss_width(uint64_t __n, uint64_t __d = 10, unsigned __w = 0) +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr 
year_month_weekday_last& year_month_weekday_last::operator+=(const months& __dm) noexcept { *this = *this + __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const months& __dm) noexcept { *this = *this - __dm; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday_last& year_month_weekday_last::operator+=(const years& __dy) noexcept { *this = *this + __dy; return *this; } +_LIBCUDACXX_INLINE_VISIBILITY +inline constexpr year_month_weekday_last& year_month_weekday_last::operator-=(const years& __dy) noexcept { *this = *this - __dy; return *this; } + +_CCCL_NODISCARD inline _LIBCUDACXX_INLINE_VISIBILITY +constexpr unsigned __hh_mm_ss_width(uint64_t __n, uint64_t __d = 10, unsigned __w = 0) { - if (__n >= 2 && __d != 0 && __w < 19) - { - return 1 + __hh_mm_ss_width(__n, __d % __n * 10, __w + 1); - } - return 0; + if (__n >= 2 && __d != 0 && __w < 19) + return 1 + __hh_mm_ss_width(__n, __d % __n * 10, __w+1); + return 0; } template <class _Duration> class hh_mm_ss { private: - static_assert(__is_duration<_Duration>::value, "template parameter of hh_mm_ss must be a std::chrono::duration"); - using __CommonType = typename common_type<_Duration, chrono::seconds>::type; + static_assert(__is_duration<_Duration>::value, "template parameter of hh_mm_ss must be a std::chrono::duration"); + using __CommonType = typename common_type<_Duration, chrono::seconds>::type; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr uint64_t __pow10(unsigned __exp) - { - uint64_t __ret = 1; - for (unsigned __i = 0; __i < __exp; ++__i) + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr uint64_t __pow10(unsigned __exp) { - __ret *= 10U; + uint64_t __ret = 1; + for (unsigned __i = 0; __i < __exp; ++__i) + __ret *= 10U; + return __ret; } - return __ret; - } - public: - static unsigned constexpr fractional_width = - __hh_mm_ss_width(__CommonType::period::den) < 19 ? __hh_mm_ss_width(__CommonType::period::den) : 6u; - using precision = duration<typename __CommonType::rep, ratio<1, __pow10(fractional_width)>>; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr hh_mm_ss() noexcept - : hh_mm_ss{_Duration::zero()} - {} - - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit hh_mm_ss(_Duration __d) noexcept - : __is_neg(__d < _Duration(0)) - , __h(duration_cast<chrono::hours>(abs(__d))) - , __m(duration_cast<chrono::minutes>(abs(__d) - hours())) - , __s(duration_cast<chrono::seconds>(abs(__d) - hours() - minutes())) - , __f(duration_cast<precision>(abs(__d) - hours() - minutes() - seconds())) - {} - - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool is_negative() const noexcept - { - return __is_neg; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::hours hours() const noexcept - { - return __h; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::minutes minutes() const noexcept - { - return __m; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::seconds seconds() const noexcept - { - return __s; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr precision subseconds() const noexcept - { - return __f; - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr precision to_duration() const noexcept - { - auto __dur = __h + __m + __s + __f; - return __is_neg ? -__dur : __dur; - } + static unsigned constexpr fractional_width = __hh_mm_ss_width(__CommonType::period::den) < 19 ?
+ __hh_mm_ss_width(__CommonType::period::den) : 6u; + using precision = duration<typename __CommonType::rep, ratio<1, __pow10(fractional_width)>>; + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr hh_mm_ss() noexcept : hh_mm_ss{_Duration::zero()} {} + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr explicit hh_mm_ss(_Duration __d) noexcept : + __is_neg(__d < _Duration(0)), + __h(duration_cast<chrono::hours>(abs(__d))), + __m(duration_cast<chrono::minutes>(abs(__d) - hours())), + __s(duration_cast<chrono::seconds>(abs(__d) - hours() - minutes())), + __f(duration_cast<precision>(abs(__d) - hours() - minutes() - seconds())) + {} + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool is_negative() const noexcept { return __is_neg; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::hours hours() const noexcept { return __h; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::minutes minutes() const noexcept { return __m; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::seconds seconds() const noexcept { return __s; } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr precision subseconds() const noexcept { return __f; } + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr precision to_duration() const noexcept + { + auto __dur = __h + __m + __s + __f; + return __is_neg ? -__dur : __dur; + } - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit operator precision() const noexcept - { - return to_duration(); - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr explicit operator precision() const noexcept { return to_duration(); } private: - bool __is_neg; - chrono::hours __h; - chrono::minutes __m; - chrono::seconds __s; - precision __f; + bool __is_neg; + chrono::hours __h; + chrono::minutes __m; + chrono::seconds __s; + precision __f; }; -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool is_am(const hours& __h) noexcept -{ - return __h >= hours(0) && __h < hours(12); -} -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool is_pm(const hours& __h) noexcept -{ - return __h >= hours(12) && __h < hours(24); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr bool is_am(const hours& __h) noexcept { return __h >= hours( 0) && __h < hours(12); } +_LIBCUDACXX_INLINE_VISIBILITY +constexpr bool is_pm(const hours& __h) noexcept { return __h >= hours(12) && __h < hours(24); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr hours make12(const hours& __h) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr hours make12(const hours& __h) noexcept { - if (__h == hours(0)) - { - return hours(12); - } - else if (__h <= hours(12)) - { - return __h; - } - else - { - return __h - hours(12); - } + if (__h == hours( 0)) return hours(12); + else if (__h <= hours(12)) return __h; + else return __h - hours(12); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr hours make24(const hours& __h, bool __is_pm) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr hours make24(const hours& __h, bool __is_pm) noexcept { - if (__is_pm) - { - return __h == hours(12) ? __h : __h + hours(12); - } - else - { - return __h == hours(12) ? hours(0) : __h; - } + if (__is_pm) + return __h == hours(12) ? __h : __h + hours(12); + else + return __h == hours(12) ? hours(0) : __h; } #endif // _CCCL_STD_VER > 2011 -} // namespace chrono +} // chrono #if _CCCL_STD_VER > 2011 // GCC 5 and 6 warn (and then error) on us using the standard reserved UDL names, // but have no way of disabling that. Use the system_header pragma on those GCC versions // for the remainder of this file, even if it has been requested to disable the pragma // earlier.
-# if defined(_CCCL_COMPILER_GCC) && (__GNUC__ == 5 || __GNUC__ == 6) -# pragma GCC system_header -# endif +#if defined(_CCCL_COMPILER_GCC) && (__GNUC__ == 5 || __GNUC__ == 6) +#pragma GCC system_header +#endif _CCCL_DIAG_PUSH _CCCL_DIAG_SUPPRESS_GCC("-Wliteral-suffix") @@ -3607,94 +3212,100 @@ _CCCL_DIAG_SUPPRESS_MSVC(4455) // Suffixes for duration literals [time.duration.literals] inline namespace literals { -inline namespace chrono_literals -{ + inline namespace chrono_literals + { -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::hours operator""h(unsigned long long __h) -{ - return chrono::hours(static_cast(__h)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::hours operator""h(unsigned long long __h) + { + return chrono::hours(static_cast(__h)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<3600, 1>> -operator""h(long double __h) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<3600, 1>>(__h); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<3600,1>> operator""h(long double __h) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<3600,1>>(__h); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::minutes operator""min(unsigned long long __m) -{ - return chrono::minutes(static_cast(__m)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::minutes operator""min(unsigned long long __m) + { + return chrono::minutes(static_cast(__m)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<60, 1>> -operator""min(long double __m) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<60, 1>>(__m); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<60,1>> operator""min(long double __m) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, ratio<60,1>> (__m); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::seconds operator""s(unsigned long long __s) -{ - return chrono::seconds(static_cast(__s)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::seconds operator""s(unsigned long long __s) + { + return chrono::seconds(static_cast(__s)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T> -operator""s(long double __s) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T>(__s); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T> operator""s(long double __s) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T> (__s); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::milliseconds operator""ms(unsigned long long __ms) -{ - return chrono::milliseconds(static_cast(__ms)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::milliseconds operator""ms(unsigned long long __ms) + { + return chrono::milliseconds(static_cast(__ms)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, milli> -operator""ms(long double __ms) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, milli>(__ms); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, milli> operator""ms(long double __ms) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, milli>(__ms); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::microseconds 
operator""us(unsigned long long __us) -{ - return chrono::microseconds(static_cast(__us)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::microseconds operator""us(unsigned long long __us) + { + return chrono::microseconds(static_cast(__us)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, micro> -operator""us(long double __us) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, micro>(__us); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, micro> operator""us(long double __us) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, micro> (__us); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::nanoseconds operator""ns(unsigned long long __ns) -{ - return chrono::nanoseconds(static_cast(__ns)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::nanoseconds operator""ns(unsigned long long __ns) + { + return chrono::nanoseconds(static_cast(__ns)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, nano> -operator""ns(long double __ns) -{ - return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, nano>(__ns); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, nano> operator""ns(long double __ns) + { + return chrono::duration<_LIBCUDACXX_CHRONO_LITERAL_INTERNAL_T, nano> (__ns); + } -# if _CCCL_STD_VER > 2017 && !defined(_LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS) -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::day operator""d(unsigned long long __d) noexcept -{ - return chrono::day(static_cast(__d)); -} +#if _CCCL_STD_VER > 2017 && !defined(_LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS) + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::day operator""d(unsigned long long __d) noexcept + { + return chrono::day(static_cast(__d)); + } -_LIBCUDACXX_INLINE_VISIBILITY constexpr chrono::year operator""y(unsigned long long __y) noexcept -{ - return chrono::year(static_cast(__y)); -} -# endif //_CCCL_STD_VER > 2017 && !defined(_LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS) -} // namespace chrono_literals -} // namespace literals + _LIBCUDACXX_INLINE_VISIBILITY + constexpr chrono::year operator""y(unsigned long long __y) noexcept + { + return chrono::year(static_cast(__y)); + } +#endif //_CCCL_STD_VER > 2017 && !defined(_LIBCUDACXX_HAS_NO_CXX20_CHRONO_LITERALS) +}} _CCCL_DIAG_POP -namespace chrono -{ // hoist the literals into namespace std::chrono -using namespace literals::chrono_literals; +namespace chrono { // hoist the literals into namespace std::chrono + using namespace literals::chrono_literals; } #endif // _CCCL_STD_VER > 2011 @@ -3704,33 +3315,34 @@ _LIBCUDACXX_END_NAMESPACE_STD #ifndef __cuda_std__ _LIBCUDACXX_BEGIN_NAMESPACE_FILESYSTEM -struct _FilesystemClock -{ -# if !defined(_LIBCUDACXX_HAS_NO_INT128) +struct _FilesystemClock { +#if !defined(_LIBCUDACXX_HAS_NO_INT128) typedef __int128_t rep; typedef nano period; -# else +#else typedef long long rep; typedef nano period; -# endif +#endif typedef chrono::duration duration; typedef chrono::time_point<_FilesystemClock> time_point; - _LIBCUDACXX_EXPORTED_FROM_ABI static _CCCL_CONSTEXPR_CXX14 const bool is_steady = false; + _LIBCUDACXX_EXPORTED_FROM_ABI + static _CCCL_CONSTEXPR_CXX14 const bool is_steady = false; _LIBCUDACXX_AVAILABILITY_FILESYSTEM _LIBCUDACXX_FUNC_VIS static time_point now() noexcept; - _LIBCUDACXX_INLINE_VISIBILITY static time_t to_time_t(const time_point& __t) noexcept - { - 
typedef chrono::duration __secs; - return time_t(chrono::duration_cast<__secs>(__t.time_since_epoch()).count()); + _LIBCUDACXX_INLINE_VISIBILITY + static time_t to_time_t(const time_point& __t) noexcept { + typedef chrono::duration __secs; + return time_t( + chrono::duration_cast<__secs>(__t.time_since_epoch()).count()); } - _LIBCUDACXX_INLINE_VISIBILITY static time_point from_time_t(time_t __t) noexcept - { - typedef chrono::duration __secs; - return time_point(__secs(__t)); + _LIBCUDACXX_INLINE_VISIBILITY + static time_point from_time_t(time_t __t) noexcept { + typedef chrono::duration __secs; + return time_point(__secs(__t)); } }; _LIBCUDACXX_END_NAMESPACE_FILESYSTEM @@ -3741,4 +3353,4 @@ _CCCL_NV_DIAG_DEFAULT(cuda_demote_unsupported_floating_point) #include #include -#endif // _LIBCUDACXX_CHRONO +#endif // _LIBCUDACXX_CHRONO diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/climits b/libcudacxx/include/cuda/std/detail/libcxx/include/climits index 85581a30fe5..330c54a02d4 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/climits +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/climits @@ -51,21 +51,21 @@ Macros: #include #if defined(_CCCL_COMPILER_MSVC) -# include +#include #endif // _LIBCUDACXX_MSVCRT #if defined(_CCCL_COMPILER_IBM) -# include +#include #endif // _CCCL_COMPILER_IBM // ICC defines __CHAR_BIT__ by default // accept that, but assert it is what we expect #ifdef __CHAR_BIT__ -static_assert(__CHAR_BIT__ == 8, ""); + static_assert(__CHAR_BIT__ == 8, ""); #else -# define __CHAR_BIT__ 8 + #define __CHAR_BIT__ 8 #endif #include -#endif // _LIBCUDACXX_CLIMITS +#endif // _LIBCUDACXX_CLIMITS diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cmath b/libcudacxx/include/cuda/std/detail/libcxx/include/cmath index 7a62092cdad..4ef9fbcc74b 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cmath +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cmath @@ -311,11 +311,11 @@ long double truncl(long double x); #endif // no system header #if !defined(_CCCL_COMPILER_NVRTC) || !defined(__cuda_std__) -# include +#include #endif #if defined(__cuda_std__) && defined(_CCCL_COMPILER_NVHPC) -# include +#include #endif #include @@ -323,36 +323,36 @@ long double truncl(long double x); #include #ifdef _LIBCUDACXX_HAS_NVFP16 -# include +#include #endif // _LIBCUDACXX_HAS_NVFP16 #ifdef _LIBCUDACXX_HAS_NVBF16 -# include +#include #endif // _LIBCUDACXX_HAS_NVBF16 #include #ifdef _CCCL_COMPILER_NVRTC -# include -# define INFINITY __builtin_huge_val() -# define NAN __builtin_nan() +#include +#define INFINITY __builtin_huge_val() +#define NAN __builtin_nan() #endif // _CCCL_COMPILER_NVRTC _LIBCUDACXX_BEGIN_NAMESPACE_STD +using ::signbit; using ::isfinite; using ::isinf; using ::isnan; -using ::signbit; using ::acos; using ::acosf; using ::asin; using ::asinf; using ::atan; +using ::atanf; using ::atan2; using ::atan2f; -using ::atanf; using ::ceil; using ::ceilf; using ::cos; @@ -399,17 +399,17 @@ using ::abs; #ifndef _CCCL_COMPILER_NVRTC using ::fpclassify; +using ::isnormal; using ::isgreater; using ::isgreaterequal; using ::isless; using ::islessequal; using ::islessgreater; -using ::isnormal; using ::isunordered; -using ::double_t; using ::float_t; +using ::double_t; using ::fabs; using ::fabsf; @@ -461,17 +461,17 @@ using ::copysign; using ::copysignf; using ::erf; +using ::erff; using ::erfc; using ::erfcf; -using ::erff; using ::exp2; using ::exp2f; using ::expm1; using ::expm1f; using ::fdim; using ::fdimf; -using ::fma; using ::fmaf; 
+using ::fma; using ::fmax; using ::fmaxf; using ::fmin; @@ -523,36 +523,36 @@ using ::truncf; using ::acosl; using ::asinl; -using ::atan2l; using ::atanl; +using ::atan2l; using ::ceill; -using ::coshl; using ::cosl; +using ::coshl; using ::expl; using ::fabsl; using ::floorl; using ::fmodl; using ::frexpl; using ::ldexpl; -using ::log10l; using ::logl; +using ::log10l; using ::modfl; using ::powl; -using ::sinhl; using ::sinl; +using ::sinhl; using ::sqrtl; using ::tanl; +using ::tanhl; using ::acoshl; using ::asinhl; using ::atanhl; using ::cbrtl; -using ::tanhl; using ::copysignl; -using ::erfcl; using ::erfl; +using ::erfcl; using ::exp2l; using ::expm1l; using ::fdiml; @@ -585,244 +585,224 @@ using ::truncl; #endif // _CCCL_COMPILER_NVRTC #if _CCCL_STD_VER > 2014 && !defined(__cuda_std__) -inline _LIBCUDACXX_INLINE_VISIBILITY float hypot(float x, float y, float z) -{ - return sqrt(x * x + y * y + z * z); -} -inline _LIBCUDACXX_INLINE_VISIBILITY double hypot(double x, double y, double z) -{ - return sqrt(x * x + y * y + z * z); -} -# ifdef _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE -inline _LIBCUDACXX_INLINE_VISIBILITY long double hypot(long double x, long double y, long double z) -{ - return sqrt(x * x + y * y + z * z); -} -# endif +inline _LIBCUDACXX_INLINE_VISIBILITY float hypot( float x, float y, float z ) { return sqrt(x*x + y*y + z*z); } +inline _LIBCUDACXX_INLINE_VISIBILITY double hypot( double x, double y, double z ) { return sqrt(x*x + y*y + z*z); } +#ifdef _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE +inline _LIBCUDACXX_INLINE_VISIBILITY long double hypot( long double x, long double y, long double z ) { return sqrt(x*x + y*y + z*z); } +#endif template inline _LIBCUDACXX_INLINE_VISIBILITY - __enable_if_t::value && is_arithmetic<_A2>::value && is_arithmetic<_A3>::value, - __promote<_A1, _A2, _A3>> - hypot(_A1 __lcpp_x, _A2 __lcpp_y, _A3 __lcpp_z) noexcept -{ - using __result_type = __promote_t<_A1, _A2, _A3>; - static_assert( - (!(is_same<_A1, __result_type>::value && is_same<_A2, __result_type>::value && is_same<_A3, __result_type>::value)), - ""); - return ::hypot((__result_type) __lcpp_x, (__result_type) __lcpp_y, (__result_type) __lcpp_z); +__enable_if_t +< + is_arithmetic<_A1>::value && + is_arithmetic<_A2>::value && + is_arithmetic<_A3>::value, + __promote<_A1, _A2, _A3> +> +hypot(_A1 __lcpp_x, _A2 __lcpp_y, _A3 __lcpp_z) noexcept +{ + using __result_type = __promote_t<_A1, _A2, _A3>; + static_assert((!(is_same<_A1, __result_type>::value && + is_same<_A2, __result_type>::value && + is_same<_A3, __result_type>::value)), ""); + return ::hypot((__result_type)__lcpp_x, (__result_type)__lcpp_y, (__result_type)__lcpp_z); } #endif #ifndef _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS -# define _CCCL_CONSTEXPR_CXX14_COMPLEX _CCCL_CONSTEXPR_CXX14 +#define _CCCL_CONSTEXPR_CXX14_COMPLEX _CCCL_CONSTEXPR_CXX14 #else -# define _CCCL_CONSTEXPR_CXX14_COMPLEX +#define _CCCL_CONSTEXPR_CXX14_COMPLEX #endif // _LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isnan(_A1 __lcpp_x) noexcept { #if defined(_CCCL_CUDACC_BELOW_11_8) - return __isnan(__lcpp_x); + return __isnan(__lcpp_x); #elif __has_builtin(__builtin_isnan) - // nvcc at times has issues determining the type of __lcpp_x - return __builtin_isnan(static_cast(__lcpp_x)); + // nvcc at times has issues determining the type of __lcpp_x + return __builtin_isnan(static_cast(__lcpp_x)); #else - 
return ::isnan(__lcpp_x); + return ::isnan(__lcpp_x); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isnan(_A1 __lcpp_x) noexcept { - return ::isnan(__lcpp_x); + return ::isnan(__lcpp_x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isinf(_A1 __lcpp_x) noexcept { #if defined(_CCCL_CUDACC_BELOW_11_8) - return __isinf(__lcpp_x); + return __isinf(__lcpp_x); #elif __has_builtin(__builtin_isinf) - // nvcc at times has issues determining the type of __lcpp_x - return __builtin_isinf(static_cast(__lcpp_x)); + // nvcc at times has issues determining the type of __lcpp_x + return __builtin_isinf(static_cast(__lcpp_x)); #else - return ::isinf(__lcpp_x); + return ::isinf(__lcpp_x); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isinf(_A1 __lcpp_x) noexcept { - return ::isinf(__lcpp_x); + return ::isinf(__lcpp_x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isfinite(_A1 __lcpp_x) noexcept { #if defined(_CCCL_CUDACC_BELOW_11_8) - return !__isinf(__lcpp_x) && !__isnan(__lcpp_x); + return !__isinf(__lcpp_x) && !__isnan(__lcpp_x); #elif __has_builtin(__builtin_isfinite) - // nvcc at times has issues determining the type of __lcpp_x - return __builtin_isfinite(static_cast(__lcpp_x)); + // nvcc at times has issues determining the type of __lcpp_x + return __builtin_isfinite(static_cast(__lcpp_x)); #else - return ::isfinite(__lcpp_x); + return ::isfinite(__lcpp_x); #endif } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t::value, bool> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr __enable_if_t::value, bool> __constexpr_isfinite(_A1 __lcpp_x) noexcept { - return isfinite(__lcpp_x); + return isfinite(__lcpp_x); } #if defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) template -_LIBCUDACXX_INLINE_VISIBILITY _A1 __constexpr_copysign(_A1 __x, _A1 __y) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_A1 __constexpr_copysign(_A1 __x, _A1 __y) noexcept { - return ::copysign(__x, __y); + return ::copysign(__x, __y); } #else -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 float -__constexpr_copysign(float __x, float __y) noexcept -{ - return __builtin_copysignf(__x, __y); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 float __constexpr_copysign(float __x, float __y) noexcept { + return __builtin_copysignf(__x, __y); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 double -__constexpr_copysign(double __x, double __y) noexcept -{ - return __builtin_copysign(__x, __y); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 double __constexpr_copysign(double __x, double __y) noexcept { + return __builtin_copysign(__x, __y); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 long double -__constexpr_copysign(long double __x, long double __y) noexcept -{ - return __builtin_copysignl(__x, __y); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 long double __constexpr_copysign(long double __x, long double __y) noexcept { 
+ return __builtin_copysignl(__x, __y); } template -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - __enable_if_t::value && is_arithmetic<_A2>::value, __promote_t<_A1, _A2>> - __constexpr_copysign(_A1 __x, _A2 __y) noexcept -{ - using __result_type = __promote_t<_A1, _A2>; - static_assert((!(_IsSame<_A1, __result_type>::value && _IsSame<_A2, __result_type>::value)), ""); - return __builtin_copysign((__result_type) __x, (__result_type) __y); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 __enable_if_t::value && is_arithmetic<_A2>::value, + __promote_t<_A1, _A2>> + __constexpr_copysign(_A1 __x, _A2 __y) noexcept { + using __result_type = __promote_t<_A1, _A2>; + static_assert((!(_IsSame<_A1, __result_type>::value && _IsSame<_A2, __result_type>::value)), ""); + return __builtin_copysign((__result_type)__x, (__result_type)__y); } #endif // !_CCCL_COMPILER_MSVC #if defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) template -_LIBCUDACXX_INLINE_VISIBILITY _A1 __constexpr_fabs(_A1 __x) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_A1 __constexpr_fabs(_A1 __x) noexcept { - return ::fabs(__x); + return ::fabs(__x); } #else -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 float -__constexpr_fabs(float __x) noexcept -{ - return __builtin_fabsf(__x); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 float __constexpr_fabs(float __x) noexcept { + return __builtin_fabsf(__x); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 double -__constexpr_fabs(double __x) noexcept -{ - return __builtin_fabs(__x); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 double __constexpr_fabs(double __x) noexcept { + return __builtin_fabs(__x); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 long double -__constexpr_fabs(long double __x) noexcept -{ - return __builtin_fabsl(__x); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 long double __constexpr_fabs(long double __x) noexcept { + return __builtin_fabsl(__x); } template ::value, int> = 0> -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 double -__constexpr_fabs(_Tp __x) noexcept -{ - return __builtin_fabs(static_cast(__x)); +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14 double __constexpr_fabs(_Tp __x) noexcept { + return __builtin_fabs(static_cast(__x)); } #endif // !_CCCL_COMPILER_MSVC #if defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) template -_LIBCUDACXX_INLINE_VISIBILITY _A1 __constexpr_fmax(_A1 __x, _A1 __y) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +_A1 __constexpr_fmax(_A1 __x, _A1 __y) noexcept { - return ::fmax(__x, __y); + return ::fmax(__x, __y); } #else -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX float -__constexpr_fmax(float __x, float __y) noexcept -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) - if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) - { +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX float __constexpr_fmax(float __x, float __y) noexcept { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) + if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) { 
if (__constexpr_isnan(__x)) - { return __y; - } if (__constexpr_isnan(__y)) - { return __x; - } return __x < __y ? __y : __x; } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) return __builtin_fmaxf(__x, __y); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX double -__constexpr_fmax(double __x, double __y) noexcept -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) - if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) - { +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX double __constexpr_fmax(double __x, double __y) noexcept { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) + if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) { if (__constexpr_isnan(__x)) - { return __y; - } if (__constexpr_isnan(__y)) - { return __x; - } return __x < __y ? __y : __x; } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) return __builtin_fmax(__x, __y); } -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX long double -__constexpr_fmax(long double __x, long double __y) noexcept -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) - if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) - { +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX long double __constexpr_fmax(long double __x, long double __y) noexcept { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) + if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) { if (__constexpr_isnan(__x)) - { return __y; - } if (__constexpr_isnan(__y)) - { return __x; - } return __x < __y ? 
__y : __x; } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) return __builtin_fmax(__x, __y); } template ::value && is_arithmetic<_Up>::value, int> = 0> -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX __promote_t<_Tp, _Up> -__constexpr_fmax(_Tp __x, _Up __y) noexcept -{ +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX __promote_t<_Tp, _Up> +__constexpr_fmax(_Tp __x, _Up __y) noexcept { using __result_type = __promote_t<_Tp, _Up>; return _CUDA_VSTD::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y)); } @@ -830,193 +810,156 @@ __constexpr_fmax(_Tp __x, _Up __y) noexcept #if defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_CUDA_COMPILER_CLANG) template -_LIBCUDACXX_INLINE_VISIBILITY _A1 __constexpr_logb(_A1 __x) +_LIBCUDACXX_INLINE_VISIBILITY +_A1 __constexpr_logb(_A1 __x) { - return ::logb(__x); + return ::logb(__x); } #else template -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX _Tp -__constexpr_logb(_Tp __x) -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) - if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) - { - if (__x == _Tp(0)) - { +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX _Tp __constexpr_logb(_Tp __x) { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) + if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) { + if (__x == _Tp(0)) { // raise FE_DIVBYZERO return -numeric_limits<_Tp>::infinity(); } if (__constexpr_isinf(__x)) - { return numeric_limits<_Tp>::infinity(); - } if (__constexpr_isnan(__x)) - { return numeric_limits<_Tp>::quiet_NaN(); - } - __x = __constexpr_fabs(__x); + __x = __constexpr_fabs(__x); unsigned long long __exp = 0; - while (__x >= _Tp(numeric_limits<_Tp>::radix)) - { + while (__x >= _Tp(numeric_limits<_Tp>::radix)) { __x /= numeric_limits<_Tp>::radix; __exp += 1; } return _Tp(__exp); } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) return __builtin_logb(__x); } #endif // !_MSVC #if defined(_CCCL_COMPILER_MSVC) || defined(_CCCL_COMPILER_NVRTC) || defined(_CCCL_CUDA_COMPILER_CLANG) template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp __constexpr_scalbn(_Tp __x, int __i) -{ - return static_cast<_Tp>(::scalbn(static_cast(__x), __i)); +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp __constexpr_scalbn(_Tp __x, int __i) { + return static_cast<_Tp>(::scalbn(static_cast(__x), __i)); } template <> -inline _LIBCUDACXX_INLINE_VISIBILITY float __constexpr_scalbn(float __x, int __i) -{ - return ::scalbnf(__x, __i); +inline _LIBCUDACXX_INLINE_VISIBILITY +float __constexpr_scalbn(float __x, int __i) { + return ::scalbnf(__x, __i); } template <> -inline _LIBCUDACXX_INLINE_VISIBILITY double __constexpr_scalbn(double __x, int __i) -{ - return ::scalbn(__x, __i); +inline _LIBCUDACXX_INLINE_VISIBILITY +double __constexpr_scalbn(double __x, int __i) { + return ::scalbn(__x, __i); } -# ifndef _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#ifndef _LIBCUDACXX_HAS_NO_LONG_DOUBLE template <> -inline _LIBCUDACXX_INLINE_VISIBILITY long double __constexpr_scalbn(long double __x, int __i) -{ - return ::scalbnl(__x, __i); +inline _LIBCUDACXX_INLINE_VISIBILITY +long double __constexpr_scalbn(long double __x, int __i) { + return ::scalbnl(__x, __i); 
} -# endif // _LIBCUDACXX_HAS_NO_LONG_DOUBLE +#endif // _LIBCUDACXX_HAS_NO_LONG_DOUBLE #else template -inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX _Tp -__constexpr_scalbn(_Tp __x, int __exp) -{ -# if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) - if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) - { +inline _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY +_CCCL_CONSTEXPR_CXX14_COMPLEX _Tp __constexpr_scalbn(_Tp __x, int __exp) { +#if defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) && !defined(_LIBCUDACXX_HAS_NO_CONSTEXPR_COMPLEX_OPERATIONS) + if (_LIBCUDACXX_IS_CONSTANT_EVALUATED()) { if (__x == _Tp(0)) - { return __x; - } if (__constexpr_isinf(__x)) - { return __x; - } if (_Tp(__exp) == _Tp(0)) - { return __x; - } if (__constexpr_isnan(__x)) - { return numeric_limits<_Tp>::quiet_NaN(); - } _Tp __mult(1); - if (__exp > 0) - { + if (__exp > 0) { __mult = numeric_limits<_Tp>::radix; --__exp; - } - else - { + } else { ++__exp; __exp = -__exp; __mult /= numeric_limits<_Tp>::radix; } - while (__exp > 0) - { - if (!(__exp & 1)) - { + while (__exp > 0) { + if (!(__exp & 1)) { __mult *= __mult; __exp >>= 1; - } - else - { + } else { __x *= __mult; --__exp; } } return __x; } -# endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) +#endif // defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED) return __builtin_scalbn(__x, __exp); } #endif // !_CCCL_COMPILER_MSVC #if _CCCL_STD_VER > 2017 template -_LIBCUDACXX_INLINE_VISIBILITY constexpr _Fp __lerp(_Fp __a, _Fp __b, _Fp __t) noexcept -{ - if ((__a <= 0 && __b >= 0) || (__a >= 0 && __b <= 0)) - { - return __t * __b + (1 - __t) * __a; - } - - if (__t == 1) - { - return __b; - } - const _Fp __x = __a + __t * (__b - __a); - if (__t > 1 == __b > __a) - { - return __b < __x ? __x : __b; - } - else - { - return __x < __b ? __x : __b; - } +_LIBCUDACXX_INLINE_VISIBILITY +constexpr _Fp __lerp(_Fp __a, _Fp __b, _Fp __t) noexcept { + if ((__a <= 0 && __b >= 0) || (__a >= 0 && __b <= 0)) + return __t * __b + (1 - __t) * __a; + + if (__t == 1) return __b; + const _Fp __x = __a + __t * (__b - __a); + if (__t > 1 == __b > __a) + return __b < __x ? __x : __b; + else + return __x < __b ? 
__x : __b; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr float lerp(float __a, float __b, float __t) noexcept -{ - return __lerp(__a, __b, __t); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr float +lerp(float __a, float __b, float __t) noexcept { return __lerp(__a, __b, __t); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr double lerp(double __a, double __b, double __t) noexcept -{ - return __lerp(__a, __b, __t); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr double +lerp(double __a, double __b, double __t) noexcept { return __lerp(__a, __b, __t); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr long double lerp(long double __a, long double __b, long double __t) noexcept -{ - return __lerp(__a, __b, __t); -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr long double +lerp(long double __a, long double __b, long double __t) noexcept { return __lerp(__a, __b, __t); } #endif // _CCCL_STD_VER > 2017 -template ::digits > numeric_limits<_IntT>::digits), - int _Bits = (numeric_limits<_IntT>::digits - numeric_limits<_FloatT>::digits)> -_LIBCUDACXX_INLINE_VISIBILITY constexpr _IntT __max_representable_int_for_float() noexcept -{ +template ::digits > numeric_limits<_IntT>::digits), + int _Bits = (numeric_limits<_IntT>::digits - numeric_limits<_FloatT>::digits)> +_LIBCUDACXX_INLINE_VISIBILITY +constexpr _IntT __max_representable_int_for_float() noexcept { static_assert(is_floating_point<_FloatT>::value, "must be a floating point type"); static_assert(is_integral<_IntT>::value, "must be an integral type"); static_assert(numeric_limits<_FloatT>::radix == 2, "FloatT has incorrect radix"); #ifdef _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE - static_assert( - (_IsSame<_FloatT, float>::value || _IsSame<_FloatT, double>::value || _IsSame<_FloatT, long double>::value), - "unsupported floating point type"); + static_assert((_IsSame<_FloatT, float>::value || _IsSame<_FloatT, double>::value + || _IsSame<_FloatT,long double>::value), "unsupported floating point type"); #else - static_assert((_IsSame<_FloatT, float>::value || _IsSame<_FloatT, double>::value), "unsupported floating point type"); + static_assert((_IsSame<_FloatT, float>::value || _IsSame<_FloatT, double>::value), + "unsupported floating point type"); #endif - return _FloatBigger ? numeric_limits<_IntT>::max() : (numeric_limits<_IntT>::max() >> _Bits << _Bits); + return _FloatBigger ? numeric_limits<_IntT>::max() : (numeric_limits<_IntT>::max() >> _Bits << _Bits); } // Convert a floating point number to the specified integral type after @@ -1024,16 +967,13 @@ _LIBCUDACXX_INLINE_VISIBILITY constexpr _IntT __max_representable_int_for_float( // // The behavior is undefined if `__r` is NaN. 
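// Illustrative, stand-alone sketch of the clamping rule described in the comment
// above (the patch's own __clamp_to_integral follows below). It is written against
// the standard <cmath>/<limits> headers so it compiles on its own; the name
// clamp_to_int_sketch and its exact layout are this example's, not the library's.
#include <cmath>
#include <limits>

template <class IntT, class RealT>
IntT clamp_to_int_sketch(RealT r) noexcept // precondition: r is not NaN
{
  using Lim = std::numeric_limits<IntT>;
  // Largest IntT value that RealT can represent exactly: when RealT has fewer
  // mantissa digits than IntT has value bits, clear the low bits of IntT's max.
  constexpr bool float_bigger = std::numeric_limits<RealT>::digits > Lim::digits;
  constexpr int  dropped_bits = float_bigger ? 0 : Lim::digits - std::numeric_limits<RealT>::digits;
  constexpr IntT max_val      = float_bigger ? Lim::max() : IntT((Lim::max() >> dropped_bits) << dropped_bits);

  // Anything at or beyond the next representable value after max_val would overflow.
  if (r >= std::nextafter(static_cast<RealT>(max_val), std::numeric_limits<RealT>::infinity()))
    return Lim::max();
  if (r <= static_cast<RealT>(Lim::lowest()))
    return Lim::min();
  return static_cast<IntT>(r); // in range: ordinary truncation toward zero
}

// e.g. clamp_to_int_sketch<int>(3.9f) == 3, and
//      clamp_to_int_sketch<int>(1.0e30f) == std::numeric_limits<int>::max()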
template -_LIBCUDACXX_INLINE_VISIBILITY _IntT __clamp_to_integral(_RealT __r) noexcept -{ - using _Lim = _CUDA_VSTD::numeric_limits<_IntT>; +_LIBCUDACXX_INLINE_VISIBILITY +_IntT __clamp_to_integral(_RealT __r) noexcept { + using _Lim = _CUDA_VSTD::numeric_limits<_IntT>; const _IntT _MaxVal = _CUDA_VSTD::__max_representable_int_for_float<_IntT, _RealT>(); - if (__r >= ::nextafter(static_cast<_RealT>(_MaxVal), INFINITY)) - { + if (__r >= ::nextafter(static_cast<_RealT>(_MaxVal), INFINITY)) { return _Lim::max(); - } - else if (__r <= _Lim::lowest()) - { + } else if (__r <= _Lim::lowest()) { return _Lim::min(); } return static_cast<_IntT>(__r); @@ -1043,4 +983,4 @@ _LIBCUDACXX_END_NAMESPACE_STD #include -#endif // _LIBCUDACXX_CMATH +#endif // _LIBCUDACXX_CMATH diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/complex b/libcudacxx/include/cuda/std/detail/libcxx/include/complex index a6fb816477b..bd6b167ca90 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/complex +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/complex @@ -242,6 +242,7 @@ template complex tanh (const complex&); # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include @@ -249,11 +250,10 @@ template complex tanh (const complex&); #include #include #include +#include #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include #include @@ -424,15 +424,13 @@ public: }; template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX complex<_Tp>& -operator*=(complex<_Tp>& __lhs, const complex<_Up>& __rhs) +_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX complex<_Tp>& operator*=(complex<_Tp>& __lhs, const complex<_Up>& __rhs) { __lhs = __lhs * complex<_Tp>(__rhs.real(), __rhs.imag()); return __lhs; } template -_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX complex<_Tp>& -operator/=(complex<_Tp>& __lhs, const complex<_Up>& __rhs) +_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14_COMPLEX complex<_Tp>& operator/=(complex<_Tp>& __lhs, const complex<_Up>& __rhs) { __lhs = __lhs / complex<_Tp>(__rhs.real(), __rhs.imag()); return __lhs; @@ -842,7 +840,7 @@ inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Tp real(const comple // 26.3.7 values: -template +template struct __libcpp_complex_overload_traits {}; @@ -906,14 +904,14 @@ inline _LIBCUDACXX_INLINE_VISIBILITY _Tp arg(const complex<_Tp>& __c) #ifdef _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE template -inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value, long double> arg(_Tp __re) +inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t< is_same<_Tp, long double>::value, long double > arg(_Tp __re) { return _CUDA_VSTD::atan2l(0.L, __re); } #endif // _LIBCUDACXX_HAS_COMPLEX_LONG_DOUBLE template -inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value || is_same<_Tp, double>::value, double> +inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t< is_integral<_Tp>::value || is_same<_Tp, double>::value, double > arg(_Tp __re) { // integrals need to be promoted to double @@ -921,7 +919,7 @@ arg(_Tp __re) } template -inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t::value, float> arg(_Tp __re) +inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t< is_same<_Tp, float>::value, float > arg(_Tp __re) { return _CUDA_VSTD::atan2f(0.F, __re); } @@ -1117,7 +1115,7 @@ inline _LIBCUDACXX_INLINE_VISIBILITY complex<__promote_t<_Tp, _Up>> pow(const co } template -inline _LIBCUDACXX_INLINE_VISIBILITY 
__enable_if_t<__is_complex_arithmetic<_Up>::value, complex<__promote_t<_Tp, _Up>>> +inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t< __is_complex_arithmetic<_Up>::value, complex<__promote_t<_Tp, _Up>> > pow(const complex<_Tp>& __x, const _Up& __y) { using __result_type = complex<__promote_t<_Tp, _Up>>; @@ -1125,7 +1123,7 @@ pow(const complex<_Tp>& __x, const _Up& __y) } template -inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t<__is_complex_arithmetic<_Tp>::value, complex<__promote_t<_Tp, _Up>>> +inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t< __is_complex_arithmetic<_Tp>::value, complex<__promote_t<_Tp, _Up>> > pow(const _Tp& __x, const complex<_Up>& __y) { using __result_type = complex<__promote_t<_Tp, _Up>>; diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/concepts b/libcudacxx/include/cuda/std/detail/libcxx/include/concepts index 24995197262..15f041190c0 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/concepts +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/concepts @@ -140,6 +140,7 @@ namespace std { # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include @@ -165,7 +166,7 @@ namespace std { #include #include #include -#include // all public C++ headers provide the assertion handler + #include #endif // _LIBCUDACXX_CONCEPTS diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cstddef b/libcudacxx/include/cuda/std/detail/libcxx/include/cstddef index af935c8a3d2..c4de4ba13b1 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cstddef +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cstddef @@ -43,20 +43,21 @@ Types: # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include +#include + _LIBCUDACXX_BEGIN_NAMESPACE_STD using ::ptrdiff_t; using ::size_t; -#if defined(__CLANG_MAX_ALIGN_T_DEFINED) || defined(_GCC_MAX_ALIGN_T) || defined(__DEFINED_max_align_t) \ - || defined(__NetBSD__) +#if defined(__CLANG_MAX_ALIGN_T_DEFINED) || defined(_GCC_MAX_ALIGN_T) || \ + defined(__DEFINED_max_align_t) || defined(__NetBSD__) // Re-use the compiler's max_align_t where possible. 
using ::max_align_t; #else @@ -66,95 +67,98 @@ typedef long double max_align_t; _LIBCUDACXX_END_NAMESPACE_STD #if _CCCL_STD_VER > 2011 -# ifdef _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION +#ifdef _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION _LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION -# else -namespace std // purposefully not versioned -{ -# endif //_LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION -enum class byte : unsigned char +#else +namespace std // purposefully not versioned { -}; +#endif //_LIBCUDACXX_BEGIN_NAMESPACE_STD_NOVERSION +enum class byte : unsigned char {}; -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte operator|(byte __lhs, byte __rhs) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte operator| (byte __lhs, byte __rhs) noexcept { - return static_cast( - static_cast(static_cast(__lhs) | static_cast(__rhs))); + return static_cast( + static_cast( + static_cast(__lhs) | static_cast(__rhs) + )); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte& operator|=(byte& __lhs, byte __rhs) noexcept -{ - return __lhs = __lhs | __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte& operator|=(byte& __lhs, byte __rhs) noexcept +{ return __lhs = __lhs | __rhs; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte operator&(byte __lhs, byte __rhs) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte operator& (byte __lhs, byte __rhs) noexcept { - return static_cast( - static_cast(static_cast(__lhs) & static_cast(__rhs))); + return static_cast( + static_cast( + static_cast(__lhs) & static_cast(__rhs) + )); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte& operator&=(byte& __lhs, byte __rhs) noexcept -{ - return __lhs = __lhs & __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte& operator&=(byte& __lhs, byte __rhs) noexcept +{ return __lhs = __lhs & __rhs; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte operator^(byte __lhs, byte __rhs) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte operator^ (byte __lhs, byte __rhs) noexcept { - return static_cast( - static_cast(static_cast(__lhs) ^ static_cast(__rhs))); + return static_cast( + static_cast( + static_cast(__lhs) ^ static_cast(__rhs) + )); } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte& operator^=(byte& __lhs, byte __rhs) noexcept -{ - return __lhs = __lhs ^ __rhs; -} +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte& operator^=(byte& __lhs, byte __rhs) noexcept +{ return __lhs = __lhs ^ __rhs; } -_LIBCUDACXX_INLINE_VISIBILITY constexpr byte operator~(byte __b) noexcept +_LIBCUDACXX_INLINE_VISIBILITY +constexpr byte operator~ (byte __b) noexcept { - return static_cast(static_cast(~static_cast(__b))); + return static_cast( + static_cast( + ~static_cast(__b) + )); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t, byte>& -operator<<=(byte& __lhs, _Integer __shift) noexcept -{ - return __lhs = __lhs << __shift; -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __enable_if_t, byte> & + operator<<=(byte& __lhs, _Integer __shift) noexcept + { return __lhs = __lhs << __shift; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t, byte> -operator<<(byte __lhs, _Integer __shift) noexcept -{ - return static_cast(static_cast(static_cast(__lhs) << __shift)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __enable_if_t, byte> + operator<< (byte __lhs, _Integer __shift) noexcept + { return static_cast(static_cast(static_cast(__lhs) << __shift)); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t, byte>& -operator>>=(byte& __lhs, _Integer __shift) noexcept -{ - return __lhs = __lhs >> __shift; -} + 
_LIBCUDACXX_INLINE_VISIBILITY + constexpr __enable_if_t, byte> & + operator>>=(byte& __lhs, _Integer __shift) noexcept + { return __lhs = __lhs >> __shift; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t, byte> -operator>>(byte __lhs, _Integer __shift) noexcept -{ - return static_cast(static_cast(static_cast(__lhs) >> __shift)); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __enable_if_t, byte> + operator>> (byte __lhs, _Integer __shift) noexcept + { return static_cast(static_cast(static_cast(__lhs) >> __shift)); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t, _Integer> to_integer(byte __b) noexcept -{ - return static_cast<_Integer>(__b); -} + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __enable_if_t, _Integer> + to_integer(byte __b) noexcept { return static_cast<_Integer>(__b); } -# ifdef _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION +#ifdef _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION _LIBCUDACXX_END_NAMESPACE_STD_NOVERSION -# else +#else } -# endif //_LIBCUDACXX_END_NAMESPACE_STD_NOVERSION +#endif //_LIBCUDACXX_END_NAMESPACE_STD_NOVERSION #endif // _CCCL_STD_VER > 2011 #include //__cuda_std__ -#endif // _LIBCUDACXX_CSTDDEF +#endif // _LIBCUDACXX_CSTDDEF diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cstdint b/libcudacxx/include/cuda/std/detail/libcxx/include/cstdint index 4e42cac7d34..b403ce5646b 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cstdint +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cstdint @@ -155,51 +155,51 @@ Types: #include #ifndef _CCCL_COMPILER_NVRTC -# include +#include #endif // _CCCL_COMPILER_NVRTC #include _LIBCUDACXX_BEGIN_NAMESPACE_STD -using ::int16_t; -using ::int32_t; -using ::int64_t; -using ::int8_t; +using::int8_t; +using::int16_t; +using::int32_t; +using::int64_t; -using ::uint16_t; -using ::uint32_t; -using ::uint64_t; -using ::uint8_t; +using::uint8_t; +using::uint16_t; +using::uint32_t; +using::uint64_t; -using ::int_least16_t; -using ::int_least32_t; -using ::int_least64_t; -using ::int_least8_t; +using::int_least8_t; +using::int_least16_t; +using::int_least32_t; +using::int_least64_t; -using ::uint_least16_t; -using ::uint_least32_t; -using ::uint_least64_t; -using ::uint_least8_t; +using::uint_least8_t; +using::uint_least16_t; +using::uint_least32_t; +using::uint_least64_t; -using ::int_fast16_t; -using ::int_fast32_t; -using ::int_fast64_t; -using ::int_fast8_t; +using::int_fast8_t; +using::int_fast16_t; +using::int_fast32_t; +using::int_fast64_t; -using ::uint_fast16_t; -using ::uint_fast32_t; -using ::uint_fast64_t; -using ::uint_fast8_t; +using::uint_fast8_t; +using::uint_fast16_t; +using::uint_fast32_t; +using::uint_fast64_t; -using ::intptr_t; -using ::uintptr_t; +using::intptr_t; +using::uintptr_t; -using ::intmax_t; -using ::uintmax_t; +using::intmax_t; +using::uintmax_t; _LIBCUDACXX_END_NAMESPACE_STD #include -#endif // _LIBCUDACXX_CSTDINT +#endif // _LIBCUDACXX_CSTDINT diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cstdlib b/libcudacxx/include/cuda/std/detail/libcxx/include/cstdlib index 8403b33e699..8490fe0900d 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cstdlib +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cstdlib @@ -92,7 +92,7 @@ void *aligned_alloc(size_t alignment, size_t size); // C11 #endif // no system header #ifndef _CCCL_COMPILER_NVRTC -# include +# include #endif // !_CCCL_COMPILER_NVRTC #include @@ -100,104 +100,101 @@ void *aligned_alloc(size_t alignment, size_t size); // C11 #if defined(_CCCL_CUDA_COMPILER_CLANG) # 
define _LIBCUDACXX_UNREACHABLE() __builtin_unreachable() #elif defined(__CUDA_ARCH__) -# if defined(_CCCL_CUDACC_BELOW_11_2) -# define _LIBCUDACXX_UNREACHABLE() __trap() -# elif defined(_CCCL_CUDACC_BELOW_11_3) -# define _LIBCUDACXX_UNREACHABLE() __builtin_assume(false) -# else -# define _LIBCUDACXX_UNREACHABLE() __builtin_unreachable() -# endif // CUDACC above 11.4 +#if defined(_CCCL_CUDACC_BELOW_11_2) +# define _LIBCUDACXX_UNREACHABLE() __trap() +#elif defined(_CCCL_CUDACC_BELOW_11_3) +# define _LIBCUDACXX_UNREACHABLE() __builtin_assume(false) +#else +# define _LIBCUDACXX_UNREACHABLE() __builtin_unreachable() +#endif // CUDACC above 11.4 #else // ^^^ __CUDA_ARCH__ ^^^ / vvv !__CUDA_ARCH__ vvv -# if defined(_CCCL_COMPILER_MSVC_2017) -template -_LIBCUDACXX_INLINE_VISIBILITY __declspec(noreturn) void __unreachable_fallback() -{ - __assume(0); -} -# define _LIBCUDACXX_UNREACHABLE() __unreachable_fallback() -# elif defined(_CCCL_COMPILER_MSVC) -# define _LIBCUDACXX_UNREACHABLE() __assume(0) -# elif defined(_CCCL_COMPILER_GCC) || __has_builtin(__builtin_unreachable) -# define _LIBCUDACXX_UNREACHABLE() __builtin_unreachable() -# else // Other compilers -# define _LIBCUDACXX_UNREACHABLE() ::abort() -# endif // Other compilers +#if defined(_CCCL_COMPILER_MSVC_2017) +template +_LIBCUDACXX_INLINE_VISIBILITY __declspec(noreturn) void __unreachable_fallback() { __assume(0); } +# define _LIBCUDACXX_UNREACHABLE() __unreachable_fallback() +#elif defined(_CCCL_COMPILER_MSVC) +# define _LIBCUDACXX_UNREACHABLE() __assume(0) +#elif defined(_CCCL_COMPILER_GCC) || __has_builtin(__builtin_unreachable) +# define _LIBCUDACXX_UNREACHABLE() __builtin_unreachable() +#else // Other compilers +# define _LIBCUDACXX_UNREACHABLE() ::abort() +#endif // Other compilers #endif // !__CUDA_ARCH__ #ifdef _CCCL_COMPILER_NVHPC -# define _LIBCUDACXX_UNREACHABLE_AFTER_SWITCH() +#define _LIBCUDACXX_UNREACHABLE_AFTER_SWITCH() #else -# define _LIBCUDACXX_UNREACHABLE_AFTER_SWITCH() _LIBCUDACXX_UNREACHABLE() +#define _LIBCUDACXX_UNREACHABLE_AFTER_SWITCH() _LIBCUDACXX_UNREACHABLE() #endif _LIBCUDACXX_BEGIN_NAMESPACE_STD #if !defined(_CCCL_COMPILER_NVRTC) +using ::size_t; using ::div_t; using ::ldiv_t; -using ::size_t; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::lldiv_t; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG using ::atof; using ::atoi; using ::atol; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::atoll; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG using ::strtod; using ::strtof; -using ::strtol; using ::strtold; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +using ::strtol; +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::strtoll; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG using ::strtoul; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::strtoull; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG -using ::_Exit; -using ::abort; -using ::atexit; +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG +using ::rand; +using ::srand; using ::calloc; -using ::exit; using ::free; using ::malloc; -using ::rand; using ::realloc; -using ::srand; -# ifndef _LIBCUDACXX_WINDOWS_STORE_APP +using ::abort; +using ::atexit; +using ::exit; +using ::_Exit; +#ifndef _LIBCUDACXX_WINDOWS_STORE_APP using ::getenv; using ::system; -# endif -using ::abs; +#endif using ::bsearch; -using ::labs; using ::qsort; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +using ::abs; +using 
::labs; +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::llabs; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG using ::div; using ::ldiv; -# ifndef _LIBCUDACXX_HAS_NO_LONG_LONG +#ifndef _LIBCUDACXX_HAS_NO_LONG_LONG using ::lldiv; -# endif // _LIBCUDACXX_HAS_NO_LONG_LONG +#endif // _LIBCUDACXX_HAS_NO_LONG_LONG using ::mblen; -using ::mbstowcs; using ::mbtowc; -using ::wcstombs; using ::wctomb; -# if defined(_LIBCUDACXX_HAS_QUICK_EXIT) +using ::mbstowcs; +using ::wcstombs; +#if defined(_LIBCUDACXX_HAS_QUICK_EXIT) using ::at_quick_exit; using ::quick_exit; -# endif -# if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_C11_FEATURES) +#endif +#if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_C11_FEATURES) using ::aligned_alloc; -# endif +#endif #endif // !defined(_CCCL_COMPILER_NVRTC) _LIBCUDACXX_END_NAMESPACE_STD #include //__cuda_std__ -#endif // _LIBCUDACXX_CSTDLIB +#endif // _LIBCUDACXX_CSTDLIB diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/cstring b/libcudacxx/include/cuda/std/detail/libcxx/include/cstring index 6b3d55163c7..a6fb4c326b1 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/cstring +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/cstring @@ -67,9 +67,9 @@ size_t strlen(const char* s); #endif // no system header #if defined(_CCCL_COMPILER_MSVC) -# include +#include #else // ^^^ _CCCL_COMPILER_MSVC ^^^ / vvv !_CCCL_COMPILER_MSVC vvv -# include +#include #endif // !_CCCL_COMPILER_MSVC _LIBCUDACXX_BEGIN_NAMESPACE_STD @@ -96,9 +96,9 @@ using ::strrchr; using ::strspn; using ::strstr; using ::strxfrm; -# ifndef _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS +#ifndef _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS using ::strtok; -# endif +#endif using ::strerror; using ::strlen; #endif // _CCCL_COMPILER_NVRTC diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/ctime b/libcudacxx/include/cuda/std/detail/libcxx/include/ctime index f23b290ef2c..0377595fb0a 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/ctime +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/ctime @@ -56,7 +56,7 @@ int timespec_get( struct timespec *ts, int base); // C++17 #endif // no system header #ifndef _CCCL_COMPILER_NVRTC -# include +#include #else typedef long long int time_t; #endif // _CCCL_COMPILER_NVRTC @@ -72,27 +72,27 @@ using ::time_t; #ifndef _CCCL_COMPILER_NVRTC using ::tm; -# if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_C11_FEATURES) +#if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_C11_FEATURES) using ::timespec; -# endif +#endif using ::clock; using ::difftime; using ::mktime; using ::time; -# ifndef _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS +#ifndef _LIBCUDACXX_HAS_NO_THREAD_UNSAFE_C_FUNCTIONS using ::asctime; using ::ctime; using ::gmtime; using ::localtime; -# endif +#endif using ::strftime; -# if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_TIMESPEC_GET) +#if _CCCL_STD_VER > 2014 && defined(_LIBCUDACXX_HAS_TIMESPEC_GET) using ::timespec_get; -# endif +#endif #endif // _CCCL_COMPILER_NVRTC _LIBCUDACXX_END_NAMESPACE_STD #include //__cuda_std__ -#endif // _LIBCUDACXX_CTIME +#endif // _LIBCUDACXX_CTIME diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/exception b/libcudacxx/include/cuda/std/detail/libcxx/include/exception index ad8948e31c1..5c7054df328 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/exception +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/exception @@ -88,10 +88,11 @@ template void rethrow_if_nested(const E& e); # pragma system_header 
#endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include -#include // all public C++ headers provide the assertion handler + #include -#endif // _LIBCUDACXX_EXCEPTION +#endif // _LIBCUDACXX_EXCEPTION diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/expected b/libcudacxx/include/cuda/std/detail/libcxx/include/expected index 14c34ed9533..e1697de1af6 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/expected +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/expected @@ -33,6 +33,7 @@ namespace std { #include #include #include + #include #endif // _LIBCUDACXX_EXPECTED diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/functional b/libcudacxx/include/cuda/std/detail/libcxx/include/functional index 0c83d9c8d93..d5b0192789c 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/functional +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/functional @@ -298,8 +298,7 @@ public: }; template -pointer_to_binary_function ptr_fun(Result (*f)(Arg1,Arg2)); // deprecated in C++11, removed in -C++17 +pointer_to_binary_function ptr_fun(Result (*f)(Arg1,Arg2)); // deprecated in C++11, removed in C++17 template // deprecated in C++11, removed in C++17 class mem_fun_t : public unary_function @@ -317,9 +316,8 @@ public: S operator()(T* p, A x) const; }; -template mem_fun_t mem_fun(S (T::*f)()); // deprecated in C++11, removed in -C++17 template mem_fun1_t mem_fun(S (T::*f)(A)); // deprecated in C++11, removed -in C++17 +template mem_fun_t mem_fun(S (T::*f)()); // deprecated in C++11, removed in C++17 +template mem_fun1_t mem_fun(S (T::*f)(A)); // deprecated in C++11, removed in C++17 template class mem_fun_ref_t : public unary_function // deprecated in C++11, removed in C++17 @@ -337,9 +335,8 @@ public: S operator()(T& p, A x) const; }; -template mem_fun_ref_t mem_fun_ref(S (T::*f)()); // deprecated in C++11, removed -in C++17 template mem_fun1_ref_t mem_fun_ref(S (T::*f)(A)); // deprecated in -C++11, removed in C++17 +template mem_fun_ref_t mem_fun_ref(S (T::*f)()); // deprecated in C++11, removed in C++17 +template mem_fun1_ref_t mem_fun_ref(S (T::*f)(A)); // deprecated in C++11, removed in C++17 template class const_mem_fun_t : public unary_function // deprecated in C++11, removed in C++17 @@ -357,9 +354,8 @@ public: S operator()(const T* p, A x) const; }; -template const_mem_fun_t mem_fun(S (T::*f)() const); // deprecated in C++11, -removed in C++17 template const_mem_fun1_t mem_fun(S (T::*f)(A) const); // -deprecated in C++11, removed in C++17 +template const_mem_fun_t mem_fun(S (T::*f)() const); // deprecated in C++11, removed in C++17 +template const_mem_fun1_t mem_fun(S (T::*f)(A) const); // deprecated in C++11, removed in C++17 template class const_mem_fun_ref_t : public unary_function // deprecated in C++11, removed in C++17 @@ -377,9 +373,8 @@ public: S operator()(const T& p, A x) const; }; -template const_mem_fun_ref_t mem_fun_ref(S (T::*f)() const); // deprecated in -C++11, removed in C++17 template const_mem_fun1_ref_t mem_fun_ref(S (T::*f)(A) -const); // deprecated in C++11, removed in C++17 +template const_mem_fun_ref_t mem_fun_ref(S (T::*f)() const); // deprecated in C++11, removed in C++17 +template const_mem_fun1_ref_t mem_fun_ref(S (T::*f)(A) const); // deprecated in C++11, removed in C++17 template unspecified mem_fn(R T::*); @@ -513,16 +508,17 @@ POLICY: For non-variadic implementations, the number of arguments is limited #endif // no system header #ifdef __cuda_std__ -# 
ifndef _CCCL_COMPILER_NVRTC -# include -# endif // _CCCL_COMPILER_NVRTC +#ifndef _CCCL_COMPILER_NVRTC +#include +#endif // _CCCL_COMPILER_NVRTC #endif // __cuda_std__ +#include #include #include -#include #include #include +#include #include #include #include @@ -545,8 +541,8 @@ POLICY: For non-variadic implementations, the number of arguments is limited #include #include #include -#include + #include // for forward declarations of vector and string. #include -#endif // _LIBCUDACXX_FUNCTIONAL +#endif // _LIBCUDACXX_FUNCTIONAL diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/initializer_list b/libcudacxx/include/cuda/std/detail/libcxx/include/initializer_list index 7c4d1203f92..fbc752e93fd 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/initializer_list +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/initializer_list @@ -53,11 +53,11 @@ template const E* end(initializer_list il) noexcept; // constexpr in #endif // no system header #if !defined(_CCCL_COMPILER_NVRTC) -# include +#include #endif // !_CCCL_COMPILER_NVRTC _LIBCUDACXX_BEGIN_NAMESPACE_STD -using ::std::initializer_list; + using ::std::initializer_list; _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX_INITIALIZER_LIST +#endif // _LIBCUDACXX_INITIALIZER_LIST diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/iosfwd b/libcudacxx/include/cuda/std/detail/libcxx/include/iosfwd index 833fdb1be0f..8acf2e2e12f 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/iosfwd +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/iosfwd @@ -101,107 +101,110 @@ typedef fpos::state_type> wstreampos; # pragma system_header #endif // no system header -#include #include // all public C++ headers provide the assertion handler +#include #include _LIBCUDACXX_BEGIN_NAMESPACE_STD class _LIBCUDACXX_TYPE_VIS ios_base; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_ios; - -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_streambuf; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_istream; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_ostream; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_iostream; - -template , class _Allocator = allocator<_CharT>> -class _LIBCUDACXX_TEMPLATE_VIS basic_stringbuf; -template , class _Allocator = allocator<_CharT>> -class _LIBCUDACXX_TEMPLATE_VIS basic_istringstream; -template , class _Allocator = allocator<_CharT>> -class _LIBCUDACXX_TEMPLATE_VIS basic_ostringstream; -template , class _Allocator = allocator<_CharT>> -class _LIBCUDACXX_TEMPLATE_VIS basic_stringstream; - -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_filebuf; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_ifstream; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_ofstream; -template > -class _LIBCUDACXX_TEMPLATE_VIS basic_fstream; - -template > -class _LIBCUDACXX_TEMPLATE_VIS istreambuf_iterator; -template > -class _LIBCUDACXX_TEMPLATE_VIS ostreambuf_iterator; - -typedef basic_ios ios; -typedef basic_ios wios; - -typedef basic_streambuf streambuf; -typedef basic_istream istream; -typedef basic_ostream ostream; -typedef basic_iostream iostream; - -typedef basic_stringbuf stringbuf; -typedef basic_istringstream istringstream; -typedef basic_ostringstream ostringstream; -typedef basic_stringstream stringstream; - -typedef basic_filebuf filebuf; -typedef basic_ifstream ifstream; -typedef basic_ofstream ofstream; -typedef basic_fstream fstream; - -typedef basic_streambuf wstreambuf; -typedef basic_istream wistream; -typedef basic_ostream wostream; -typedef basic_iostream wiostream; - 
-typedef basic_stringbuf wstringbuf; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_ios; + +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_streambuf; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_istream; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_ostream; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_iostream; + +template , + class _Allocator = allocator<_CharT> > + class _LIBCUDACXX_TEMPLATE_VIS basic_stringbuf; +template , + class _Allocator = allocator<_CharT> > + class _LIBCUDACXX_TEMPLATE_VIS basic_istringstream; +template , + class _Allocator = allocator<_CharT> > + class _LIBCUDACXX_TEMPLATE_VIS basic_ostringstream; +template , + class _Allocator = allocator<_CharT> > + class _LIBCUDACXX_TEMPLATE_VIS basic_stringstream; + +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_filebuf; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_ifstream; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_ofstream; +template > + class _LIBCUDACXX_TEMPLATE_VIS basic_fstream; + +template > + class _LIBCUDACXX_TEMPLATE_VIS istreambuf_iterator; +template > + class _LIBCUDACXX_TEMPLATE_VIS ostreambuf_iterator; + +typedef basic_ios ios; +typedef basic_ios wios; + +typedef basic_streambuf streambuf; +typedef basic_istream istream; +typedef basic_ostream ostream; +typedef basic_iostream iostream; + +typedef basic_stringbuf stringbuf; +typedef basic_istringstream istringstream; +typedef basic_ostringstream ostringstream; +typedef basic_stringstream stringstream; + +typedef basic_filebuf filebuf; +typedef basic_ifstream ifstream; +typedef basic_ofstream ofstream; +typedef basic_fstream fstream; + +typedef basic_streambuf wstreambuf; +typedef basic_istream wistream; +typedef basic_ostream wostream; +typedef basic_iostream wiostream; + +typedef basic_stringbuf wstringbuf; typedef basic_istringstream wistringstream; typedef basic_ostringstream wostringstream; -typedef basic_stringstream wstringstream; +typedef basic_stringstream wstringstream; -typedef basic_filebuf wfilebuf; -typedef basic_ifstream wifstream; -typedef basic_ofstream wofstream; -typedef basic_fstream wfstream; +typedef basic_filebuf wfilebuf; +typedef basic_ifstream wifstream; +typedef basic_ofstream wofstream; +typedef basic_fstream wfstream; #if !defined(_LIBCUDACXX_HAS_NO_WCHAR_H) -template -class _LIBCUDACXX_TEMPLATE_VIS fpos; -typedef fpos streampos; -typedef fpos wstreampos; -# ifndef _LIBCUDACXX_NO_HAS_CHAR8_T -typedef fpos u8streampos; -# endif -# ifndef _LIBCUDACXX_HAS_NO_UNICODE_CHARS -typedef fpos u16streampos; -typedef fpos u32streampos; -# endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS +template class _LIBCUDACXX_TEMPLATE_VIS fpos; +typedef fpos streampos; +typedef fpos wstreampos; +#ifndef _LIBCUDACXX_NO_HAS_CHAR8_T +typedef fpos u8streampos; +#endif +#ifndef _LIBCUDACXX_HAS_NO_UNICODE_CHARS +typedef fpos u16streampos; +typedef fpos u32streampos; +#endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS #endif #if defined(_NEWLIB_VERSION) // On newlib, off_t is 'long int' -typedef long int streamoff; // for char_traits in +typedef long int streamoff; // for char_traits in #else -typedef long long streamoff; // for char_traits in +typedef long long streamoff; // for char_traits in #endif // Include other forward declarations here -template > +template > class _LIBCUDACXX_TEMPLATE_VIS vector; _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX_IOSFWD +#endif // _LIBCUDACXX_IOSFWD diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/iterator b/libcudacxx/include/cuda/std/detail/libcxx/include/iterator index 
ee065ad475b..5f1f42cb513 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/iterator +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/iterator @@ -685,6 +685,7 @@ template constexpr const E* data(initializer_list il) noexcept; # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include @@ -725,9 +726,8 @@ template constexpr const E* data(initializer_list il) noexcept; #include #include #include -#include // all public C++ headers provide the assertion handler -#include // for forward declarations of vector and string. #include +#include // for forward declarations of vector and string. #include #include @@ -744,17 +744,17 @@ struct __libcpp_is_trivial_iterator : public _LIBCUDACXX_BOOL_CONSTANT(is_pointe {}; template -struct __libcpp_is_trivial_iterator> +struct __libcpp_is_trivial_iterator > : public _LIBCUDACXX_BOOL_CONSTANT(__libcpp_is_trivial_iterator<_Iter>::value) {}; template -struct __libcpp_is_trivial_iterator> +struct __libcpp_is_trivial_iterator > : public _LIBCUDACXX_BOOL_CONSTANT(__libcpp_is_trivial_iterator<_Iter>::value) {}; template -struct __libcpp_is_trivial_iterator<__wrap_iter<_Iter>> +struct __libcpp_is_trivial_iterator<__wrap_iter<_Iter> > : public _LIBCUDACXX_BOOL_CONSTANT(__libcpp_is_trivial_iterator<_Iter>::value) {}; diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/latch b/libcudacxx/include/cuda/std/detail/libcxx/include/latch index 2b642baa56f..2dcac0588be 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/latch +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/latch @@ -48,73 +48,74 @@ namespace std # pragma system_header #endif // no system header -#include #include // all public C++ headers provide the assertion handler #include +#include + #include #ifdef _LIBCUDACXX_HAS_NO_THREADS -# error is not supported on this single threaded system +# error is not supported on this single threaded system #endif _LIBCUDACXX_BEGIN_NAMESPACE_STD -#if _LIBCUDACXX_CUDA_ABI_VERSION < 3 +# if _LIBCUDACXX_CUDA_ABI_VERSION < 3 # define _LIBCUDACXX_LATCH_ALIGNMENT alignas(64) -#else +# else # define _LIBCUDACXX_LATCH_ALIGNMENT -#endif +# endif -template +template class __latch_base { - _LIBCUDACXX_LATCH_ALIGNMENT __atomic_base __counter; - + _LIBCUDACXX_LATCH_ALIGNMENT __atomic_base __counter; public: - inline _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit __latch_base(ptrdiff_t __expected) - : __counter(__expected) - {} + inline _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit __latch_base(ptrdiff_t __expected) + : __counter(__expected) { } - ~__latch_base() = default; - __latch_base(const __latch_base&) = delete; - __latch_base& operator=(const __latch_base&) = delete; + ~__latch_base() = default; + __latch_base(const __latch_base&) = delete; + __latch_base& operator=(const __latch_base&) = delete; - inline _LIBCUDACXX_INLINE_VISIBILITY void count_down(ptrdiff_t __update = 1) - { - _LIBCUDACXX_ASSERT(__update > 0, ""); - auto const __old = __counter.fetch_sub(__update, memory_order_release); - _LIBCUDACXX_ASSERT(__old >= __update, ""); - if (__old == __update) + inline _LIBCUDACXX_INLINE_VISIBILITY + void count_down(ptrdiff_t __update = 1) { - __counter.notify_all(); + _LIBCUDACXX_ASSERT(__update > 0, ""); + auto const __old = __counter.fetch_sub(__update, memory_order_release); + _LIBCUDACXX_ASSERT(__old >= __update, ""); + if(__old == __update) + __counter.notify_all(); } - } - inline _LIBCUDACXX_INLINE_VISIBILITY bool try_wait() const 
noexcept - { - return __counter.load(memory_order_acquire) == 0; - } - inline _LIBCUDACXX_INLINE_VISIBILITY void wait() const - { - while (1) + inline _LIBCUDACXX_INLINE_VISIBILITY + bool try_wait() const noexcept { - auto const __current = __counter.load(memory_order_acquire); - if (__current == 0) - { - return; - } - __counter.wait(__current, memory_order_relaxed); + return __counter.load(memory_order_acquire) == 0; + } + inline _LIBCUDACXX_INLINE_VISIBILITY + void wait() const + { + while(1) { + auto const __current = __counter.load(memory_order_acquire); + if(__current == 0) + return; + __counter.wait(__current, memory_order_relaxed) + ; + } + } + inline _LIBCUDACXX_INLINE_VISIBILITY + void arrive_and_wait(ptrdiff_t __update = 1) + { + count_down(__update); + wait(); } - } - inline _LIBCUDACXX_INLINE_VISIBILITY void arrive_and_wait(ptrdiff_t __update = 1) - { - count_down(__update); - wait(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr ptrdiff_t max() noexcept - { - return numeric_limits::max(); - } + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept + { + return numeric_limits::max(); + } }; using latch = __latch_base<>; diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/limits b/libcudacxx/include/cuda/std/detail/libcxx/include/limits index 858f102c052..cdedd84d2a3 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/limits +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/limits @@ -112,108 +112,82 @@ template<> class numeric_limits; # pragma system_header #endif // no system header -#include #include // all public C++ headers provide the assertion handler -#include +#include #include #include +#include + #if defined(_CCCL_COMPILER_MSVC) -# include +#include #endif // _LIBCUDACXX_MSVCRT #if defined(_CCCL_COMPILER_IBM) -# include +#include #endif // _CCCL_COMPILER_IBM _LIBCUDACXX_BEGIN_NAMESPACE_STD enum float_round_style { - round_indeterminate = -1, - round_toward_zero = 0, - round_to_nearest = 1, - round_toward_infinity = 2, - round_toward_neg_infinity = 3 + round_indeterminate = -1, + round_toward_zero = 0, + round_to_nearest = 1, + round_toward_infinity = 2, + round_toward_neg_infinity = 3 }; enum float_denorm_style { - denorm_indeterminate = -1, - denorm_absent = 0, - denorm_present = 1 + denorm_indeterminate = -1, + denorm_absent = 0, + denorm_present = 1 }; template ::value> class __libcpp_numeric_limits { protected: - typedef _Tp type; - - static constexpr bool is_specialized = false; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return type(); - } - - static constexpr int digits = 0; - static constexpr int digits10 = 0; - static constexpr int max_digits10 = 0; - static constexpr bool is_signed = false; - static constexpr bool is_integer = false; - static constexpr bool is_exact = false; - static constexpr int radix = 0; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return type(); - } - - static constexpr int min_exponent = 0; - static constexpr int min_exponent10 = 0; - static constexpr int max_exponent = 0; - static constexpr int max_exponent10 = 0; - - static constexpr bool has_infinity = false; - static constexpr bool has_quiet_NaN = false; - static constexpr 
bool has_signaling_NaN = false; - static constexpr float_denorm_style has_denorm = denorm_absent; - static constexpr bool has_denorm_loss = false; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return type(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return type(); - } - - static constexpr bool is_iec559 = false; - static constexpr bool is_bounded = false; - static constexpr bool is_modulo = false; - - static constexpr bool traps = false; - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_toward_zero; + typedef _Tp type; + + static constexpr bool is_specialized = false; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return type();} + + static constexpr int digits = 0; + static constexpr int digits10 = 0; + static constexpr int max_digits10 = 0; + static constexpr bool is_signed = false; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr int radix = 0; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return type();} + + static constexpr int min_exponent = 0; + static constexpr int min_exponent10 = 0; + static constexpr int max_exponent = 0; + static constexpr int max_exponent10 = 0; + + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return type();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return type();} + + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = false; + static constexpr bool is_modulo = false; + + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_toward_zero; }; _CCCL_DIAG_PUSH @@ -221,908 +195,647 @@ _CCCL_DIAG_SUPPRESS_MSVC(4309) template struct __libcpp_compute_min { - static constexpr _Tp value = static_cast<_Tp>(_Tp(1) << __digits); + static constexpr _Tp value = static_cast<_Tp>(_Tp(1) << __digits); }; _CCCL_DIAG_POP template struct __libcpp_compute_min<_Tp, __digits, false> { - static constexpr _Tp value = _Tp(0); + static constexpr _Tp value = _Tp(0); }; template class __libcpp_numeric_limits<_Tp, true> { protected: - typedef _Tp type; - - static constexpr bool is_specialized = true; - - static constexpr bool is_signed = type(-1) < type(0); - static constexpr int digits = static_cast(sizeof(type) * __CHAR_BIT__ - is_signed); - static constexpr int digits10 = digits * 3 / 10; - static constexpr int max_digits10 = 0; - static constexpr type __min = 
__libcpp_compute_min::value; - static constexpr type __max = is_signed ? type(type(~0) ^ __min) : type(~0); - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __min; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __max; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return min(); - } - - static constexpr bool is_integer = true; - static constexpr bool is_exact = true; - static constexpr int radix = 2; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return type(0); - } - - static constexpr int min_exponent = 0; - static constexpr int min_exponent10 = 0; - static constexpr int max_exponent = 0; - static constexpr int max_exponent10 = 0; - - static constexpr bool has_infinity = false; - static constexpr bool has_quiet_NaN = false; - static constexpr bool has_signaling_NaN = false; - static constexpr float_denorm_style has_denorm = denorm_absent; - static constexpr bool has_denorm_loss = false; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return type(0); - } - - static constexpr bool is_iec559 = false; - static constexpr bool is_bounded = true; - static constexpr bool is_modulo = !_CUDA_VSTD::is_signed<_Tp>::value; - -#if defined(__i386__) || defined(__x86_64__) || defined(__pnacl__) || defined(__wasm__) - static constexpr bool traps = true; + typedef _Tp type; + + static constexpr bool is_specialized = true; + + static constexpr bool is_signed = type(-1) < type(0); + static constexpr int digits = static_cast(sizeof(type) * __CHAR_BIT__ - is_signed); + static constexpr int digits10 = digits * 3 / 10; + static constexpr int max_digits10 = 0; + static constexpr type __min = __libcpp_compute_min::value; + static constexpr type __max = is_signed ? 
type(type(~0) ^ __min) : type(~0); + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __min;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __max;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return min();} + + static constexpr bool is_integer = true; + static constexpr bool is_exact = true; + static constexpr int radix = 2; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return type(0);} + + static constexpr int min_exponent = 0; + static constexpr int min_exponent10 = 0; + static constexpr int max_exponent = 0; + static constexpr int max_exponent10 = 0; + + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return type(0);} + + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = !_CUDA_VSTD::is_signed<_Tp>::value; + +#if defined(__i386__) || defined(__x86_64__) || defined(__pnacl__) || \ + defined(__wasm__) + static constexpr bool traps = true; #else - static constexpr bool traps = false; + static constexpr bool traps = false; #endif - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_toward_zero; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_toward_zero; }; template <> class __libcpp_numeric_limits { protected: - typedef bool type; - - static constexpr bool is_specialized = true; - - static constexpr bool is_signed = false; - static constexpr int digits = 1; - static constexpr int digits10 = 0; - static constexpr int max_digits10 = 0; - static constexpr type __min = false; - static constexpr type __max = true; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __min; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __max; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return min(); - } - - static constexpr bool is_integer = true; - static constexpr bool is_exact = true; - static constexpr int radix = 2; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return type(0); - } - - static constexpr int min_exponent = 0; - static constexpr int min_exponent10 = 0; - static constexpr int max_exponent = 0; - static constexpr int max_exponent10 = 0; - - static constexpr bool has_infinity = false; - static constexpr bool has_quiet_NaN = false; - static constexpr bool has_signaling_NaN = false; - static constexpr float_denorm_style has_denorm = denorm_absent; - static constexpr bool has_denorm_loss = false; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return type(0); - } - 
_LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return type(0); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return type(0); - } - - static constexpr bool is_iec559 = false; - static constexpr bool is_bounded = true; - static constexpr bool is_modulo = false; - - static constexpr bool traps = false; - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_toward_zero; + typedef bool type; + + static constexpr bool is_specialized = true; + + static constexpr bool is_signed = false; + static constexpr int digits = 1; + static constexpr int digits10 = 0; + static constexpr int max_digits10 = 0; + static constexpr type __min = false; + static constexpr type __max = true; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __min;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __max;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return min();} + + static constexpr bool is_integer = true; + static constexpr bool is_exact = true; + static constexpr int radix = 2; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return type(0);} + + static constexpr int min_exponent = 0; + static constexpr int min_exponent10 = 0; + static constexpr int max_exponent = 0; + static constexpr int max_exponent10 = 0; + + static constexpr bool has_infinity = false; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return type(0);} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return type(0);} + + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_toward_zero; }; template <> class __libcpp_numeric_limits { protected: - typedef float type; - - static constexpr bool is_specialized = true; - - static constexpr bool is_signed = true; - static constexpr int digits = __FLT_MANT_DIG__; - static constexpr int digits10 = __FLT_DIG__; - static constexpr int max_digits10 = 2 + (digits * 30103l) / 100000l; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __FLT_MIN__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __FLT_MAX__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return -max(); - } - - static constexpr bool is_integer = false; - static constexpr bool is_exact = false; - static constexpr int radix = __FLT_RADIX__; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __FLT_EPSILON__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return 0.5F; - } - - static 
constexpr int min_exponent = __FLT_MIN_EXP__; - static constexpr int min_exponent10 = __FLT_MIN_10_EXP__; - static constexpr int max_exponent = __FLT_MAX_EXP__; - static constexpr int max_exponent10 = __FLT_MAX_10_EXP__; - - static constexpr bool has_infinity = true; - static constexpr bool has_quiet_NaN = true; - static constexpr bool has_signaling_NaN = true; - static constexpr float_denorm_style has_denorm = denorm_present; - static constexpr bool has_denorm_loss = false; + typedef float type; + + static constexpr bool is_specialized = true; + + static constexpr bool is_signed = true; + static constexpr int digits = __FLT_MANT_DIG__; + static constexpr int digits10 = __FLT_DIG__; + static constexpr int max_digits10 = 2+(digits * 30103l)/100000l; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __FLT_MIN__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __FLT_MAX__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return -max();} + + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr int radix = __FLT_RADIX__; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __FLT_EPSILON__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return 0.5F;} + + static constexpr int min_exponent = __FLT_MIN_EXP__; + static constexpr int min_exponent10 = __FLT_MIN_10_EXP__; + static constexpr int max_exponent = __FLT_MAX_EXP__; + static constexpr int max_exponent10 = __FLT_MAX_10_EXP__; + + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr float_denorm_style has_denorm = denorm_present; + static constexpr bool has_denorm_loss = false; #ifdef _CCCL_COMPILER_NVRTC - _LIBCUDACXX_INLINE_VISIBILITY static type infinity() noexcept - { - return __builtin_huge_valf(); - } - _LIBCUDACXX_INLINE_VISIBILITY static type quiet_NaN() noexcept - { - return __builtin_nanf(""); - } - _LIBCUDACXX_INLINE_VISIBILITY static type signaling_NaN() noexcept - { - return __builtin_nansf(""); - } + _LIBCUDACXX_INLINE_VISIBILITY static type infinity() noexcept {return __builtin_huge_valf();} + _LIBCUDACXX_INLINE_VISIBILITY static type quiet_NaN() noexcept {return __builtin_nanf("");} + _LIBCUDACXX_INLINE_VISIBILITY static type signaling_NaN() noexcept {return __builtin_nansf("");} #else - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __builtin_huge_valf(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __builtin_nanf(""); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __builtin_nansf(""); - } + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __builtin_huge_valf();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __builtin_nanf("");} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __builtin_nansf("");} #endif - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __FLT_DENORM_MIN__; - } - - static constexpr bool is_iec559 = true; - static constexpr bool is_bounded = true; - static constexpr bool is_modulo = false; - - static constexpr bool traps = false; - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_to_nearest; + 
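For the float specialization being reformatted here, max_digits10 is computed as 2 + digits * 30103 / 100000, i.e. 2 + floor(24 * log10(2)) = 9 for IEEE binary32: the number of decimal digits needed to round-trip any float. A standalone check of those values (editorial, not part of the patch), assuming an IEEE-754 float and <cuda/std/limits> on the include path:

#include <cuda/std/limits>

int main()
{
  using flim = cuda::std::numeric_limits<float>;
  static_assert(flim::is_iec559, "IEEE-754 binary32 assumed");
  static_assert(flim::radix == 2 && flim::digits == 24, "24-bit significand");
  static_assert(flim::digits10 == 6, "6 decimal digits always survive a round trip");
  static_assert(flim::max_digits10 == 2 + flim::digits * 30103l / 100000l, "formula above");
  static_assert(flim::max_digits10 == 9, "9 digits reproduce any float exactly");
  static_assert(flim::round_error() == 0.5f, "half-ULP bound for round-to-nearest");
  return 0;
}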
_LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __FLT_DENORM_MIN__;} + + static constexpr bool is_iec559 = true; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_to_nearest; }; template <> class __libcpp_numeric_limits { protected: - typedef double type; - - static constexpr bool is_specialized = true; - - static constexpr bool is_signed = true; - static constexpr int digits = __DBL_MANT_DIG__; - static constexpr int digits10 = __DBL_DIG__; - static constexpr int max_digits10 = 2 + (digits * 30103l) / 100000l; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __DBL_MIN__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __DBL_MAX__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return -max(); - } - - static constexpr bool is_integer = false; - static constexpr bool is_exact = false; - static constexpr int radix = __FLT_RADIX__; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __DBL_EPSILON__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return 0.5; - } - - static constexpr int min_exponent = __DBL_MIN_EXP__; - static constexpr int min_exponent10 = __DBL_MIN_10_EXP__; - static constexpr int max_exponent = __DBL_MAX_EXP__; - static constexpr int max_exponent10 = __DBL_MAX_10_EXP__; - - static constexpr bool has_infinity = true; - static constexpr bool has_quiet_NaN = true; - static constexpr bool has_signaling_NaN = true; - static constexpr float_denorm_style has_denorm = denorm_present; - static constexpr bool has_denorm_loss = false; + typedef double type; + + static constexpr bool is_specialized = true; + + static constexpr bool is_signed = true; + static constexpr int digits = __DBL_MANT_DIG__; + static constexpr int digits10 = __DBL_DIG__; + static constexpr int max_digits10 = 2+(digits * 30103l)/100000l; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __DBL_MIN__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __DBL_MAX__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return -max();} + + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr int radix = __FLT_RADIX__; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __DBL_EPSILON__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return 0.5;} + + static constexpr int min_exponent = __DBL_MIN_EXP__; + static constexpr int min_exponent10 = __DBL_MIN_10_EXP__; + static constexpr int max_exponent = __DBL_MAX_EXP__; + static constexpr int max_exponent10 = __DBL_MAX_10_EXP__; + + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr float_denorm_style has_denorm = denorm_present; + static constexpr bool has_denorm_loss = false; #ifdef _CCCL_COMPILER_NVRTC - _LIBCUDACXX_INLINE_VISIBILITY static type infinity() noexcept - { - return __builtin_huge_val(); - } - _LIBCUDACXX_INLINE_VISIBILITY static type quiet_NaN() noexcept - { - return __builtin_nan(""); - } - _LIBCUDACXX_INLINE_VISIBILITY static type signaling_NaN() noexcept - { - return 
__builtin_nans(""); - } + _LIBCUDACXX_INLINE_VISIBILITY static type infinity() noexcept {return __builtin_huge_val();} + _LIBCUDACXX_INLINE_VISIBILITY static type quiet_NaN() noexcept {return __builtin_nan("");} + _LIBCUDACXX_INLINE_VISIBILITY static type signaling_NaN() noexcept {return __builtin_nans("");} #else - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __builtin_huge_val(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __builtin_nan(""); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __builtin_nans(""); - } + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __builtin_huge_val();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __builtin_nan("");} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __builtin_nans("");} #endif - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __DBL_DENORM_MIN__; - } - - static constexpr bool is_iec559 = true; - static constexpr bool is_bounded = true; - static constexpr bool is_modulo = false; - - static constexpr bool traps = false; - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_to_nearest; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __DBL_DENORM_MIN__;} + + static constexpr bool is_iec559 = true; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_to_nearest; }; template <> class __libcpp_numeric_limits { #ifndef _LIBCUDACXX_HAS_NO_LONG_DOUBLE - protected: - typedef long double type; - - static constexpr bool is_specialized = true; - - static constexpr bool is_signed = true; - static constexpr int digits = __LDBL_MANT_DIG__; - static constexpr int digits10 = __LDBL_DIG__; - static constexpr int max_digits10 = 2 + (digits * 30103l) / 100000l; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __LDBL_MIN__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __LDBL_MAX__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return -max(); - } - - static constexpr bool is_integer = false; - static constexpr bool is_exact = false; - static constexpr int radix = __FLT_RADIX__; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __LDBL_EPSILON__; - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return 0.5L; - } - - static constexpr int min_exponent = __LDBL_MIN_EXP__; - static constexpr int min_exponent10 = __LDBL_MIN_10_EXP__; - static constexpr int max_exponent = __LDBL_MAX_EXP__; - static constexpr int max_exponent10 = __LDBL_MAX_10_EXP__; - - static constexpr bool has_infinity = true; - static constexpr bool has_quiet_NaN = true; - static constexpr bool has_signaling_NaN = true; - static constexpr float_denorm_style has_denorm = denorm_present; - static constexpr bool has_denorm_loss = false; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __builtin_huge_vall(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __builtin_nanl(""); - } - 
_LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __builtin_nansl(""); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __LDBL_DENORM_MIN__; - } - -# if (defined(__ppc__) || defined(__ppc64__) || defined(__PPC64__)) - static constexpr bool is_iec559 = false; -# else - static constexpr bool is_iec559 = true; -# endif - static constexpr bool is_bounded = true; - static constexpr bool is_modulo = false; - - static constexpr bool traps = false; - static constexpr bool tinyness_before = false; - static constexpr float_round_style round_style = round_to_nearest; + typedef long double type; + + static constexpr bool is_specialized = true; + + static constexpr bool is_signed = true; + static constexpr int digits = __LDBL_MANT_DIG__; + static constexpr int digits10 = __LDBL_DIG__; + static constexpr int max_digits10 = 2+(digits * 30103l)/100000l; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __LDBL_MIN__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __LDBL_MAX__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return -max();} + + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr int radix = __FLT_RADIX__; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __LDBL_EPSILON__;} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return 0.5L;} + + static constexpr int min_exponent = __LDBL_MIN_EXP__; + static constexpr int min_exponent10 = __LDBL_MIN_10_EXP__; + static constexpr int max_exponent = __LDBL_MAX_EXP__; + static constexpr int max_exponent10 = __LDBL_MAX_10_EXP__; + + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr float_denorm_style has_denorm = denorm_present; + static constexpr bool has_denorm_loss = false; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __builtin_huge_vall();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __builtin_nanl("");} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __builtin_nansl("");} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __LDBL_DENORM_MIN__;} + +#if (defined(__ppc__) || defined(__ppc64__) || defined(__PPC64__)) + static constexpr bool is_iec559 = false; +#else + static constexpr bool is_iec559 = true; +#endif + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_to_nearest; #endif }; template -class _LIBCUDACXX_TEMPLATE_VIS numeric_limits : private __libcpp_numeric_limits<__remove_cv_t<_Tp>> +class _LIBCUDACXX_TEMPLATE_VIS numeric_limits + : private __libcpp_numeric_limits<__remove_cv_t<_Tp>> { - typedef __libcpp_numeric_limits<__remove_cv_t<_Tp>> __base; - typedef typename __base::type type; - + typedef __libcpp_numeric_limits<__remove_cv_t<_Tp>> __base; + typedef typename __base::type type; public: - static constexpr bool is_specialized = __base::is_specialized; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __base::min(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return 
__base::max(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return __base::lowest(); - } - - static constexpr int digits = __base::digits; - static constexpr int digits10 = __base::digits10; - static constexpr int max_digits10 = __base::max_digits10; - static constexpr bool is_signed = __base::is_signed; - static constexpr bool is_integer = __base::is_integer; - static constexpr bool is_exact = __base::is_exact; - static constexpr int radix = __base::radix; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __base::epsilon(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return __base::round_error(); - } - - static constexpr int min_exponent = __base::min_exponent; - static constexpr int min_exponent10 = __base::min_exponent10; - static constexpr int max_exponent = __base::max_exponent; - static constexpr int max_exponent10 = __base::max_exponent10; - - static constexpr bool has_infinity = __base::has_infinity; - static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; - static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; - static constexpr float_denorm_style has_denorm = __base::has_denorm; - static constexpr bool has_denorm_loss = __base::has_denorm_loss; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __base::infinity(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __base::quiet_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __base::signaling_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __base::denorm_min(); - } - - static constexpr bool is_iec559 = __base::is_iec559; - static constexpr bool is_bounded = __base::is_bounded; - static constexpr bool is_modulo = __base::is_modulo; - - static constexpr bool traps = __base::traps; - static constexpr bool tinyness_before = __base::tinyness_before; - static constexpr float_round_style round_style = __base::round_style; + static constexpr bool is_specialized = __base::is_specialized; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __base::min();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __base::max();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return __base::lowest();} + + static constexpr int digits = __base::digits; + static constexpr int digits10 = __base::digits10; + static constexpr int max_digits10 = __base::max_digits10; + static constexpr bool is_signed = __base::is_signed; + static constexpr bool is_integer = __base::is_integer; + static constexpr bool is_exact = __base::is_exact; + static constexpr int radix = __base::radix; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __base::epsilon();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return __base::round_error();} + + static constexpr int min_exponent = __base::min_exponent; + static constexpr int min_exponent10 = __base::min_exponent10; + static constexpr int max_exponent = __base::max_exponent; + static constexpr int max_exponent10 = __base::max_exponent10; + + static constexpr bool has_infinity = __base::has_infinity; + static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; + static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; + static constexpr float_denorm_style 
has_denorm = __base::has_denorm; + static constexpr bool has_denorm_loss = __base::has_denorm_loss; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __base::infinity();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __base::quiet_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __base::signaling_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __base::denorm_min();} + + static constexpr bool is_iec559 = __base::is_iec559; + static constexpr bool is_bounded = __base::is_bounded; + static constexpr bool is_modulo = __base::is_modulo; + + static constexpr bool traps = __base::traps; + static constexpr bool tinyness_before = __base::tinyness_before; + static constexpr float_round_style round_style = __base::round_style; }; template -constexpr bool numeric_limits<_Tp>::is_specialized; + constexpr bool numeric_limits<_Tp>::is_specialized; template -constexpr int numeric_limits<_Tp>::digits; + constexpr int numeric_limits<_Tp>::digits; template -constexpr int numeric_limits<_Tp>::digits10; + constexpr int numeric_limits<_Tp>::digits10; template -constexpr int numeric_limits<_Tp>::max_digits10; + constexpr int numeric_limits<_Tp>::max_digits10; template -constexpr bool numeric_limits<_Tp>::is_signed; + constexpr bool numeric_limits<_Tp>::is_signed; template -constexpr bool numeric_limits<_Tp>::is_integer; + constexpr bool numeric_limits<_Tp>::is_integer; template -constexpr bool numeric_limits<_Tp>::is_exact; + constexpr bool numeric_limits<_Tp>::is_exact; template -constexpr int numeric_limits<_Tp>::radix; + constexpr int numeric_limits<_Tp>::radix; template -constexpr int numeric_limits<_Tp>::min_exponent; + constexpr int numeric_limits<_Tp>::min_exponent; template -constexpr int numeric_limits<_Tp>::min_exponent10; + constexpr int numeric_limits<_Tp>::min_exponent10; template -constexpr int numeric_limits<_Tp>::max_exponent; + constexpr int numeric_limits<_Tp>::max_exponent; template -constexpr int numeric_limits<_Tp>::max_exponent10; + constexpr int numeric_limits<_Tp>::max_exponent10; template -constexpr bool numeric_limits<_Tp>::has_infinity; + constexpr bool numeric_limits<_Tp>::has_infinity; template -constexpr bool numeric_limits<_Tp>::has_quiet_NaN; + constexpr bool numeric_limits<_Tp>::has_quiet_NaN; template -constexpr bool numeric_limits<_Tp>::has_signaling_NaN; + constexpr bool numeric_limits<_Tp>::has_signaling_NaN; template -constexpr float_denorm_style numeric_limits<_Tp>::has_denorm; + constexpr float_denorm_style numeric_limits<_Tp>::has_denorm; template -constexpr bool numeric_limits<_Tp>::has_denorm_loss; + constexpr bool numeric_limits<_Tp>::has_denorm_loss; template -constexpr bool numeric_limits<_Tp>::is_iec559; + constexpr bool numeric_limits<_Tp>::is_iec559; template -constexpr bool numeric_limits<_Tp>::is_bounded; + constexpr bool numeric_limits<_Tp>::is_bounded; template -constexpr bool numeric_limits<_Tp>::is_modulo; + constexpr bool numeric_limits<_Tp>::is_modulo; template -constexpr bool numeric_limits<_Tp>::traps; + constexpr bool numeric_limits<_Tp>::traps; template -constexpr bool numeric_limits<_Tp>::tinyness_before; + constexpr bool numeric_limits<_Tp>::tinyness_before; template -constexpr float_round_style numeric_limits<_Tp>::round_style; + constexpr float_round_style numeric_limits<_Tp>::round_style; template -class _LIBCUDACXX_TEMPLATE_VIS numeric_limits : private numeric_limits<_Tp> +class 
_LIBCUDACXX_TEMPLATE_VIS numeric_limits + : private numeric_limits<_Tp> { - typedef numeric_limits<_Tp> __base; - typedef _Tp type; - + typedef numeric_limits<_Tp> __base; + typedef _Tp type; public: - static constexpr bool is_specialized = __base::is_specialized; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __base::min(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __base::max(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return __base::lowest(); - } - - static constexpr int digits = __base::digits; - static constexpr int digits10 = __base::digits10; - static constexpr int max_digits10 = __base::max_digits10; - static constexpr bool is_signed = __base::is_signed; - static constexpr bool is_integer = __base::is_integer; - static constexpr bool is_exact = __base::is_exact; - static constexpr int radix = __base::radix; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __base::epsilon(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return __base::round_error(); - } - - static constexpr int min_exponent = __base::min_exponent; - static constexpr int min_exponent10 = __base::min_exponent10; - static constexpr int max_exponent = __base::max_exponent; - static constexpr int max_exponent10 = __base::max_exponent10; - - static constexpr bool has_infinity = __base::has_infinity; - static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; - static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; - static constexpr float_denorm_style has_denorm = __base::has_denorm; - static constexpr bool has_denorm_loss = __base::has_denorm_loss; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __base::infinity(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __base::quiet_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __base::signaling_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __base::denorm_min(); - } - - static constexpr bool is_iec559 = __base::is_iec559; - static constexpr bool is_bounded = __base::is_bounded; - static constexpr bool is_modulo = __base::is_modulo; - - static constexpr bool traps = __base::traps; - static constexpr bool tinyness_before = __base::tinyness_before; - static constexpr float_round_style round_style = __base::round_style; + static constexpr bool is_specialized = __base::is_specialized; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __base::min();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __base::max();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return __base::lowest();} + + static constexpr int digits = __base::digits; + static constexpr int digits10 = __base::digits10; + static constexpr int max_digits10 = __base::max_digits10; + static constexpr bool is_signed = __base::is_signed; + static constexpr bool is_integer = __base::is_integer; + static constexpr bool is_exact = __base::is_exact; + static constexpr int radix = __base::radix; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __base::epsilon();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return __base::round_error();} + + static constexpr int min_exponent = 
__base::min_exponent; + static constexpr int min_exponent10 = __base::min_exponent10; + static constexpr int max_exponent = __base::max_exponent; + static constexpr int max_exponent10 = __base::max_exponent10; + + static constexpr bool has_infinity = __base::has_infinity; + static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; + static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; + static constexpr float_denorm_style has_denorm = __base::has_denorm; + static constexpr bool has_denorm_loss = __base::has_denorm_loss; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __base::infinity();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __base::quiet_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __base::signaling_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __base::denorm_min();} + + static constexpr bool is_iec559 = __base::is_iec559; + static constexpr bool is_bounded = __base::is_bounded; + static constexpr bool is_modulo = __base::is_modulo; + + static constexpr bool traps = __base::traps; + static constexpr bool tinyness_before = __base::tinyness_before; + static constexpr float_round_style round_style = __base::round_style; }; template -constexpr bool numeric_limits::is_specialized; + constexpr bool numeric_limits::is_specialized; template -constexpr int numeric_limits::digits; + constexpr int numeric_limits::digits; template -constexpr int numeric_limits::digits10; + constexpr int numeric_limits::digits10; template -constexpr int numeric_limits::max_digits10; + constexpr int numeric_limits::max_digits10; template -constexpr bool numeric_limits::is_signed; + constexpr bool numeric_limits::is_signed; template -constexpr bool numeric_limits::is_integer; + constexpr bool numeric_limits::is_integer; template -constexpr bool numeric_limits::is_exact; + constexpr bool numeric_limits::is_exact; template -constexpr int numeric_limits::radix; + constexpr int numeric_limits::radix; template -constexpr int numeric_limits::min_exponent; + constexpr int numeric_limits::min_exponent; template -constexpr int numeric_limits::min_exponent10; + constexpr int numeric_limits::min_exponent10; template -constexpr int numeric_limits::max_exponent; + constexpr int numeric_limits::max_exponent; template -constexpr int numeric_limits::max_exponent10; + constexpr int numeric_limits::max_exponent10; template -constexpr bool numeric_limits::has_infinity; + constexpr bool numeric_limits::has_infinity; template -constexpr bool numeric_limits::has_quiet_NaN; + constexpr bool numeric_limits::has_quiet_NaN; template -constexpr bool numeric_limits::has_signaling_NaN; + constexpr bool numeric_limits::has_signaling_NaN; template -constexpr float_denorm_style numeric_limits::has_denorm; + constexpr float_denorm_style numeric_limits::has_denorm; template -constexpr bool numeric_limits::has_denorm_loss; + constexpr bool numeric_limits::has_denorm_loss; template -constexpr bool numeric_limits::is_iec559; + constexpr bool numeric_limits::is_iec559; template -constexpr bool numeric_limits::is_bounded; + constexpr bool numeric_limits::is_bounded; template -constexpr bool numeric_limits::is_modulo; + constexpr bool numeric_limits::is_modulo; template -constexpr bool numeric_limits::traps; + constexpr bool numeric_limits::traps; template -constexpr bool numeric_limits::tinyness_before; + constexpr bool numeric_limits::tinyness_before; template 
-constexpr float_round_style numeric_limits::round_style; + constexpr float_round_style numeric_limits::round_style; template -class _LIBCUDACXX_TEMPLATE_VIS numeric_limits : private numeric_limits<_Tp> +class _LIBCUDACXX_TEMPLATE_VIS numeric_limits + : private numeric_limits<_Tp> { - typedef numeric_limits<_Tp> __base; - typedef _Tp type; - + typedef numeric_limits<_Tp> __base; + typedef _Tp type; public: - static constexpr bool is_specialized = __base::is_specialized; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __base::min(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __base::max(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return __base::lowest(); - } - - static constexpr int digits = __base::digits; - static constexpr int digits10 = __base::digits10; - static constexpr int max_digits10 = __base::max_digits10; - static constexpr bool is_signed = __base::is_signed; - static constexpr bool is_integer = __base::is_integer; - static constexpr bool is_exact = __base::is_exact; - static constexpr int radix = __base::radix; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __base::epsilon(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return __base::round_error(); - } - - static constexpr int min_exponent = __base::min_exponent; - static constexpr int min_exponent10 = __base::min_exponent10; - static constexpr int max_exponent = __base::max_exponent; - static constexpr int max_exponent10 = __base::max_exponent10; - - static constexpr bool has_infinity = __base::has_infinity; - static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; - static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; - static constexpr float_denorm_style has_denorm = __base::has_denorm; - static constexpr bool has_denorm_loss = __base::has_denorm_loss; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __base::infinity(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __base::quiet_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __base::signaling_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __base::denorm_min(); - } - - static constexpr bool is_iec559 = __base::is_iec559; - static constexpr bool is_bounded = __base::is_bounded; - static constexpr bool is_modulo = __base::is_modulo; - - static constexpr bool traps = __base::traps; - static constexpr bool tinyness_before = __base::tinyness_before; - static constexpr float_round_style round_style = __base::round_style; + static constexpr bool is_specialized = __base::is_specialized; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __base::min();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __base::max();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return __base::lowest();} + + static constexpr int digits = __base::digits; + static constexpr int digits10 = __base::digits10; + static constexpr int max_digits10 = __base::max_digits10; + static constexpr bool is_signed = __base::is_signed; + static constexpr bool is_integer = __base::is_integer; + static constexpr bool is_exact = __base::is_exact; + static constexpr int radix = __base::radix; + _LIBCUDACXX_INLINE_VISIBILITY static 
constexpr type epsilon() noexcept {return __base::epsilon();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return __base::round_error();} + + static constexpr int min_exponent = __base::min_exponent; + static constexpr int min_exponent10 = __base::min_exponent10; + static constexpr int max_exponent = __base::max_exponent; + static constexpr int max_exponent10 = __base::max_exponent10; + + static constexpr bool has_infinity = __base::has_infinity; + static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; + static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; + static constexpr float_denorm_style has_denorm = __base::has_denorm; + static constexpr bool has_denorm_loss = __base::has_denorm_loss; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __base::infinity();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __base::quiet_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __base::signaling_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __base::denorm_min();} + + static constexpr bool is_iec559 = __base::is_iec559; + static constexpr bool is_bounded = __base::is_bounded; + static constexpr bool is_modulo = __base::is_modulo; + + static constexpr bool traps = __base::traps; + static constexpr bool tinyness_before = __base::tinyness_before; + static constexpr float_round_style round_style = __base::round_style; }; template -constexpr bool numeric_limits::is_specialized; + constexpr bool numeric_limits::is_specialized; template -constexpr int numeric_limits::digits; + constexpr int numeric_limits::digits; template -constexpr int numeric_limits::digits10; + constexpr int numeric_limits::digits10; template -constexpr int numeric_limits::max_digits10; + constexpr int numeric_limits::max_digits10; template -constexpr bool numeric_limits::is_signed; + constexpr bool numeric_limits::is_signed; template -constexpr bool numeric_limits::is_integer; + constexpr bool numeric_limits::is_integer; template -constexpr bool numeric_limits::is_exact; + constexpr bool numeric_limits::is_exact; template -constexpr int numeric_limits::radix; + constexpr int numeric_limits::radix; template -constexpr int numeric_limits::min_exponent; + constexpr int numeric_limits::min_exponent; template -constexpr int numeric_limits::min_exponent10; + constexpr int numeric_limits::min_exponent10; template -constexpr int numeric_limits::max_exponent; + constexpr int numeric_limits::max_exponent; template -constexpr int numeric_limits::max_exponent10; + constexpr int numeric_limits::max_exponent10; template -constexpr bool numeric_limits::has_infinity; + constexpr bool numeric_limits::has_infinity; template -constexpr bool numeric_limits::has_quiet_NaN; + constexpr bool numeric_limits::has_quiet_NaN; template -constexpr bool numeric_limits::has_signaling_NaN; + constexpr bool numeric_limits::has_signaling_NaN; template -constexpr float_denorm_style numeric_limits::has_denorm; + constexpr float_denorm_style numeric_limits::has_denorm; template -constexpr bool numeric_limits::has_denorm_loss; + constexpr bool numeric_limits::has_denorm_loss; template -constexpr bool numeric_limits::is_iec559; + constexpr bool numeric_limits::is_iec559; template -constexpr bool numeric_limits::is_bounded; + constexpr bool numeric_limits::is_bounded; template -constexpr bool numeric_limits::is_modulo; + constexpr bool numeric_limits::is_modulo; 
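The const, volatile, and const volatile partial specializations in this hunk do nothing but forward to numeric_limits<_Tp> through their private __base, so a cv-qualified query reports exactly the same traits as the unqualified type. A small sketch of that guarantee (editorial, not part of the patch), assuming <cuda/std/limits> is reachable:

#include <cuda/std/limits>

int main()
{
  using plain = cuda::std::numeric_limits<int>;
  using c     = cuda::std::numeric_limits<const int>;
  using cv    = cuda::std::numeric_limits<const volatile int>;
  static_assert(c::is_specialized && cv::is_specialized, "wrappers are specialized");
  static_assert(plain::digits == c::digits && c::digits == cv::digits, "digits forwarded");
  static_assert(plain::min() == c::min() && plain::max() == cv::max(), "bounds forwarded");
  static_assert(plain::round_style == cv::round_style, "enum values forwarded unchanged");
  return 0;
}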
template -constexpr bool numeric_limits::traps; + constexpr bool numeric_limits::traps; template -constexpr bool numeric_limits::tinyness_before; + constexpr bool numeric_limits::tinyness_before; template -constexpr float_round_style numeric_limits::round_style; + constexpr float_round_style numeric_limits::round_style; template -class _LIBCUDACXX_TEMPLATE_VIS numeric_limits : private numeric_limits<_Tp> +class _LIBCUDACXX_TEMPLATE_VIS numeric_limits + : private numeric_limits<_Tp> { - typedef numeric_limits<_Tp> __base; - typedef _Tp type; - + typedef numeric_limits<_Tp> __base; + typedef _Tp type; public: - static constexpr bool is_specialized = __base::is_specialized; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept - { - return __base::min(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept - { - return __base::max(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept - { - return __base::lowest(); - } - - static constexpr int digits = __base::digits; - static constexpr int digits10 = __base::digits10; - static constexpr int max_digits10 = __base::max_digits10; - static constexpr bool is_signed = __base::is_signed; - static constexpr bool is_integer = __base::is_integer; - static constexpr bool is_exact = __base::is_exact; - static constexpr int radix = __base::radix; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept - { - return __base::epsilon(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept - { - return __base::round_error(); - } - - static constexpr int min_exponent = __base::min_exponent; - static constexpr int min_exponent10 = __base::min_exponent10; - static constexpr int max_exponent = __base::max_exponent; - static constexpr int max_exponent10 = __base::max_exponent10; - - static constexpr bool has_infinity = __base::has_infinity; - static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; - static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; - static constexpr float_denorm_style has_denorm = __base::has_denorm; - static constexpr bool has_denorm_loss = __base::has_denorm_loss; - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept - { - return __base::infinity(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept - { - return __base::quiet_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept - { - return __base::signaling_NaN(); - } - _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept - { - return __base::denorm_min(); - } - - static constexpr bool is_iec559 = __base::is_iec559; - static constexpr bool is_bounded = __base::is_bounded; - static constexpr bool is_modulo = __base::is_modulo; - - static constexpr bool traps = __base::traps; - static constexpr bool tinyness_before = __base::tinyness_before; - static constexpr float_round_style round_style = __base::round_style; + static constexpr bool is_specialized = __base::is_specialized; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type min() noexcept {return __base::min();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type max() noexcept {return __base::max();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type lowest() noexcept {return __base::lowest();} + + static constexpr int digits = __base::digits; + static constexpr int digits10 = __base::digits10; + static constexpr int max_digits10 = __base::max_digits10; + static constexpr bool is_signed = 
__base::is_signed; + static constexpr bool is_integer = __base::is_integer; + static constexpr bool is_exact = __base::is_exact; + static constexpr int radix = __base::radix; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type epsilon() noexcept {return __base::epsilon();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type round_error() noexcept {return __base::round_error();} + + static constexpr int min_exponent = __base::min_exponent; + static constexpr int min_exponent10 = __base::min_exponent10; + static constexpr int max_exponent = __base::max_exponent; + static constexpr int max_exponent10 = __base::max_exponent10; + + static constexpr bool has_infinity = __base::has_infinity; + static constexpr bool has_quiet_NaN = __base::has_quiet_NaN; + static constexpr bool has_signaling_NaN = __base::has_signaling_NaN; + static constexpr float_denorm_style has_denorm = __base::has_denorm; + static constexpr bool has_denorm_loss = __base::has_denorm_loss; + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type infinity() noexcept {return __base::infinity();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type quiet_NaN() noexcept {return __base::quiet_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type signaling_NaN() noexcept {return __base::signaling_NaN();} + _LIBCUDACXX_INLINE_VISIBILITY static constexpr type denorm_min() noexcept {return __base::denorm_min();} + + static constexpr bool is_iec559 = __base::is_iec559; + static constexpr bool is_bounded = __base::is_bounded; + static constexpr bool is_modulo = __base::is_modulo; + + static constexpr bool traps = __base::traps; + static constexpr bool tinyness_before = __base::tinyness_before; + static constexpr float_round_style round_style = __base::round_style; }; template -constexpr bool numeric_limits::is_specialized; + constexpr bool numeric_limits::is_specialized; template -constexpr int numeric_limits::digits; + constexpr int numeric_limits::digits; template -constexpr int numeric_limits::digits10; + constexpr int numeric_limits::digits10; template -constexpr int numeric_limits::max_digits10; + constexpr int numeric_limits::max_digits10; template -constexpr bool numeric_limits::is_signed; + constexpr bool numeric_limits::is_signed; template -constexpr bool numeric_limits::is_integer; + constexpr bool numeric_limits::is_integer; template -constexpr bool numeric_limits::is_exact; + constexpr bool numeric_limits::is_exact; template -constexpr int numeric_limits::radix; + constexpr int numeric_limits::radix; template -constexpr int numeric_limits::min_exponent; + constexpr int numeric_limits::min_exponent; template -constexpr int numeric_limits::min_exponent10; + constexpr int numeric_limits::min_exponent10; template -constexpr int numeric_limits::max_exponent; + constexpr int numeric_limits::max_exponent; template -constexpr int numeric_limits::max_exponent10; + constexpr int numeric_limits::max_exponent10; template -constexpr bool numeric_limits::has_infinity; + constexpr bool numeric_limits::has_infinity; template -constexpr bool numeric_limits::has_quiet_NaN; + constexpr bool numeric_limits::has_quiet_NaN; template -constexpr bool numeric_limits::has_signaling_NaN; + constexpr bool numeric_limits::has_signaling_NaN; template -constexpr float_denorm_style numeric_limits::has_denorm; + constexpr float_denorm_style numeric_limits::has_denorm; template -constexpr bool numeric_limits::has_denorm_loss; + constexpr bool numeric_limits::has_denorm_loss; template -constexpr bool numeric_limits::is_iec559; + constexpr bool 
numeric_limits::is_iec559; template -constexpr bool numeric_limits::is_bounded; + constexpr bool numeric_limits::is_bounded; template -constexpr bool numeric_limits::is_modulo; + constexpr bool numeric_limits::is_modulo; template -constexpr bool numeric_limits::traps; + constexpr bool numeric_limits::traps; template -constexpr bool numeric_limits::tinyness_before; + constexpr bool numeric_limits::tinyness_before; template -constexpr float_round_style numeric_limits::round_style; + constexpr float_round_style numeric_limits::round_style; _LIBCUDACXX_END_NAMESPACE_STD #include //__cuda_std__ -#endif // _LIBCUDACXX_LIMITS +#endif // _LIBCUDACXX_LIMITS diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/mdspan b/libcudacxx/include/cuda/std/detail/libcxx/include/mdspan index 8fe0e09d0a9..dd469e6b8f6 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/mdspan +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/mdspan @@ -46,18 +46,19 @@ #include +#include // all public C++ headers provide the assertion handler #include +#include +#include #include #include -#include +#include #include #include -#include #include -#include #include #include -#include // all public C++ headers provide the assertion handler + #include #endif // _LIBCUDACXX_MDSPAN diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/new b/libcudacxx/include/cuda/std/detail/libcxx/include/new index a9ce3e694a0..7615bb0d326 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/new +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/new @@ -50,10 +50,11 @@ template constexpr T* launder(T* p) noexcept; // C++17 # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include -#include // all public C++ headers provide the assertion handler + #include #endif // _LIBCUDACXX_NEW diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/numeric b/libcudacxx/include/cuda/std/detail/libcxx/include/numeric index 035bf018ad6..76d49c5a26c 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/numeric +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/numeric @@ -155,454 +155,453 @@ floating_point midpoint(floating_point a, floating_point b); // C++20 # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include // for isnormal -#include // all public C++ headers provide the assertion handler -#include #include #include // for numeric_limits #include +#include + _LIBCUDACXX_BEGIN_NAMESPACE_STD #ifndef __cuda_std__ template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp accumulate(_InputIterator __first, _InputIterator __last, _Tp __init) +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +accumulate(_InputIterator __first, _InputIterator __last, _Tp __init) { - for (; __first != __last; ++__first) - { - __init = __init + *__first; - } - return __init; + for (; __first != __last; ++__first) + __init = __init + *__first; + return __init; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp accumulate(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOperation __binary_op) { - for (; __first != __last; ++__first) - { - __init = __binary_op(__init, *__first); - } - return __init; + for (; __first != __last; ++__first) + __init = __binary_op(__init, *__first); + return __init; } -# if _CCCL_STD_VER > 2014 +#if _CCCL_STD_VER > 2014 template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp 
reduce(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOp __b) +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +reduce(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOp __b) { - for (; __first != __last; ++__first) - { - __init = __b(__init, *__first); - } - return __init; + for (; __first != __last; ++__first) + __init = __b(__init, *__first); + return __init; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp reduce(_InputIterator __first, _InputIterator __last, _Tp __init) +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +reduce(_InputIterator __first, _InputIterator __last, _Tp __init) { - return _CUDA_VSTD::reduce(__first, __last, __init, _CUDA_VSTD::plus<>()); + return _CUDA_VSTD::reduce(__first, __last, __init, _CUDA_VSTD::plus<>()); } template -inline _LIBCUDACXX_INLINE_VISIBILITY typename iterator_traits<_InputIterator>::value_type +inline _LIBCUDACXX_INLINE_VISIBILITY +typename iterator_traits<_InputIterator>::value_type reduce(_InputIterator __first, _InputIterator __last) { - return _CUDA_VSTD::reduce(__first, __last, typename iterator_traits<_InputIterator>::value_type{}); + return _CUDA_VSTD::reduce(__first, __last, + typename iterator_traits<_InputIterator>::value_type{}); } -# endif +#endif template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp inner_product(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _Tp __init) { - for (; __first1 != __last1; ++__first1, (void) ++__first2) - { - __init = __init + *__first1 * *__first2; - } - return __init; + for (; __first1 != __last1; ++__first1, (void) ++__first2) + __init = __init + *__first1 * *__first2; + return __init; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp inner_product( - _InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _Tp __init, - _BinaryOperation1 __binary_op1, - _BinaryOperation2 __binary_op2) -{ - for (; __first1 != __last1; ++__first1, (void) ++__first2) - { - __init = __binary_op1(__init, __binary_op2(*__first1, *__first2)); - } - return __init; +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +inner_product(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, + _Tp __init, _BinaryOperation1 __binary_op1, _BinaryOperation2 __binary_op2) +{ + for (; __first1 != __last1; ++__first1, (void) ++__first2) + __init = __binary_op1(__init, __binary_op2(*__first1, *__first2)); + return __init; } -# if _CCCL_STD_VER > 2014 +#if _CCCL_STD_VER > 2014 template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp -transform_reduce(_InputIterator __first, _InputIterator __last, _Tp __init, _BinaryOp __b, _UnaryOp __u) -{ - for (; __first != __last; ++__first) - { - __init = __b(__init, __u(*__first)); - } - return __init; +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +transform_reduce(_InputIterator __first, _InputIterator __last, + _Tp __init, _BinaryOp __b, _UnaryOp __u) +{ + for (; __first != __last; ++__first) + __init = __b(__init, __u(*__first)); + return __init; } -template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp transform_reduce( - _InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _Tp __init, - _BinaryOp1 __b1, - _BinaryOp2 __b2) +template +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +transform_reduce(_InputIterator1 __first1, _InputIterator1 __last1, + _InputIterator2 __first2, _Tp __init, _BinaryOp1 __b1, _BinaryOp2 __b2) { - for (; __first1 != __last1; ++__first1, (void) ++__first2) - { - __init = __b1(__init, __b2(*__first1, *__first2)); - } - 
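These reduce and transform_reduce overloads are the plain serial folds specified by C++17 <numeric>: reduce folds a range with a binary operation, and transform_reduce applies a unary transform (or combines two ranges element-wise) before folding. A host-only illustration of the expected results (editorial, not part of the patch) using the std:: equivalents, so it does not depend on the header being patched:

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
  std::vector<int> a{1, 2, 3, 4};
  std::vector<int> b{10, 20, 30, 40};

  // Fold with operator+: 1 + 2 + 3 + 4 = 10.
  assert(std::reduce(a.begin(), a.end(), 0) == 10);

  // Transform each element, then fold: 1 + 4 + 9 + 16 = 30.
  assert(std::transform_reduce(a.begin(), a.end(), 0,
                               std::plus<>{}, [](int x) { return x * x; }) == 30);

  // Two-range form is an inner product: 10 + 40 + 90 + 160 = 300.
  assert(std::transform_reduce(a.begin(), a.end(), b.begin(), 0) == 300);
  return 0;
}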
return __init; + for (; __first1 != __last1; ++__first1, (void) ++__first2) + __init = __b1(__init, __b2(*__first1, *__first2)); + return __init; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _Tp -transform_reduce(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _Tp __init) +inline _LIBCUDACXX_INLINE_VISIBILITY +_Tp +transform_reduce(_InputIterator1 __first1, _InputIterator1 __last1, + _InputIterator2 __first2, _Tp __init) { - return _CUDA_VSTD::transform_reduce( - __first1, __last1, __first2, _CUDA_VSTD::move(__init), _CUDA_VSTD::plus<>(), _CUDA_VSTD::multiplies<>()); + return _CUDA_VSTD::transform_reduce(__first1, __last1, __first2, _CUDA_VSTD::move(__init), + _CUDA_VSTD::plus<>(), _CUDA_VSTD::multiplies<>()); } -# endif +#endif template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator partial_sum(_InputIterator __first, _InputIterator __last, _OutputIterator __result) { - if (__first != __last) - { - typename iterator_traits<_InputIterator>::value_type __t(*__first); - *__result = __t; - for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + if (__first != __last) { - __t = __t + *__first; - *__result = __t; + typename iterator_traits<_InputIterator>::value_type __t(*__first); + *__result = __t; + for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + { + __t = __t + *__first; + *__result = __t; + } } - } - return __result; + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator -partial_sum(_InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOperation __binary_op) +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator +partial_sum(_InputIterator __first, _InputIterator __last, _OutputIterator __result, + _BinaryOperation __binary_op) { - if (__first != __last) - { - typename iterator_traits<_InputIterator>::value_type __t(*__first); - *__result = __t; - for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + if (__first != __last) { - __t = __binary_op(__t, *__first); - *__result = __t; + typename iterator_traits<_InputIterator>::value_type __t(*__first); + *__result = __t; + for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + { + __t = __binary_op(__t, *__first); + *__result = __t; + } } - } - return __result; + return __result; } -# if _CCCL_STD_VER > 2014 +#if _CCCL_STD_VER > 2014 template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator -exclusive_scan(_InputIterator __first, _InputIterator __last, _OutputIterator __result, _Tp __init, _BinaryOp __b) +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator +exclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _Tp __init, _BinaryOp __b) { - if (__first != __last) - { - _Tp __saved = __init; - do + if (__first != __last) { - __init = __b(__init, *__first); - *__result = __saved; - __saved = __init; - ++__result; - } while (++__first != __last); - } - return __result; + _Tp __saved = __init; + do + { + __init = __b(__init, *__first); + *__result = __saved; + __saved = __init; + ++__result; + } while (++__first != __last); + } + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator -exclusive_scan(_InputIterator __first, _InputIterator __last, _OutputIterator __result, _Tp __init) +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator +exclusive_scan(_InputIterator __first, _InputIterator __last, + 
_OutputIterator __result, _Tp __init) { - return _CUDA_VSTD::exclusive_scan(__first, __last, __result, __init, _CUDA_VSTD::plus<>()); + return _CUDA_VSTD::exclusive_scan(__first, __last, __result, __init, _CUDA_VSTD::plus<>()); } template -_OutputIterator -inclusive_scan(_InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOp __b, _Tp __init) -{ - for (; __first != __last; ++__first, (void) ++__result) - { - __init = __b(__init, *__first); - *__result = __init; - } - return __result; +_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _BinaryOp __b, _Tp __init) +{ + for (; __first != __last; ++__first, (void) ++__result) { + __init = __b(__init, *__first); + *__result = __init; + } + return __result; } template -_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOp __b) +_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _BinaryOp __b) { - if (__first != __last) - { - typename std::iterator_traits<_InputIterator>::value_type __init = *__first; - *__result++ = __init; - if (++__first != __last) - { - return _CUDA_VSTD::inclusive_scan(__first, __last, __result, __b, __init); - } - } + if (__first != __last) { + typename std::iterator_traits<_InputIterator>::value_type __init = *__first; + *__result++ = __init; + if (++__first != __last) + return _CUDA_VSTD::inclusive_scan(__first, __last, __result, __b, __init); + } - return __result; + return __result; } template -_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last, _OutputIterator __result) +_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result) { - return _CUDA_VSTD::inclusive_scan(__first, __last, __result, std::plus<>()); + return _CUDA_VSTD::inclusive_scan(__first, __last, __result, std::plus<>()); } -template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator transform_exclusive_scan( - _InputIterator __first, _InputIterator __last, _OutputIterator __result, _Tp __init, _BinaryOp __b, _UnaryOp __u) +template +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator +transform_exclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _Tp __init, + _BinaryOp __b, _UnaryOp __u) { - if (__first != __last) - { - _Tp __saved = __init; - do + if (__first != __last) { - __init = __b(__init, __u(*__first)); - *__result = __saved; - __saved = __init; - ++__result; - } while (++__first != __last); - } - return __result; + _Tp __saved = __init; + do + { + __init = __b(__init, __u(*__first)); + *__result = __saved; + __saved = __init; + ++__result; + } while (++__first != __last); + } + return __result; } template -_OutputIterator transform_inclusive_scan( - _InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOp __b, _UnaryOp __u, _Tp __init) +_OutputIterator transform_inclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _BinaryOp __b, _UnaryOp __u, _Tp __init) { - for (; __first != __last; ++__first, (void) ++__result) - { - __init = __b(__init, __u(*__first)); - *__result = __init; - } + for (; __first != __last; ++__first, (void) ++__result) { + __init = __b(__init, __u(*__first)); + *__result = __init; + } - return __result; + return __result; } template -_OutputIterator transform_inclusive_scan( - _InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOp 
__b, _UnaryOp __u) +_OutputIterator transform_inclusive_scan(_InputIterator __first, _InputIterator __last, + _OutputIterator __result, _BinaryOp __b, _UnaryOp __u) { - if (__first != __last) - { - typename std::iterator_traits<_InputIterator>::value_type __init = __u(*__first); - *__result++ = __init; - if (++__first != __last) - { - return _CUDA_VSTD::transform_inclusive_scan(__first, __last, __result, __b, __u, __init); - } - } + if (__first != __last) { + typename std::iterator_traits<_InputIterator>::value_type __init = __u(*__first); + *__result++ = __init; + if (++__first != __last) + return _CUDA_VSTD::transform_inclusive_scan(__first, __last, __result, __b, __u, __init); + } - return __result; + return __result; } -# endif +#endif template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator adjacent_difference(_InputIterator __first, _InputIterator __last, _OutputIterator __result) { - if (__first != __last) - { - typename iterator_traits<_InputIterator>::value_type __t1(*__first); - *__result = __t1; - for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + if (__first != __last) { - typename iterator_traits<_InputIterator>::value_type __t2(*__first); - *__result = __t2 - __t1; - __t1 = _CUDA_VSTD::move(__t2); + typename iterator_traits<_InputIterator>::value_type __t1(*__first); + *__result = __t1; + for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + { + typename iterator_traits<_InputIterator>::value_type __t2(*__first); + *__result = __t2 - __t1; + __t1 = _CUDA_VSTD::move(__t2); + } } - } - return __result; + return __result; } template -inline _LIBCUDACXX_INLINE_VISIBILITY _OutputIterator adjacent_difference( - _InputIterator __first, _InputIterator __last, _OutputIterator __result, _BinaryOperation __binary_op) +inline _LIBCUDACXX_INLINE_VISIBILITY +_OutputIterator +adjacent_difference(_InputIterator __first, _InputIterator __last, _OutputIterator __result, + _BinaryOperation __binary_op) { - if (__first != __last) - { - typename iterator_traits<_InputIterator>::value_type __t1(*__first); - *__result = __t1; - for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + if (__first != __last) { - typename iterator_traits<_InputIterator>::value_type __t2(*__first); - *__result = __binary_op(__t2, __t1); - __t1 = _CUDA_VSTD::move(__t2); + typename iterator_traits<_InputIterator>::value_type __t1(*__first); + *__result = __t1; + for (++__first, (void) ++__result; __first != __last; ++__first, (void) ++__result) + { + typename iterator_traits<_InputIterator>::value_type __t2(*__first); + *__result = __binary_op(__t2, __t1); + __t1 = _CUDA_VSTD::move(__t2); + } } - } - return __result; + return __result; } #endif // __cuda_std__ template -inline _LIBCUDACXX_INLINE_VISIBILITY void iota(_ForwardIterator __first, _ForwardIterator __last, _Tp __value_) +inline _LIBCUDACXX_INLINE_VISIBILITY +void +iota(_ForwardIterator __first, _ForwardIterator __last, _Tp __value_) { - for (; __first != __last; ++__first, (void) ++__value_) - { - *__first = __value_; - } + for (; __first != __last; ++__first, (void) ++__value_) + *__first = __value_; } + #ifndef __cuda_std__ -# if _CCCL_STD_VER > 2014 -template ::value> -struct __ct_abs; +#if _CCCL_STD_VER > 2014 +template ::value> struct __ct_abs; template -struct __ct_abs<_Result, _Source, true> -{ - constexpr _LIBCUDACXX_INLINE_VISIBILITY _Result operator()(_Source __t) const noexcept - { - if (__t >= 
0) - { - return __t; - } - if (__t == numeric_limits<_Source>::min()) +struct __ct_abs<_Result, _Source, true> { + constexpr _LIBCUDACXX_INLINE_VISIBILITY + _Result operator()(_Source __t) const noexcept { - return -static_cast<_Result>(__t); - } + if (__t >= 0) return __t; + if (__t == numeric_limits<_Source>::min()) return -static_cast<_Result>(__t); return -__t; - } + } }; template -struct __ct_abs<_Result, _Source, false> -{ - constexpr _LIBCUDACXX_INLINE_VISIBILITY _Result operator()(_Source __t) const noexcept - { - return __t; - } +struct __ct_abs<_Result, _Source, false> { + constexpr _LIBCUDACXX_INLINE_VISIBILITY + _Result operator()(_Source __t) const noexcept { return __t; } }; -template -constexpr _LIBCUDACXX_HIDDEN _Tp __gcd(_Tp __m, _Tp __n) + +template +constexpr _LIBCUDACXX_HIDDEN +_Tp __gcd(_Tp __m, _Tp __n) { - static_assert((!is_signed<_Tp>::value), ""); - return __n == 0 ? __m : _CUDA_VSTD::__gcd<_Tp>(__n, __m % __n); + static_assert((!is_signed<_Tp>::value), ""); + return __n == 0 ? __m : _CUDA_VSTD::__gcd<_Tp>(__n, __m % __n); } -template -constexpr _LIBCUDACXX_INLINE_VISIBILITY common_type_t<_Tp, _Up> gcd(_Tp __m, _Up __n) + +template +constexpr _LIBCUDACXX_INLINE_VISIBILITY +common_type_t<_Tp,_Up> +gcd(_Tp __m, _Up __n) { - static_assert((is_integral<_Tp>::value && is_integral<_Up>::value), "Arguments to gcd must be integer types"); - static_assert((!is_same<__remove_cv_t<_Tp>, bool>::value), "First argument to gcd cannot be bool"); - static_assert((!is_same<__remove_cv_t<_Up>, bool>::value), "Second argument to gcd cannot be bool"); - using _Rp = common_type_t<_Tp, _Up>; - using _Wp = make_unsigned_t<_Rp>; - return static_cast<_Rp>( - _CUDA_VSTD::__gcd(static_cast<_Wp>(__ct_abs<_Rp, _Tp>()(__m)), static_cast<_Wp>(__ct_abs<_Rp, _Up>()(__n)))); + static_assert((is_integral<_Tp>::value && is_integral<_Up>::value), "Arguments to gcd must be integer types"); + static_assert((!is_same<__remove_cv_t<_Tp>, bool>::value), "First argument to gcd cannot be bool" ); + static_assert((!is_same<__remove_cv_t<_Up>, bool>::value), "Second argument to gcd cannot be bool" ); + using _Rp = common_type_t<_Tp,_Up>; + using _Wp = make_unsigned_t<_Rp>; + return static_cast<_Rp>(_CUDA_VSTD::__gcd( + static_cast<_Wp>(__ct_abs<_Rp, _Tp>()(__m)), + static_cast<_Wp>(__ct_abs<_Rp, _Up>()(__n)))); } -template -constexpr _LIBCUDACXX_INLINE_VISIBILITY common_type_t<_Tp, _Up> lcm(_Tp __m, _Up __n) -{ - static_assert((is_integral<_Tp>::value && is_integral<_Up>::value), "Arguments to lcm must be integer types"); - static_assert((!is_same<__remove_cv_t<_Tp>, bool>::value), "First argument to lcm cannot be bool"); - static_assert((!is_same<__remove_cv_t<_Up>, bool>::value), "Second argument to lcm cannot be bool"); - if (__m == 0 || __n == 0) - { - return 0; - } - - using _Rp = common_type_t<_Tp, _Up>; - _Rp __val1 = __ct_abs<_Rp, _Tp>()(__m) / _CUDA_VSTD::gcd(__m, __n); - _Rp __val2 = __ct_abs<_Rp, _Up>()(__n); - _LIBCUDACXX_ASSERT((numeric_limits<_Rp>::max() / __val1 > __val2), "Overflow in lcm"); - return __val1 * __val2; +template +constexpr _LIBCUDACXX_INLINE_VISIBILITY +common_type_t<_Tp,_Up> +lcm(_Tp __m, _Up __n) +{ + static_assert((is_integral<_Tp>::value && is_integral<_Up>::value), "Arguments to lcm must be integer types"); + static_assert((!is_same<__remove_cv_t<_Tp>, bool>::value), "First argument to lcm cannot be bool" ); + static_assert((!is_same<__remove_cv_t<_Up>, bool>::value), "Second argument to lcm cannot be bool" ); + if (__m == 0 || __n == 0) + return 0; + + using _Rp = 
common_type_t<_Tp,_Up>; + _Rp __val1 = __ct_abs<_Rp, _Tp>()(__m) / _CUDA_VSTD::gcd(__m, __n); + _Rp __val2 = __ct_abs<_Rp, _Up>()(__n); + _LIBCUDACXX_ASSERT((numeric_limits<_Rp>::max() / __val1 > __val2), "Overflow in lcm"); + return __val1 * __val2; } -# endif /* _CCCL_STD_VER > 2014 */ +#endif /* _CCCL_STD_VER > 2014 */ -# if _CCCL_STD_VER > 2017 +#if _CCCL_STD_VER > 2017 template -_LIBCUDACXX_INLINE_VISIBILITY constexpr enable_if_t< - is_integral_v<_Tp> && !is_same_v && !is_null_pointer_v<_Tp>, - _Tp> -midpoint(_Tp __a, _Tp __b) noexcept _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK +_LIBCUDACXX_INLINE_VISIBILITY constexpr +enable_if_t && !is_same_v && !is_null_pointer_v<_Tp>, _Tp> +midpoint(_Tp __a, _Tp __b) noexcept +_LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK { - using _Up = std::make_unsigned_t<_Tp>; - - int __sign = 1; - _Up __m = __a; - _Up __M = __b; - if (__a > __b) - { - __sign = -1; - __m = __b; - __M = __a; - } - return __a + __sign * _Tp(_Up(__M - __m) >> 1); + using _Up = std::make_unsigned_t<_Tp>; + + int __sign = 1; + _Up __m = __a; + _Up __M = __b; + if (__a > __b) + { + __sign = -1; + __m = __b; + __M = __a; + } + return __a + __sign * _Tp(_Up(__M-__m) >> 1); } + template -_LIBCUDACXX_INLINE_VISIBILITY constexpr enable_if_t< - is_pointer_v<_TPtr> && is_object_v> && !is_void_v> - && (sizeof(remove_pointer_t<_TPtr>) > 0), - _TPtr> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +enable_if_t + && is_object_v> + && ! is_void_v> + && (sizeof(remove_pointer_t<_TPtr>) > 0), _TPtr> midpoint(_TPtr __a, _TPtr __b) noexcept { - return __a + _CUDA_VSTD::midpoint(ptrdiff_t(0), __b - __a); + return __a + _CUDA_VSTD::midpoint(ptrdiff_t(0), __b - __a); } + template -constexpr int __sign(_Tp __val) -{ - return (_Tp(0) < __val) - (__val < _Tp(0)); +constexpr int __sign(_Tp __val) { + return (_Tp(0) < __val) - (__val < _Tp(0)); } template -constexpr _Fp __fp_abs(_Fp __f) -{ - return __f >= 0 ? __f : -__f; -} +constexpr _Fp __fp_abs(_Fp __f) { return __f >= 0 ? __f : -__f; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr enable_if_t, _Fp> midpoint(_Fp __a, _Fp __b) noexcept -{ - constexpr _Fp __lo = numeric_limits<_Fp>::min() * 2; - constexpr _Fp __hi = numeric_limits<_Fp>::max() / 2; - return __fp_abs(__a) <= __hi && __fp_abs(__b) <= __hi - ? // typical case: overflow is impossible - (__a + __b) / 2 - : // always correctly rounded - __fp_abs(__a) < __lo ? __a + __b / 2 : // not safe to halve a - __fp_abs(__a) < __lo ? __a / 2 + __b - : // not safe to halve b - __a / 2 + __b / 2; // otherwise correctly rounded +_LIBCUDACXX_INLINE_VISIBILITY constexpr +enable_if_t, _Fp> +midpoint(_Fp __a, _Fp __b) noexcept +{ + constexpr _Fp __lo = numeric_limits<_Fp>::min()*2; + constexpr _Fp __hi = numeric_limits<_Fp>::max()/2; + return __fp_abs(__a) <= __hi && __fp_abs(__b) <= __hi ? // typical case: overflow is impossible + (__a + __b)/2 : // always correctly rounded + __fp_abs(__a) < __lo ? __a + __b/2 : // not safe to halve a + __fp_abs(__a) < __lo ? 
__a/2 + __b : // not safe to halve b + __a/2 + __b/2; // otherwise correctly rounded } -# endif // _CCCL_STD_VER > 2017 +#endif // _CCCL_STD_VER > 2017 #endif // __cuda_std__ _LIBCUDACXX_END_NAMESPACE_STD #if defined(_LIBCUDACXX_HAS_PARALLEL_ALGORITHMS) && _CCCL_STD_VER >= 2017 -# include <__pstl_numeric> +# include <__pstl_numeric> #endif #include //__cuda_std__ -#endif // _LIBCUDACXX_NUMERIC +#endif // _LIBCUDACXX_NUMERIC diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/optional b/libcudacxx/include/cuda/std/detail/libcxx/include/optional index 7c222be3746..b4940773eae 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/optional +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/optional @@ -169,6 +169,8 @@ template # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler +#include #include #include #include @@ -181,12 +183,12 @@ template #include #include #include -#include #include #include #include #include #include +#include #include #include #include @@ -194,8 +196,6 @@ template #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include #include #include @@ -206,7 +206,7 @@ template // [optional.syn] #ifndef _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR -# include +#include #endif // !_LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR #include @@ -240,17 +240,13 @@ _CCCL_NORETURN inline _LIBCUDACXX_INLINE_VISIBILITY void __throw_bad_optional_ac struct nullopt_t { - struct __secret_tag - { - explicit __secret_tag() = default; - }; - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit nullopt_t(__secret_tag, __secret_tag) noexcept {} + struct __secret_tag { explicit __secret_tag() = default; }; + _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit nullopt_t(__secret_tag, __secret_tag) noexcept {} }; _LIBCUDACXX_CPO_ACCESSIBILITY nullopt_t nullopt{nullopt_t::__secret_tag{}, nullopt_t::__secret_tag{}}; -struct __optional_construct_from_invoke_tag -{}; +struct __optional_construct_from_invoke_tag {}; template ::value> struct __optional_destruct_base; @@ -258,334 +254,339 @@ struct __optional_destruct_base; template struct __optional_destruct_base<_Tp, false> { - typedef _Tp value_type; - static_assert(_LIBCUDACXX_TRAIT(is_object, value_type), - "instantiation of optional with a non-object type is undefined behavior"); - union - { - char __null_state_; - __remove_cv_t __val_; - }; - bool __engaged_; - - _CCCL_EXEC_CHECK_DISABLE - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX20 ~__optional_destruct_base() - { - if (__engaged_) + typedef _Tp value_type; + static_assert(_LIBCUDACXX_TRAIT(is_object, value_type), + "instantiation of optional with a non-object type is undefined behavior"); + union { - __val_.~value_type(); + char __null_state_; + __remove_cv_t __val_; + }; + bool __engaged_; + + _CCCL_EXEC_CHECK_DISABLE + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX20 ~__optional_destruct_base() + { + if (__engaged_) + __val_.~value_type(); } - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_destruct_base() noexcept - : __null_state_() - , __engaged_(false) - {} - - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit __optional_destruct_base(in_place_t, _Args&&... __args) - : __val_(_CUDA_VSTD::forward<_Args>(__args)...) - , __engaged_(true) - {} - - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_destruct_base( - __optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... 
__args) - : __val_(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...)) - , __engaged_(true) - {} - - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX20 void reset() noexcept - { - if (__engaged_) + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_destruct_base() noexcept + : __null_state_(), + __engaged_(false) {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr explicit __optional_destruct_base(in_place_t, _Args&&... __args) + : __val_(_CUDA_VSTD::forward<_Args>(__args)...), + __engaged_(true) {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_destruct_base(__optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... __args) + : __val_(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...)), __engaged_(true) {} + + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX20 void reset() noexcept { - __val_.~value_type(); - __engaged_ = false; + if (__engaged_) + { + __val_.~value_type(); + __engaged_ = false; + } } - } }; template struct __optional_destruct_base<_Tp, true> { - typedef _Tp value_type; - static_assert(_LIBCUDACXX_TRAIT(is_object, value_type), - "instantiation of optional with a non-object type is undefined behavior"); - union - { - char __null_state_; - __remove_cv_t __val_; - }; - bool __engaged_; - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_destruct_base() noexcept - : __null_state_() - , __engaged_(false) - {} - - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit __optional_destruct_base(in_place_t, _Args&&... __args) - : __val_(_CUDA_VSTD::forward<_Args>(__args)...) - , __engaged_(true) - {} - - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_destruct_base( - __optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... __args) - : __val_(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...)) - , __engaged_(true) - {} - - _LIBCUDACXX_INLINE_VISIBILITY constexpr void reset() noexcept - { - if (__engaged_) + typedef _Tp value_type; + static_assert(_LIBCUDACXX_TRAIT(is_object, value_type), + "instantiation of optional with a non-object type is undefined behavior"); + union { - __engaged_ = false; + char __null_state_; + __remove_cv_t __val_; + }; + bool __engaged_; + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_destruct_base() noexcept + : __null_state_(), + __engaged_(false) {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr explicit __optional_destruct_base(in_place_t, _Args&&... __args) + : __val_(_CUDA_VSTD::forward<_Args>(__args)...), + __engaged_(true) {} + + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_destruct_base(__optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... 
__args) + : __val_(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...)), __engaged_(true) {} + + _LIBCUDACXX_INLINE_VISIBILITY + constexpr void reset() noexcept + { + if (__engaged_) + { + __engaged_ = false; + } } - } }; template struct __optional_storage_base : __optional_destruct_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_storage_base, __optional_destruct_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_storage_base, __optional_destruct_base, _Tp); - using value_type = _Tp; + using value_type = _Tp; - _LIBCUDACXX_INLINE_VISIBILITY constexpr bool has_value() const noexcept - { - return this->__engaged_; - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type& __get() & noexcept - { - return this->__val_; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr const value_type& __get() const& noexcept - { - return this->__val_; - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type&& __get() && noexcept - { - return _CUDA_VSTD::move(this->__val_); - } - _LIBCUDACXX_INLINE_VISIBILITY constexpr const value_type&& __get() const&& noexcept - { - return _CUDA_VSTD::move(this->__val_); - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr bool has_value() const noexcept + { + return this->__engaged_; + } - template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX20 void __construct(_Args&&... __args) - { - _LIBCUDACXX_ASSERT(!has_value(), "__construct called for engaged __optional_storage"); -# if _CCCL_STD_VER > 2017 - _CUDA_VSTD::construct_at(_CUDA_VSTD::addressof(this->__val_), _CUDA_VSTD::forward<_Args>(__args)...); -# else - ::new ((void*) _CUDA_VSTD::addressof(this->__val_)) value_type(_CUDA_VSTD::forward<_Args>(__args)...); -# endif - this->__engaged_ = true; - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr value_type& __get() & noexcept + { + return this->__val_; + } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr const value_type& __get() const& noexcept + { + return this->__val_; + } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr value_type&& __get() && noexcept + { + return _CUDA_VSTD::move(this->__val_); + } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr const value_type&& __get() const&& noexcept + { + return _CUDA_VSTD::move(this->__val_); + } - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr void __construct_from(_That&& __opt) - { - if (__opt.has_value()) + template + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX20 void __construct(_Args&&... 
__args) { - __construct(_CUDA_VSTD::forward<_That>(__opt).__get()); + _LIBCUDACXX_ASSERT(!has_value(), "__construct called for engaged __optional_storage"); +#if _CCCL_STD_VER > 2017 + _CUDA_VSTD::construct_at(_CUDA_VSTD::addressof(this->__val_), _CUDA_VSTD::forward<_Args>(__args)...); +#else + ::new ((void*)_CUDA_VSTD::addressof(this->__val_)) value_type(_CUDA_VSTD::forward<_Args>(__args)...); +#endif + this->__engaged_ = true; } - } - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr void __assign_from(_That&& __opt) - { - if (this->__engaged_ == __opt.has_value()) + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr void __construct_from(_That&& __opt) { - if (this->__engaged_) - { - this->__val_ = _CUDA_VSTD::forward<_That>(__opt).__get(); - } + if (__opt.has_value()) + __construct(_CUDA_VSTD::forward<_That>(__opt).__get()); } - else + + template + _LIBCUDACXX_INLINE_VISIBILITY + constexpr void __assign_from(_That&& __opt) { - if (this->__engaged_) - { - this->reset(); - } - else - { - __construct(_CUDA_VSTD::forward<_That>(__opt).__get()); - } + if (this->__engaged_ == __opt.has_value()) + { + if (this->__engaged_) + this->__val_ = _CUDA_VSTD::forward<_That>(__opt).__get(); + } + else + { + if (this->__engaged_) + this->reset(); + else + __construct(_CUDA_VSTD::forward<_That>(__opt).__get()); + } } - } }; template ::value> struct __optional_copy_base : __optional_storage_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_base, __optional_storage_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_base, __optional_storage_base, _Tp); }; template struct __optional_copy_base<_Tp, false> : __optional_storage_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_base, __optional_storage_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_base, __optional_storage_base, _Tp); - // This ctor shouldn't need to initialize the base explicitly, but g++ 9 considers it to be uninitialized - // during constexpr evaluation if it isn't initialized explicitly. This can be replaced with the pattern - // below, in __optional_move_base, once g++ 9 falls off our support matrix. - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_copy_base(const __optional_copy_base& __opt) - : __base() - { - this->__construct_from(__opt); - } + // This ctor shouldn't need to initialize the base explicitly, but g++ 9 considers it to be uninitialized + // during constexpr evaluation if it isn't initialized explicitly. This can be replaced with the pattern + // below, in __optional_move_base, once g++ 9 falls off our support matrix. 
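    // Editor's sketch (not part of the patch): the "pattern below" that the comment above refers to is
    // the one __optional_move_base's move constructor already uses later in this same hunk, i.e.
    //
    //   constexpr __optional_move_base(__optional_move_base&& __opt)
    //       noexcept(_LIBCUDACXX_TRAIT(is_nothrow_move_constructible, _Tp))
    //   {
    //     this->__construct_from(_CUDA_VSTD::move(__opt));   // no explicit __base() in the init list
    //   }
    //
    // Both constructors engage the storage through __construct_from; the explicit __base() in the copy
    // constructor that follows exists only so g++ 9 treats the union member as initialized during
    // constant evaluation.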
+ _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_copy_base(const __optional_copy_base& __opt) : __base() + { + this->__construct_from(__opt); + } - __optional_copy_base(__optional_copy_base&&) = default; - __optional_copy_base& operator=(const __optional_copy_base&) = default; - __optional_copy_base& operator=(__optional_copy_base&&) = default; + __optional_copy_base(__optional_copy_base&&) = default; + __optional_copy_base& operator=(const __optional_copy_base&) = default; + __optional_copy_base& operator=(__optional_copy_base&&) = default; }; template ::value> struct __optional_move_base : __optional_copy_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_base, __optional_copy_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_base, __optional_copy_base, _Tp); }; template struct __optional_move_base<_Tp, false> : __optional_copy_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_base, __optional_copy_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_base, __optional_copy_base, _Tp); - __optional_move_base(const __optional_move_base&) = default; + __optional_move_base(const __optional_move_base&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_move_base(__optional_move_base&& __opt) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_move_constructible, _Tp)) - { - this->__construct_from(_CUDA_VSTD::move(__opt)); - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_move_base(__optional_move_base&& __opt) + noexcept(_LIBCUDACXX_TRAIT(is_nothrow_move_constructible, _Tp)) + { + this->__construct_from(_CUDA_VSTD::move(__opt)); + } - __optional_move_base& operator=(const __optional_move_base&) = default; - __optional_move_base& operator=(__optional_move_base&&) = default; + __optional_move_base& operator=(const __optional_move_base&) = default; + __optional_move_base& operator=(__optional_move_base&&) = default; }; -template ::value && is_trivially_copy_constructible<_Tp>::value - && is_trivially_copy_assignable<_Tp>::value> +template ::value && + is_trivially_copy_constructible<_Tp>::value && + is_trivially_copy_assignable<_Tp>::value> struct __optional_copy_assign_base : __optional_move_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_assign_base, __optional_move_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_assign_base, __optional_move_base, _Tp); }; template struct __optional_copy_assign_base<_Tp, false> : __optional_move_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_assign_base, __optional_move_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_copy_assign_base, __optional_move_base, _Tp); - __optional_copy_assign_base(const __optional_copy_assign_base&) = default; - __optional_copy_assign_base(__optional_copy_assign_base&&) = default; + __optional_copy_assign_base(const __optional_copy_assign_base&) = default; + __optional_copy_assign_base(__optional_copy_assign_base&&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_copy_assign_base& - operator=(const __optional_copy_assign_base& __opt) - { - this->__assign_from(__opt); - return *this; - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_copy_assign_base& operator=(const __optional_copy_assign_base& __opt) + { + this->__assign_from(__opt); + return *this; + } - __optional_copy_assign_base& operator=(__optional_copy_assign_base&&) = default; + __optional_copy_assign_base& operator=(__optional_copy_assign_base&&) = default; }; -template ::value && is_trivially_move_constructible<_Tp>::value 
- && is_trivially_move_assignable<_Tp>::value> +template ::value && + is_trivially_move_constructible<_Tp>::value && + is_trivially_move_assignable<_Tp>::value> struct __optional_move_assign_base : __optional_copy_assign_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_assign_base, __optional_copy_assign_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_assign_base, __optional_copy_assign_base, _Tp); }; template struct __optional_move_assign_base<_Tp, false> : __optional_copy_assign_base<_Tp> { - _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_assign_base, __optional_copy_assign_base, _Tp); + _LIBCUDACXX_DELEGATE_CONSTRUCTORS(__optional_move_assign_base, __optional_copy_assign_base, _Tp); - __optional_move_assign_base(const __optional_move_assign_base& __opt) = default; - __optional_move_assign_base(__optional_move_assign_base&&) = default; - __optional_move_assign_base& operator=(const __optional_move_assign_base&) = default; + __optional_move_assign_base(const __optional_move_assign_base& __opt) = default; + __optional_move_assign_base(__optional_move_assign_base&&) = default; + __optional_move_assign_base& operator=(const __optional_move_assign_base&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr __optional_move_assign_base& - operator=(__optional_move_assign_base&& __opt) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_move_assignable, _Tp) && _LIBCUDACXX_TRAIT(is_nothrow_move_constructible, _Tp)) - { - this->__assign_from(_CUDA_VSTD::move(__opt)); - return *this; - } + _LIBCUDACXX_INLINE_VISIBILITY + constexpr __optional_move_assign_base& operator=(__optional_move_assign_base&& __opt) + noexcept(_LIBCUDACXX_TRAIT(is_nothrow_move_assignable, _Tp) && + _LIBCUDACXX_TRAIT(is_nothrow_move_constructible, _Tp)) + { + this->__assign_from(_CUDA_VSTD::move(__opt)); + return *this; + } }; template -using __optional_sfinae_ctor_base_t = - __sfinae_ctor_base::value, is_move_constructible<_Tp>::value>; +using __optional_sfinae_ctor_base_t = __sfinae_ctor_base< + is_copy_constructible<_Tp>::value, + is_move_constructible<_Tp>::value +>; template -using __optional_sfinae_assign_base_t = - __sfinae_assign_base<(is_copy_constructible<_Tp>::value && is_copy_assignable<_Tp>::value), - (is_move_constructible<_Tp>::value && is_move_assignable<_Tp>::value)>; +using __optional_sfinae_assign_base_t = __sfinae_assign_base< + (is_copy_constructible<_Tp>::value && is_copy_assignable<_Tp>::value), + (is_move_constructible<_Tp>::value && is_move_assignable<_Tp>::value) +>; -template +template class optional; template -struct __is_std_optional : false_type -{}; -template +struct __is_std_optional : false_type {}; +template struct __is_std_optional> : true_type -{}; +{ +}; // Constraits template > -using __opt_check_constructible_from_opt = - _Or, - is_constructible<_Tp, _Opt const&>, - is_constructible<_Tp, _Opt&&>, - is_constructible<_Tp, _Opt const&&>, - is_convertible<_Opt&, _Tp>, - is_convertible<_Opt const&, _Tp>, - is_convertible<_Opt&&, _Tp>, - is_convertible<_Opt const&&, _Tp>>; +using __opt_check_constructible_from_opt = _Or< + is_constructible<_Tp, _Opt&>, + is_constructible<_Tp, _Opt const&>, + is_constructible<_Tp, _Opt&&>, + is_constructible<_Tp, _Opt const&&>, + is_convertible<_Opt&, _Tp>, + is_convertible<_Opt const&, _Tp>, + is_convertible<_Opt&&, _Tp>, + is_convertible<_Opt const&&, _Tp> +>; template > -using __opt_check_assignable_from_opt = - _Or, - is_assignable<_Tp&, _Opt const&>, - is_assignable<_Tp&, _Opt&&>, - is_assignable<_Tp&, _Opt const&&>>; - -template 
-_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_implictly_constructible = - _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) && _LIBCUDACXX_TRAIT(is_convertible, _Up, _Tp); - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_explictly_constructible = - _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) && !_LIBCUDACXX_TRAIT(is_convertible, _Up, _Tp); - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_constructible_from_U = - !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, in_place_t) - && !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, optional<_Tp>); - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_constructible_from_opt = - !_LIBCUDACXX_TRAIT(is_same, _Up, _Tp) && !__opt_check_constructible_from_opt<_Tp, _Up>::value; - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable = - _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) && _LIBCUDACXX_TRAIT(is_assignable, _Tp&, _Up); - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable_from_U = - !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, optional<_Tp>) - && (!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, _Tp) || !_LIBCUDACXX_TRAIT(is_scalar, _Tp)); - -template -_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable_from_opt = - !_LIBCUDACXX_TRAIT(is_same, _Up, _Tp) && !__opt_check_constructible_from_opt<_Tp, _Up>::value - && !__opt_check_assignable_from_opt<_Tp, _Up>::value; +using __opt_check_assignable_from_opt = _Or< + is_assignable<_Tp&, _Opt&>, + is_assignable<_Tp&, _Opt const&>, + is_assignable<_Tp&, _Opt&&>, + is_assignable<_Tp&, _Opt const&&> +>; + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_implictly_constructible = _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) + && _LIBCUDACXX_TRAIT(is_convertible, _Up, _Tp); + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_explictly_constructible = _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) + && !_LIBCUDACXX_TRAIT(is_convertible, _Up, _Tp); + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_constructible_from_U = !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, in_place_t) + && !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, optional<_Tp>); + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_constructible_from_opt = !_LIBCUDACXX_TRAIT(is_same, _Up, _Tp) + && !__opt_check_constructible_from_opt<_Tp, _Up>::value; + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable = _LIBCUDACXX_TRAIT(is_constructible, _Tp, _Up) + && _LIBCUDACXX_TRAIT(is_assignable, _Tp&, _Up); + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable_from_U = !_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, optional<_Tp>) + && (!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t<_Up>, _Tp) + || !_LIBCUDACXX_TRAIT(is_scalar, _Tp)); + +template +_LIBCUDACXX_INLINE_VAR constexpr bool __opt_is_assignable_from_opt = !_LIBCUDACXX_TRAIT(is_same, _Up, _Tp) + && !__opt_check_constructible_from_opt<_Tp, _Up>::value + && !__opt_check_assignable_from_opt<_Tp, _Up>::value; template class optional @@ -593,772 +594,830 @@ class optional , private __optional_sfinae_ctor_base_t<_Tp> , private __optional_sfinae_assign_base_t<_Tp> { - using __base = __optional_move_assign_base<_Tp>; + using __base = __optional_move_assign_base<_Tp>; - template - friend class optional; + template + friend class optional; public: - using value_type = _Tp; + using value_type = _Tp; private: - // Disable the reference extension using this static assert. 
- static_assert(!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t, in_place_t), - "instantiation of optional with in_place_t is ill-formed"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t, nullopt_t), - "instantiation of optional with nullopt_t is ill-formed"); - static_assert(!_LIBCUDACXX_TRAIT(is_reference, value_type), - "instantiation of optional with a reference type is ill-formed"); - static_assert(_LIBCUDACXX_TRAIT(is_destructible, value_type), - "instantiation of optional with a non-destructible type is ill-formed"); - static_assert(!_LIBCUDACXX_TRAIT(is_array, value_type), "instantiation of optional with an array type is ill-formed"); + // Disable the reference extension using this static assert. + static_assert(!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t, in_place_t), + "instantiation of optional with in_place_t is ill-formed"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, __remove_cvref_t, nullopt_t), + "instantiation of optional with nullopt_t is ill-formed"); + static_assert(!_LIBCUDACXX_TRAIT(is_reference, value_type), + "instantiation of optional with a reference type is ill-formed"); + static_assert(_LIBCUDACXX_TRAIT(is_destructible, value_type), + "instantiation of optional with a non-destructible type is ill-formed"); + static_assert(!_LIBCUDACXX_TRAIT(is_array, value_type), + "instantiation of optional with an array type is ill-formed"); public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional() noexcept {} - constexpr optional(const optional&) = default; - constexpr optional(optional&&) = default; - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional(nullopt_t) noexcept {} - - _LIBCUDACXX_TEMPLATE(class _In_place_t, class... _Args) - _LIBCUDACXX_REQUIRES(_LIBCUDACXX_TRAIT(is_same, _In_place_t, in_place_t) - _LIBCUDACXX_AND _LIBCUDACXX_TRAIT(is_constructible, value_type, _Args...)) - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional(_In_place_t, _Args&&... __args) - : __base(in_place, _CUDA_VSTD::forward<_Args>(__args)...) - {} - - _LIBCUDACXX_TEMPLATE(class _Up, class... _Args) - _LIBCUDACXX_REQUIRES(_LIBCUDACXX_TRAIT(is_constructible, value_type, initializer_list<_Up>&, _Args...)) - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional(in_place_t, initializer_list<_Up> __il, _Args&&... __args) - : __base(in_place, __il, _CUDA_VSTD::forward<_Args>(__args)...) 
- {} - - _LIBCUDACXX_TEMPLATE(class _Up = value_type) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_U<_Tp, _Up> _LIBCUDACXX_AND __opt_is_implictly_constructible<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional(_Up&& __v) - : __base(in_place, _CUDA_VSTD::forward<_Up>(__v)) - {} - - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_U<_Tp, _Up> _LIBCUDACXX_AND __opt_is_explictly_constructible<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional(_Up&& __v) - : __base(in_place, _CUDA_VSTD::forward<_Up>(__v)) - {} - - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_implictly_constructible<_Tp, const _Up&>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional(const optional<_Up>& __v) - { - this->__construct_from(__v); - } - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_explictly_constructible<_Tp, const _Up&>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional(const optional<_Up>& __v) - { - this->__construct_from(__v); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr optional() noexcept {} + constexpr optional(const optional&) = default; + constexpr optional(optional&&) = default; + _LIBCUDACXX_INLINE_VISIBILITY constexpr optional(nullopt_t) noexcept {} + + _LIBCUDACXX_TEMPLATE(class _In_place_t, class... _Args) + _LIBCUDACXX_REQUIRES( _LIBCUDACXX_TRAIT(is_same, _In_place_t, in_place_t) _LIBCUDACXX_AND + _LIBCUDACXX_TRAIT(is_constructible, value_type, _Args...)) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(_In_place_t, _Args&&... __args) + : __base(in_place, _CUDA_VSTD::forward<_Args>(__args)...) {} + + _LIBCUDACXX_TEMPLATE(class _Up, class... _Args) + _LIBCUDACXX_REQUIRES( _LIBCUDACXX_TRAIT(is_constructible, value_type, initializer_list<_Up>&, _Args...)) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(in_place_t, initializer_list<_Up> __il, _Args&&... __args) + : __base(in_place, __il, _CUDA_VSTD::forward<_Args>(__args)...) 
{} + + _LIBCUDACXX_TEMPLATE(class _Up = value_type) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_U<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_implictly_constructible<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional(_Up&& __v) : __base(in_place, _CUDA_VSTD::forward<_Up>(__v)) {} + + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_U<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_explictly_constructible<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(_Up&& __v) : __base(in_place, _CUDA_VSTD::forward<_Up>(__v)) {} + + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_implictly_constructible<_Tp, const _Up&>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional(const optional<_Up>& __v) { + this->__construct_from(__v); + } - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_implictly_constructible<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional(optional<_Up>&& __v) - { - this->__construct_from(_CUDA_VSTD::move(__v)); - } + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_explictly_constructible<_Tp, const _Up&>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(const optional<_Up>& __v) { + this->__construct_from(__v); + } - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES( - __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_explictly_constructible<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional(optional<_Up>&& __v) - { - this->__construct_from(_CUDA_VSTD::move(__v)); - } + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_implictly_constructible<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional(optional<_Up>&& __v) { + this->__construct_from(_CUDA_VSTD::move(__v)); + } + + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_constructible_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_explictly_constructible<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(optional<_Up>&& __v) { + this->__construct_from(_CUDA_VSTD::move(__v)); + } private: - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit optional( - __optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... __args) - : __base( - __optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...) - {} + template + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit optional(__optional_construct_from_invoke_tag, _Fp&& __f, _Args&&... __args) + : __base(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Fp>(__f), _CUDA_VSTD::forward<_Args>(__args)...) 
{ + } public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional& operator=(nullopt_t) noexcept - { - reset(); - return *this; - } - - constexpr optional& operator=(const optional&) = default; - constexpr optional& operator=(optional&&) = default; - _LIBCUDACXX_TEMPLATE(class _Up = value_type) - _LIBCUDACXX_REQUIRES(__opt_is_assignable_from_U<_Tp, _Up> _LIBCUDACXX_AND __opt_is_assignable<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional& operator=(_Up&& __v) - { - if (this->has_value()) - { - this->__get() = _CUDA_VSTD::forward<_Up>(__v); + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional& operator=(nullopt_t) noexcept { + reset(); + return *this; } - else - { - this->__construct(_CUDA_VSTD::forward<_Up>(__v)); + + constexpr optional& operator=(const optional&) = default; + constexpr optional& operator=(optional&&) = default; + + _LIBCUDACXX_TEMPLATE(class _Up = value_type) + _LIBCUDACXX_REQUIRES( __opt_is_assignable_from_U<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_assignable<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional& operator=(_Up&& __v) { + if (this->has_value()) + this->__get() = _CUDA_VSTD::forward<_Up>(__v); + else + this->__construct(_CUDA_VSTD::forward<_Up>(__v)); + return *this; } - return *this; - } - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES(__opt_is_assignable_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_assignable<_Tp, const _Up&>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional& operator=(const optional<_Up>& __v) - { - this->__assign_from(__v); - return *this; - } + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_assignable_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_assignable<_Tp, const _Up&>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional& operator=(const optional<_Up>& __v) { + this->__assign_from(__v); + return *this; + } - _LIBCUDACXX_TEMPLATE(class _Up) - _LIBCUDACXX_REQUIRES(__opt_is_assignable_from_opt<_Tp, _Up> _LIBCUDACXX_AND __opt_is_assignable<_Tp, _Up>) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional& operator=(optional<_Up>&& __v) - { - this->__assign_from(_CUDA_VSTD::move(__v)); - return *this; - } + _LIBCUDACXX_TEMPLATE(class _Up) + _LIBCUDACXX_REQUIRES( __opt_is_assignable_from_opt<_Tp, _Up> _LIBCUDACXX_AND + __opt_is_assignable<_Tp, _Up>) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional& operator=(optional<_Up>&& __v) { + this->__assign_from(_CUDA_VSTD::move(__v)); + return *this; + } - template = 0> - _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp& emplace(_Args&&... __args) - { - reset(); - this->__construct(_CUDA_VSTD::forward<_Args>(__args)...); - return this->__get(); - } + template = 0> + _LIBCUDACXX_INLINE_VISIBILITY constexpr + _Tp& emplace(_Args&&... __args) { + reset(); + this->__construct(_CUDA_VSTD::forward<_Args>(__args)...); + return this->__get(); + } - template &, _Args...), int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp& emplace(initializer_list<_Up> __il, _Args&&... __args) - { - reset(); - this->__construct(__il, _CUDA_VSTD::forward<_Args>(__args)...); - return this->__get(); - } + template&, _Args...), int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY constexpr + _Tp& emplace(initializer_list<_Up> __il, _Args&&... 
__args) { + reset(); + this->__construct(__il, _CUDA_VSTD::forward<_Args>(__args)...); + return this->__get(); + } - _LIBCUDACXX_INLINE_VISIBILITY constexpr void swap(optional& __opt) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_move_constructible, value_type) && _LIBCUDACXX_TRAIT(is_nothrow_swappable, value_type)) - { - if (this->has_value() == __opt.has_value()) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + void swap(optional& __opt) + noexcept(_LIBCUDACXX_TRAIT(is_nothrow_move_constructible, value_type) && + _LIBCUDACXX_TRAIT(is_nothrow_swappable, value_type)) { - using _CUDA_VSTD::swap; - if (this->has_value()) - { - swap(this->__get(), __opt.__get()); - } + if (this->has_value() == __opt.has_value()) + { + using _CUDA_VSTD::swap; + if (this->has_value()) + swap(this->__get(), __opt.__get()); + } + else + { + if (this->has_value()) + { + __opt.__construct(_CUDA_VSTD::move(this->__get())); + reset(); + } + else + { + this->__construct(_CUDA_VSTD::move(__opt.__get())); + __opt.reset(); + } + } } - else + + _LIBCUDACXX_INLINE_VISIBILITY constexpr + add_pointer_t operator->() const { - if (this->has_value()) - { - __opt.__construct(_CUDA_VSTD::move(this->__get())); - reset(); - } - else - { - this->__construct(_CUDA_VSTD::move(__opt.__get())); - __opt.reset(); - } + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator-> called on a disengaged value"); + return _CUDA_VSTD::addressof(this->__get()); } - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr add_pointer_t operator->() const - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator-> called on a disengaged value"); - return _CUDA_VSTD::addressof(this->__get()); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr add_pointer_t operator->() - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator-> called on a disengaged value"); - return _CUDA_VSTD::addressof(this->__get()); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr const value_type& operator*() const& noexcept - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); - return this->__get(); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type& operator*() & noexcept - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); - return this->__get(); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type&& operator*() && noexcept - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); - return _CUDA_VSTD::move(this->__get()); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr const value_type&& operator*() const&& noexcept - { - _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); - return _CUDA_VSTD::move(this->__get()); - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr explicit operator bool() const noexcept - { - return has_value(); - } - using __base::__get; - using __base::has_value; + _LIBCUDACXX_INLINE_VISIBILITY constexpr + add_pointer_t operator->() + { + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator-> called on a disengaged value"); + return _CUDA_VSTD::addressof(this->__get()); + } - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr value_type const& - value() const& - { - if (!this->has_value()) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + const value_type& operator*() const& noexcept { - __throw_bad_optional_access(); + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); + return this->__get(); } - return this->__get(); - } 
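    // Editor's sketch (not part of the patch; `__opt` is a hypothetical cuda::std::optional<int>)
    // contrasting the accessors touched in this hunk:
    //
    //   if (__opt)                        // operator bool() / has_value(): engaged check
    //   {
    //     int __a = *__opt;               // operator*: only asserts when disengaged (no throw)
    //   }
    //   int __b = __opt.value();          // value(): calls __throw_bad_optional_access() when disengaged
    //   int __c = __opt.value_or(-1);     // value_or(): returns -1 when disengaged; requires the value
    //                                     // type to be copy (or, for the && overload, move) constructible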
- _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr value_type& value() & - { - if (!this->has_value()) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + value_type& operator*() & noexcept { - __throw_bad_optional_access(); + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); + return this->__get(); } - return this->__get(); - } - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr value_type&& value() && - { - if (!this->has_value()) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + value_type&& operator*() && noexcept { - __throw_bad_optional_access(); + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); + return _CUDA_VSTD::move(this->__get()); } - return _CUDA_VSTD::move(this->__get()); - } - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr value_type const&& - value() const&& - { - if (!this->has_value()) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + const value_type&& operator*() const&& noexcept { - __throw_bad_optional_access(); + _LIBCUDACXX_ASSERT(this->has_value(), "optional operator* called on a disengaged value"); + return _CUDA_VSTD::move(this->__get()); } - return _CUDA_VSTD::move(this->__get()); - } - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type value_or(_Up&& __v) const& - { - static_assert(_LIBCUDACXX_TRAIT(is_copy_constructible, value_type), - "optional::value_or: T must be copy constructible"); - static_assert(_LIBCUDACXX_TRAIT(is_convertible, _Up, value_type), - "optional::value_or: U must be convertible to T"); - return this->has_value() ? this->__get() : static_cast(_CUDA_VSTD::forward<_Up>(__v)); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + explicit operator bool() const noexcept { return has_value(); } - template - _LIBCUDACXX_INLINE_VISIBILITY constexpr value_type value_or(_Up&& __v) && - { - static_assert(_LIBCUDACXX_TRAIT(is_move_constructible, value_type), - "optional::value_or: T must be move constructible"); - static_assert(_LIBCUDACXX_TRAIT(is_convertible, _Up, value_type), - "optional::value_or: U must be convertible to T"); - return this->has_value() ? 
_CUDA_VSTD::move(this->__get()) : static_cast(_CUDA_VSTD::forward<_Up>(__v)); - } + using __base::has_value; + using __base::__get; - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto and_then(_Func&& __f) & - { - using _Up = invoke_result_t<_Func, value_type&>; - static_assert(__is_std_optional>::value, - "Result of f(value()) must be a specialization of std::optional"); - if (this->__engaged_) + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + value_type const& value() const& { - return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), this->__get()); + if (!this->has_value()) + __throw_bad_optional_access(); + return this->__get(); } - return remove_cvref_t<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - and_then(_Func&& __f) const& - { - using _Up = invoke_result_t<_Func, const value_type&>; - static_assert(__is_std_optional>::value, - "Result of f(value()) must be a specialization of std::optional"); - if (this->__engaged_) + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + value_type& value() & { - return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), this->__get()); + if (!this->has_value()) + __throw_bad_optional_access(); + return this->__get(); } - return remove_cvref_t<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - and_then(_Func&& __f) && - { - using _Up = invoke_result_t<_Func, value_type&&>; - static_assert(__is_std_optional>::value, - "Result of f(std::move(value())) must be a specialization of std::optional"); - if (this->__engaged_) + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + value_type&& value() && { - return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + if (!this->has_value()) + __throw_bad_optional_access(); + return _CUDA_VSTD::move(this->__get()); } - return remove_cvref_t<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - and_then(_Func&& __f) const&& - { - using _Up = invoke_result_t<_Func, const value_type&&>; - static_assert(__is_std_optional>::value, - "Result of f(std::move(value())) must be a specialization of std::optional"); - if (this->__engaged_) + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + value_type const&& value() const&& { - return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + if (!this->has_value()) + __throw_bad_optional_access(); + return _CUDA_VSTD::move(this->__get()); } - return remove_cvref_t<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - transform(_Func&& __f) & - { - using _Up = remove_cv_t>; - static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(value()) should not be an Array"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), "Result of f(value()) should not be std::in_place_t"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), "Result of f(value()) should not be std::nullopt_t"); - static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(value()) should be an object type"); - if (this->__engaged_) + template + _LIBCUDACXX_INLINE_VISIBILITY constexpr + value_type value_or(_Up&& 
__v) const& { - return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), this->__get()); + static_assert(_LIBCUDACXX_TRAIT(is_copy_constructible, value_type), + "optional::value_or: T must be copy constructible"); + static_assert(_LIBCUDACXX_TRAIT(is_convertible, _Up, value_type), + "optional::value_or: U must be convertible to T"); + return this->has_value() ? this->__get() : + static_cast(_CUDA_VSTD::forward<_Up>(__v)); } - return optional<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - transform(_Func&& __f) const& - { - using _Up = remove_cv_t>; - static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(value()) should not be an Array"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), "Result of f(value()) should not be std::in_place_t"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), "Result of f(value()) should not be std::nullopt_t"); - static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(value()) should be an object type"); - if (this->__engaged_) + template + _LIBCUDACXX_INLINE_VISIBILITY constexpr + value_type value_or(_Up&& __v) && { - return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), this->__get()); + static_assert(_LIBCUDACXX_TRAIT(is_move_constructible, value_type), + "optional::value_or: T must be move constructible"); + static_assert(_LIBCUDACXX_TRAIT(is_convertible, _Up, value_type), + "optional::value_or: U must be convertible to T"); + return this->has_value() ? _CUDA_VSTD::move(this->__get()) : + static_cast(_CUDA_VSTD::forward<_Up>(__v)); } - return optional<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - transform(_Func&& __f) && - { - using _Up = remove_cv_t>; - static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(std::move(value())) should not be an Array"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), - "Result of f(std::move(value())) should not be std::in_place_t"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), - "Result of f(std::move(value())) should not be std::nullopt_t"); - static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(std::move(value())) should be an object type"); - if (this->__engaged_) - { - return optional<_Up>( - __optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto and_then(_Func&& __f) & { + using _Up = invoke_result_t<_Func, value_type&>; + static_assert(__is_std_optional>::value, + "Result of f(value()) must be a specialization of std::optional"); + if (this->__engaged_) + { + return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), this->__get()); + } + return remove_cvref_t<_Up>(); } - return optional<_Up>(); - } - template - _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr auto - transform(_Func&& __f) const&& - { - using _Up = remove_cvref_t>; - static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(std::move(value())) should not be an Array"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), - "Result of f(std::move(value())) should not be std::in_place_t"); - static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), - "Result of f(std::move(value())) should not be std::nullopt_t"); - 
static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(std::move(value())) should be an object type"); - if (this->__engaged_) - { - return optional<_Up>( - __optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto and_then(_Func&& __f) const& { + using _Up = invoke_result_t<_Func, const value_type&>; + static_assert(__is_std_optional>::value, + "Result of f(value()) must be a specialization of std::optional"); + if (this->__engaged_) + { + return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), this->__get()); + } + return remove_cvref_t<_Up>(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto and_then(_Func&& __f) && { + using _Up = invoke_result_t<_Func, value_type&&>; + static_assert(__is_std_optional>::value, + "Result of f(std::move(value())) must be a specialization of std::optional"); + if (this->__engaged_) + { + return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + } + return remove_cvref_t<_Up>(); } - return optional<_Up>(); - } - _LIBCUDACXX_TEMPLATE(class _Func, class _Tp2 = _Tp) - _LIBCUDACXX_REQUIRES(invocable<_Func> _LIBCUDACXX_AND _LIBCUDACXX_TRAIT(is_copy_constructible, _Tp2)) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional or_else(_Func&& __f) const& - { - static_assert(_LIBCUDACXX_TRAIT(is_same, remove_cvref_t>, optional), - "Result of f() should be the same type as this optional"); - if (this->__engaged_) - { - return *this; + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto and_then(_Func&& __f) const&& { + using _Up = invoke_result_t<_Func, const value_type&&>; + static_assert(__is_std_optional>::value, + "Result of f(std::move(value())) must be a specialization of std::optional"); + if (this->__engaged_) + { + return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + } + return remove_cvref_t<_Up>(); } - return _CUDA_VSTD::forward<_Func>(__f)(); - } - _LIBCUDACXX_TEMPLATE(class _Func, class _Tp2 = _Tp) - _LIBCUDACXX_REQUIRES(invocable<_Func> _LIBCUDACXX_AND _LIBCUDACXX_TRAIT(is_move_constructible, _Tp2)) - _LIBCUDACXX_INLINE_VISIBILITY constexpr optional or_else(_Func&& __f) && - { - static_assert(_LIBCUDACXX_TRAIT(is_same, remove_cvref_t>, optional), - "Result of f() should be the same type as this optional"); - if (this->__engaged_) - { - return _CUDA_VSTD::move(*this); + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto transform(_Func&& __f) & { + using _Up = remove_cv_t>; + static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(value()) should not be an Array"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), + "Result of f(value()) should not be std::in_place_t"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), + "Result of f(value()) should not be std::nullopt_t"); + static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(value()) should be an object type"); + if (this->__engaged_) + { + return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), this->__get()); + } + return optional<_Up>(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto transform(_Func&& __f) const& { + using _Up = 
remove_cv_t>; + static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(value()) should not be an Array"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), + "Result of f(value()) should not be std::in_place_t"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), + "Result of f(value()) should not be std::nullopt_t"); + static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(value()) should be an object type"); + if (this->__engaged_) + { + return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), this->__get()); + } + return optional<_Up>(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto transform(_Func&& __f) && { + using _Up = remove_cv_t>; + static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(std::move(value())) should not be an Array"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), + "Result of f(std::move(value())) should not be std::in_place_t"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), + "Result of f(std::move(value())) should not be std::nullopt_t"); + static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(std::move(value())) should be an object type"); + if (this->__engaged_) + { + return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + } + return optional<_Up>(); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS constexpr + auto transform(_Func&& __f) const&& { + using _Up = remove_cvref_t>; + static_assert(!_LIBCUDACXX_TRAIT(is_array, _Up), "Result of f(std::move(value())) should not be an Array"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, in_place_t), + "Result of f(std::move(value())) should not be std::in_place_t"); + static_assert(!_LIBCUDACXX_TRAIT(is_same, _Up, nullopt_t), + "Result of f(std::move(value())) should not be std::nullopt_t"); + static_assert(_LIBCUDACXX_TRAIT(is_object, _Up), "Result of f(std::move(value())) should be an object type"); + if (this->__engaged_) + { + return optional<_Up>(__optional_construct_from_invoke_tag{}, _CUDA_VSTD::forward<_Func>(__f), _CUDA_VSTD::move(this->__get())); + } + return optional<_Up>(); + } + + _LIBCUDACXX_TEMPLATE(class _Func, class _Tp2 = _Tp) + _LIBCUDACXX_REQUIRES( invocable<_Func> _LIBCUDACXX_AND _LIBCUDACXX_TRAIT(is_copy_constructible, _Tp2)) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional or_else(_Func&& __f) const& { + static_assert(_LIBCUDACXX_TRAIT(is_same, remove_cvref_t>, optional), + "Result of f() should be the same type as this optional"); + if (this->__engaged_) + { + return *this; + } + return _CUDA_VSTD::forward<_Func>(__f)(); + } + + _LIBCUDACXX_TEMPLATE(class _Func, class _Tp2 = _Tp) + _LIBCUDACXX_REQUIRES( invocable<_Func> _LIBCUDACXX_AND _LIBCUDACXX_TRAIT(is_move_constructible, _Tp2)) + _LIBCUDACXX_INLINE_VISIBILITY constexpr + optional or_else(_Func&& __f) && { + static_assert(_LIBCUDACXX_TRAIT(is_same, remove_cvref_t>, optional), + "Result of f() should be the same type as this optional"); + if (this->__engaged_) + { + return _CUDA_VSTD::move(*this); + } + return _CUDA_VSTD::forward<_Func>(__f)(); } - return _CUDA_VSTD::forward<_Func>(__f)(); - } - using __base::reset; + using __base::reset; }; -# if _CCCL_STD_VER > 2014 && !defined(_LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES) -template +#if _CCCL_STD_VER > 2014 && !defined(_LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES) +template 
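Aside (not part of the patch): the hunks above only re-wrap the monadic members of this optional (and_then, transform, or_else) and value_or; behavior is unchanged. The sketch below exercises the same interface through C++23 std::optional, which these members mirror; parse_int and the sample inputs are inventions for the example, not library code.

```cpp
// Illustrative only: and_then / transform / or_else / value_or in action.
#include <charconv>
#include <iostream>
#include <optional>
#include <string_view>

std::optional<int> parse_int(std::string_view s)
{
  int v = 0;
  auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), v);
  if (ec != std::errc{} || ptr != s.data() + s.size())
  {
    return std::nullopt; // not a number -> disengaged optional
  }
  return v;
}

int main()
{
  auto r = parse_int("21")
             .and_then([](int v) -> std::optional<int> { // optional -> optional
               if (v % 2 != 0)
               {
                 return std::nullopt; // reject odd inputs
               }
               return v;
             })
             .transform([](int v) { return v * 2; })             // T -> U, re-wrapped
             .or_else([]() -> std::optional<int> { return 0; }); // fallback when empty

  std::cout << r.value_or(-1) << '\n';                 // prints 42
  std::cout << parse_int("oops").value_or(-1) << '\n'; // prints -1
}
```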
_CCCL_HOST_DEVICE optional(_Tp) -> optional<_Tp>; -# endif +#endif // Comparisons between optionals template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == declval()), bool), + bool +> operator==(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (static_cast(__x) != static_cast(__y)) - { - return false; - } - if (!static_cast(__x)) - { - return true; - } - return *__x == *__y; + if (static_cast(__x) != static_cast(__y)) + return false; + if (!static_cast(__x)) + return true; + return *__x == *__y; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != + declval()), bool), + bool +> operator!=(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (static_cast(__x) != static_cast(__y)) - { - return true; - } - if (!static_cast(__x)) - { - return false; - } - return *__x != *__y; + if (static_cast(__x) != static_cast(__y)) + return true; + if (!static_cast(__x)) + return false; + return *__x != *__y; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < + declval()), bool), + bool +> operator<(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (!static_cast(__y)) - { - return false; - } - if (!static_cast(__x)) - { - return true; - } - return *__x < *__y; + if (!static_cast(__y)) + return false; + if (!static_cast(__x)) + return true; + return *__x < *__y; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > + declval()), bool), + bool +> operator>(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (!static_cast(__x)) - { - return false; - } - if (!static_cast(__y)) - { - return true; - } - return *__x > *__y; + if (!static_cast(__x)) + return false; + if (!static_cast(__y)) + return true; + return *__x > *__y; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= + declval()), bool), + bool +> operator<=(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (!static_cast(__x)) - { - return true; - } - if (!static_cast(__y)) - { - return false; - } - return *__x <= *__y; + if (!static_cast(__x)) + return true; + if (!static_cast(__y)) + return false; + return *__x <= *__y; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= + declval()), bool), + bool +> operator>=(const optional<_Tp>& __x, const optional<_Up>& __y) { - if (!static_cast(__y)) - { - return true; - } - if (!static_cast(__x)) - { - return false; - 
} - return *__x >= *__y; + if (!static_cast(__y)) + return true; + if (!static_cast(__x)) + return false; + return *__x >= *__y; } // Comparisons with nullopt template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator==(const optional<_Tp>& __x, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator==(const optional<_Tp>& __x, nullopt_t) noexcept { - return !static_cast(__x); + return !static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator==(nullopt_t, const optional<_Tp>& __x) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator==(nullopt_t, const optional<_Tp>& __x) noexcept { - return !static_cast(__x); + return !static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator!=(const optional<_Tp>& __x, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator!=(const optional<_Tp>& __x, nullopt_t) noexcept { - return static_cast(__x); + return static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator!=(nullopt_t, const optional<_Tp>& __x) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator!=(nullopt_t, const optional<_Tp>& __x) noexcept { - return static_cast(__x); + return static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator<(const optional<_Tp>&, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator<(const optional<_Tp>&, nullopt_t) noexcept { - return false; + return false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator<(nullopt_t, const optional<_Tp>& __x) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator<(nullopt_t, const optional<_Tp>& __x) noexcept { - return static_cast(__x); + return static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator<=(const optional<_Tp>& __x, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator<=(const optional<_Tp>& __x, nullopt_t) noexcept { - return !static_cast(__x); + return !static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator<=(nullopt_t, const optional<_Tp>&) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator<=(nullopt_t, const optional<_Tp>&) noexcept { - return true; + return true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator>(const optional<_Tp>& __x, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator>(const optional<_Tp>& __x, nullopt_t) noexcept { - return static_cast(__x); + return static_cast(__x); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator>(nullopt_t, const optional<_Tp>&) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator>(nullopt_t, const optional<_Tp>&) noexcept { - return false; + return false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator>=(const optional<_Tp>&, nullopt_t) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator>=(const optional<_Tp>&, nullopt_t) noexcept { - return true; + return true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr bool operator>=(nullopt_t, const optional<_Tp>& __x) noexcept +_LIBCUDACXX_INLINE_VISIBILITY constexpr +bool +operator>=(nullopt_t, const optional<_Tp>& __x) noexcept { - return !static_cast(__x); + return !static_cast(__x); } // Comparisons with T template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr 
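Aside (not part of the patch): the nullopt comparisons above reduce to a check of the engaged flag, so an empty optional is equal to nullopt and orders before any engaged value. A self-contained illustration with std::optional (the cuda::std overloads shown here have the same semantics):

```cpp
// Illustrative only: comparison semantics against nullopt and between optionals.
#include <optional>

constexpr std::optional<int> empty;  // disengaged
constexpr std::optional<int> one{1}; // engaged

static_assert(empty == std::nullopt);
static_assert(one != std::nullopt);
static_assert(std::nullopt < one);    // nullopt orders before any engaged value
static_assert(!(one < std::nullopt));
static_assert(empty < one);           // disengaged < engaged

int main() {}
```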
+__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == + declval()), bool), + bool +> operator==(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x == __v : false; + return static_cast(__x) ? *__x == __v : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() == + declval()), bool), + bool +> operator==(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v == *__x : false; + return static_cast(__x) ? __v == *__x : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != + declval()), bool), + bool +> operator!=(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x != __v : true; + return static_cast(__x) ? *__x != __v : true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() != + declval()), bool), + bool +> operator!=(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v != *__x : true; + return static_cast(__x) ? __v != *__x : true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < + declval()), bool), + bool +> operator<(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x < __v : true; + return static_cast(__x) ? *__x < __v : true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() < + declval()), bool), + bool +> operator<(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v < *__x : false; + return static_cast(__x) ? __v < *__x : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= + declval()), bool), + bool +> operator<=(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x <= __v : true; + return static_cast(__x) ? *__x <= __v : true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() <= + declval()), bool), + bool +> operator<=(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v <= *__x : false; + return static_cast(__x) ? 
__v <= *__x : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > + declval()), bool), + bool +> operator>(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x > __v : false; + return static_cast(__x) ? *__x > __v : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() > + declval()), bool), + bool +> operator>(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v > *__x : true; + return static_cast(__x) ? __v > *__x : true; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= + declval()), bool), + bool +> operator>=(const optional<_Tp>& __x, const _Up& __v) { - return static_cast(__x) ? *__x >= __v : false; + return static_cast(__x) ? *__x >= __v : false; } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= declval()), bool), - bool> +_LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_convertible, decltype(declval() >= + declval()), bool), + bool +> operator>=(const _Tp& __v, const optional<_Up>& __x) { - return static_cast(__x) ? __v >= *__x : true; + return static_cast(__x) ? __v >= *__x : true; } + template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr __enable_if_t< - _LIBCUDACXX_TRAIT(is_move_constructible, _Tp) && _LIBCUDACXX_TRAIT(is_swappable, _Tp), - void> +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr +__enable_if_t< + _LIBCUDACXX_TRAIT(is_move_constructible, _Tp) && _LIBCUDACXX_TRAIT(is_swappable, _Tp), + void +> swap(optional<_Tp>& __x, optional<_Tp>& __y) noexcept(noexcept(__x.swap(__y))) { - __x.swap(__y); + __x.swap(__y); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr optional> make_optional(_Tp&& __v) +_LIBCUDACXX_INLINE_VISIBILITY constexpr +optional> make_optional(_Tp&& __v) { - return optional>(_CUDA_VSTD::forward<_Tp>(__v)); + return optional>(_CUDA_VSTD::forward<_Tp>(__v)); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr optional<_Tp> make_optional(_Args&&... __args) +_LIBCUDACXX_INLINE_VISIBILITY constexpr +optional<_Tp> make_optional(_Args&&... __args) { - return optional<_Tp>(in_place, _CUDA_VSTD::forward<_Args>(__args)...); + return optional<_Tp>(in_place, _CUDA_VSTD::forward<_Args>(__args)...); } template -_LIBCUDACXX_INLINE_VISIBILITY constexpr optional<_Tp> make_optional(initializer_list<_Up> __il, _Args&&... __args) +_LIBCUDACXX_INLINE_VISIBILITY constexpr +optional<_Tp> make_optional(initializer_list<_Up> __il, _Args&&... 
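Aside (not part of the patch): make_optional and the non-member swap above are thin forwarding wrappers; a minimal usage sketch (std::optional spelling shown, the cuda::std spellings are the same):

```cpp
// Illustrative only: make_optional deduces or in-place constructs the value,
// and the non-member swap dispatches to optional::swap.
#include <cassert>
#include <optional>
#include <string>
#include <utility>

int main()
{
  auto a = std::make_optional(42);                   // std::optional<int>
  auto s = std::make_optional<std::string>(3u, 'x'); // constructs "xxx" in place

  std::optional<int> b; // empty
  swap(a, b);           // found by ADL, noexcept when the element's swap is
  assert(!a.has_value() && b.value() == 42);
  assert(s->size() == 3);
}
```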
__args) { - return optional<_Tp>(in_place, __il, _CUDA_VSTD::forward<_Args>(__args)...); + return optional<_Tp>(in_place, __il, _CUDA_VSTD::forward<_Args>(__args)...); } -# ifndef __cuda_std__ +#ifndef __cuda_std__ template -struct _LIBCUDACXX_TEMPLATE_VIS hash<__enable_hash_helper, remove_const_t<_Tp>>> +struct _LIBCUDACXX_TEMPLATE_VIS hash< + __enable_hash_helper, remove_const_t<_Tp>> +> { -# if _CCCL_STD_VER <= 2017 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS) - _LIBCUDACXX_DEPRECATED_IN_CXX17 typedef optional<_Tp> argument_type; - _LIBCUDACXX_DEPRECATED_IN_CXX17 typedef size_t result_type; -# endif +#if _CCCL_STD_VER <= 2017 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS) + _LIBCUDACXX_DEPRECATED_IN_CXX17 typedef optional<_Tp> argument_type; + _LIBCUDACXX_DEPRECATED_IN_CXX17 typedef size_t result_type; +#endif - _LIBCUDACXX_INLINE_VISIBILITY size_t operator()(const optional<_Tp>& __opt) const - { - return static_cast(__opt) ? hash>()(*__opt) : 0; - } + _LIBCUDACXX_INLINE_VISIBILITY + size_t operator()(const optional<_Tp>& __opt) const + { + return static_cast(__opt) ? hash>()(*__opt) : 0; + } }; -# endif // __cuda_std__ +#endif // __cuda_std__ _LIBCUDACXX_END_NAMESPACE_STD diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/ranges b/libcudacxx/include/cuda/std/detail/libcxx/include/ranges index 30174b96dca..a7c26c7f63c 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/ranges +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/ranges @@ -320,6 +320,7 @@ namespace std { _CCCL_DIAG_PUSH _CCCL_DIAG_SUPPRESS_MSVC(4848) +#include // all public C++ headers provide the assertion handler #include #include #include @@ -333,14 +334,13 @@ _CCCL_DIAG_SUPPRESS_MSVC(4848) #include #include #include -#include // all public C++ headers provide the assertion handler // standard-mandated includes #include // [ranges.syn] #ifndef _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR -# include +#include #endif // _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR #include #include diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/ratio b/libcudacxx/include/cuda/std/detail/libcxx/include/ratio index 1d2386755a2..edf8d224c91 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/ratio +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/ratio @@ -87,13 +87,14 @@ typedef ratio<1000000000000000000000000, 1> yotta; // not supported # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include +#include + _LIBCUDACXX_BEGIN_NAMESPACE_STD // __static_gcd @@ -101,19 +102,19 @@ _LIBCUDACXX_BEGIN_NAMESPACE_STD template struct __static_gcd { - static const intmax_t value = __static_gcd<_Yp, _Xp % _Yp>::value; + static const intmax_t value = __static_gcd<_Yp, _Xp % _Yp>::value; }; template struct __static_gcd<_Xp, 0> { - static const intmax_t value = _Xp; + static const intmax_t value = _Xp; }; template <> struct __static_gcd<0, 0> { - static const intmax_t value = 1; + static const intmax_t value = 1; }; // __static_lcm @@ -121,19 +122,19 @@ struct __static_gcd<0, 0> template struct __static_lcm { - static const intmax_t value = _Xp / __static_gcd<_Xp, _Yp>::value * _Yp; + static const intmax_t value = _Xp / __static_gcd<_Xp, _Yp>::value * _Yp; }; template struct __static_abs { - static const intmax_t value = _Xp < 0 ? -_Xp : _Xp; + static const intmax_t value = _Xp < 0 ? 
-_Xp : _Xp; }; template struct __static_sign { - static const intmax_t value = _Xp == 0 ? 0 : (_Xp < 0 ? -1 : 1); + static const intmax_t value = _Xp == 0 ? 0 : (_Xp < 0 ? -1 : 1); }; template ::value> @@ -142,32 +143,30 @@ class __ll_add; template class __ll_add<_Xp, _Yp, 1> { - static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; - static const intmax_t max = -min; - - static_assert(_Xp <= max - _Yp, "overflow in __ll_add"); + static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; + static const intmax_t max = -min; + static_assert(_Xp <= max - _Yp, "overflow in __ll_add"); public: - static const intmax_t value = _Xp + _Yp; + static const intmax_t value = _Xp + _Yp; }; template class __ll_add<_Xp, _Yp, 0> { public: - static const intmax_t value = _Xp; + static const intmax_t value = _Xp; }; template class __ll_add<_Xp, _Yp, -1> { - static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; - static const intmax_t max = -min; - - static_assert(min - _Yp <= _Xp, "overflow in __ll_add"); + static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; + static const intmax_t max = -min; + static_assert(min - _Yp <= _Xp, "overflow in __ll_add"); public: - static const intmax_t value = _Xp + _Yp; + static const intmax_t value = _Xp + _Yp; }; template ::value> @@ -176,100 +175,95 @@ class __ll_sub; template class __ll_sub<_Xp, _Yp, 1> { - static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; - static const intmax_t max = -min; - - static_assert(min + _Yp <= _Xp, "overflow in __ll_sub"); + static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; + static const intmax_t max = -min; + static_assert(min + _Yp <= _Xp, "overflow in __ll_sub"); public: - static const intmax_t value = _Xp - _Yp; + static const intmax_t value = _Xp - _Yp; }; template class __ll_sub<_Xp, _Yp, 0> { public: - static const intmax_t value = _Xp; + static const intmax_t value = _Xp; }; template class __ll_sub<_Xp, _Yp, -1> { - static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; - static const intmax_t max = -min; - - static_assert(_Xp <= max + _Yp, "overflow in __ll_sub"); + static const intmax_t min = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)) + 1; + static const intmax_t max = -min; + static_assert(_Xp <= max + _Yp, "overflow in __ll_sub"); public: - static const intmax_t value = _Xp - _Yp; + static const intmax_t value = _Xp - _Yp; }; template class __ll_mul { - static const intmax_t nan = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)); - static const intmax_t min = nan + 1; - static const intmax_t max = -min; - static const intmax_t __a_x = __static_abs<_Xp>::value; - static const intmax_t __a_y = __static_abs<_Yp>::value; - - static_assert(_Xp != nan && _Yp != nan && __a_x <= max / __a_y, "overflow in __ll_mul"); + static const intmax_t nan = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)); + static const intmax_t min = nan + 1; + static const intmax_t max = -min; + static const intmax_t __a_x = __static_abs<_Xp>::value; + static const intmax_t __a_y = __static_abs<_Yp>::value; + static_assert(_Xp != nan && _Yp != nan && __a_x <= max / __a_y, "overflow in __ll_mul"); public: - static const intmax_t value = _Xp * _Yp; + static const intmax_t value = _Xp * _Yp; }; template class __ll_mul<0, _Yp> { public: - static const intmax_t value = 0; + static const intmax_t value = 0; }; template class __ll_mul<_Xp, 0> { public: - static const intmax_t value = 0; + static const intmax_t value = 0; }; 
template <> class __ll_mul<0, 0> { public: - static const intmax_t value = 0; + static const intmax_t value = 0; }; // Not actually used but left here in case needed in future maintenance template class __ll_div { - static const intmax_t nan = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)); - static const intmax_t min = nan + 1; - static const intmax_t max = -min; - - static_assert(_Xp != nan && _Yp != nan && _Yp != 0, "overflow in __ll_div"); + static const intmax_t nan = (1LL << (sizeof(intmax_t) * CHAR_BIT - 1)); + static const intmax_t min = nan + 1; + static const intmax_t max = -min; + static_assert(_Xp != nan && _Yp != nan && _Yp != 0, "overflow in __ll_div"); public: - static const intmax_t value = _Xp / _Yp; + static const intmax_t value = _Xp / _Yp; }; template class _LIBCUDACXX_TEMPLATE_VIS ratio { - static_assert(__static_abs<_Num>::value >= 0, "ratio numerator is out of range"); - static_assert(_Den != 0, "ratio divide by 0"); - static_assert(__static_abs<_Den>::value > 0, "ratio denominator is out of range"); - static constexpr intmax_t __na = __static_abs<_Num>::value; - static constexpr intmax_t __da = __static_abs<_Den>::value; - static constexpr intmax_t __s = __static_sign<_Num>::value * __static_sign<_Den>::value; - static constexpr intmax_t __gcd = __static_gcd<__na, __da>::value; - + static_assert(__static_abs<_Num>::value >= 0, "ratio numerator is out of range"); + static_assert(_Den != 0, "ratio divide by 0"); + static_assert(__static_abs<_Den>::value > 0, "ratio denominator is out of range"); + static constexpr intmax_t __na = __static_abs<_Num>::value; + static constexpr intmax_t __da = __static_abs<_Den>::value; + static constexpr intmax_t __s = __static_sign<_Num>::value * __static_sign<_Den>::value; + static constexpr intmax_t __gcd = __static_gcd<__na, __da>::value; public: - static constexpr intmax_t num = __s * __na / __gcd; - static constexpr intmax_t den = __da / __gcd; + static constexpr intmax_t num = __s * __na / __gcd; + static constexpr intmax_t den = __da / __gcd; - typedef ratio type; + typedef ratio type; }; template @@ -278,210 +272,226 @@ constexpr intmax_t ratio<_Num, _Den>::num; template constexpr intmax_t ratio<_Num, _Den>::den; -template -struct __is_ratio : false_type -{}; -template -struct __is_ratio> : true_type -{}; +template struct __is_ratio : false_type {}; +template struct __is_ratio > : true_type {}; typedef ratio<1LL, 1000000000000000000LL> atto; -typedef ratio<1LL, 1000000000000000LL> femto; -typedef ratio<1LL, 1000000000000LL> pico; -typedef ratio<1LL, 1000000000LL> nano; -typedef ratio<1LL, 1000000LL> micro; -typedef ratio<1LL, 1000LL> milli; -typedef ratio<1LL, 100LL> centi; -typedef ratio<1LL, 10LL> deci; -typedef ratio<10LL, 1LL> deca; -typedef ratio<100LL, 1LL> hecto; -typedef ratio<1000LL, 1LL> kilo; -typedef ratio<1000000LL, 1LL> mega; -typedef ratio<1000000000LL, 1LL> giga; -typedef ratio<1000000000000LL, 1LL> tera; -typedef ratio<1000000000000000LL, 1LL> peta; +typedef ratio<1LL, 1000000000000000LL> femto; +typedef ratio<1LL, 1000000000000LL> pico; +typedef ratio<1LL, 1000000000LL> nano; +typedef ratio<1LL, 1000000LL> micro; +typedef ratio<1LL, 1000LL> milli; +typedef ratio<1LL, 100LL> centi; +typedef ratio<1LL, 10LL> deci; +typedef ratio< 10LL, 1LL> deca; +typedef ratio< 100LL, 1LL> hecto; +typedef ratio< 1000LL, 1LL> kilo; +typedef ratio< 1000000LL, 1LL> mega; +typedef ratio< 1000000000LL, 1LL> giga; +typedef ratio< 1000000000000LL, 1LL> tera; +typedef ratio< 1000000000000000LL, 1LL> peta; typedef 
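Aside (not part of the patch): the ratio class template above normalizes its value at compile time, moving the sign onto the numerator and dividing both terms by their GCD, so equal fractions always expose identical num/den. For example (std::ratio behaves identically):

```cpp
// Illustrative only: ratio<N, D> stores the reduced, sign-normalized fraction.
#include <ratio>

static_assert(std::ratio<2, -4>::num == -1, "sign moves to the numerator");
static_assert(std::ratio<2, -4>::den == 2, "terms are divided by gcd(2, 4)");
static_assert(std::ratio<0, 7>::num == 0 && std::ratio<0, 7>::den == 1);

int main() {}
```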
ratio<1000000000000000000LL, 1LL> exa; template struct __ratio_multiply { - // private: - static const intmax_t __gcd_n1_d2 = __static_gcd<_R1::num, _R2::den>::value; - static const intmax_t __gcd_d1_n2 = __static_gcd<_R1::den, _R2::num>::value; - +//private: + static const intmax_t __gcd_n1_d2 = __static_gcd<_R1::num, _R2::den>::value; + static const intmax_t __gcd_d1_n2 = __static_gcd<_R1::den, _R2::num>::value; public: - typedef typename ratio<__ll_mul<_R1::num / __gcd_n1_d2, _R2::num / __gcd_d1_n2>::value, - __ll_mul<_R2::den / __gcd_n1_d2, _R1::den / __gcd_d1_n2>::value>::type type; + typedef typename ratio + < + __ll_mul<_R1::num / __gcd_n1_d2, _R2::num / __gcd_d1_n2>::value, + __ll_mul<_R2::den / __gcd_n1_d2, _R1::den / __gcd_d1_n2>::value + >::type type; }; -template -using ratio_multiply = typename __ratio_multiply<_R1, _R2>::type; +template using ratio_multiply + = typename __ratio_multiply<_R1, _R2>::type; template struct __ratio_divide { - // private: - static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; - static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; - +//private: + static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; + static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; public: - typedef typename ratio<__ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, - __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value>::type type; + typedef typename ratio + < + __ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, + __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value + >::type type; }; -template -using ratio_divide = typename __ratio_divide<_R1, _R2>::type; +template using ratio_divide + = typename __ratio_divide<_R1, _R2>::type; template struct __ratio_add { - // private: - static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; - static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; - +//private: + static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; + static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; public: - typedef - typename ratio_multiply, - ratio<__ll_add<__ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, - __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value>::value, - _R2::den>>::type type; -}; - -template -using ratio_add = typename __ratio_add<_R1, _R2>::type; + typedef typename ratio_multiply + < + ratio<__gcd_n1_n2, _R1::den / __gcd_d1_d2>, + ratio + < + __ll_add + < + __ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, + __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value + >::value, + _R2::den + > + >::type type; +}; + +template using ratio_add + = typename __ratio_add<_R1, _R2>::type; template struct __ratio_subtract { - // private: - static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; - static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; - +//private: + static const intmax_t __gcd_n1_n2 = __static_gcd<_R1::num, _R2::num>::value; + static const intmax_t __gcd_d1_d2 = __static_gcd<_R1::den, _R2::den>::value; public: - typedef - typename ratio_multiply, - ratio<__ll_sub<__ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, - __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value>::value, - _R2::den>>::type type; -}; - -template -using ratio_subtract = typename __ratio_subtract<_R1, _R2>::type; + typedef 
typename ratio_multiply + < + ratio<__gcd_n1_n2, _R1::den / __gcd_d1_d2>, + ratio + < + __ll_sub + < + __ll_mul<_R1::num / __gcd_n1_n2, _R2::den / __gcd_d1_d2>::value, + __ll_mul<_R2::num / __gcd_n1_n2, _R1::den / __gcd_d1_d2>::value + >::value, + _R2::den + > + >::type type; +}; + +template using ratio_subtract + = typename __ratio_subtract<_R1, _R2>::type; // ratio_equal template struct _LIBCUDACXX_TEMPLATE_VIS ratio_equal - : public _LIBCUDACXX_BOOL_CONSTANT((_R1::num == _R2::num && _R1::den == _R2::den)) -{}; + : public _LIBCUDACXX_BOOL_CONSTANT((_R1::num == _R2::num && _R1::den == _R2::den)) {}; template -struct _LIBCUDACXX_TEMPLATE_VIS ratio_not_equal : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_equal<_R1, _R2>::value)) -{}; +struct _LIBCUDACXX_TEMPLATE_VIS ratio_not_equal + : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_equal<_R1, _R2>::value)) {}; // ratio_less -template +template struct __ratio_less1 { - static const bool value = _Odd ? _Q2 < _Q1 : _Q1 < _Q2; + static const bool value = _Odd ? _Q2 < _Q1 : _Q1 < _Q2; }; template struct __ratio_less1<_R1, _R2, _Odd, _Qp, 0, _Qp, 0> { - static const bool value = false; + static const bool value = false; }; template struct __ratio_less1<_R1, _R2, _Odd, _Qp, 0, _Qp, _M2> { - static const bool value = !_Odd; + static const bool value = !_Odd; }; template struct __ratio_less1<_R1, _R2, _Odd, _Qp, _M1, _Qp, 0> { - static const bool value = _Odd; + static const bool value = _Odd; }; -template +template struct __ratio_less1<_R1, _R2, _Odd, _Qp, _M1, _Qp, _M2> { - static const bool value = __ratio_less1, ratio<_R2::den, _M2>, !_Odd>::value; + static const bool value = __ratio_less1, + ratio<_R2::den, _M2>, !_Odd>::value; }; -template ::value, - intmax_t _S2 = __static_sign<_R2::num>::value> +template ::value, + intmax_t _S2 = __static_sign<_R2::num>::value> struct __ratio_less { - static const bool value = _S1 < _S2; + static const bool value = _S1 < _S2; }; template struct __ratio_less<_R1, _R2, 1LL, 1LL> { - static const bool value = __ratio_less1<_R1, _R2>::value; + static const bool value = __ratio_less1<_R1, _R2>::value; }; template struct __ratio_less<_R1, _R2, -1LL, -1LL> { - static const bool value = __ratio_less1, ratio<-_R1::num, _R1::den>>::value; + static const bool value = __ratio_less1, ratio<-_R1::num, _R1::den> >::value; }; template -struct _LIBCUDACXX_TEMPLATE_VIS ratio_less : public _LIBCUDACXX_BOOL_CONSTANT((__ratio_less<_R1, _R2>::value)) -{}; +struct _LIBCUDACXX_TEMPLATE_VIS ratio_less + : public _LIBCUDACXX_BOOL_CONSTANT((__ratio_less<_R1, _R2>::value)) {}; template -struct _LIBCUDACXX_TEMPLATE_VIS ratio_less_equal : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_less<_R2, _R1>::value)) -{}; +struct _LIBCUDACXX_TEMPLATE_VIS ratio_less_equal + : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_less<_R2, _R1>::value)) {}; template -struct _LIBCUDACXX_TEMPLATE_VIS ratio_greater : public _LIBCUDACXX_BOOL_CONSTANT((ratio_less<_R2, _R1>::value)) -{}; +struct _LIBCUDACXX_TEMPLATE_VIS ratio_greater + : public _LIBCUDACXX_BOOL_CONSTANT((ratio_less<_R2, _R1>::value)) {}; template -struct _LIBCUDACXX_TEMPLATE_VIS ratio_greater_equal : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_less<_R1, _R2>::value)) -{}; +struct _LIBCUDACXX_TEMPLATE_VIS ratio_greater_equal + : public _LIBCUDACXX_BOOL_CONSTANT((!ratio_less<_R1, _R2>::value)) {}; template struct __ratio_gcd { - typedef ratio<__static_gcd<_R1::num, _R2::num>::value, __static_lcm<_R1::den, _R2::den>::value> type; + typedef ratio<__static_gcd<_R1::num, _R2::num>::value, + __static_lcm<_R1::den, 
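Aside (not part of the patch): __ratio_add and __ratio_subtract above factor out the GCDs of the numerators and denominators before multiplying so that intermediate products stay within intmax_t, and __ratio_less1 compares by successive quotient/remainder steps rather than cross-multiplication for the same reason. At the user level the aliases behave as exact fraction arithmetic:

```cpp
// Illustrative only: compile-time exact fraction arithmetic and comparison.
#include <ratio>

using sum  = std::ratio_add<std::ratio<1, 6>, std::ratio<1, 4>>; // 5/12
using prod = std::ratio_multiply<std::kilo, std::milli>;         // 1/1

static_assert(sum::num == 5 && sum::den == 12);
static_assert(prod::num == 1 && prod::den == 1);
static_assert(std::ratio_less_v<std::micro, std::milli>); // 1/10^6 < 1/10^3

int main() {}
```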
_R2::den>::value> type; }; #if _CCCL_STD_VER > 2014 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES) template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_equal_v = ratio_equal<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_equal_v + = ratio_equal<_R1, _R2>::value; template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_not_equal_v = ratio_not_equal<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_not_equal_v + = ratio_not_equal<_R1, _R2>::value; template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_less_v = ratio_less<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_less_v + = ratio_less<_R1, _R2>::value; template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_less_equal_v = ratio_less_equal<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_less_equal_v + = ratio_less_equal<_R1, _R2>::value; template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_greater_v = ratio_greater<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_greater_v + = ratio_greater<_R1, _R2>::value; template -_LIBCUDACXX_INLINE_VAR constexpr bool ratio_greater_equal_v = ratio_greater_equal<_R1, _R2>::value; +_LIBCUDACXX_INLINE_VAR constexpr bool ratio_greater_equal_v + = ratio_greater_equal<_R1, _R2>::value; #endif _LIBCUDACXX_END_NAMESPACE_STD #include //__cuda_std__ -#endif // _LIBCUDACXX_RATIO +#endif // _LIBCUDACXX_RATIO diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/semaphore b/libcudacxx/include/cuda/std/detail/libcxx/include/semaphore index c5a8576b95b..f038532e775 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/semaphore +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/semaphore @@ -55,428 +55,387 @@ using binary_semaphore = counting_semaphore<1>; # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include -#include // all public C++ headers provide the assertion handler -#include #include +#include + #ifdef _LIBCUDACXX_HAS_NO_THREADS -# error is not supported on this single threaded system +# error is not supported on this single threaded system #endif _LIBCUDACXX_BEGIN_NAMESPACE_STD -template +template class __atomic_semaphore_base { - _LIBCUDACXX_INLINE_VISIBILITY bool __fetch_sub_if_slow(ptrdiff_t __old) - { - while (__old != 0) + _LIBCUDACXX_INLINE_VISIBILITY + bool __fetch_sub_if_slow(ptrdiff_t __old) { - if (__count.compare_exchange_weak(__old, __old - 1, memory_order_acquire, memory_order_relaxed)) - { - return true; - } + while (__old != 0) { + if (__count.compare_exchange_weak(__old, __old - 1, memory_order_acquire, memory_order_relaxed)) + return true; + } + return false; } - return false; - } - _LIBCUDACXX_INLINE_VISIBILITY bool __fetch_sub_if() - { - ptrdiff_t __old = __count.load(memory_order_acquire); - if (__old == 0) + _LIBCUDACXX_INLINE_VISIBILITY + bool __fetch_sub_if() { - return false; + ptrdiff_t __old = __count.load(memory_order_acquire); + if (__old == 0) + return false; + if(__count.compare_exchange_weak(__old, __old - 1, memory_order_acquire, memory_order_relaxed)) + return true; + return __fetch_sub_if_slow(__old); // fail only if not __available } - if (__count.compare_exchange_weak(__old, __old - 1, memory_order_acquire, memory_order_relaxed)) + + _LIBCUDACXX_INLINE_VISIBILITY + void __wait_slow() { - return true; + while (1) { + ptrdiff_t const __old = __count.load(memory_order_acquire); + if(__old != 0) + break; + __count.wait(__old, memory_order_relaxed); + } } - return __fetch_sub_if_slow(__old); // fail 
only if not __available - } - _LIBCUDACXX_INLINE_VISIBILITY void __wait_slow() - { - while (1) + _LIBCUDACXX_INLINE_VISIBILITY + bool __acquire_slow_timed(chrono::nanoseconds const& __rel_time) { - ptrdiff_t const __old = __count.load(memory_order_acquire); - if (__old != 0) - { - break; - } - __count.wait(__old, memory_order_relaxed); + return __libcpp_thread_poll_with_backoff([this]() { + ptrdiff_t const __old = __count.load(memory_order_acquire); + return __old != 0 && __fetch_sub_if_slow(__old); + }, __rel_time); } - } - - _LIBCUDACXX_INLINE_VISIBILITY bool __acquire_slow_timed(chrono::nanoseconds const& __rel_time) - { - return __libcpp_thread_poll_with_backoff( - [this]() { - ptrdiff_t const __old = __count.load(memory_order_acquire); - return __old != 0 && __fetch_sub_if_slow(__old); - }, - __rel_time); - } - __atomic_base __count; + __atomic_base __count; public: - _LIBCUDACXX_INLINE_VISIBILITY static constexpr ptrdiff_t max() noexcept - { - return numeric_limits::max(); - } + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept + { + return numeric_limits::max(); + } - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_semaphore_base(ptrdiff_t __count) noexcept - : __count(__count) - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_semaphore_base(ptrdiff_t __count) noexcept : __count(__count) { } - ~__atomic_semaphore_base() = default; + ~__atomic_semaphore_base() = default; - __atomic_semaphore_base(__atomic_semaphore_base const&) = delete; - __atomic_semaphore_base& operator=(__atomic_semaphore_base const&) = delete; + __atomic_semaphore_base(__atomic_semaphore_base const&) = delete; + __atomic_semaphore_base& operator=(__atomic_semaphore_base const&) = delete; - _LIBCUDACXX_INLINE_VISIBILITY void release(ptrdiff_t __update = 1) - { - __count.fetch_add(__update, memory_order_release); - if (__update > 1) + _LIBCUDACXX_INLINE_VISIBILITY + void release(ptrdiff_t __update = 1) { - __count.notify_all(); + __count.fetch_add(__update, memory_order_release); + if(__update > 1) + __count.notify_all(); + else + __count.notify_one(); } - else - { - __count.notify_one(); - } - } - _LIBCUDACXX_INLINE_VISIBILITY void acquire() - { - while (!try_acquire()) + _LIBCUDACXX_INLINE_VISIBILITY + void acquire() { - __wait_slow(); + while (!try_acquire()) + __wait_slow(); } - } - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire() noexcept - { - return __fetch_sub_if(); - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_until(chrono::time_point const& __abs_time) - { - if (try_acquire()) + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire() noexcept { - return true; + return __fetch_sub_if(); } - else - { - return __acquire_slow_timed(__abs_time - Clock::now()); - } - } - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_for(chrono::duration const& __rel_time) - { - if (try_acquire()) + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_until(chrono::time_point const& __abs_time) { - return true; + if (try_acquire()) + return true; + else + return __acquire_slow_timed(__abs_time - Clock::now()); } - else + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_for(chrono::duration const& __rel_time) { - return __acquire_slow_timed(__rel_time); + + if (try_acquire()) + return true; + else + return __acquire_slow_timed(__rel_time); } - } }; #ifndef _LIBCUDACXX_USE_NATIVE_SEMAPHORES -template -class __atomic_semaphore_base<_Sco, 1> -{ - _LIBCUDACXX_INLINE_VISIBILITY bool __acquire_slow_timed(chrono::nanoseconds const& __rel_time) - { - return 
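Aside (not part of the patch): the counting __atomic_semaphore_base above implements try_acquire as an optimistic load followed by a compare-exchange loop that decrements only while the count is non-zero (__fetch_sub_if / __fetch_sub_if_slow). The same pattern in plain C++, with names invented for the sketch:

```cpp
// Illustrative only: "decrement only if non-zero" with a CAS loop.
#include <atomic>
#include <cstddef>

bool fetch_sub_if_nonzero(std::atomic<std::ptrdiff_t>& count)
{
  std::ptrdiff_t old = count.load(std::memory_order_acquire);
  while (old != 0)
  {
    // On failure, compare_exchange_weak reloads `old`, so the loop
    // re-checks the zero case before retrying.
    if (count.compare_exchange_weak(
          old, old - 1, std::memory_order_acquire, std::memory_order_relaxed))
    {
      return true; // claimed one unit
    }
  }
  return false; // semaphore currently exhausted
}

int main()
{
  std::atomic<std::ptrdiff_t> c{1};
  bool first  = fetch_sub_if_nonzero(c); // true, count drops to 0
  bool second = fetch_sub_if_nonzero(c); // false
  return (first && !second) ? 0 : 1;
}
```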
__libcpp_thread_poll_with_backoff( - [this]() { - return try_acquire(); - }, - __rel_time); - } - __atomic_base __available; +template +class __atomic_semaphore_base<_Sco, 1> { -public: - _LIBCUDACXX_INLINE_VISIBILITY static constexpr ptrdiff_t max() noexcept - { - return 1; - } - - _LIBCUDACXX_INLINE_VISIBILITY constexpr __atomic_semaphore_base(ptrdiff_t __available) - : __available(__available) - {} - - ~__atomic_semaphore_base() = default; - - __atomic_semaphore_base(__atomic_semaphore_base const&) = delete; - __atomic_semaphore_base& operator=(__atomic_semaphore_base const&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY void release(ptrdiff_t __update = 1) - { - _LIBCUDACXX_ASSERT(__update == 1, ""); - __available.store(1, memory_order_release); - __available.notify_one(); - (void) __update; - } - - _LIBCUDACXX_INLINE_VISIBILITY void acquire() - { - while (!try_acquire()) + _LIBCUDACXX_INLINE_VISIBILITY + bool __acquire_slow_timed(chrono::nanoseconds const& __rel_time) { - __available.wait(0, memory_order_relaxed); + return __libcpp_thread_poll_with_backoff([this]() { + return try_acquire(); + }, __rel_time); } - } + __atomic_base __available; + +public: + _LIBCUDACXX_INLINE_VISIBILITY + static constexpr ptrdiff_t max() noexcept { return 1; } - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire() noexcept - { - return 1 == __available.exchange(0, memory_order_acquire); - } + _LIBCUDACXX_INLINE_VISIBILITY constexpr + __atomic_semaphore_base(ptrdiff_t __available) : __available(__available) { } - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_until(chrono::time_point const& __abs_time) - { - if (try_acquire()) + ~__atomic_semaphore_base() = default; + + __atomic_semaphore_base(__atomic_semaphore_base const&) = delete; + __atomic_semaphore_base& operator=(__atomic_semaphore_base const&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + void release(ptrdiff_t __update = 1) { - return true; + _LIBCUDACXX_ASSERT(__update == 1, ""); + __available.store(1, memory_order_release); + __available.notify_one(); + (void)__update; } - else + + _LIBCUDACXX_INLINE_VISIBILITY + void acquire() { - return __acquire_slow_timed(__abs_time - Clock::now()); + while (!try_acquire()) + __available.wait(0, memory_order_relaxed); } - } - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_for(chrono::duration const& __rel_time) - { - if (try_acquire()) + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire() noexcept { - return true; + return 1 == __available.exchange(0, memory_order_acquire); } - else + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_until(chrono::time_point const& __abs_time) { - return __acquire_slow_timed(__rel_time); + if (try_acquire()) + return true; + else + return __acquire_slow_timed(__abs_time - Clock::now()); + } + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_for(chrono::duration const& __rel_time) + { + if (try_acquire()) + return true; + else + return __acquire_slow_timed(__rel_time); } - } }; #else -template -class __sem_semaphore_base -{ - _LIBCUDACXX_INLINE_VISIBILITY bool __backfill(bool __success) - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER - if (__success) +template +class __sem_semaphore_base { + + _LIBCUDACXX_INLINE_VISIBILITY + bool __backfill(bool __success) { - auto const __back_amount = __backbuffer.fetch_sub(2, memory_order_acquire); - bool const __post_one = __back_amount > 0; - bool const __post_two = __back_amount > 1; - auto const __success = (!__post_one || __libcpp_semaphore_post(&__semaphore)) - && (!__post_two || 
__libcpp_semaphore_post(&__semaphore)); - _LIBCUDACXX_ASSERT(__success, ""); - if (!__post_one || !__post_two) - { - __backbuffer.fetch_add(!__post_one ? 2 : 1, memory_order_relaxed); - } +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER + if(__success) { + auto const __back_amount = __backbuffer.fetch_sub(2, memory_order_acquire); + bool const __post_one = __back_amount > 0; + bool const __post_two = __back_amount > 1; + auto const __success = (!__post_one || __libcpp_semaphore_post(&__semaphore)) && + (!__post_two || __libcpp_semaphore_post(&__semaphore)); + _LIBCUDACXX_ASSERT(__success, ""); + if(!__post_one || !__post_two) + __backbuffer.fetch_add(!__post_one ? 2 : 1, memory_order_relaxed); + } +#endif + return __success; } -# endif - return __success; - } - - _LIBCUDACXX_INLINE_VISIBILITY bool __try_acquire_fast() - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - - ptrdiff_t __old; - __libcpp_thread_poll_with_backoff( - [&]() { - __old = __frontbuffer.load(memory_order_relaxed); - return 0 != (__old >> 32); - }, - chrono::microseconds(5)); - - // always steal if you can - while (__old >> 32) + + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_acquire_fast() { - if (__frontbuffer.compare_exchange_weak(__old, __old - (1ll << 32), memory_order_acquire)) - { - return true; - } +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + + ptrdiff_t __old; + __libcpp_thread_poll_with_backoff([&]() { + __old = __frontbuffer.load(memory_order_relaxed); + return 0 != (__old >> 32); + }, chrono::microseconds(5)); + + // always steal if you can + while(__old >> 32) + if(__frontbuffer.compare_exchange_weak(__old, __old - (1ll << 32), memory_order_acquire)) + return true; + // record we're waiting + __old = __frontbuffer.fetch_add(1ll, memory_order_release); + // ALWAYS steal if you can! + while(__old >> 32) + if(__frontbuffer.compare_exchange_weak(__old, __old - (1ll << 32), memory_order_acquire)) + break; + // not going to wait after all + if(__old >> 32) + return __try_done(true); +#endif + // the wait has begun... + return false; } - // record we're waiting - __old = __frontbuffer.fetch_add(1ll, memory_order_release); - // ALWAYS steal if you can! - while (__old >> 32) + + _LIBCUDACXX_INLINE_VISIBILITY + bool __try_done(bool __success) { - if (__frontbuffer.compare_exchange_weak(__old, __old - (1ll << 32), memory_order_acquire)) - { - break; - } +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + // record we're NOT waiting + __frontbuffer.fetch_sub(1ll, memory_order_release); +#endif + return __backfill(__success); } - // not going to wait after all - if (__old >> 32) + + _LIBCUDACXX_INLINE_VISIBILITY + void __release_slow(ptrdiff_t __post_amount) { - return __try_done(true); + #ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER + bool const __post_one = __post_amount > 0; + bool const __post_two = __post_amount > 1; + if(__post_amount > 2) + __backbuffer.fetch_add(__post_amount - 2, memory_order_acq_rel); + auto const __success = (!__post_one || __libcpp_semaphore_post(&__semaphore)) && + (!__post_two || __libcpp_semaphore_post(&__semaphore)); + _LIBCUDACXX_ASSERT(__success, ""); + #else + for(; __post_amount; --__post_amount) { + auto const __success = __libcpp_semaphore_post(&__semaphore); + _LIBCUDACXX_ASSERT(__success, ""); + } + #endif } -# endif - // the wait has begun... 
- return false; - } - - _LIBCUDACXX_INLINE_VISIBILITY bool __try_done(bool __success) - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - // record we're NOT waiting - __frontbuffer.fetch_sub(1ll, memory_order_release); -# endif - return __backfill(__success); - } - - _LIBCUDACXX_INLINE_VISIBILITY void __release_slow(ptrdiff_t __post_amount) - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER - bool const __post_one = __post_amount > 0; - bool const __post_two = __post_amount > 1; - if (__post_amount > 2) + + __libcpp_semaphore_t __semaphore; +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + __atomic_base __frontbuffer; +#endif +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER + __atomic_base __backbuffer; +#endif + +public: + static constexpr ptrdiff_t max() noexcept { + return _LIBCUDACXX_SEMAPHORE_MAX; + } + + _LIBCUDACXX_INLINE_VISIBILITY + __sem_semaphore_base(ptrdiff_t __count = 0) : __semaphore() +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + , __frontbuffer(__count << 32) +#endif +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER + , __backbuffer(0) +#endif { - __backbuffer.fetch_add(__post_amount - 2, memory_order_acq_rel); + _LIBCUDACXX_ASSERT(__count <= max(), ""); + auto const __success = +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + __libcpp_semaphore_init(&__semaphore, 0); +#else + __libcpp_semaphore_init(&__semaphore, __count); +#endif + _LIBCUDACXX_ASSERT(__success, ""); } - auto const __success = - (!__post_one || __libcpp_semaphore_post(&__semaphore)) && (!__post_two || __libcpp_semaphore_post(&__semaphore)); - _LIBCUDACXX_ASSERT(__success, ""); -# else - for (; __post_amount; --__post_amount) + + _LIBCUDACXX_INLINE_VISIBILITY + ~__sem_semaphore_base() { +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + _LIBCUDACXX_ASSERT(0 == (__frontbuffer.load(memory_order_relaxed) & ~0u), ""); +#endif + auto const __success = __libcpp_semaphore_destroy(&__semaphore); + _LIBCUDACXX_ASSERT(__success, ""); + } + + __sem_semaphore_base(const __sem_semaphore_base&) = delete; + __sem_semaphore_base& operator=(const __sem_semaphore_base&) = delete; + + _LIBCUDACXX_INLINE_VISIBILITY + void release(ptrdiff_t __update = 1) + { +#ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER + // boldly assume the semaphore is taken but uncontended + ptrdiff_t __old = 0; + // try to fast-release as long as it's uncontended + while(0 == (__old & ~0ul)) + if(__frontbuffer.compare_exchange_weak(__old, __old + (__update << 32), memory_order_acq_rel)) + return; +#endif + // slow-release it is + __release_slow(__update); + } + + _LIBCUDACXX_INLINE_VISIBILITY + void acquire() { - auto const __success = __libcpp_semaphore_post(&__semaphore); - _LIBCUDACXX_ASSERT(__success, ""); + if(!__try_acquire_fast()) + __try_done(__libcpp_semaphore_wait(&__semaphore)); } -# endif - } - __libcpp_semaphore_t __semaphore; -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - __atomic_base __frontbuffer; -# endif -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER - __atomic_base __backbuffer; -# endif + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire() noexcept + { + return try_acquire_for(chrono::nanoseconds(0)); + } -public: - static constexpr ptrdiff_t max() noexcept - { - return _LIBCUDACXX_SEMAPHORE_MAX; - } - - _LIBCUDACXX_INLINE_VISIBILITY __sem_semaphore_base(ptrdiff_t __count = 0) - : __semaphore() -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - , __frontbuffer(__count << 32) -# endif -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_BACK_BUFFER - , __backbuffer(0) -# endif - { - 
_LIBCUDACXX_ASSERT(__count <= max(), ""); - auto const __success = -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - __libcpp_semaphore_init(&__semaphore, 0); -# else - __libcpp_semaphore_init(&__semaphore, __count); -# endif - _LIBCUDACXX_ASSERT(__success, ""); - } - - _LIBCUDACXX_INLINE_VISIBILITY ~__sem_semaphore_base() - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - _LIBCUDACXX_ASSERT(0 == (__frontbuffer.load(memory_order_relaxed) & ~0u), ""); -# endif - auto const __success = __libcpp_semaphore_destroy(&__semaphore); - _LIBCUDACXX_ASSERT(__success, ""); - } - - __sem_semaphore_base(const __sem_semaphore_base&) = delete; - __sem_semaphore_base& operator=(const __sem_semaphore_base&) = delete; - - _LIBCUDACXX_INLINE_VISIBILITY void release(ptrdiff_t __update = 1) - { -# ifndef _LIBCUDACXX_HAS_NO_SEMAPHORE_FRONT_BUFFER - // boldly assume the semaphore is taken but uncontended - ptrdiff_t __old = 0; - // try to fast-release as long as it's uncontended - while (0 == (__old & ~0ul)) + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_until(chrono::time_point const& __abs_time) { - if (__frontbuffer.compare_exchange_weak(__old, __old + (__update << 32), memory_order_acq_rel)) - { - return; - } + auto const current = max(Clock::now(), __abs_time); + return try_acquire_for(chrono::duration_cast(__abs_time - current)); } -# endif - // slow-release it is - __release_slow(__update); - } - - _LIBCUDACXX_INLINE_VISIBILITY void acquire() - { - if (!__try_acquire_fast()) + + template + _LIBCUDACXX_INLINE_VISIBILITY + bool try_acquire_for(chrono::duration const& __rel_time) { - __try_done(__libcpp_semaphore_wait(&__semaphore)); + return __try_acquire_fast() || + __try_done(__libcpp_semaphore_wait_timed(&__semaphore, __rel_time)); } - } - - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire() noexcept - { - return try_acquire_for(chrono::nanoseconds(0)); - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_until(chrono::time_point const& __abs_time) - { - auto const current = max(Clock::now(), __abs_time); - return try_acquire_for(chrono::duration_cast(__abs_time - current)); - } - - template - _LIBCUDACXX_INLINE_VISIBILITY bool try_acquire_for(chrono::duration const& __rel_time) - { - return __try_acquire_fast() || __try_done(__libcpp_semaphore_wait_timed(&__semaphore, __rel_time)); - } }; #endif //_LIBCUDACXX_HAS_NO_SEMAPHORES -template +template using __semaphore_base = #ifdef _LIBCUDACXX_USE_NATIVE_SEMAPHORES - __conditional_t<__least_max_value <= __sem_semaphore_base<_Sco>::max(), - __sem_semaphore_base<_Sco>, - __atomic_semaphore_base<_Sco, __least_max_value>> + __conditional_t<__least_max_value <= __sem_semaphore_base<_Sco>::max(), + __sem_semaphore_base<_Sco>, + __atomic_semaphore_base<_Sco, __least_max_value>> #else - __atomic_semaphore_base<_Sco, __least_max_value> + __atomic_semaphore_base<_Sco, __least_max_value> #endif - ; + ; -template +template class counting_semaphore : public __semaphore_base<__least_max_value, 0> { - static_assert(__least_max_value <= __semaphore_base<__least_max_value, 0>::max(), ""); - + static_assert(__least_max_value <= __semaphore_base<__least_max_value, 0>::max(), ""); public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr counting_semaphore(ptrdiff_t __count = 0) - : __semaphore_base<__least_max_value, 0>(__count) - {} - ~counting_semaphore() = default; + _LIBCUDACXX_INLINE_VISIBILITY constexpr + counting_semaphore(ptrdiff_t __count = 0) : __semaphore_base<__least_max_value, 0>(__count) { } + ~counting_semaphore() = default; - 
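Aside (not part of the patch): counting_semaphore above (and its counting_semaphore<1> alias binary_semaphore) follows the C++20 semaphore interface: release, acquire, try_acquire, the timed try_acquire_for/try_acquire_until, and a static max() bound. A minimal host-side hand-off is sketched below against the standard <semaphore> header; the cuda::std equivalents in this file expose the same members.

```cpp
// Illustrative only: one-shot producer/consumer hand-off with a binary semaphore.
#include <iostream>
#include <semaphore>
#include <thread>

std::binary_semaphore ready{0}; // starts with no units available
int payload = 0;

int main()
{
  std::thread producer([] {
    payload = 42;
    ready.release(); // count 0 -> 1, wakes the waiter
  });

  ready.acquire();              // blocks until release(), then count 1 -> 0
  std::cout << payload << '\n'; // prints 42; release/acquire order the write
  producer.join();
}
```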
counting_semaphore(const counting_semaphore&) = delete; - counting_semaphore& operator=(const counting_semaphore&) = delete; + counting_semaphore(const counting_semaphore&) = delete; + counting_semaphore& operator=(const counting_semaphore&) = delete; }; using binary_semaphore = counting_semaphore<1>; diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/span b/libcudacxx/include/cuda/std/detail/libcxx/include/span index c5c661a0c78..7e1e7a4172c 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/span +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/span @@ -138,6 +138,7 @@ template # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include #include @@ -166,7 +167,6 @@ template #include #include #include // for ptrdiff_t -#include // all public C++ headers provide the assertion handler #include // standard-mandated includes @@ -254,8 +254,8 @@ _LIBCUDACXX_INLINE_VAR constexpr bool __is_span_compatible_container< decltype(data(declval<_Tp>())), decltype(size(declval<_Tp>())), // remove_pointer_t(*)[] is convertible to ElementType(*)[] - enable_if_t()))> (*)[], _ElementType (*)[]>::value, - nullptr_t>>> = true; + enable_if_t< is_convertible()))> (*)[], _ElementType (*)[]>::value, + nullptr_t> >> = true; # endif // _CCCL_STD_VER <= 2014 || _CCCL_COMPILER_MSVC_2017 template @@ -340,7 +340,8 @@ public: _LIBCUDACXX_TEMPLATE(class _OtherElementType) _LIBCUDACXX_REQUIRES(__span_array_convertible) - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 span(const array<_OtherElementType, _Extent>& __arr) noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 + span(const array<_OtherElementType, _Extent>& __arr) noexcept : __data_{__arr.data()} {} @@ -577,7 +578,8 @@ public: _LIBCUDACXX_TEMPLATE(class _OtherElementType, size_t _Sz) _LIBCUDACXX_REQUIRES(__span_array_convertible) - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 span(const array<_OtherElementType, _Sz>& __arr) noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX17 + span(const array<_OtherElementType, _Sz>& __arr) noexcept : __data_{__arr.data()} , __size_{_Sz} {} @@ -763,13 +765,13 @@ _CCCL_HOST_DEVICE span(array<_Tp, _Sz>&) -> span<_Tp, _Sz>; template _CCCL_HOST_DEVICE span(const array<_Tp, _Sz>&) -> span; -# if defined(_CCCL_COMPILER_MSVC_2017) -template +#if defined(_CCCL_COMPILER_MSVC_2017) +template _CCCL_HOST_DEVICE span(_Container&) -> span; -template +template _CCCL_HOST_DEVICE span(const _Container&) -> span; -# else // ^^^ _CCCL_COMPILER_MSVC_2017 ^^^ / vvv !_CCCL_COMPILER_MSVC_2017 vvv +#else // ^^^ _CCCL_COMPILER_MSVC_2017 ^^^ / vvv !_CCCL_COMPILER_MSVC_2017 vvv _LIBCUDACXX_TEMPLATE(class _It, class _EndOrSize) _LIBCUDACXX_REQUIRES(contiguous_iterator<_It>) _CCCL_HOST_DEVICE span(_It, _EndOrSize) -> span>>; @@ -777,7 +779,7 @@ _CCCL_HOST_DEVICE span(_It, _EndOrSize) -> span) _CCCL_HOST_DEVICE span(_Range&&) -> span>>; -# endif // !_CCCL_COMPILER_MSVC_2017 +#endif // !_CCCL_COMPILER_MSVC_2017 #endif // _CCCL_STD_VER >= 2017 diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/stdexcept b/libcudacxx/include/cuda/std/detail/libcxx/include/stdexcept index 720862e0814..743bc70570b 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/stdexcept +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/stdexcept @@ -53,9 +53,9 @@ public: # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include #include -#include // all public 
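Aside (not part of the patch): the span hunk above only re-wraps constructor declarations and the class-template-argument-deduction guides; for context, those guides deduce a fixed extent from built-in or std::array sources and a dynamic extent from pointer-plus-size (std::span shown, cuda::std::span is analogous):

```cpp
// Illustrative only: what the span deduction guides produce.
#include <array>
#include <cstddef>
#include <span>

int main()
{
  int raw[4] = {1, 2, 3, 4};
  std::array<double, 3> arr{1.0, 2.0, 3.0};

  std::span s1{raw};                 // span<int, 4>    (fixed extent)
  std::span s2{arr};                 // span<double, 3> (fixed extent)
  std::span s3{raw, std::size_t{2}}; // span<int>       (dynamic extent)

  static_assert(decltype(s1)::extent == 4);
  static_assert(decltype(s2)::extent == 3);
  static_assert(decltype(s3)::extent == std::dynamic_extent);
  return static_cast<int>(s1.size() + s2.size() + s3.size()) == 9 ? 0 : 1;
}
```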
C++ headers provide the assertion handler #include #include diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/tuple b/libcudacxx/include/cuda/std/detail/libcxx/include/tuple index 9606883b5a2..198fa1eafd8 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/tuple +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/tuple @@ -155,7 +155,9 @@ template # pragma system_header #endif // no system header +#include // all public C++ headers provide the assertion handler #include +#include #include #include #include @@ -177,8 +179,6 @@ template #include #include #include -#include // all public C++ headers provide the assertion handler -#include #include #include @@ -187,39 +187,38 @@ template // [tuple.syn] #ifndef _LIBCUDACXX_HAS_NO_SPACESHIP_OPERATOR -# include +#include #endif #include _LIBCUDACXX_BEGIN_NAMESPACE_STD -template -struct __is_tuple_of_iterator_references : false_type +template +struct __is_tuple_of_iterator_references : false_type {}; // __tuple_leaf -struct __tuple_leaf_default_constructor_tag -{}; +struct __tuple_leaf_default_constructor_tag {}; -template ::value> +template ::value> class __tuple_leaf; template inline _LIBCUDACXX_INLINE_VISIBILITY void -swap(__tuple_leaf<_Ip, _Hp, _Ep>& __x, __tuple_leaf<_Ip, _Hp, _Ep>& __y) noexcept(__is_nothrow_swappable<_Hp>::value) -{ +swap(__tuple_leaf<_Ip, _Hp, _Ep> &__x, + __tuple_leaf<_Ip, _Hp, _Ep> + &__y) noexcept(__is_nothrow_swappable<_Hp>::value) { swap(__x.get(), __y.get()); } -template -class __tuple_leaf -{ +template class __tuple_leaf { _Hp __value_; template - _LIBCUDACXX_INLINE_VISIBILITY static constexpr bool __can_bind_reference() - { + _LIBCUDACXX_INLINE_VISIBILITY static constexpr bool __can_bind_reference() { #if __has_keyword(__reference_binds_to_temporary) return !__reference_binds_to_temporary(_Hp, _Tp); #else @@ -227,719 +226,765 @@ class __tuple_leaf #endif } - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf& operator=(const __tuple_leaf&); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf &operator=(const __tuple_leaf &); public: _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf() noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Hp)) - : __value_() - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to default construct a reference element in a tuple"); + _LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Hp)) + : __value_() { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to default construct a reference element in a tuple"); } - _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf(__tuple_leaf_default_constructor_tag) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Hp)) - : __value_() - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to default construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf( + __tuple_leaf_default_constructor_tag) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, + _Hp)) + : __value_() { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to default construct a reference element in a tuple"); } template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc&) - : __value_() - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to default construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &) + : __value_() { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to default 
construct a reference element in a tuple"); } template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc& __a) - : __value_(allocator_arg_t(), __a) - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to default construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &__a) + : __value_(allocator_arg_t(), __a) { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to default construct a reference element in a tuple"); } template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc& __a) - : __value_(__a) - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to default construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &__a) + : __value_(__a) { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to default construct a reference element in a tuple"); } template - using __can_forward = _And<_IsNotSame<__remove_cvref_t<_Tp>, __tuple_leaf>, is_constructible<_Hp, _Tp>>; + using __can_forward = _And<_IsNotSame<__remove_cvref_t<_Tp>, __tuple_leaf>, + is_constructible<_Hp, _Tp>>; template ::value, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit __tuple_leaf(_Tp&& __t) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_constructible, _Hp, _Tp)) - : __value_(_CUDA_VSTD::forward<_Tp>(__t)) - { - static_assert(__can_bind_reference<_Tp&&>(), + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit __tuple_leaf( + _Tp &&__t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, _Hp, + _Tp)) + : __value_(_CUDA_VSTD::forward<_Tp>(__t)) { + static_assert(__can_bind_reference<_Tp &&>(), "Attempted construction of reference element binds to a " "temporary whose lifetime has ended"); } template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc&, _Tp&& __t) - : __value_(_CUDA_VSTD::forward<_Tp>(__t)) - { - static_assert(__can_bind_reference<_Tp&&>(), + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, + const _Alloc &, _Tp &&__t) + : __value_(_CUDA_VSTD::forward<_Tp>(__t)) { + static_assert(__can_bind_reference<_Tp &&>(), "Attempted construction of reference element binds to a " "temporary whose lifetime has ended"); } template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc& __a, _Tp&& __t) - : __value_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tp>(__t)) - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to uses-allocator construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, + const _Alloc &__a, + _Tp &&__t) + : __value_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tp>(__t)) { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to uses-allocator construct a reference element in a tuple"); } template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc& __a, _Tp&& __t) - : __value_(_CUDA_VSTD::forward<_Tp>(__t), __a) - { - static_assert(!_LIBCUDACXX_TRAIT(is_reference, _Hp), - "Attempted to uses-allocator construct a reference element in a tuple"); + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, + const _Alloc &__a, + _Tp &&__t) + : __value_(_CUDA_VSTD::forward<_Tp>(__t), __a) { + static_assert( + !_LIBCUDACXX_TRAIT(is_reference, _Hp), + "Attempted to uses-allocator construct a 
reference element in a tuple"); } - __tuple_leaf(const __tuple_leaf& __t) = default; - __tuple_leaf(__tuple_leaf&& __t) = default; + __tuple_leaf(const __tuple_leaf &__t) = default; + __tuple_leaf(__tuple_leaf &&__t) = default; template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf& - operator=(_Tp&& __t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_assignable, _Hp&, _Tp)) - { + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf &operator=(_Tp &&__t) noexcept( + _LIBCUDACXX_TRAIT(is_nothrow_assignable, _Hp &, _Tp)) { __value_ = _CUDA_VSTD::forward<_Tp>(__t); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY int swap(__tuple_leaf& __t) noexcept(__is_nothrow_swappable<__tuple_leaf>::value) - { + _LIBCUDACXX_INLINE_VISIBILITY int swap(__tuple_leaf &__t) noexcept( + __is_nothrow_swappable<__tuple_leaf>::value) { _CUDA_VSTD::swap(*this, __t); return 0; } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Hp& get() noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Hp & + get() noexcept { return __value_; } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Hp& get() const noexcept - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Hp & + get() const noexcept { return __value_; } }; template -class __tuple_leaf<_Ip, _Hp, true> : private _Hp -{ - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf& operator=(const __tuple_leaf&); +class __tuple_leaf<_Ip, _Hp, true> : private _Hp { + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf &operator=(const __tuple_leaf &); public: - _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf() noexcept(is_nothrow_default_constructible<_Hp>::value) {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf() noexcept( + is_nothrow_default_constructible<_Hp>::value) {} - _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf(__tuple_leaf_default_constructor_tag) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Hp)) - : _Hp() - {} + _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_leaf( + __tuple_leaf_default_constructor_tag) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, + _Hp)) + : _Hp() {} template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc&) - {} + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &) {} template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc& __a) - : _Hp(allocator_arg_t(), __a) - {} + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &__a) + : _Hp(allocator_arg_t(), __a) {} template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, const _Alloc& __a) - : _Hp(__a) - {} + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf(integral_constant, + const _Alloc &__a) + : _Hp(__a) {} template - using __can_forward = _And<_IsNotSame<__remove_cvref_t<_Tp>, __tuple_leaf>, is_constructible<_Hp, _Tp>>; + using __can_forward = _And<_IsNotSame<__remove_cvref_t<_Tp>, __tuple_leaf>, + is_constructible<_Hp, _Tp>>; template ::value, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit __tuple_leaf(_Tp&& __t) noexcept( - (is_nothrow_constructible<_Hp, _Tp>::value)) - : _Hp(_CUDA_VSTD::forward<_Tp>(__t)) - {} + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit __tuple_leaf( + _Tp &&__t) noexcept((is_nothrow_constructible<_Hp, _Tp>::value)) + : _Hp(_CUDA_VSTD::forward<_Tp>(__t)) {} template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc&, _Tp&& __t) - : _Hp(_CUDA_VSTD::forward<_Tp>(__t)) - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit 
__tuple_leaf(integral_constant, + const _Alloc &, _Tp &&__t) + : _Hp(_CUDA_VSTD::forward<_Tp>(__t)) {} template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc& __a, _Tp&& __t) - : _Hp(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tp>(__t)) - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, + const _Alloc &__a, + _Tp &&__t) + : _Hp(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tp>(__t)) {} template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, const _Alloc& __a, _Tp&& __t) - : _Hp(_CUDA_VSTD::forward<_Tp>(__t), __a) - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_leaf(integral_constant, + const _Alloc &__a, + _Tp &&__t) + : _Hp(_CUDA_VSTD::forward<_Tp>(__t), __a) {} - __tuple_leaf(__tuple_leaf const&) = default; - __tuple_leaf(__tuple_leaf&&) = default; + __tuple_leaf(__tuple_leaf const &) = default; + __tuple_leaf(__tuple_leaf &&) = default; template - _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf& - operator=(_Tp&& __t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_assignable, _Hp&, _Tp)) - { + _LIBCUDACXX_INLINE_VISIBILITY __tuple_leaf &operator=(_Tp &&__t) noexcept( + _LIBCUDACXX_TRAIT(is_nothrow_assignable, _Hp &, _Tp)) { _Hp::operator=(_CUDA_VSTD::forward<_Tp>(__t)); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY int swap(__tuple_leaf& __t) noexcept(__is_nothrow_swappable<__tuple_leaf>::value) - { + _LIBCUDACXX_INLINE_VISIBILITY int swap(__tuple_leaf &__t) noexcept( + __is_nothrow_swappable<__tuple_leaf>::value) { _CUDA_VSTD::swap(*this, __t); return 0; } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Hp& get() noexcept - { - return static_cast<_Hp&>(*this); + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 _Hp & + get() noexcept { + return static_cast<_Hp &>(*this); } - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Hp& get() const noexcept - { - return static_cast(*this); + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const _Hp & + get() const noexcept { + return static_cast(*this); } }; template -_LIBCUDACXX_INLINE_VISIBILITY void __swallow(_Tp&&...) noexcept -{} +_LIBCUDACXX_INLINE_VISIBILITY void __swallow(_Tp &&...) noexcept {} -template -struct __all_default_constructible; +template struct __all_default_constructible; template -struct __all_default_constructible<__tuple_types<_Tp...>> : __all<_LIBCUDACXX_TRAIT(is_default_constructible, _Tp)...> -{}; +struct __all_default_constructible<__tuple_types<_Tp...>> + : __all<_LIBCUDACXX_TRAIT(is_default_constructible, _Tp)...> {}; -struct __tuple_variadic_constructor_tag -{}; +struct __tuple_variadic_constructor_tag {}; // __tuple_impl -template -struct __tuple_impl; +template struct __tuple_impl; template -struct _LIBCUDACXX_DECLSPEC_EMPTY_BASES __tuple_impl<__tuple_indices<_Indx...>, _Tp...> - : public __tuple_leaf<_Indx, _Tp>... -{ +struct _LIBCUDACXX_DECLSPEC_EMPTY_BASES + __tuple_impl<__tuple_indices<_Indx...>, _Tp...> + : public __tuple_leaf<_Indx, _Tp>... 
{ _LIBCUDACXX_INLINE_VISIBILITY constexpr __tuple_impl() noexcept( - __all<_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Tp)...>::value) - {} + __all<_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, + _Tp)...>::value) {} // Handle non-allocator, full initialization // Old MSVC cannot handle the noexept specifier outside of template arguments template = 0, - bool __all_nothrow_constructible = __all<_LIBCUDACXX_TRAIT(is_nothrow_constructible, _Tp, _Up)...>::value> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit __tuple_impl( - __tuple_variadic_constructor_tag, _Up&&... __u) noexcept(__all_nothrow_constructible) - : __tuple_leaf<_Indx, _Tp>(_CUDA_VSTD::forward<_Up>(__u))... - {} + bool __all_nothrow_constructible = __all<_LIBCUDACXX_TRAIT( + is_nothrow_constructible, _Tp, _Up)...>::value> + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit __tuple_impl( + __tuple_variadic_constructor_tag, + _Up &&...__u) noexcept(__all_nothrow_constructible) + : __tuple_leaf<_Indx, _Tp>(_CUDA_VSTD::forward<_Up>(__u))... {} // Handle non-allocator, partial default initialization // Recursively delegate until we have full rank - template = 0> + template = 0> _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit __tuple_impl( - __tuple_variadic_constructor_tag __tag, - _Up&&... __u) noexcept(noexcept(__tuple_impl(__tag, - _CUDA_VSTD::forward<_Up>(__u)..., - __tuple_leaf_default_constructor_tag{}))) - : __tuple_impl(__tag, _CUDA_VSTD::forward<_Up>(__u)..., __tuple_leaf_default_constructor_tag{}) - {} + __tuple_variadic_constructor_tag __tag, + _Up &&...__u) noexcept(noexcept(__tuple_impl(__tag, + _CUDA_VSTD::forward<_Up>( + __u)..., + __tuple_leaf_default_constructor_tag{}))) + : __tuple_impl(__tag, _CUDA_VSTD::forward<_Up>(__u)..., + __tuple_leaf_default_constructor_tag{}) {} // Handle allocator aware, full initialization - template = 0> + template = 0> _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_impl( - allocator_arg_t, const _Alloc& __a, __tuple_variadic_constructor_tag, _Up&&... __u) - : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc, _Up>(), __a, _CUDA_VSTD::forward<_Up>(__u))... - {} + allocator_arg_t, const _Alloc &__a, __tuple_variadic_constructor_tag, + _Up &&...__u) + : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc, _Up>(), __a, + _CUDA_VSTD::forward<_Up>(__u))... {} // Handle allocator aware, full default initialization template - _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_impl(allocator_arg_t, const _Alloc& __a) - : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc>(), __a)... - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit __tuple_impl(allocator_arg_t, + const _Alloc &__a) + : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc>(), __a)... {} template - using __tuple_elem_at = __tuple_element_t<_Indx2, __make_tuple_types_t<_Tuple>>; - - template >::value, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __tuple_impl(_Tuple&& __t) noexcept( - (__all<_LIBCUDACXX_TRAIT(is_nothrow_constructible, _Tp, __tuple_elem_at<_Tuple, _Indx>)...>::value)) - : __tuple_leaf<_Indx, _Tp>(_CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>(_CUDA_VSTD::get<_Indx>(__t)))... - {} - - template >::value, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl(allocator_arg_t, const _Alloc& __a, _Tuple&& __t) - : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc, __tuple_elem_at<_Tuple, _Indx>>(), - __a, - _CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>(_CUDA_VSTD::get<_Indx>(__t)))... 
- {} + using __tuple_elem_at = + __tuple_element_t<_Indx2, __make_tuple_types_t<_Tuple>>; - template >::value, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl& operator=(_Tuple&& __t) noexcept( - (__all<_LIBCUDACXX_TRAIT(is_nothrow_assignable, _Tp&, __tuple_elem_at<_Tuple, _Indx>)...>::value)) - { + template >::value, + int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + __tuple_impl(_Tuple &&__t) noexcept( + (__all<_LIBCUDACXX_TRAIT(is_nothrow_constructible, _Tp, + __tuple_elem_at<_Tuple, _Indx>)...>::value)) + : __tuple_leaf<_Indx, _Tp>( + _CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>( + _CUDA_VSTD::get<_Indx>(__t)))... {} + + template >::value, + int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl(allocator_arg_t, const _Alloc &__a, + _Tuple &&__t) + : __tuple_leaf<_Indx, _Tp>( + __uses_alloc_ctor<_Tp, _Alloc, __tuple_elem_at<_Tuple, _Indx>>(), + __a, + _CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>( + _CUDA_VSTD::get<_Indx>(__t)))... {} + + template < + class _Tuple, + __enable_if_t<__tuple_assignable<_Tuple, tuple<_Tp...>>::value, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl &operator=(_Tuple &&__t) noexcept( + (__all<_LIBCUDACXX_TRAIT(is_nothrow_assignable, _Tp &, + __tuple_elem_at<_Tuple, _Indx>)...>::value)) { __swallow(__tuple_leaf<_Indx, _Tp>::operator=( - _CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>(_CUDA_VSTD::get<_Indx>(__t)))...); + _CUDA_VSTD::forward<__tuple_elem_at<_Tuple, _Indx>>( + _CUDA_VSTD::get<_Indx>(__t)))...); return *this; } - __tuple_impl(const __tuple_impl&) = default; - __tuple_impl(__tuple_impl&&) = default; + __tuple_impl(const __tuple_impl &) = default; + __tuple_impl(__tuple_impl &&) = default; - _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl& - operator=(const __tuple_impl& __t) noexcept((__all<_LIBCUDACXX_TRAIT(is_nothrow_copy_assignable, _Tp)...>::value)) - { - __swallow(__tuple_leaf<_Indx, _Tp>::operator=(static_cast&>(__t).get())...); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl & + operator=(const __tuple_impl &__t) noexcept( + (__all<_LIBCUDACXX_TRAIT(is_nothrow_copy_assignable, _Tp)...>::value)) { + __swallow(__tuple_leaf<_Indx, _Tp>::operator=( + static_cast &>(__t).get())...); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl& - operator=(__tuple_impl&& __t) noexcept((__all<_LIBCUDACXX_TRAIT(is_nothrow_move_assignable, _Tp)...>::value)) - { - __swallow(__tuple_leaf<_Indx, _Tp>::operator=( - _CUDA_VSTD::forward<_Tp>(static_cast<__tuple_leaf<_Indx, _Tp>&>(__t).get()))...); + _LIBCUDACXX_INLINE_VISIBILITY __tuple_impl & + operator=(__tuple_impl &&__t) noexcept( + (__all<_LIBCUDACXX_TRAIT(is_nothrow_move_assignable, _Tp)...>::value)) { + __swallow(__tuple_leaf<_Indx, _Tp>::operator=(_CUDA_VSTD::forward<_Tp>( + static_cast<__tuple_leaf<_Indx, _Tp> &>(__t).get()))...); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY void - swap(__tuple_impl& __t) noexcept(__all<__is_nothrow_swappable<_Tp>::value...>::value) - { - __swallow(__tuple_leaf<_Indx, _Tp>::swap(static_cast<__tuple_leaf<_Indx, _Tp>&>(__t))...); + _LIBCUDACXX_INLINE_VISIBILITY void swap(__tuple_impl &__t) noexcept( + __all<__is_nothrow_swappable<_Tp>::value...>::value) { + __swallow(__tuple_leaf<_Indx, _Tp>::swap( + static_cast<__tuple_leaf<_Indx, _Tp> &>(__t))...); } }; -struct __invalid_tuple_constraints -{ +struct __invalid_tuple_constraints { static constexpr bool __implicit_constructible = false; static constexpr bool __explicit_constructible = false; - static constexpr bool __nothrow_constructible = false; + static constexpr bool 
__nothrow_constructible = false; }; -template -struct __tuple_constraints -{ +template struct __tuple_constraints { static constexpr bool __implicit_default_constructible = - __all<__is_implicitly_default_constructible<_Tp>::value...>::value; + __all<__is_implicitly_default_constructible<_Tp>::value...>::value; static constexpr bool __explicit_default_constructible = - !__implicit_default_constructible && __all<_LIBCUDACXX_TRAIT(is_default_constructible, _Tp)...>::value; + !__implicit_default_constructible && + __all<_LIBCUDACXX_TRAIT(is_default_constructible, _Tp)...>::value; static constexpr bool __nothrow_default_constructible = - __all<_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Tp)...>::value; + __all<_LIBCUDACXX_TRAIT(is_nothrow_default_constructible, _Tp)...>::value; static constexpr bool __implicit_variadic_copy_constructible = - __tuple_constructible, tuple<_Tp...>>::value - && __tuple_convertible, tuple<_Tp...>>::value; + __tuple_constructible, tuple<_Tp...>>::value && + __tuple_convertible, tuple<_Tp...>>::value; static constexpr bool __explicit_variadic_copy_constructible = - __tuple_constructible, tuple<_Tp...>>::value - && !__tuple_convertible, tuple<_Tp...>>::value; + __tuple_constructible, tuple<_Tp...>>::value && + !__tuple_convertible, tuple<_Tp...>>::value; static constexpr bool __nothrow_variadic_copy_constructible = - __all<_LIBCUDACXX_TRAIT(is_nothrow_copy_constructible, _Tp)...>::value; + __all<_LIBCUDACXX_TRAIT(is_nothrow_copy_constructible, _Tp)...>::value; - template - struct _PackExpandsToThisTuple : false_type - {}; + template struct _PackExpandsToThisTuple : false_type {}; template - struct _PackExpandsToThisTuple<_Arg> : is_same<__remove_cvref_t<_Arg>, tuple<_Tp...>> - {}; + struct _PackExpandsToThisTuple<_Arg> + : is_same<__remove_cvref_t<_Arg>, tuple<_Tp...>> {}; - template - struct __variadic_constraints - { + template struct __variadic_constraints { static constexpr bool __implicit_constructible = - __tuple_constructible, tuple<_Tp...>>::value - && __tuple_convertible, tuple<_Tp...>>::value; + __tuple_constructible, tuple<_Tp...>>::value && + __tuple_convertible, tuple<_Tp...>>::value; static constexpr bool __explicit_constructible = - __tuple_constructible, tuple<_Tp...>>::value - && !__tuple_convertible, tuple<_Tp...>>::value; + __tuple_constructible, tuple<_Tp...>>::value && + !__tuple_convertible, tuple<_Tp...>>::value; - static constexpr bool __nothrow_constructible = - __all<_LIBCUDACXX_TRAIT(is_nothrow_constructible, _Tp, _Args)...>::value; + static constexpr bool __nothrow_constructible = __all<_LIBCUDACXX_TRAIT( + is_nothrow_constructible, _Tp, _Args)...>::value; }; - template - struct __variadic_constraints_less_rank - { + template struct __variadic_constraints_less_rank { static constexpr bool __implicit_constructible = - __tuple_constructible, __make_tuple_types_t, sizeof...(_Args)>>::value - && __tuple_convertible, __make_tuple_types_t, sizeof...(_Args)>>::value - && __all_default_constructible<__make_tuple_types_t, sizeof...(_Tp), sizeof...(_Args)>>::value; + __tuple_constructible< + tuple<_Args...>, + __make_tuple_types_t, sizeof...(_Args)>>::value && + __tuple_convertible< + tuple<_Args...>, + __make_tuple_types_t, sizeof...(_Args)>>::value && + __all_default_constructible<__make_tuple_types_t< + tuple<_Tp...>, sizeof...(_Tp), sizeof...(_Args)>>::value; static constexpr bool __explicit_constructible = - __tuple_constructible, __make_tuple_types_t, sizeof...(_Args)>>::value - && !__tuple_convertible, __make_tuple_types_t, 
sizeof...(_Args)>>::value - && __all_default_constructible<__make_tuple_types_t, sizeof...(_Tp), sizeof...(_Args)>>::value; + __tuple_constructible< + tuple<_Args...>, + __make_tuple_types_t, sizeof...(_Args)>>::value && + !__tuple_convertible< + tuple<_Args...>, + __make_tuple_types_t, sizeof...(_Args)>>::value && + __all_default_constructible<__make_tuple_types_t< + tuple<_Tp...>, sizeof...(_Tp), sizeof...(_Args)>>::value; }; - template - struct __valid_tuple_like_constraints - { + template struct __valid_tuple_like_constraints { static constexpr bool __implicit_constructible = - __tuple_constructible<_Tuple, tuple<_Tp...>>::value && __tuple_convertible<_Tuple, tuple<_Tp...>>::value; + __tuple_constructible<_Tuple, tuple<_Tp...>>::value && + __tuple_convertible<_Tuple, tuple<_Tp...>>::value; static constexpr bool __explicit_constructible = - __tuple_constructible<_Tuple, tuple<_Tp...>>::value && !__tuple_convertible<_Tuple, tuple<_Tp...>>::value; + __tuple_constructible<_Tuple, tuple<_Tp...>>::value && + !__tuple_convertible<_Tuple, tuple<_Tp...>>::value; }; - template - struct __valid_tuple_like_constraints_rank_one - { + template struct __valid_tuple_like_constraints_rank_one { template struct _PreferTupleLikeConstructorImpl : _Or< - // Don't attempt the two checks below if the tuple we are given - // has the same type as this tuple. - _IsSame<__remove_cvref_t<_Tuple2>, tuple<_Tp...>>, - _Lazy<_And, _Not>, _Not>>> - {}; + // Don't attempt the two checks below if the tuple we are given + // has the same type as this tuple. + _IsSame<__remove_cvref_t<_Tuple2>, tuple<_Tp...>>, + _Lazy<_And, _Not>, + _Not>>> {}; // This trait is used to disable the tuple-like constructor when // the UTypes... constructor should be selected instead. // See LWG issue #2549. 
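// A minimal host-side sketch, not taken from this patch, of the situation the
// constraint above guards against (LWG 2549): for a one-element tuple whose
// element type is itself constructible from the source tuple, the _UTypes...
// constructor has to win, so _PreferTupleLikeConstructor removes the tuple-like
// converting constructor from overload resolution. The `Widget` type and
// `lwg2549_example` function are hypothetical names used only for illustration.
//
//   #include <cuda/std/tuple>
//
//   struct Widget
//   {
//     Widget(int) {}                   // what the element-wise (tuple-like) ctor would call
//     Widget(cuda::std::tuple<int>) {} // what the _UTypes... ctor calls
//   };
//
//   void lwg2549_example()
//   {
//     cuda::std::tuple<int> src(7);
//     // is_constructible<Widget, const cuda::std::tuple<int>&> is true, so the
//     // tuple-like overload is constrained away and the single Widget element
//     // is initialized from `src` itself, i.e. via Widget(cuda::std::tuple<int>).
//     cuda::std::tuple<Widget> dst(src);
//   }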
template - using _PreferTupleLikeConstructor = _PreferTupleLikeConstructorImpl<_Tuple2>; + using _PreferTupleLikeConstructor = + _PreferTupleLikeConstructorImpl<_Tuple2>; static constexpr bool __implicit_constructible = - __tuple_constructible<_Tuple, tuple<_Tp...>>::value && __tuple_convertible<_Tuple, tuple<_Tp...>>::value - && _PreferTupleLikeConstructor<_Tuple>::value; + __tuple_constructible<_Tuple, tuple<_Tp...>>::value && + __tuple_convertible<_Tuple, tuple<_Tp...>>::value && + _PreferTupleLikeConstructor<_Tuple>::value; static constexpr bool __explicit_constructible = - __tuple_constructible<_Tuple, tuple<_Tp...>>::value && !__tuple_convertible<_Tuple, tuple<_Tp...>>::value - && _PreferTupleLikeConstructor<_Tuple>::value; + __tuple_constructible<_Tuple, tuple<_Tp...>>::value && + !__tuple_convertible<_Tuple, tuple<_Tp...>>::value && + _PreferTupleLikeConstructor<_Tuple>::value; }; template using __tuple_like_constraints = - _If, __valid_tuple_like_constraints<_Tuple>>; + _If, + __valid_tuple_like_constraints<_Tuple>>; }; -template -class _LIBCUDACXX_TEMPLATE_VIS tuple -{ +template class _LIBCUDACXX_TEMPLATE_VIS tuple { typedef __tuple_impl<__make_tuple_indices_t, _Tp...> _BaseT; _BaseT __base_; - template - struct _PackExpandsToThisTuple : false_type - {}; + template struct _PackExpandsToThisTuple : false_type {}; template - struct _PackExpandsToThisTuple<_Arg> : is_same<__remove_cvref_t<_Arg>, tuple> - {}; + struct _PackExpandsToThisTuple<_Arg> + : is_same<__remove_cvref_t<_Arg>, tuple> {}; public: template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __tuple_element_t<_Ip, tuple>& __get_impl() & noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __tuple_element_t<_Ip, tuple>& + __get_impl() & noexcept { typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_element_t<_Ip, tuple> type; return static_cast<__tuple_leaf<_Ip, type>&>(__base_).get(); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const __tuple_element_t<_Ip, tuple>& __get_impl() const& noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const __tuple_element_t<_Ip, tuple>& + __get_impl() const& noexcept { typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_element_t<_Ip, tuple> type; return static_cast&>(__base_).get(); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __tuple_element_t<_Ip, tuple>&& __get_impl() && noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 __tuple_element_t<_Ip, tuple>&& + __get_impl() && noexcept { typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_element_t<_Ip, tuple> type; return static_cast(static_cast<__tuple_leaf<_Ip, type>&&>(__base_).get()); } template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const __tuple_element_t<_Ip, tuple>&& __get_impl() const&& noexcept + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const __tuple_element_t<_Ip, tuple>&& + __get_impl() const&& noexcept { typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_element_t<_Ip, tuple> type; return static_cast(static_cast&&>(__base_).get()); } - template , - __enable_if_t<_Constraints::__implicit_default_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY constexpr tuple() noexcept(_Constraints::__nothrow_default_constructible) - {} - - template , - __enable_if_t<_Constraints::__explicit_default_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr tuple() noexcept(_Constraints::__nothrow_default_constructible) - {} - - tuple(tuple const&) = default; - tuple(tuple&&) = default; - - template , - __enable_if_t<_LIBCUDACXX_TRAIT(is_same, 
allocator_arg_t, _AllocArgT), int> = 0, - __enable_if_t<_Constraints::__implicit_default_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY - tuple(_AllocArgT, _Alloc const& __a) noexcept(_Constraints::__nothrow_default_constructible) - : __base_(allocator_arg_t(), __a) - {} - - template , - __enable_if_t<_LIBCUDACXX_TRAIT(is_same, allocator_arg_t, _AllocArgT), int> = 0, - __enable_if_t<_Constraints::__explicit_default_constructible, int> = 0> - explicit _LIBCUDACXX_INLINE_VISIBILITY - tuple(_AllocArgT, _Alloc const& __a) noexcept(_Constraints::__nothrow_default_constructible) - : __base_(allocator_arg_t(), __a) - {} - - template , - __enable_if_t<_Constraints::__implicit_variadic_copy_constructible, int> = 0> + template < class _Constraints = __tuple_constraints<_Tp...>, + __enable_if_t<_Constraints::__implicit_default_constructible, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY constexpr tuple() noexcept( + _Constraints::__nothrow_default_constructible) {} + + template < + class _Constraints = __tuple_constraints<_Tp...>, + __enable_if_t<_Constraints::__explicit_default_constructible, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY explicit constexpr tuple() noexcept( + _Constraints::__nothrow_default_constructible) {} + + tuple(tuple const &) = default; + tuple(tuple &&) = default; + + template < + class _AllocArgT, class _Alloc, + class _Constraints = __tuple_constraints<_Tp...>, + __enable_if_t<_LIBCUDACXX_TRAIT(is_same, allocator_arg_t, _AllocArgT), + int> = 0, + __enable_if_t<_Constraints::__implicit_default_constructible, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY tuple(_AllocArgT, _Alloc const &__a) noexcept( + _Constraints::__nothrow_default_constructible) + : __base_(allocator_arg_t(), __a) {} + + template < + class _AllocArgT, class _Alloc, + class _Constraints = __tuple_constraints<_Tp...>, + __enable_if_t<_LIBCUDACXX_TRAIT(is_same, allocator_arg_t, _AllocArgT), + int> = 0, + __enable_if_t<_Constraints::__explicit_default_constructible, int> = 0> + explicit _LIBCUDACXX_INLINE_VISIBILITY tuple( + _AllocArgT, + _Alloc const &__a) noexcept(_Constraints::__nothrow_default_constructible) + : __base_(allocator_arg_t(), __a) {} + + template , + __enable_if_t<_Constraints::__implicit_variadic_copy_constructible, + int> = 0> _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - tuple(const _Tp&... __t) noexcept(_Constraints::__nothrow_variadic_copy_constructible) - : __base_(__tuple_variadic_constructor_tag{}, __t...) - {} + tuple(const _Tp &...__t) noexcept( + _Constraints::__nothrow_variadic_copy_constructible) + : __base_(__tuple_variadic_constructor_tag{}, __t...) {} - template , - __enable_if_t<_Constraints::__explicit_variadic_copy_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit tuple(const _Tp&... __t) noexcept( - _Constraints::__nothrow_variadic_copy_constructible) - : __base_(__tuple_variadic_constructor_tag{}, __t...) - {} - - template , - __enable_if_t<_Constraints::__implicit_variadic_copy_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc& __a, const _Tp&... __t) noexcept( - _Constraints::__nothrow_variadic_copy_constructible) - : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, __t...) - {} - - template , - __enable_if_t<_Constraints::__explicit_variadic_copy_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY explicit tuple(allocator_arg_t, const _Alloc& __a, const _Tp&... 
__t) noexcept( - _Constraints::__nothrow_variadic_copy_constructible) - : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, __t...) - {} + template , + __enable_if_t<_Constraints::__explicit_variadic_copy_constructible, + int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit tuple( + const _Tp &...__t) noexcept(_Constraints:: + __nothrow_variadic_copy_constructible) + : __base_(__tuple_variadic_constructor_tag{}, __t...) {} + + template , + __enable_if_t<_Constraints::__implicit_variadic_copy_constructible, + int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY + tuple(allocator_arg_t, const _Alloc &__a, const _Tp &...__t) noexcept( + _Constraints::__nothrow_variadic_copy_constructible) + : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, + __t...) {} + + template , + __enable_if_t<_Constraints::__explicit_variadic_copy_constructible, + int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY explicit tuple( + allocator_arg_t, const _Alloc &__a, + const _Tp + &...__t) noexcept(_Constraints::__nothrow_variadic_copy_constructible) + : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, + __t...) {} #if defined(_LIBCUDACXX_NO_TUPLE_NOEXCEPT) - template - using __base_noexcept_constructible = false_type; + template using __base_noexcept_constructible = false_type; #else template - using __base_noexcept_constructible = is_nothrow_constructible<_BaseT, _Vp...>; + using __base_noexcept_constructible = + is_nothrow_constructible<_BaseT, _Vp...>; #endif // defined(_LIBCUDACXX_NO_TUPLE_NOEXCEPT) template using __variadic_constraints = - _If::value && sizeof...(_Up) == sizeof...(_Tp), - typename __tuple_constraints<_Tp...>::template __variadic_constraints<_Up...>, - __invalid_tuple_constraints>; + _If::value && + sizeof...(_Up) == sizeof...(_Tp), + typename __tuple_constraints<_Tp...>::template __variadic_constraints< + _Up...>, + __invalid_tuple_constraints>; - template , + template , __enable_if_t<_Constraints::__implicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple(_Up&&... __u) noexcept(_Constraints::__nothrow_constructible) - : __base_(__tuple_variadic_constructor_tag{}, _CUDA_VSTD::forward<_Up>(__u)...) - {} + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + tuple(_Up &&...__u) noexcept(_Constraints::__nothrow_constructible) + : __base_(__tuple_variadic_constructor_tag{}, + _CUDA_VSTD::forward<_Up>(__u)...) {} - template , + template , __enable_if_t<_Constraints::__explicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit tuple(_Up&&... __u) noexcept( - _Constraints::__nothrow_constructible) - : __base_(__tuple_variadic_constructor_tag{}, _CUDA_VSTD::forward<_Up>(__u)...) - {} + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit tuple(_Up &&...__u) noexcept( + _Constraints::__nothrow_constructible) + : __base_(__tuple_variadic_constructor_tag{}, + _CUDA_VSTD::forward<_Up>(__u)...) 
{} template using __variadic_constraints_less_rank = - _If::value, - typename __tuple_constraints<_Tp...>::template __variadic_constraints_less_rank<_Up...>, - __invalid_tuple_constraints>; + _If::value, + typename __tuple_constraints< + _Tp...>::template __variadic_constraints_less_rank<_Up...>, + __invalid_tuple_constraints>; template , - __enable_if_t = 0, + class _Constraints = __variadic_constraints_less_rank<_Up...>, + __enable_if_t = 0, __enable_if_t<_Constraints::__implicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit tuple(_Up&&... __u) noexcept( - __base_noexcept_constructible<__tuple_variadic_constructor_tag, _Up...>::value) - : __base_(__tuple_variadic_constructor_tag{}, _CUDA_VSTD::forward<_Up>(__u)...) - {} - - template , + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit tuple(_Up &&...__u) noexcept( + __base_noexcept_constructible<__tuple_variadic_constructor_tag, + _Up...>::value) + : __base_(__tuple_variadic_constructor_tag{}, + _CUDA_VSTD::forward<_Up>(__u)...) {} + + template , __enable_if_t<_Constraints::__implicit_constructible, int> = 0> _LIBCUDACXX_INLINE_VISIBILITY - tuple(allocator_arg_t, const _Alloc& __a, _Up&&... __u) noexcept(_Constraints::__nothrow_constructible) - : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, _CUDA_VSTD::forward<_Up>(__u)...) - {} + tuple(allocator_arg_t, const _Alloc &__a, + _Up &&...__u) noexcept(_Constraints::__nothrow_constructible) + : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, + _CUDA_VSTD::forward<_Up>(__u)...) {} - template , + template , __enable_if_t<_Constraints::__explicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY explicit tuple(allocator_arg_t, const _Alloc& __a, _Up&&... __u) noexcept( - _Constraints::__nothrow_constructible) - : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, _CUDA_VSTD::forward<_Up>(__u)...) - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit tuple( + allocator_arg_t, const _Alloc &__a, + _Up &&...__u) noexcept(_Constraints::__nothrow_constructible) + : __base_(allocator_arg_t(), __a, __tuple_variadic_constructor_tag{}, + _CUDA_VSTD::forward<_Up>(__u)...) 
{} template using __tuple_like_constraints = - _If<__tuple_like_with_size<_Tuple, sizeof...(_Tp)>::value, - typename __tuple_constraints<_Tp...>::template __tuple_like_constraints<_Tuple>, - __invalid_tuple_constraints>; + _If<__tuple_like_with_size<_Tuple, sizeof...(_Tp)>::value, + typename __tuple_constraints< + _Tp...>::template __tuple_like_constraints<_Tuple>, + __invalid_tuple_constraints>; // Horrible hack to make tuple_of_iterator_references work template ::value, int> = 0, - __enable_if_t<(tuple_size<_TupleOfIteratorReferences>::value == sizeof...(_Tp)), int> = 0> + __enable_if_t<(tuple_size<_TupleOfIteratorReferences>::value == sizeof...(_Tp)), int> = 0> _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple(_TupleOfIteratorReferences&& __t) : tuple(_CUDA_VSTD::forward<_TupleOfIteratorReferences>(__t).template __to_tuple<_Tp...>( __make_tuple_indices_t())) {} - template , - __enable_if_t::value, int> = 0, - __enable_if_t = 0, - __enable_if_t<_Constraints::__implicit_constructible, int> = 0> + template < + class _Tuple, class _Constraints = __tuple_like_constraints<_Tuple>, + __enable_if_t::value, int> = 0, + __enable_if_t = 0, + __enable_if_t<_Constraints::__implicit_constructible, int> = 0> _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - tuple(_Tuple&& __t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, _Tuple)) - : __base_(_CUDA_VSTD::forward<_Tuple>(__t)) - {} + tuple(_Tuple &&__t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, + _BaseT, _Tuple)) + : __base_(_CUDA_VSTD::forward<_Tuple>(__t)) {} template , + class _Constraints = __tuple_like_constraints, __enable_if_t::value, int> = 0, - __enable_if_t<_Constraints::__implicit_constructible, int> = 0> + __enable_if_t<_Constraints::__implicit_constructible, int> = 0> _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 - tuple(const _Tuple& __t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, const _Tuple&)) - : __base_(__t) - {} - - template , - __enable_if_t::value, int> = 0, - __enable_if_t = 0, - __enable_if_t<_Constraints::__explicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit tuple(_Tuple&& __t) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, _Tuple)) - : __base_(_CUDA_VSTD::forward<_Tuple>(__t)) - {} + tuple(const _Tuple &__t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, + _BaseT, const _Tuple &)) + : __base_(__t) {} + + template < + class _Tuple, class _Constraints = __tuple_like_constraints<_Tuple>, + __enable_if_t::value, int> = 0, + __enable_if_t = 0, + __enable_if_t<_Constraints::__explicit_constructible, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit tuple(_Tuple &&__t) noexcept( + _LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, _Tuple)) + : __base_(_CUDA_VSTD::forward<_Tuple>(__t)) {} template , + class _Constraints = __tuple_like_constraints, __enable_if_t::value, int> = 0, - __enable_if_t<_Constraints::__explicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 explicit tuple(const _Tuple& __t) noexcept( - _LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, const _Tuple&)) - : __base_(__t) - {} - - template , + __enable_if_t<_Constraints::__explicit_constructible, int> = 0> + _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 explicit tuple( + const _Tuple + &__t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_constructible, _BaseT, + const _Tuple &)) + : __base_(__t) {} + + template , __enable_if_t<_Constraints::__implicit_constructible, int> = 0> - 
_LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc& __a, _Tuple&& __t) - : __base_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tuple>(__t)) - {} + _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc &__a, + _Tuple &&__t) + : __base_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tuple>(__t)) {} - template , + template , __enable_if_t<_Constraints::__explicit_constructible, int> = 0> - _LIBCUDACXX_INLINE_VISIBILITY explicit tuple(allocator_arg_t, const _Alloc& __a, _Tuple&& __t) - : __base_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tuple>(__t)) - {} + _LIBCUDACXX_INLINE_VISIBILITY explicit tuple(allocator_arg_t, + const _Alloc &__a, _Tuple &&__t) + : __base_(allocator_arg_t(), __a, _CUDA_VSTD::forward<_Tuple>(__t)) {} using _CanCopyAssign = __all<_LIBCUDACXX_TRAIT(is_copy_assignable, _Tp)...>; using _CanMoveAssign = __all<_LIBCUDACXX_TRAIT(is_move_assignable, _Tp)...>; - _LIBCUDACXX_INLINE_VISIBILITY tuple& - operator=(__conditional_t<_CanCopyAssign::value, tuple, __nat> const& __t) noexcept( - (__all<_LIBCUDACXX_TRAIT(is_nothrow_copy_assignable, _Tp)...>::value)) - { + _LIBCUDACXX_INLINE_VISIBILITY tuple &operator=( + __conditional_t<_CanCopyAssign::value, tuple, __nat> const + &__t) noexcept((__all<_LIBCUDACXX_TRAIT(is_nothrow_copy_assignable, + _Tp)...>::value)) { __base_.operator=(__t.__base_); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY tuple& operator=(__conditional_t<_CanMoveAssign::value, tuple, __nat>&& __t) noexcept( - (__all<_LIBCUDACXX_TRAIT(is_nothrow_move_assignable, _Tp)...>::value)) - { - __base_.operator=(static_cast<_BaseT&&>(__t.__base_)); + _LIBCUDACXX_INLINE_VISIBILITY tuple &operator=( + __conditional_t<_CanMoveAssign::value, tuple, __nat> + &&__t) noexcept((__all<_LIBCUDACXX_TRAIT(is_nothrow_move_assignable, + _Tp)...>::value)) { + __base_.operator=(static_cast<_BaseT &&>(__t.__base_)); return *this; } - template ::value, bool> = false> - _LIBCUDACXX_INLINE_VISIBILITY tuple& - operator=(_Tuple&& __t) noexcept(_LIBCUDACXX_TRAIT(is_nothrow_assignable, _BaseT&, _Tuple)) - { + template < + class _Tuple, + __enable_if_t<__tuple_assignable<_Tuple, tuple>::value, bool> = false> + _LIBCUDACXX_INLINE_VISIBILITY tuple &operator=(_Tuple &&__t) noexcept( + _LIBCUDACXX_TRAIT(is_nothrow_assignable, _BaseT &, _Tuple)) { __base_.operator=(_CUDA_VSTD::forward<_Tuple>(__t)); return *this; } - _LIBCUDACXX_INLINE_VISIBILITY void swap(tuple& __t) noexcept(__all<__is_nothrow_swappable<_Tp>::value...>::value) - { + _LIBCUDACXX_INLINE_VISIBILITY void swap(tuple &__t) noexcept( + __all<__is_nothrow_swappable<_Tp>::value...>::value) { __base_.swap(__t.__base_); } }; -template <> -class _LIBCUDACXX_TEMPLATE_VIS tuple<> -{ +template <> class _LIBCUDACXX_TEMPLATE_VIS tuple<> { public: constexpr tuple() noexcept = default; template - _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc&) noexcept - {} + _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, + const _Alloc &) noexcept {} template - _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc&, const tuple&) noexcept - {} + _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc &, + const tuple &) noexcept {} template - _LIBCUDACXX_INLINE_VISIBILITY tuple(array<_Up, 0>) noexcept - {} + _LIBCUDACXX_INLINE_VISIBILITY tuple(array<_Up, 0>) noexcept {} template - _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, const _Alloc&, array<_Up, 0>) noexcept - {} - _LIBCUDACXX_INLINE_VISIBILITY void swap(tuple&) noexcept {} + _LIBCUDACXX_INLINE_VISIBILITY tuple(allocator_arg_t, 
const _Alloc &, + array<_Up, 0>) noexcept {} + _LIBCUDACXX_INLINE_VISIBILITY void swap(tuple &) noexcept {} }; #ifndef _LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES -template -_CCCL_HOST_DEVICE tuple(_Tp...) -> tuple<_Tp...>; +template _CCCL_HOST_DEVICE tuple(_Tp...) -> tuple<_Tp...>; template _CCCL_HOST_DEVICE tuple(pair<_Tp1, _Tp2>) -> tuple<_Tp1, _Tp2>; template _CCCL_HOST_DEVICE tuple(allocator_arg_t, _Alloc, _Tp...) -> tuple<_Tp...>; template -_CCCL_HOST_DEVICE tuple(allocator_arg_t, _Alloc, pair<_Tp1, _Tp2>) -> tuple<_Tp1, _Tp2>; +_CCCL_HOST_DEVICE tuple(allocator_arg_t, _Alloc, pair<_Tp1, _Tp2>) + -> tuple<_Tp1, _Tp2>; template -_CCCL_HOST_DEVICE tuple(allocator_arg_t, _Alloc, tuple<_Tp...>) -> tuple<_Tp...>; +_CCCL_HOST_DEVICE tuple(allocator_arg_t, _Alloc, tuple<_Tp...>) + -> tuple<_Tp...>; #endif // _LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES template -inline _LIBCUDACXX_INLINE_VISIBILITY __enable_if_t<_And<__is_swappable<_Tp>...>::value, void> -swap(tuple<_Tp...>& __t, tuple<_Tp...>& __u) noexcept(__all<__is_nothrow_swappable<_Tp>::value...>::value) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY + __enable_if_t<_And<__is_swappable<_Tp>...>::value, void> + swap(tuple<_Tp...> &__t, tuple<_Tp...> &__u) noexcept( + __all<__is_nothrow_swappable<_Tp>::value...>::value) { __t.swap(__u); } @@ -974,66 +1019,67 @@ get(const tuple<_Tp...>&& __t) noexcept #if _CCCL_STD_VER > 2011 -namespace __find_detail -{ +namespace __find_detail { static constexpr size_t __not_found = ~size_t(0); static constexpr size_t __ambiguous = __not_found - 1; -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t __find_idx_return(size_t __curr_i, size_t __res, bool __matches) -{ +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t +__find_idx_return(size_t __curr_i, size_t __res, bool __matches) { return !__matches ? __res : (__res == __not_found ? __curr_i : __ambiguous); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t __find_idx(size_t __i, const bool (&__matches)[_Nx]) -{ - return __i == _Nx ? __not_found : __find_idx_return(__i, __find_idx(__i + 1, __matches), __matches[__i]); +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr size_t +__find_idx(size_t __i, const bool (&__matches)[_Nx]) { + return __i == _Nx ? 
__not_found + : __find_idx_return(__i, __find_idx(__i + 1, __matches), + __matches[__i]); } -template -struct __find_exactly_one_checked -{ - static constexpr bool __matches[sizeof...(_Args)] = {is_same<_T1, _Args>::value...}; - static constexpr size_t value = __find_detail::__find_idx(0, __matches); +template struct __find_exactly_one_checked { + static constexpr bool __matches[sizeof...(_Args)] = { + is_same<_T1, _Args>::value...}; + static constexpr size_t value = __find_detail::__find_idx(0, __matches); static_assert(value != __not_found, "type not found in type list"); - static_assert(value != __ambiguous, "type occurs more than once in type list"); + static_assert(value != __ambiguous, + "type occurs more than once in type list"); }; -template -struct __find_exactly_one_checked<_T1> -{ +template struct __find_exactly_one_checked<_T1> { static_assert(!is_same<_T1, _T1>::value, "type not in empty type list"); }; } // namespace __find_detail template -struct __find_exactly_one_t : public __find_detail::__find_exactly_one_checked<_T1, _Args...> -{}; +struct __find_exactly_one_t + : public __find_detail::__find_exactly_one_checked<_T1, _Args...> {}; template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1& get(tuple<_Args...>& __tup) noexcept -{ +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 & +get(tuple<_Args...> &__tup) noexcept { return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>(__tup); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 const& get(tuple<_Args...> const& __tup) noexcept -{ +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 const & +get(tuple<_Args...> const &__tup) noexcept { return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>(__tup); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1&& get(tuple<_Args...>&& __tup) noexcept -{ - return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>(_CUDA_VSTD::move(__tup)); +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 && +get(tuple<_Args...> &&__tup) noexcept { + return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>( + _CUDA_VSTD::move(__tup)); } template -inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 const&& get(tuple<_Args...> const&& __tup) noexcept -{ - return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>(_CUDA_VSTD::move(__tup)); +inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 const && +get(tuple<_Args...> const &&__tup) noexcept { + return _CUDA_VSTD::get<__find_exactly_one_t<_T1, _Args...>::value>( + _CUDA_VSTD::move(__tup)); } #endif @@ -1041,179 +1087,163 @@ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _T1 const&& get(tuple<_Args...> c // tie template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple<_Tp&...> tie(_Tp&... __t) noexcept -{ - return tuple<_Tp&...>(__t...); +inline _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 tuple<_Tp &...> + tie(_Tp &...__t) noexcept { + return tuple<_Tp &...>(__t...); } -template -struct __ignore_t -{ +template struct __ignore_t { template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const __ignore_t& operator=(_Tp&&) const - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 const + __ignore_t & + operator=(_Tp &&) const { return *this; } }; -namespace -{ -_LIBCUDACXX_CPO_ACCESSIBILITY __ignore_t ignore{}; +namespace { + _LIBCUDACXX_CPO_ACCESSIBILITY __ignore_t ignore{}; } // namespace template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple::type...> -make_tuple(_Tp&&... 
__t) -{ - return tuple::type...>(_CUDA_VSTD::forward<_Tp>(__t)...); +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 + tuple::type...> + make_tuple(_Tp &&...__t) { + return tuple::type...>( + _CUDA_VSTD::forward<_Tp>(__t)...); } template -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple<_Tp&&...> forward_as_tuple(_Tp&&... __t) noexcept -{ - return tuple<_Tp&&...>(_CUDA_VSTD::forward<_Tp>(__t)...); +inline _LIBCUDACXX_INLINE_VISIBILITY + _CCCL_CONSTEXPR_CXX14 tuple<_Tp &&...> + forward_as_tuple(_Tp &&...__t) noexcept { + return tuple<_Tp &&...>(_CUDA_VSTD::forward<_Tp>(__t)...); } -template -struct __tuple_equal -{ +template struct __tuple_equal { template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool operator()(const _Tp& __x, const _Up& __y) - { - return __tuple_equal<_Ip - 1>()(__x, __y) && _CUDA_VSTD::get<_Ip - 1>(__x) == _CUDA_VSTD::get<_Ip - 1>(__y); + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool + operator()(const _Tp &__x, const _Up &__y) { + return __tuple_equal<_Ip - 1>()(__x, __y) && + _CUDA_VSTD::get<_Ip - 1>(__x) == _CUDA_VSTD::get<_Ip - 1>(__y); } }; -template <> -struct __tuple_equal<0> -{ +template <> struct __tuple_equal<0> { template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool operator()(const _Tp&, const _Up&) - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool + operator()(const _Tp &, const _Up &) { return true; } }; template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator==(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ - static_assert(sizeof...(_Tp) == sizeof...(_Up), "Can't compare tuples of different sizes"); +operator==(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { + static_assert(sizeof...(_Tp) == sizeof...(_Up), + "Can't compare tuples of different sizes"); return __tuple_equal()(__x, __y); } template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator!=(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ +operator!=(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { return !(__x == __y); } -template -struct __tuple_less -{ +template struct __tuple_less { template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool operator()(const _Tp& __x, const _Up& __y) - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool + operator()(const _Tp &__x, const _Up &__y) { const size_t __idx = tuple_size<_Tp>::value - _Ip; - if (_CUDA_VSTD::get<__idx>(__x) < _CUDA_VSTD::get<__idx>(__y)) - { + if (_CUDA_VSTD::get<__idx>(__x) < _CUDA_VSTD::get<__idx>(__y)) { return true; } - if (_CUDA_VSTD::get<__idx>(__y) < _CUDA_VSTD::get<__idx>(__x)) - { + if (_CUDA_VSTD::get<__idx>(__y) < _CUDA_VSTD::get<__idx>(__x)) { return false; } return __tuple_less<_Ip - 1>()(__x, __y); } }; -template <> -struct __tuple_less<0> -{ +template <> struct __tuple_less<0> { template - _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool operator()(const _Tp&, const _Up&) - { + _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool + operator()(const _Tp &, const _Up &) { return false; } }; template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator<(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ - static_assert(sizeof...(_Tp) == sizeof...(_Up), "Can't compare tuples of different sizes"); +operator<(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { + static_assert(sizeof...(_Tp) == sizeof...(_Up), + "Can't compare tuples of different sizes"); return __tuple_less()(__x, __y); } template inline 
_LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator>(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ +operator>(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { return __y < __x; } template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator>=(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ +operator>=(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { return !(__x < __y); } template inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 bool -operator<=(const tuple<_Tp...>& __x, const tuple<_Up...>& __y) -{ +operator<=(const tuple<_Tp...> &__x, const tuple<_Up...> &__y) { return !(__y < __x); } // tuple_cat -template -struct __tuple_cat_type; +template struct __tuple_cat_type; template -struct __tuple_cat_type, __tuple_types<_Utypes...>> -{ +struct __tuple_cat_type, __tuple_types<_Utypes...>> { typedef _LIBCUDACXX_NODEBUG_TYPE tuple<_Ttypes..., _Utypes...> type; }; template -struct __tuple_cat_return_1 -{}; +struct __tuple_cat_return_1 {}; template -struct __tuple_cat_return_1, true, _Tuple0> -{ - typedef _LIBCUDACXX_NODEBUG_TYPE - typename __tuple_cat_type, __make_tuple_types_t<__remove_cvref_t<_Tuple0>>>::type type; +struct __tuple_cat_return_1, true, _Tuple0> { + typedef _LIBCUDACXX_NODEBUG_TYPE typename __tuple_cat_type< + tuple<_Types...>, __make_tuple_types_t<__remove_cvref_t<_Tuple0>>>::type + type; }; template -struct __tuple_cat_return_1, true, _Tuple0, _Tuple1, _Tuples...> +struct __tuple_cat_return_1, true, _Tuple0, _Tuple1, + _Tuples...> : public __tuple_cat_return_1< - typename __tuple_cat_type, __make_tuple_types_t<__remove_cvref_t<_Tuple0>>>::type, - __tuple_like<__libcpp_remove_reference_t<_Tuple1>>::value, - _Tuple1, - _Tuples...> -{}; + typename __tuple_cat_type< + tuple<_Types...>, + __make_tuple_types_t<__remove_cvref_t<_Tuple0>>>::type, + __tuple_like<__libcpp_remove_reference_t<_Tuple1>>::value, _Tuple1, + _Tuples...> {}; -template -struct __tuple_cat_return; +template struct __tuple_cat_return; template struct __tuple_cat_return<_Tuple0, _Tuples...> - : public __tuple_cat_return_1, __tuple_like<__libcpp_remove_reference_t<_Tuple0>>::value, _Tuple0, _Tuples...> -{}; + : public __tuple_cat_return_1< + tuple<>, __tuple_like<__libcpp_remove_reference_t<_Tuple0>>::value, + _Tuple0, _Tuples...> {}; -template <> -struct __tuple_cat_return<> -{ +template <> struct __tuple_cat_return<> { typedef _LIBCUDACXX_NODEBUG_TYPE tuple<> type; }; -inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple<> tuple_cat() -{ +inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 tuple<> +tuple_cat() { return tuple<>(); } @@ -1221,76 +1251,87 @@ template struct __tuple_cat_return_ref_imp; template -struct __tuple_cat_return_ref_imp, __tuple_indices<_I0...>, _Tuple0> -{ +struct __tuple_cat_return_ref_imp, __tuple_indices<_I0...>, + _Tuple0> { typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tuple0> _T0; - typedef tuple<_Types..., typename __apply_cv<_Tuple0, __tuple_element_t<_I0, _T0>>::type&&...> type; + typedef tuple< + _Types..., + typename __apply_cv<_Tuple0, __tuple_element_t<_I0, _T0>>::type &&...> + type; }; -template -struct __tuple_cat_return_ref_imp, __tuple_indices<_I0...>, _Tuple0, _Tuple1, _Tuples...> +template +struct __tuple_cat_return_ref_imp, __tuple_indices<_I0...>, + _Tuple0, _Tuple1, _Tuples...> : public __tuple_cat_return_ref_imp< - tuple<_Types..., - typename __apply_cv<_Tuple0, __tuple_element_t<_I0, __libcpp_remove_reference_t<_Tuple0>>>::type&&...>, - 
-        __make_tuple_indices_t<tuple_size<__libcpp_remove_reference_t<_Tuple1>>::value>,
-        _Tuple1,
-        _Tuples...>
-{};
+          tuple<_Types...,
+                typename __apply_cv<
+                    _Tuple0, __tuple_element_t<_I0, __libcpp_remove_reference_t<
+                                 _Tuple0>>>::type &&...>,
+          __make_tuple_indices_t<
+              tuple_size<__libcpp_remove_reference_t<_Tuple1>>::value>,
+          _Tuple1, _Tuples...> {};
 template <class _Tuple0, class... _Tuples>
 struct __tuple_cat_return_ref
-    : public __tuple_cat_return_ref_imp<tuple<>,
-                                        __make_tuple_indices_t<tuple_size<__libcpp_remove_reference_t<_Tuple0>>::value>,
-                                        _Tuple0,
-                                        _Tuples...>
-{};
+    : public __tuple_cat_return_ref_imp<
+          tuple<>,
+          __make_tuple_indices_t<
+              tuple_size<__libcpp_remove_reference_t<_Tuple0>>::value>,
+          _Tuple0, _Tuples...> {};
-template
-struct __tuple_cat;
+template struct __tuple_cat;
 template <class... _Types, size_t... _I0, size_t... _J0>
-struct __tuple_cat<tuple<_Types...>, __tuple_indices<_I0...>, __tuple_indices<_J0...>>
-{
+struct __tuple_cat<tuple<_Types...>, __tuple_indices<_I0...>,
+                   __tuple_indices<_J0...>> {
   template <class _Tuple0>
   _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14
-    typename __tuple_cat_return_ref<tuple<_Types...>&&, _Tuple0&&>::type
-    operator()(tuple<_Types...> __t, _Tuple0&& __t0)
-  {
-    (void) __t;
-    return _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Types>(_CUDA_VSTD::get<_I0>(__t))...,
-                                        _CUDA_VSTD::get<_J0>(_CUDA_VSTD::forward<_Tuple0>(__t0))...);
+      typename __tuple_cat_return_ref<tuple<_Types...> &&, _Tuple0 &&>::type
+      operator()(tuple<_Types...> __t, _Tuple0 &&__t0) {
+    (void)__t;
+    return _CUDA_VSTD::forward_as_tuple(
+        _CUDA_VSTD::forward<_Types>(_CUDA_VSTD::get<_I0>(__t))...,
+        _CUDA_VSTD::get<_J0>(_CUDA_VSTD::forward<_Tuple0>(__t0))...);
   }
   template <class _Tuple0, class _Tuple1, class... _Tuples>
   _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14
-    typename __tuple_cat_return_ref<tuple<_Types...>&&, _Tuple0&&, _Tuple1&&, _Tuples&&...>::type
-    operator()(tuple<_Types...> __t, _Tuple0&& __t0, _Tuple1&& __t1, _Tuples&&... __tpls)
-  {
-    (void) __t;
+      typename __tuple_cat_return_ref<tuple<_Types...> &&, _Tuple0 &&,
+                                      _Tuple1 &&, _Tuples &&...>::type
+      operator()(tuple<_Types...> __t, _Tuple0 &&__t0, _Tuple1 &&__t1,
+                 _Tuples &&...__tpls) {
+    (void)__t;
     typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tuple0> _T0;
     typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tuple1> _T1;
-    return __tuple_cat<tuple<_Types..., typename __apply_cv<_Tuple0, __tuple_element_t<_J0, _T0>>::type&&...>,
-                       __make_tuple_indices_t<sizeof...(_Types) + tuple_size<_T0>::value>,
-                       __make_tuple_indices_t<tuple_size<_T1>::value>>()(
-      _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Types>(_CUDA_VSTD::get<_I0>(__t))...,
-                                   _CUDA_VSTD::get<_J0>(_CUDA_VSTD::forward<_Tuple0>(__t0))...),
-      _CUDA_VSTD::forward<_Tuple1>(__t1),
-      _CUDA_VSTD::forward<_Tuples>(__tpls)...);
+    return __tuple_cat<
+        tuple<_Types..., typename __apply_cv<
+                             _Tuple0, __tuple_element_t<_J0, _T0>>::type &&...>,
+        __make_tuple_indices_t<sizeof...(_Types) + tuple_size<_T0>::value>,
+        __make_tuple_indices_t<tuple_size<_T1>::value>>()(
+        _CUDA_VSTD::forward_as_tuple(
+            _CUDA_VSTD::forward<_Types>(_CUDA_VSTD::get<_I0>(__t))...,
+            _CUDA_VSTD::get<_J0>(_CUDA_VSTD::forward<_Tuple0>(__t0))...),
+        _CUDA_VSTD::forward<_Tuple1>(__t1),
+        _CUDA_VSTD::forward<_Tuples>(__tpls)...);
   }
 };
 template <class _Tuple0, class... _Tuples>
-inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14 typename __tuple_cat_return<_Tuple0, _Tuples...>::type
-tuple_cat(_Tuple0&& __t0, _Tuples&&... __tpls)
-{
+inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX14
+    typename __tuple_cat_return<_Tuple0, _Tuples...>::type
+    tuple_cat(_Tuple0 &&__t0, _Tuples &&...__tpls) {
   typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tuple0> _T0;
-  return __tuple_cat<tuple<>, __tuple_indices<>, __make_tuple_indices_t<tuple_size<_T0>::value>>()(
-    tuple<>(), _CUDA_VSTD::forward<_Tuple0>(__t0), _CUDA_VSTD::forward<_Tuples>(__tpls)...);
+  return __tuple_cat<tuple<>, __tuple_indices<>,
+                     __make_tuple_indices_t<tuple_size<_T0>::value>>()(
+      tuple<>(), _CUDA_VSTD::forward<_Tuple0>(__t0),
+      _CUDA_VSTD::forward<_Tuples>(__tpls)...);
 }
 template <class... _Tp, class _Alloc>
-struct _LIBCUDACXX_TEMPLATE_VIS uses_allocator<tuple<_Tp...>, _Alloc> : true_type
-{};
+struct _LIBCUDACXX_TEMPLATE_VIS uses_allocator<tuple<_Tp...>, _Alloc>
+    : true_type {};
 template
 template
@@ -1305,38 +1346,41 @@ inline _LIBCUDACXX_INLINE_VISIBILITY _CCCL_CONSTEXPR_CXX20 __pair_base<_T1, _T2,
 {}
 #if _CCCL_STD_VER > 2014
-#  define _LIBCUDACXX_NOEXCEPT_RETURN(...) \
-    noexcept(noexcept(__VA_ARGS__))        \
-    {                                      \
-      return __VA_ARGS__;                  \
-    }
+#define _LIBCUDACXX_NOEXCEPT_RETURN(...)                                       \
+  noexcept(noexcept(__VA_ARGS__)) { return __VA_ARGS__; }
 template <class _Fn, class _Tuple, size_t... _Id>
 inline _LIBCUDACXX_INLINE_VISIBILITY constexpr decltype(auto)
-__apply_tuple_impl(_Fn&& __f, _Tuple&& __t, __tuple_indices<_Id...>) _LIBCUDACXX_NOEXCEPT_RETURN(
-  _CUDA_VSTD::__invoke(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::get<_Id>(_CUDA_VSTD::forward<_Tuple>(__t))...))
-
-  template <class _Fn, class _Tuple>
-  inline _LIBCUDACXX_INLINE_VISIBILITY constexpr decltype(auto) apply(_Fn&& __f, _Tuple&& __t)
-    _LIBCUDACXX_NOEXCEPT_RETURN(_CUDA_VSTD::__apply_tuple_impl(
-      _CUDA_VSTD::forward<_Fn>(__f),
-      _CUDA_VSTD::forward<_Tuple>(__t),
-      __make_tuple_indices_t>>{}))
-
-  template <class _Tp, class _Tuple, size_t... _Idx>
-  inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp __make_from_tuple_impl(_Tuple&& __t, __tuple_indices<_Idx...>)
-    _LIBCUDACXX_NOEXCEPT_RETURN(_Tp(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::forward<_Tuple>(__t))...))
-
-  template <class _Tp, class _Tuple>
-  inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp
-  make_from_tuple(_Tuple&& __t) _LIBCUDACXX_NOEXCEPT_RETURN(_CUDA_VSTD::__make_from_tuple_impl<_Tp>(
-    _CUDA_VSTD::forward<_Tuple>(__t), __make_tuple_indices_t>>{}))
-
-#  undef _LIBCUDACXX_NOEXCEPT_RETURN
+__apply_tuple_impl(_Fn &&__f, _Tuple &&__t, __tuple_indices<_Id...>)
+    _LIBCUDACXX_NOEXCEPT_RETURN(_CUDA_VSTD::__invoke(
+        _CUDA_VSTD::forward<_Fn>(__f),
+        _CUDA_VSTD::get<_Id>(_CUDA_VSTD::forward<_Tuple>(__t))...))
+
+    template <class _Fn, class _Tuple>
+    inline _LIBCUDACXX_INLINE_VISIBILITY
+    constexpr decltype(auto) apply(_Fn &&__f, _Tuple &&__t)
+        _LIBCUDACXX_NOEXCEPT_RETURN(_CUDA_VSTD::__apply_tuple_impl(
+            _CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward<_Tuple>(__t),
+            __make_tuple_indices_t>>{}))
+
+    template <class _Tp, class _Tuple, size_t... _Idx>
+    inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp
+    __make_from_tuple_impl(_Tuple &&__t, __tuple_indices<_Idx...>)
+        _LIBCUDACXX_NOEXCEPT_RETURN(
+            _Tp(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::forward<_Tuple>(__t))...))
+
+    template <class _Tp, class _Tuple>
+    inline _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp
+    make_from_tuple(_Tuple &&__t)
+        _LIBCUDACXX_NOEXCEPT_RETURN(_CUDA_VSTD::__make_from_tuple_impl<_Tp>(
+            _CUDA_VSTD::forward<_Tuple>(__t),
+            __make_tuple_indices_t>>{}))
+
+#undef _LIBCUDACXX_NOEXCEPT_RETURN
 #endif // _CCCL_STD_VER > 2014
-  _LIBCUDACXX_END_NAMESPACE_STD
+    _LIBCUDACXX_END_NAMESPACE_STD
 #include //__cuda_std__
diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/type_traits b/libcudacxx/include/cuda/std/detail/libcxx/include/type_traits
index e7f6eef7219..fbdb588642d 100644
--- a/libcudacxx/include/cuda/std/detail/libcxx/include/type_traits
+++ b/libcudacxx/include/cuda/std/detail/libcxx/include/type_traits
@@ -425,9 +425,10 @@ namespace std
 #endif // no system header
 #include
+#include // all public C++ headers provide the assertion handler
+#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -504,14 +505,14 @@ namespace std
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -526,14 +527,14 @@ namespace std
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -542,10 +543,10 @@ namespace std
 #include
 #include
 #include
-#include
 #include
-#include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -560,26 +561,21 @@ namespace std
 #include
 #include
 #include
-#include
 #include
-#include // all public C++ headers provide the assertion handler
+#include
 #include
 _LIBCUDACXX_BEGIN_NAMESPACE_STD
-template <class _Tp>
-class _LIBCUDACXX_TEMPLATE_VIS reference_wrapper;
-template <class _Tp>
-struct _LIBCUDACXX_TEMPLATE_VIS hash;
+template <class _Tp> class _LIBCUDACXX_TEMPLATE_VIS reference_wrapper;
+template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS hash;
-template
-struct _MetaBase;
 template <>
-struct _MetaBase<true>
-{
+struct _MetaBase<true> {
   template
   using _SelectImpl _LIBCUDACXX_NODEBUG_TYPE = _Tp;
-  template